mirror of https://github.com/rust-lang/rust.git
synced 2025-05-04 22:17:38 +00:00
Auto merge of #70951 - cjgillot:anarchy, r=oli-obk
Move the query engine out of rustc_middle

The handling of queries is moved to a trait, `QueryEngine`. It replaces `query::Queries` in the `TyCtxt`, allowing the query engine to be moved out of librustc_middle. There are two ways to access the query engine: through `TyCtxt` and dynamic dispatch, or through a `QueryCtxt`. The `QueryCtxt` is required for everything that touches the `OnDiskCache`. For now, I put it in librustc_incremental, which is very small; this may not be the best place.

A significant part of the codegen time for librustc_middle is moved to the recipient crate, so this PR may require a perf run.

cc #65031
r? `@Zoxc`
This commit is contained in: commit 83b30a639d
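The core of the change is the dispatch pattern described in the PR text: `TyCtxt` keeps the query engine behind a trait object instead of owning the concrete `Queries` type, and callers such as codegen reach it through calls like `tcx.try_mark_green(&dep_node)` (visible in the diff below). Here is a minimal, self-contained sketch of that pattern; apart from `QueryEngine` and `try_mark_green`, the type and method names are illustrative stand-ins, not the actual rustc definitions.

```rust
// Sketch only: a "context" crate that defines the trait and stores a trait
// object, and a downstream crate that supplies the concrete engine.

struct DepNode; // stand-in for rustc's dep-graph node type

/// The interface the context crate compiles against (analogous to
/// `query::QueryEngine<'tcx>` in this PR).
trait QueryEngine {
    fn try_mark_green(&self, dep_node: &DepNode) -> bool;
}

/// Stand-in for `GlobalCtxt`/`TyCtxt`: it only knows the trait, so the
/// concrete engine can live in another crate (rustc_query_impl in the PR).
struct GlobalCtxt<'tcx> {
    queries: &'tcx dyn QueryEngine,
}

impl<'tcx> GlobalCtxt<'tcx> {
    /// Callers go through dynamic dispatch, as in the new
    /// `tcx.try_mark_green(&dep_node)` call sites in the diff.
    fn try_mark_green(&self, dep_node: &DepNode) -> bool {
        self.queries.try_mark_green(dep_node)
    }
}

/// Concrete engine, defined outside the context crate.
struct ConcreteEngine;

impl QueryEngine for ConcreteEngine {
    fn try_mark_green(&self, _dep_node: &DepNode) -> bool {
        false // placeholder logic
    }
}

fn main() {
    let engine = ConcreteEngine;
    let gcx = GlobalCtxt { queries: &engine };
    assert!(!gcx.try_mark_green(&DepNode));
}
```

The `QueryCtxt` side of the PR goes the other direction: code that needs the `OnDiskCache` wraps the `TyCtxt` in a richer context type instead of going through the trait object.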
Cargo.lock (26 additions)
@@ -3878,6 +3878,7 @@ version = "0.0.0"
 dependencies = [
  "libc",
  "rustc-rayon",
+ "rustc-rayon-core",
  "rustc_ast",
  "rustc_ast_lowering",
  "rustc_ast_passes",
@@ -3890,6 +3891,7 @@ dependencies = [
  "rustc_expand",
  "rustc_hir",
  "rustc_incremental",
+ "rustc_index",
  "rustc_lint",
  "rustc_metadata",
  "rustc_middle",
@@ -3899,6 +3901,7 @@ dependencies = [
  "rustc_passes",
  "rustc_plugin_impl",
  "rustc_privacy",
+ "rustc_query_impl",
  "rustc_resolve",
  "rustc_serialize",
  "rustc_session",
@@ -4165,6 +4168,29 @@ dependencies = [
  "tracing",
 ]
+
+[[package]]
+name = "rustc_query_impl"
+version = "0.0.0"
+dependencies = [
+ "measureme",
+ "rustc-rayon-core",
+ "rustc_ast",
+ "rustc_attr",
+ "rustc_data_structures",
+ "rustc_errors",
+ "rustc_feature",
+ "rustc_hir",
+ "rustc_index",
+ "rustc_macros",
+ "rustc_middle",
+ "rustc_query_system",
+ "rustc_serialize",
+ "rustc_session",
+ "rustc_span",
+ "rustc_target",
+ "tracing",
+]
+
 [[package]]
 name = "rustc_query_system"
 version = "0.0.0"
@@ -465,9 +465,5 @@ fn determine_cgu_reuse<'tcx>(tcx: TyCtxt<'tcx>, cgu: &CodegenUnit<'tcx>) -> CguR
         cgu.name()
     );

-    if tcx.dep_graph.try_mark_green(tcx, &dep_node).is_some() {
-        CguReuse::PreLto
-    } else {
-        CguReuse::No
-    }
+    if tcx.try_mark_green(&dep_node) { CguReuse::PreLto } else { CguReuse::No }
 }
@@ -867,7 +867,7 @@ fn determine_cgu_reuse<'tcx>(tcx: TyCtxt<'tcx>, cgu: &CodegenUnit<'tcx>) -> CguR
         cgu.name()
     );

-    if tcx.dep_graph.try_mark_green(tcx, &dep_node).is_some() {
+    if tcx.try_mark_green(&dep_node) {
         // We can re-use either the pre- or the post-thinlto state. If no LTO is
         // being performed then we can use post-LTO artifacts, otherwise we must
         // reuse pre-LTO artifacts
@@ -27,7 +27,6 @@ use rustc_interface::{interface, Queries};
 use rustc_lint::LintStore;
 use rustc_metadata::locator;
 use rustc_middle::middle::cstore::MetadataLoader;
-use rustc_middle::ty::TyCtxt;
 use rustc_save_analysis as save;
 use rustc_save_analysis::DumpHandler;
 use rustc_serialize::json::{self, ToJson};
@@ -1232,7 +1231,7 @@ pub fn report_ice(info: &panic::PanicInfo<'_>, bug_report_url: &str) {

     let num_frames = if backtrace { None } else { Some(2) };

-    TyCtxt::try_print_query_stack(&handler, num_frames);
+    interface::try_print_query_stack(&handler, num_frames);

     #[cfg(windows)]
     unsafe {
@@ -10,6 +10,7 @@ doctest = false
 [dependencies]
 libc = "0.2"
 tracing = "0.1"
+rustc-rayon-core = "0.3.0"
 rayon = { version = "0.3.0", package = "rustc-rayon" }
 smallvec = { version = "1.6.1", features = ["union", "may_dangle"] }
 rustc_ast = { path = "../rustc_ast" }
@@ -30,6 +31,7 @@ rustc_codegen_ssa = { path = "../rustc_codegen_ssa" }
 rustc_symbol_mangling = { path = "../rustc_symbol_mangling" }
 rustc_codegen_llvm = { path = "../rustc_codegen_llvm", optional = true }
 rustc_hir = { path = "../rustc_hir" }
+rustc_index = { path = "../rustc_index" }
 rustc_metadata = { path = "../rustc_metadata" }
 rustc_mir = { path = "../rustc_mir" }
 rustc_mir_build = { path = "../rustc_mir_build" }
@@ -39,6 +41,7 @@ rustc_lint = { path = "../rustc_lint" }
 rustc_errors = { path = "../rustc_errors" }
 rustc_plugin_impl = { path = "../rustc_plugin_impl" }
 rustc_privacy = { path = "../rustc_privacy" }
+rustc_query_impl = { path = "../rustc_query_impl" }
 rustc_resolve = { path = "../rustc_resolve" }
 rustc_trait_selection = { path = "../rustc_trait_selection" }
 rustc_ty_utils = { path = "../rustc_ty_utils" }
@@ -8,7 +8,7 @@ use rustc_data_structures::fx::{FxHashMap, FxHashSet};
 use rustc_data_structures::sync::Lrc;
 use rustc_data_structures::OnDrop;
 use rustc_errors::registry::Registry;
-use rustc_errors::ErrorReported;
+use rustc_errors::{ErrorReported, Handler};
 use rustc_lint::LintStore;
 use rustc_middle::ty;
 use rustc_parse::new_parser_from_source_str;
@@ -213,3 +213,24 @@ pub fn run_compiler<R: Send>(mut config: Config, f: impl FnOnce(&Compiler) -> R
         || create_compiler_and_run(config, f),
     )
 }
+
+pub fn try_print_query_stack(handler: &Handler, num_frames: Option<usize>) {
+    eprintln!("query stack during panic:");
+
+    // Be careful relying on global state here: this code is called from
+    // a panic hook, which means that the global `Handler` may be in a weird
+    // state if it was responsible for triggering the panic.
+    let i = ty::tls::with_context_opt(|icx| {
+        if let Some(icx) = icx {
+            icx.tcx.queries.try_print_query_stack(icx.tcx, icx.query, handler, num_frames)
+        } else {
+            0
+        }
+    });
+
+    if num_frames == None || num_frames >= Some(i) {
+        eprintln!("end of query stack");
+    } else {
+        eprintln!("we're just showing a limited slice of the query stack");
+    }
+}
@@ -15,6 +15,7 @@ use rustc_expand::base::ExtCtxt;
 use rustc_hir::def_id::{CrateNum, LOCAL_CRATE};
 use rustc_hir::definitions::Definitions;
 use rustc_hir::Crate;
+use rustc_index::vec::IndexVec;
 use rustc_lint::LintStore;
 use rustc_middle::arena::Arena;
 use rustc_middle::dep_graph::DepGraph;
@@ -27,6 +28,7 @@ use rustc_mir_build as mir_build;
 use rustc_parse::{parse_crate_from_file, parse_crate_from_source_str};
 use rustc_passes::{self, hir_stats, layout_test};
 use rustc_plugin_impl as plugin;
+use rustc_query_impl::Queries as TcxQueries;
 use rustc_resolve::{Resolver, ResolverArenas};
 use rustc_session::config::{CrateType, Input, OutputFilenames, OutputType, PpMode, PpSourceMode};
 use rustc_session::lint;
@@ -738,20 +740,18 @@ pub static DEFAULT_EXTERN_QUERY_PROVIDERS: SyncLazy<Providers> = SyncLazy::new(|
     extern_providers
 });

-pub struct QueryContext<'tcx>(&'tcx GlobalCtxt<'tcx>);
+pub struct QueryContext<'tcx> {
+    gcx: &'tcx GlobalCtxt<'tcx>,
+}

 impl<'tcx> QueryContext<'tcx> {
     pub fn enter<F, R>(&mut self, f: F) -> R
     where
         F: FnOnce(TyCtxt<'tcx>) -> R,
     {
-        let icx = ty::tls::ImplicitCtxt::new(self.0);
+        let icx = ty::tls::ImplicitCtxt::new(self.gcx);
         ty::tls::enter_context(&icx, |_| f(icx.tcx))
     }
-
-    pub fn print_stats(&mut self) {
-        self.enter(ty::query::print_stats)
-    }
 }

 pub fn create_global_ctxt<'tcx>(
@@ -762,6 +762,7 @@ pub fn create_global_ctxt<'tcx>(
     mut resolver_outputs: ResolverOutputs,
     outputs: OutputFilenames,
     crate_name: &str,
+    queries: &'tcx OnceCell<TcxQueries<'tcx>>,
     global_ctxt: &'tcx OnceCell<GlobalCtxt<'tcx>>,
     arena: &'tcx WorkerLocal<Arena<'tcx>>,
 ) -> QueryContext<'tcx> {
@@ -785,26 +786,33 @@ pub fn create_global_ctxt<'tcx>(
         callback(sess, &mut local_providers, &mut extern_providers);
     }

+    let queries = {
+        let crates = resolver_outputs.cstore.crates_untracked();
+        let max_cnum = crates.iter().map(|c| c.as_usize()).max().unwrap_or(0);
+        let mut providers = IndexVec::from_elem_n(extern_providers, max_cnum + 1);
+        providers[LOCAL_CRATE] = local_providers;
+        queries.get_or_init(|| TcxQueries::new(providers, extern_providers))
+    };
+
     let gcx = sess.time("setup_global_ctxt", || {
         global_ctxt.get_or_init(|| {
             TyCtxt::create_global_ctxt(
                 sess,
                 lint_store,
-                local_providers,
-                extern_providers,
                 arena,
                 resolver_outputs,
                 krate,
                 defs,
                 dep_graph,
                 query_result_on_disk_cache,
+                queries.as_dyn(),
                 &crate_name,
                 &outputs,
             )
         })
     });

-    QueryContext(gcx)
+    QueryContext { gcx }
 }

 /// Runs the resolution, type-checking, region checking and other
@@ -14,6 +14,7 @@ use rustc_lint::LintStore;
 use rustc_middle::arena::Arena;
 use rustc_middle::dep_graph::DepGraph;
 use rustc_middle::ty::{GlobalCtxt, ResolverOutputs, TyCtxt};
+use rustc_query_impl::Queries as TcxQueries;
 use rustc_serialize::json;
 use rustc_session::config::{self, OutputFilenames, OutputType};
 use rustc_session::{output::find_crate_name, Session};
@@ -71,6 +72,7 @@ impl<T> Default for Query<T> {
 pub struct Queries<'tcx> {
     compiler: &'tcx Compiler,
     gcx: OnceCell<GlobalCtxt<'tcx>>,
+    queries: OnceCell<TcxQueries<'tcx>>,

     arena: WorkerLocal<Arena<'tcx>>,
     hir_arena: WorkerLocal<rustc_ast_lowering::Arena<'tcx>>,
@@ -92,6 +94,7 @@ impl<'tcx> Queries<'tcx> {
         Queries {
             compiler,
             gcx: OnceCell::new(),
+            queries: OnceCell::new(),
             arena: WorkerLocal::new(|_| Arena::default()),
             hir_arena: WorkerLocal::new(|_| rustc_ast_lowering::Arena::default()),
             dep_graph_future: Default::default(),
@@ -265,6 +268,7 @@ impl<'tcx> Queries<'tcx> {
                 resolver_outputs.steal(),
                 outputs,
                 &crate_name,
+                &self.queries,
                 &self.gcx,
                 &self.arena,
             ))
@@ -425,11 +429,11 @@ impl Compiler {
             {
                 let _prof_timer =
                     queries.session().prof.generic_activity("self_profile_alloc_query_strings");
-                gcx.enter(|tcx| tcx.alloc_self_profile_query_strings());
+                gcx.enter(rustc_query_impl::alloc_self_profile_query_strings);
             }

             if self.session().opts.debugging_opts.query_stats {
-                gcx.print_stats();
+                gcx.enter(rustc_query_impl::print_stats);
             }
         }

@@ -10,6 +10,8 @@ use rustc_data_structures::stable_hasher::StableHasher;
 use rustc_data_structures::sync::Lrc;
 use rustc_errors::registry::Registry;
 use rustc_metadata::dynamic_lib::DynamicLibrary;
+#[cfg(parallel_compiler)]
+use rustc_middle::ty::tls;
 use rustc_resolve::{self, Resolver};
 use rustc_session as session;
 use rustc_session::config::{self, CrateType};
@@ -29,11 +31,12 @@ use std::io;
 use std::lazy::SyncOnceCell;
 use std::mem;
 use std::ops::DerefMut;
+#[cfg(not(parallel_compiler))]
+use std::panic;
 use std::path::{Path, PathBuf};
 use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::{Arc, Mutex, Once};
-#[cfg(not(parallel_compiler))]
-use std::{panic, thread};
+use std::thread;
 use tracing::info;

 /// Adds `target_feature = "..."` cfgs for a variety of platform
@@ -156,6 +159,28 @@ pub fn setup_callbacks_and_run_in_thread_pool_with_globals<F: FnOnce() -> R + Se
     scoped_thread(cfg, main_handler)
 }

+/// Creates a new thread and forwards information in thread locals to it.
+/// The new thread runs the deadlock handler.
+/// Must only be called when a deadlock is about to happen.
+#[cfg(parallel_compiler)]
+unsafe fn handle_deadlock() {
+    let registry = rustc_rayon_core::Registry::current();
+
+    let context = tls::get_tlv();
+    assert!(context != 0);
+    rustc_data_structures::sync::assert_sync::<tls::ImplicitCtxt<'_, '_>>();
+    let icx: &tls::ImplicitCtxt<'_, '_> = &*(context as *const tls::ImplicitCtxt<'_, '_>);
+
+    let session_globals = rustc_span::SESSION_GLOBALS.with(|sg| sg as *const _);
+    let session_globals = &*session_globals;
+    thread::spawn(move || {
+        tls::enter_context(icx, |_| {
+            rustc_span::SESSION_GLOBALS
+                .set(session_globals, || tls::with(|tcx| tcx.queries.deadlock(tcx, &registry)))
+        });
+    });
+}
+
 #[cfg(parallel_compiler)]
 pub fn setup_callbacks_and_run_in_thread_pool_with_globals<F: FnOnce() -> R + Send, R: Send>(
     edition: Edition,
@@ -163,7 +188,6 @@ pub fn setup_callbacks_and_run_in_thread_pool_with_globals<F: FnOnce() -> R + Se
     stderr: &Option<Arc<Mutex<Vec<u8>>>>,
     f: F,
 ) -> R {
-    use rustc_middle::ty;
     crate::callbacks::setup_callbacks();

     let mut config = rayon::ThreadPoolBuilder::new()
@@ -171,7 +195,7 @@ pub fn setup_callbacks_and_run_in_thread_pool_with_globals<F: FnOnce() -> R + Se
         .acquire_thread_handler(jobserver::acquire_thread)
         .release_thread_handler(jobserver::release_thread)
         .num_threads(threads)
-        .deadlock_handler(|| unsafe { ty::query::handle_deadlock() });
+        .deadlock_handler(|| unsafe { handle_deadlock() });

     if let Some(size) = get_stack_size() {
         config = config.stack_size(size);
@@ -97,7 +97,7 @@ impl Parse for QueryModifier {
             Ok(QueryModifier::Cache(args, block))
         } else if modifier == "load_cached" {
             // Parse a load_cached modifier like:
-            // `load_cached(tcx, id) { tcx.queries.on_disk_cache.try_load_query_result(tcx, id) }`
+            // `load_cached(tcx, id) { tcx.on_disk_cache.try_load_query_result(tcx, id) }`
             let args;
             parenthesized!(args in input);
             let tcx = args.parse()?;
@@ -344,7 +344,6 @@ fn add_query_description_impl(
     impls: &mut proc_macro2::TokenStream,
 ) {
     let name = &query.name;
-    let arg = &query.arg;
     let key = &query.key.0;

     // Find out if we should cache the query on disk
@@ -354,7 +353,7 @@ fn add_query_description_impl(
         quote! {
             #[inline]
             fn try_load_from_disk(
-                #tcx: TyCtxt<'tcx>,
+                #tcx: QueryCtxt<'tcx>,
                 #id: SerializedDepNodeIndex
             ) -> Option<Self::Value> {
                 #block
@@ -365,10 +364,10 @@ fn add_query_description_impl(
         quote! {
             #[inline]
             fn try_load_from_disk(
-                tcx: TyCtxt<'tcx>,
+                tcx: QueryCtxt<'tcx>,
                 id: SerializedDepNodeIndex
             ) -> Option<Self::Value> {
-                tcx.queries.on_disk_cache.as_ref().and_then(|c| c.try_load_query_result(tcx, id))
+                tcx.on_disk_cache.as_ref()?.try_load_query_result(*tcx, id)
             }
         }
     };
@@ -393,7 +392,7 @@ fn add_query_description_impl(
             #[inline]
             #[allow(unused_variables, unused_braces)]
             fn cache_on_disk(
-                #tcx: TyCtxt<'tcx>,
+                #tcx: QueryCtxt<'tcx>,
                 #key: &Self::Key,
                 #value: Option<&Self::Value>
             ) -> bool {
@@ -414,16 +413,14 @@ fn add_query_description_impl(

     let desc = quote! {
         #[allow(unused_variables)]
-        fn describe(
-            #tcx: TyCtxt<'tcx>,
-            #key: #arg,
-        ) -> String {
-            ::rustc_middle::ty::print::with_no_trimmed_paths(|| format!(#desc))
+        fn describe(tcx: QueryCtxt<'tcx>, key: Self::Key) -> String {
+            let (#tcx, #key) = (*tcx, key);
+            ::rustc_middle::ty::print::with_no_trimmed_paths(|| format!(#desc).into())
         }
     };

     impls.extend(quote! {
-        impl<'tcx> QueryDescription<TyCtxt<'tcx>> for queries::#name<'tcx> {
+        impl<'tcx> QueryDescription<QueryCtxt<'tcx>> for queries::#name<'tcx> {
             #desc
             #cache
         }
@@ -498,6 +495,7 @@ pub fn rustc_queries(input: TokenStream) -> TokenStream {
     }

     TokenStream::from(quote! {
+        #[macro_export]
         macro_rules! rustc_query_append {
             ([$($macro:tt)*][$($other:tt)*]) => {
                 $($macro)* {
@@ -517,12 +515,15 @@ pub fn rustc_queries(input: TokenStream) -> TokenStream {
                 );
             }
         }
+        #[macro_export]
         macro_rules! rustc_cached_queries {
             ($($macro:tt)*) => {
                 $($macro)*(#cached_queries);
             }
         }
-        #query_description_stream
+        #[macro_export]
+        macro_rules! rustc_query_description {
+            () => { #query_description_stream }
+        }
     })
 }
@@ -62,7 +62,6 @@ use rustc_hir::def_id::{CrateNum, DefId, LocalDefId, CRATE_DEF_INDEX};
 use rustc_hir::definitions::DefPathHash;
 use rustc_hir::HirId;
 use rustc_span::symbol::Symbol;
-use rustc_span::DUMMY_SP;
 use std::hash::Hash;

 pub use rustc_query_system::dep_graph::{DepContext, DepNodeParams};
@@ -91,53 +90,6 @@ pub struct DepKindStruct {
     // FIXME: Make this a simple boolean once DepNodeParams::can_reconstruct_query_key
     // can be made a specialized associated const.
     can_reconstruct_query_key: fn() -> bool,
-
-    /// The red/green evaluation system will try to mark a specific DepNode in the
-    /// dependency graph as green by recursively trying to mark the dependencies of
-    /// that `DepNode` as green. While doing so, it will sometimes encounter a `DepNode`
-    /// where we don't know if it is red or green and we therefore actually have
-    /// to recompute its value in order to find out. Since the only piece of
-    /// information that we have at that point is the `DepNode` we are trying to
-    /// re-evaluate, we need some way to re-run a query from just that. This is what
-    /// `force_from_dep_node()` implements.
-    ///
-    /// In the general case, a `DepNode` consists of a `DepKind` and an opaque
-    /// GUID/fingerprint that will uniquely identify the node. This GUID/fingerprint
-    /// is usually constructed by computing a stable hash of the query-key that the
-    /// `DepNode` corresponds to. Consequently, it is not in general possible to go
-    /// back from hash to query-key (since hash functions are not reversible). For
-    /// this reason `force_from_dep_node()` is expected to fail from time to time
-    /// because we just cannot find out, from the `DepNode` alone, what the
-    /// corresponding query-key is and therefore cannot re-run the query.
-    ///
-    /// The system deals with this case letting `try_mark_green` fail which forces
-    /// the root query to be re-evaluated.
-    ///
-    /// Now, if `force_from_dep_node()` would always fail, it would be pretty useless.
-    /// Fortunately, we can use some contextual information that will allow us to
-    /// reconstruct query-keys for certain kinds of `DepNode`s. In particular, we
-    /// enforce by construction that the GUID/fingerprint of certain `DepNode`s is a
-    /// valid `DefPathHash`. Since we also always build a huge table that maps every
-    /// `DefPathHash` in the current codebase to the corresponding `DefId`, we have
-    /// everything we need to re-run the query.
-    ///
-    /// Take the `mir_promoted` query as an example. Like many other queries, it
-    /// just has a single parameter: the `DefId` of the item it will compute the
-    /// validated MIR for. Now, when we call `force_from_dep_node()` on a `DepNode`
-    /// with kind `MirValidated`, we know that the GUID/fingerprint of the `DepNode`
-    /// is actually a `DefPathHash`, and can therefore just look up the corresponding
-    /// `DefId` in `tcx.def_path_hash_to_def_id`.
-    ///
-    /// When you implement a new query, it will likely have a corresponding new
-    /// `DepKind`, and you'll have to support it here in `force_from_dep_node()`. As
-    /// a rule of thumb, if your query takes a `DefId` or `LocalDefId` as sole parameter,
-    /// then `force_from_dep_node()` should not fail for it. Otherwise, you can just
-    /// add it to the "We don't have enough information to reconstruct..." group in
-    /// the match below.
-    pub(super) force_from_dep_node: fn(tcx: TyCtxt<'_>, dep_node: &DepNode) -> bool,
-
-    /// Invoke a query to put the on-disk cached value in memory.
-    pub(super) try_load_from_on_disk_cache: fn(TyCtxt<'_>, &DepNode),
 }

 impl std::ops::Deref for DepKind {
@@ -196,8 +148,7 @@ macro_rules! contains_eval_always_attr {
 #[allow(non_upper_case_globals)]
 pub mod dep_kind {
     use super::*;
-    use crate::ty::query::{queries, query_keys};
-    use rustc_query_system::query::{force_query, QueryDescription};
+    use crate::ty::query::query_keys;

     // We use this for most things when incr. comp. is turned off.
     pub const Null: DepKindStruct = DepKindStruct {
@@ -206,8 +157,6 @@ pub mod dep_kind {
         is_eval_always: false,

         can_reconstruct_query_key: || true,
-        force_from_dep_node: |_, dep_node| bug!("force_from_dep_node: encountered {:?}", dep_node),
-        try_load_from_on_disk_cache: |_, _| {},
     };

     pub const TraitSelect: DepKindStruct = DepKindStruct {
@@ -216,8 +165,6 @@ pub mod dep_kind {
         is_eval_always: false,

         can_reconstruct_query_key: || true,
-        force_from_dep_node: |_, _| false,
-        try_load_from_on_disk_cache: |_, _| {},
     };

     pub const CompileCodegenUnit: DepKindStruct = DepKindStruct {
@@ -226,8 +173,6 @@ pub mod dep_kind {
         is_eval_always: false,

         can_reconstruct_query_key: || false,
-        force_from_dep_node: |_, _| false,
-        try_load_from_on_disk_cache: |_, _| {},
     };

     macro_rules! define_query_dep_kinds {
@@ -246,59 +191,11 @@ pub mod dep_kind {
                 ::can_reconstruct_query_key()
             }

-            fn recover<'tcx>(tcx: TyCtxt<'tcx>, dep_node: &DepNode) -> Option<query_keys::$variant<'tcx>> {
-                <query_keys::$variant<'_> as DepNodeParams<TyCtxt<'_>>>::recover(tcx, dep_node)
-            }
-
-            fn force_from_dep_node(tcx: TyCtxt<'_>, dep_node: &DepNode) -> bool {
-                if is_anon {
-                    return false;
-                }
-
-                if !can_reconstruct_query_key() {
-                    return false;
-                }
-
-                if let Some(key) = recover(tcx, dep_node) {
-                    force_query::<queries::$variant<'_>, _>(
-                        tcx,
-                        key,
-                        DUMMY_SP,
-                        *dep_node
-                    );
-                    return true;
-                }
-
-                false
-            }
-
-            fn try_load_from_on_disk_cache(tcx: TyCtxt<'_>, dep_node: &DepNode) {
-                if is_anon {
-                    return
-                }
-
-                if !can_reconstruct_query_key() {
-                    return
-                }
-
-                debug_assert!(tcx.dep_graph
-                    .node_color(dep_node)
-                    .map(|c| c.is_green())
-                    .unwrap_or(false));
-
-                let key = recover(tcx, dep_node).unwrap_or_else(|| panic!("Failed to recover key for {:?} with hash {}", dep_node, dep_node.hash));
-                if queries::$variant::cache_on_disk(tcx, &key, None) {
-                    let _ = tcx.$variant(key);
-                }
-            }
-
             DepKindStruct {
                 has_params,
                 is_anon,
                 is_eval_always,
                 can_reconstruct_query_key,
-                force_from_dep_node,
-                try_load_from_on_disk_cache,
             }
         };)*
     );
@@ -314,7 +211,12 @@ macro_rules! define_dep_nodes {
         $variant:ident $(( $tuple_arg_ty:ty $(,)? ))*
       ,)*
     ) => (
-        static DEP_KINDS: &[DepKindStruct] = &[ $(dep_kind::$variant),* ];
+        #[macro_export]
+        macro_rules! make_dep_kind_array {
+            ($mod:ident) => {[ $(($mod::$variant),)* ]};
+        }
+
+        static DEP_KINDS: &[DepKindStruct] = &make_dep_kind_array!(dep_kind);

         /// This enum serves as an index into the `DEP_KINDS` array.
         #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Encodable, Decodable)]
@@ -414,10 +316,7 @@ impl DepNodeExt for DepNode {
     /// has been removed.
     fn extract_def_id(&self, tcx: TyCtxt<'tcx>) -> Option<DefId> {
         if self.kind.can_reconstruct_query_key() {
-            tcx.queries
-                .on_disk_cache
-                .as_ref()?
-                .def_path_hash_to_def_id(tcx, DefPathHash(self.hash.into()))
+            tcx.on_disk_cache.as_ref()?.def_path_hash_to_def_id(tcx, DefPathHash(self.hash.into()))
         } else {
             None
         }
@@ -472,7 +371,7 @@ impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for DefId {
         // we will use the old DefIndex as an initial guess for
         // a lookup into the crate metadata.
         if !self.is_local() {
-            if let Some(cache) = &tcx.queries.on_disk_cache {
+            if let Some(cache) = &tcx.on_disk_cache {
                 cache.store_foreign_def_id_hash(*self, hash);
             }
         }
@@ -2,10 +2,8 @@ use crate::ich::StableHashingContext;
 use crate::ty::{self, TyCtxt};
 use rustc_data_structures::profiling::SelfProfilerRef;
 use rustc_data_structures::sync::Lock;
-use rustc_data_structures::thin_vec::ThinVec;
-use rustc_errors::Diagnostic;
-use rustc_hir::def_id::LocalDefId;

+#[macro_use]
 mod dep_node;

 pub use rustc_query_system::dep_graph::{
@@ -94,7 +92,7 @@ impl<'tcx> DepContext for TyCtxt<'tcx> {
     type StableHashingContext = StableHashingContext<'tcx>;

     fn register_reused_dep_node(&self, dep_node: &DepNode) {
-        if let Some(cache) = self.queries.on_disk_cache.as_ref() {
+        if let Some(cache) = self.on_disk_cache.as_ref() {
             cache.register_reused_dep_node(*self, dep_node)
         }
     }
@@ -111,104 +109,12 @@ impl<'tcx> DepContext for TyCtxt<'tcx> {
             || self.sess.opts.debugging_opts.query_dep_graph
     }

-    fn try_force_from_dep_node(&self, dep_node: &DepNode) -> bool {
-        // FIXME: This match is just a workaround for incremental bugs and should
-        // be removed. https://github.com/rust-lang/rust/issues/62649 is one such
-        // bug that must be fixed before removing this.
-        match dep_node.kind {
-            DepKind::hir_owner | DepKind::hir_owner_nodes => {
-                if let Some(def_id) = dep_node.extract_def_id(*self) {
-                    if !def_id_corresponds_to_hir_dep_node(*self, def_id.expect_local()) {
-                        // This `DefPath` does not have a
-                        // corresponding `DepNode` (e.g. a
-                        // struct field), and the ` DefPath`
-                        // collided with the `DefPath` of a
-                        // proper item that existed in the
-                        // previous compilation session.
-                        //
-                        // Since the given `DefPath` does not
-                        // denote the item that previously
-                        // existed, we just fail to mark green.
-                        return false;
-                    }
-                } else {
-                    // If the node does not exist anymore, we
-                    // just fail to mark green.
-                    return false;
-                }
-            }
-            _ => {
-                // For other kinds of nodes it's OK to be
-                // forced.
-            }
-        }
-
-        debug!("try_force_from_dep_node({:?}) --- trying to force", dep_node);
-
-        // We must avoid ever having to call `force_from_dep_node()` for a
-        // `DepNode::codegen_unit`:
-        // Since we cannot reconstruct the query key of a `DepNode::codegen_unit`, we
-        // would always end up having to evaluate the first caller of the
-        // `codegen_unit` query that *is* reconstructible. This might very well be
-        // the `compile_codegen_unit` query, thus re-codegenning the whole CGU just
-        // to re-trigger calling the `codegen_unit` query with the right key. At
-        // that point we would already have re-done all the work we are trying to
-        // avoid doing in the first place.
-        // The solution is simple: Just explicitly call the `codegen_unit` query for
-        // each CGU, right after partitioning. This way `try_mark_green` will always
-        // hit the cache instead of having to go through `force_from_dep_node`.
-        // This assertion makes sure, we actually keep applying the solution above.
-        debug_assert!(
-            dep_node.kind != DepKind::codegen_unit,
-            "calling force_from_dep_node() on DepKind::codegen_unit"
-        );
-
-        (dep_node.kind.force_from_dep_node)(*self, dep_node)
-    }
-
-    fn has_errors_or_delayed_span_bugs(&self) -> bool {
-        self.sess.has_errors_or_delayed_span_bugs()
-    }
-
-    fn diagnostic(&self) -> &rustc_errors::Handler {
-        self.sess.diagnostic()
-    }
-
-    // Interactions with on_disk_cache
-    fn try_load_from_on_disk_cache(&self, dep_node: &DepNode) {
-        (dep_node.kind.try_load_from_on_disk_cache)(*self, dep_node)
-    }
-
-    fn load_diagnostics(&self, prev_dep_node_index: SerializedDepNodeIndex) -> Vec<Diagnostic> {
-        self.queries
-            .on_disk_cache
-            .as_ref()
-            .map(|c| c.load_diagnostics(*self, prev_dep_node_index))
-            .unwrap_or_default()
-    }
-
-    fn store_diagnostics(&self, dep_node_index: DepNodeIndex, diagnostics: ThinVec<Diagnostic>) {
-        if let Some(c) = self.queries.on_disk_cache.as_ref() {
-            c.store_diagnostics(dep_node_index, diagnostics)
-        }
-    }
-
-    fn store_diagnostics_for_anon_node(
-        &self,
-        dep_node_index: DepNodeIndex,
-        diagnostics: ThinVec<Diagnostic>,
-    ) {
-        if let Some(c) = self.queries.on_disk_cache.as_ref() {
-            c.store_diagnostics_for_anon_node(dep_node_index, diagnostics)
-        }
+    #[inline]
+    fn dep_graph(&self) -> &DepGraph {
+        &self.dep_graph
     }

     fn profiler(&self) -> &SelfProfilerRef {
         &self.prof
     }
 }
-
-fn def_id_corresponds_to_hir_dep_node(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
-    let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
-    def_id == hir_id.owner
-}
@@ -76,6 +76,7 @@ pub mod query;

 #[macro_use]
 pub mod arena;
+#[macro_use]
 pub mod dep_graph;
 pub mod hir;
 pub mod ich;
@@ -1,27 +1,3 @@
-use crate::dep_graph::SerializedDepNodeIndex;
-use crate::mir::interpret::{GlobalId, LitToConstInput};
-use crate::traits;
-use crate::traits::query::{
-    CanonicalPredicateGoal, CanonicalProjectionGoal, CanonicalTyGoal,
-    CanonicalTypeOpAscribeUserTypeGoal, CanonicalTypeOpEqGoal, CanonicalTypeOpNormalizeGoal,
-    CanonicalTypeOpProvePredicateGoal, CanonicalTypeOpSubtypeGoal,
-};
-use crate::ty::query::queries;
-use crate::ty::subst::{GenericArg, SubstsRef};
-use crate::ty::{self, ParamEnvAnd, Ty, TyCtxt};
-use rustc_hir::def_id::{CrateNum, DefId, LocalDefId};
-use rustc_query_system::query::QueryDescription;
-
-use rustc_span::symbol::Symbol;
-
-fn describe_as_module(def_id: LocalDefId, tcx: TyCtxt<'_>) -> String {
-    if def_id.is_top_level_module() {
-        "top-level module".to_string()
-    } else {
-        format!("module `{}`", tcx.def_path_str(def_id.to_def_id()))
-    }
-}
-
 // Each of these queries corresponds to a function pointer field in the
 // `Providers` struct for requesting a value of that type, and a method
 // on `tcx: TyCtxt` (and `tcx.at(span)`) for doing that request in a way
@@ -125,11 +101,6 @@ rustc_queries! {
         desc { |tcx| "computing generics of `{}`", tcx.def_path_str(key) }
         storage(ArenaCacheSelector<'tcx>)
         cache_on_disk_if { key.is_local() }
-        load_cached(tcx, id) {
-            let generics: Option<ty::Generics> = tcx.queries.on_disk_cache.as_ref()
-                .and_then(|c| c.try_load_query_result(tcx, id));
-            generics
-        }
     }

     /// Maps from the `DefId` of an item (trait/struct/enum/fn) to the
@@ -702,8 +673,8 @@ rustc_queries! {
         cache_on_disk_if { true }
         load_cached(tcx, id) {
             let typeck_results: Option<ty::TypeckResults<'tcx>> = tcx
-                .queries.on_disk_cache.as_ref()
-                .and_then(|c| c.try_load_query_result(tcx, id));
+                .on_disk_cache.as_ref()
+                .and_then(|c| c.try_load_query_result(*tcx, id));

             typeck_results.map(|x| &*tcx.arena.alloc(x))
         }
@@ -14,7 +14,7 @@ use crate::middle::stability;
 use crate::mir::interpret::{self, Allocation, ConstValue, Scalar};
 use crate::mir::{Body, Field, Local, Place, PlaceElem, ProjectionKind, Promoted};
 use crate::traits;
-use crate::ty::query::{self, TyCtxtAt};
+use crate::ty::query::{self, OnDiskCache, TyCtxtAt};
 use crate::ty::subst::{GenericArg, GenericArgKind, InternalSubsts, Subst, SubstsRef, UserSubsts};
 use crate::ty::TyKind::*;
 use crate::ty::{
@@ -962,7 +962,13 @@ pub struct GlobalCtxt<'tcx> {
     pub(crate) untracked_crate: &'tcx hir::Crate<'tcx>,
     pub(crate) definitions: &'tcx Definitions,

-    pub queries: query::Queries<'tcx>,
+    /// This provides access to the incremental compilation on-disk cache for query results.
+    /// Do not access this directly. It is only meant to be used by
+    /// `DepGraph::try_mark_green()` and the query infrastructure.
+    /// This is `None` if we are not incremental compilation mode
+    pub on_disk_cache: Option<OnDiskCache<'tcx>>,
+
+    pub queries: &'tcx dyn query::QueryEngine<'tcx>,
     pub query_caches: query::QueryCaches<'tcx>,

     maybe_unused_trait_imports: FxHashSet<LocalDefId>,
@@ -1103,14 +1109,13 @@ impl<'tcx> TyCtxt<'tcx> {
     pub fn create_global_ctxt(
         s: &'tcx Session,
         lint_store: Lrc<dyn Any + sync::Send + sync::Sync>,
-        local_providers: ty::query::Providers,
-        extern_providers: ty::query::Providers,
         arena: &'tcx WorkerLocal<Arena<'tcx>>,
         resolutions: ty::ResolverOutputs,
         krate: &'tcx hir::Crate<'tcx>,
         definitions: &'tcx Definitions,
         dep_graph: DepGraph,
-        on_disk_query_result_cache: Option<query::OnDiskCache<'tcx>>,
+        on_disk_cache: Option<query::OnDiskCache<'tcx>>,
+        queries: &'tcx dyn query::QueryEngine<'tcx>,
         crate_name: &str,
         output_filenames: &OutputFilenames,
     ) -> GlobalCtxt<'tcx> {
@@ -1122,10 +1127,6 @@ impl<'tcx> TyCtxt<'tcx> {
         let common_lifetimes = CommonLifetimes::new(&interners);
         let common_consts = CommonConsts::new(&interners, &common_types);
         let cstore = resolutions.cstore;
-        let crates = cstore.crates_untracked();
-        let max_cnum = crates.iter().map(|c| c.as_usize()).max().unwrap_or(0);
-        let mut providers = IndexVec::from_elem_n(extern_providers, max_cnum + 1);
-        providers[LOCAL_CRATE] = local_providers;

         let mut trait_map: FxHashMap<_, FxHashMap<_, _>> = FxHashMap::default();
         for (hir_id, v) in krate.trait_map.iter() {
@@ -1154,7 +1155,8 @@ impl<'tcx> TyCtxt<'tcx> {
             extern_prelude: resolutions.extern_prelude,
             untracked_crate: krate,
             definitions,
-            queries: query::Queries::new(providers, extern_providers, on_disk_query_result_cache),
+            on_disk_cache,
+            queries,
             query_caches: query::QueryCaches::default(),
             ty_rcache: Default::default(),
             pred_rcache: Default::default(),
@@ -1320,7 +1322,7 @@ impl<'tcx> TyCtxt<'tcx> {
     }

     pub fn serialize_query_result_cache(self, encoder: &mut FileEncoder) -> FileEncodeResult {
-        self.queries.on_disk_cache.as_ref().map_or(Ok(()), |c| c.serialize(self, encoder))
+        self.on_disk_cache.as_ref().map_or(Ok(()), |c| c.serialize(self, encoder))
     }

     /// If `true`, we should use the MIR-based borrowck, but also
@@ -100,8 +100,6 @@ pub use self::list::List;

 pub use self::trait_def::TraitDef;

-pub use self::query::queries;
-
 pub use self::consts::{Const, ConstInt, ConstKind, InferConst, ScalarInt};

 pub mod _match;
@@ -1,26 +0,0 @@
-use crate::ty::tls;
-
-use rustc_query_system::query::deadlock;
-use rustc_rayon_core as rayon_core;
-use std::thread;
-
-/// Creates a new thread and forwards information in thread locals to it.
-/// The new thread runs the deadlock handler.
-/// Must only be called when a deadlock is about to happen.
-pub unsafe fn handle_deadlock() {
-    let registry = rayon_core::Registry::current();
-
-    let context = tls::get_tlv();
-    assert!(context != 0);
-    rustc_data_structures::sync::assert_sync::<tls::ImplicitCtxt<'_, '_>>();
-    let icx: &tls::ImplicitCtxt<'_, '_> = &*(context as *const tls::ImplicitCtxt<'_, '_>);
-
-    let session_globals = rustc_span::SESSION_GLOBALS.with(|sg| sg as *const _);
-    let session_globals = &*session_globals;
-    thread::spawn(move || {
-        tls::enter_context(icx, |_| {
-            rustc_span::SESSION_GLOBALS
-                .set(session_globals, || tls::with(|tcx| deadlock(tcx, &registry)))
-        })
-    });
-}
@@ -31,19 +31,19 @@ use crate::traits::{self, ImplSource};
 use crate::ty::subst::{GenericArg, SubstsRef};
 use crate::ty::util::AlwaysRequiresDrop;
 use crate::ty::{self, AdtSizedConstraint, CrateInherentImpls, ParamEnvAnd, Ty, TyCtxt};
-use rustc_data_structures::fingerprint::Fingerprint;
 use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap};
 use rustc_data_structures::stable_hasher::StableVec;
 use rustc_data_structures::steal::Steal;
 use rustc_data_structures::svh::Svh;
 use rustc_data_structures::sync::Lrc;
-use rustc_errors::ErrorReported;
+use rustc_errors::{ErrorReported, Handler};
 use rustc_hir as hir;
 use rustc_hir::def::DefKind;
 use rustc_hir::def_id::{CrateNum, DefId, DefIdMap, DefIdSet, LocalDefId};
 use rustc_hir::lang_items::{LangItem, LanguageItems};
 use rustc_hir::{Crate, ItemLocalId, TraitCandidate};
 use rustc_index::{bit_set::FiniteBitSet, vec::IndexVec};
+use rustc_serialize::opaque;
 use rustc_session::config::{EntryFnType, OptLevel, OutputFilenames, SymbolManglingVersion};
 use rustc_session::utils::NativeLibKind;
 use rustc_session::CrateDisambiguator;
@@ -58,35 +58,211 @@ use std::ops::Deref;
 use std::path::PathBuf;
 use std::sync::Arc;

-#[macro_use]
-mod plumbing;
-pub(crate) use rustc_query_system::query::CycleError;
+pub(crate) use rustc_query_system::query::QueryJobId;
 use rustc_query_system::query::*;

-mod stats;
-pub use self::stats::print_stats;
-
-#[cfg(parallel_compiler)]
-mod job;
-#[cfg(parallel_compiler)]
-pub use self::job::handle_deadlock;
-pub use rustc_query_system::query::{QueryInfo, QueryJob, QueryJobId};
-
-mod keys;
-use self::keys::Key;
-
-mod values;
-use self::values::Value;
-
-use rustc_query_system::query::QueryAccessors;
-pub use rustc_query_system::query::QueryConfig;
-pub(crate) use rustc_query_system::query::QueryDescription;
-
-mod on_disk_cache;
+pub mod on_disk_cache;
 pub use self::on_disk_cache::OnDiskCache;

-mod profiling_support;
-pub use self::profiling_support::{IntoSelfProfilingString, QueryKeyStringBuilder};
+#[derive(Copy, Clone)]
+pub struct TyCtxtAt<'tcx> {
+    pub tcx: TyCtxt<'tcx>,
+    pub span: Span,
+}
+
+impl Deref for TyCtxtAt<'tcx> {
+    type Target = TyCtxt<'tcx>;
+    #[inline(always)]
+    fn deref(&self) -> &Self::Target {
+        &self.tcx
+    }
+}
+
+#[derive(Copy, Clone)]
+pub struct TyCtxtEnsure<'tcx> {
+    pub tcx: TyCtxt<'tcx>,
+}
+
+impl TyCtxt<'tcx> {
+    /// Returns a transparent wrapper for `TyCtxt`, which ensures queries
+    /// are executed instead of just returning their results.
+    #[inline(always)]
+    pub fn ensure(self) -> TyCtxtEnsure<'tcx> {
+        TyCtxtEnsure { tcx: self }
+    }
+
+    /// Returns a transparent wrapper for `TyCtxt` which uses
+    /// `span` as the location of queries performed through it.
+    #[inline(always)]
+    pub fn at(self, span: Span) -> TyCtxtAt<'tcx> {
+        TyCtxtAt { tcx: self, span }
+    }
+
+    pub fn try_mark_green(self, dep_node: &dep_graph::DepNode) -> bool {
+        self.queries.try_mark_green(self, dep_node)
+    }
+}
+
+macro_rules! query_helper_param_ty {
+    (DefId) => { impl IntoQueryParam<DefId> };
+    ($K:ty) => { $K };
+}
+
+macro_rules! query_storage {
+    ([][$K:ty, $V:ty]) => {
+        <DefaultCacheSelector as CacheSelector<$K, $V>>::Cache
+    };
+    ([storage($ty:ty) $($rest:tt)*][$K:ty, $V:ty]) => {
+        <$ty as CacheSelector<$K, $V>>::Cache
+    };
+    ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*][$($args:tt)*]) => {
+        query_storage!([$($($modifiers)*)*][$($args)*])
+    };
+}
+
+macro_rules! define_callbacks {
+    (<$tcx:tt>
+     $($(#[$attr:meta])*
+        [$($modifiers:tt)*] fn $name:ident($($K:tt)*) -> $V:ty,)*) => {
+
+        // HACK(eddyb) this is like the `impl QueryConfig for queries::$name`
+        // below, but using type aliases instead of associated types, to bypass
+        // the limitations around normalizing under HRTB - for example, this:
+        // `for<'tcx> fn(...) -> <queries::$name<'tcx> as QueryConfig<TyCtxt<'tcx>>>::Value`
+        // doesn't currently normalize to `for<'tcx> fn(...) -> query_values::$name<'tcx>`.
+        // This is primarily used by the `provide!` macro in `rustc_metadata`.
+        #[allow(nonstandard_style, unused_lifetimes)]
+        pub mod query_keys {
+            use super::*;
+
+            $(pub type $name<$tcx> = $($K)*;)*
+        }
+        #[allow(nonstandard_style, unused_lifetimes)]
+        pub mod query_values {
+            use super::*;
+
+            $(pub type $name<$tcx> = $V;)*
+        }
+        #[allow(nonstandard_style, unused_lifetimes)]
+        pub mod query_storage {
+            use super::*;
+
+            $(pub type $name<$tcx> = query_storage!([$($modifiers)*][$($K)*, $V]);)*
+        }
+        #[allow(nonstandard_style, unused_lifetimes)]
+        pub mod query_stored {
+            use super::*;
+
+            $(pub type $name<$tcx> = <query_storage::$name<$tcx> as QueryStorage>::Stored;)*
+        }
+
+        #[derive(Default)]
+        pub struct QueryCaches<$tcx> {
+            $($(#[$attr])* pub $name: QueryCacheStore<query_storage::$name<$tcx>>,)*
+        }
+
+        impl TyCtxtEnsure<$tcx> {
+            $($(#[$attr])*
+            #[inline(always)]
+            pub fn $name(self, key: query_helper_param_ty!($($K)*)) {
+                let key = key.into_query_param();
+                let cached = try_get_cached(self.tcx, &self.tcx.query_caches.$name, &key, |_| {});
+
+                let lookup = match cached {
+                    Ok(()) => return,
+                    Err(lookup) => lookup,
+                };
+
+                self.tcx.queries.$name(self.tcx, DUMMY_SP, key, lookup, QueryMode::Ensure);
+            })*
+        }
+
+        impl TyCtxt<$tcx> {
+            $($(#[$attr])*
+            #[inline(always)]
+            #[must_use]
+            pub fn $name(self, key: query_helper_param_ty!($($K)*)) -> query_stored::$name<$tcx>
+            {
+                self.at(DUMMY_SP).$name(key)
+            })*
+        }
+
+        impl TyCtxtAt<$tcx> {
+            $($(#[$attr])*
+            #[inline(always)]
+            pub fn $name(self, key: query_helper_param_ty!($($K)*)) -> query_stored::$name<$tcx>
+            {
+                let key = key.into_query_param();
+                let cached = try_get_cached(self.tcx, &self.tcx.query_caches.$name, &key, |value| {
+                    value.clone()
+                });
+
+                let lookup = match cached {
+                    Ok(value) => return value,
+                    Err(lookup) => lookup,
+                };
+
+                self.tcx.queries.$name(self.tcx, self.span, key, lookup, QueryMode::Get).unwrap()
+            })*
+        }
+
+        pub struct Providers {
+            $(pub $name: for<'tcx> fn(
+                TyCtxt<'tcx>,
+                query_keys::$name<'tcx>,
+            ) -> query_values::$name<'tcx>,)*
+        }
+
+        impl Default for Providers {
+            fn default() -> Self {
+                Providers {
+                    $($name: |_, key| bug!(
+                        "`tcx.{}({:?})` unsupported by its crate",
+                        stringify!($name), key
+                    ),)*
+                }
+            }
+        }
|
impl Copy for Providers {}
|
||||||
|
impl Clone for Providers {
|
||||||
|
fn clone(&self) -> Self { *self }
|
||||||
|
}
|
||||||
|
|
||||||
|
pub trait QueryEngine<'tcx>: rustc_data_structures::sync::Sync {
|
||||||
|
unsafe fn deadlock(&'tcx self, tcx: TyCtxt<'tcx>, registry: &rustc_rayon_core::Registry);
|
||||||
|
|
||||||
|
fn encode_query_results(
|
||||||
|
&'tcx self,
|
||||||
|
tcx: TyCtxt<'tcx>,
|
||||||
|
encoder: &mut on_disk_cache::CacheEncoder<'a, 'tcx, opaque::FileEncoder>,
|
||||||
|
query_result_index: &mut on_disk_cache::EncodedQueryResultIndex,
|
||||||
|
) -> opaque::FileEncodeResult;
|
||||||
|
|
||||||
|
fn exec_cache_promotions(&'tcx self, tcx: TyCtxt<'tcx>);
|
||||||
|
|
||||||
|
fn try_mark_green(&'tcx self, tcx: TyCtxt<'tcx>, dep_node: &dep_graph::DepNode) -> bool;
|
||||||
|
|
||||||
|
fn try_print_query_stack(
|
||||||
|
&'tcx self,
|
||||||
|
tcx: TyCtxt<'tcx>,
|
||||||
|
query: Option<QueryJobId<dep_graph::DepKind>>,
|
||||||
|
handler: &Handler,
|
||||||
|
num_frames: Option<usize>,
|
||||||
|
) -> usize;
|
||||||
|
|
||||||
|
$($(#[$attr])*
|
||||||
|
fn $name(
|
||||||
|
&'tcx self,
|
||||||
|
tcx: TyCtxt<$tcx>,
|
||||||
|
span: Span,
|
||||||
|
key: query_keys::$name<$tcx>,
|
||||||
|
lookup: QueryLookup,
|
||||||
|
mode: QueryMode,
|
||||||
|
) -> Option<query_stored::$name<$tcx>>;)*
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
// Each of these queries corresponds to a function pointer field in the
|
// Each of these queries corresponds to a function pointer field in the
|
||||||
// `Providers` struct for requesting a value of that type, and a method
|
// `Providers` struct for requesting a value of that type, and a method
|
||||||
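The hunk above replaces the statically generated query machinery on `TyCtxt` with thin wrappers that forward to `self.queries`, a `QueryEngine` trait object, so the engine itself can be implemented in another crate and reached through dynamic dispatch. A minimal standalone sketch of that dispatch shape follows; the `Engine`, `Ctxt`, and `SimpleEngine` names are illustrative stand-ins, not rustc types.

// Sketch: a context struct holding a trait object, with ergonomic wrappers
// that forward to it, analogous to TyCtxt forwarding to `self.queries`.
trait Engine {
    fn type_of(&self, def_id: u32) -> String;
    fn try_mark_green(&self, node: u32) -> bool;
}

#[derive(Copy, Clone)]
struct Ctxt<'a> {
    engine: &'a dyn Engine,
}

impl<'a> Ctxt<'a> {
    // Mirrors `TyCtxt::try_mark_green` forwarding to the engine.
    fn try_mark_green(self, node: u32) -> bool {
        self.engine.try_mark_green(node)
    }

    fn type_of(self, def_id: u32) -> String {
        self.engine.type_of(def_id)
    }
}

struct SimpleEngine;

impl Engine for SimpleEngine {
    fn type_of(&self, def_id: u32) -> String {
        format!("Ty#{}", def_id)
    }
    fn try_mark_green(&self, _node: u32) -> bool {
        false
    }
}

fn main() {
    let engine = SimpleEngine;
    let tcx = Ctxt { engine: &engine };
    assert_eq!(tcx.type_of(7), "Ty#7");
    assert!(!tcx.try_mark_green(3));
}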
@@ -100,7 +276,7 @@ pub use self::profiling_support::{IntoSelfProfilingString, QueryKeyStringBuilder
 // Queries marked with `fatal_cycle` do not need the latter implementation,
 // as they will raise an fatal error on query cycles instead.
 
-rustc_query_append! { [define_queries!][<'tcx>] }
+rustc_query_append! { [define_callbacks!][<'tcx>] }
 
 mod sealed {
     use super::{DefId, LocalDefId};
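`rustc_query_append!` expands the single list of queries and hands it to whichever macro name it is given, so `rustc_middle` can generate only the callback surface (`define_callbacks!`) while the engine crate generates the full implementation (`define_queries!`). A small sketch of this macro-takes-a-macro pattern follows; `item_list!`, `define_consts!`, and `define_fns!` are illustrative names only.

// Sketch: one list of names, expanded into different items by passing
// the consumer macro's name to the list macro.
macro_rules! item_list {
    ($callback:ident) => {
        $callback! { alpha, beta, gamma }
    };
}

macro_rules! define_consts {
    ($($name:ident),*) => {
        $(#[allow(non_upper_case_globals)] pub const $name: u32 = 0;)*
    };
}

macro_rules! define_fns {
    ($($name:ident),*) => {
        $(pub fn $name() -> u32 { 0 })*
    };
}

// One list, two expansions -- analogous to expanding the query list with
// `define_callbacks!` in one crate and `define_queries!` in another.
mod consts {
    item_list! { define_consts }
}
mod fns {
    item_list! { define_fns }
}

fn main() {
    assert_eq!(consts::alpha, 0);
    assert_eq!(fns::beta(), 0);
}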
@ -14,6 +14,8 @@ use rustc_hir::def_id::{CrateNum, DefId, DefIndex, LocalDefId, LOCAL_CRATE};
|
|||||||
use rustc_hir::definitions::DefPathHash;
|
use rustc_hir::definitions::DefPathHash;
|
||||||
use rustc_hir::definitions::Definitions;
|
use rustc_hir::definitions::Definitions;
|
||||||
use rustc_index::vec::{Idx, IndexVec};
|
use rustc_index::vec::{Idx, IndexVec};
|
||||||
|
use rustc_query_system::dep_graph::DepContext;
|
||||||
|
use rustc_query_system::query::QueryContext;
|
||||||
use rustc_serialize::{
|
use rustc_serialize::{
|
||||||
opaque::{self, FileEncodeResult, FileEncoder},
|
opaque::{self, FileEncodeResult, FileEncoder},
|
||||||
Decodable, Decoder, Encodable, Encoder,
|
Decodable, Decoder, Encodable, Encoder,
|
||||||
@ -132,7 +134,7 @@ struct Footer {
|
|||||||
foreign_def_path_hashes: UnhashMap<DefPathHash, RawDefId>,
|
foreign_def_path_hashes: UnhashMap<DefPathHash, RawDefId>,
|
||||||
}
|
}
|
||||||
|
|
||||||
type EncodedQueryResultIndex = Vec<(SerializedDepNodeIndex, AbsoluteBytePos)>;
|
pub type EncodedQueryResultIndex = Vec<(SerializedDepNodeIndex, AbsoluteBytePos)>;
|
||||||
type EncodedDiagnosticsIndex = Vec<(SerializedDepNodeIndex, AbsoluteBytePos)>;
|
type EncodedDiagnosticsIndex = Vec<(SerializedDepNodeIndex, AbsoluteBytePos)>;
|
||||||
type EncodedDiagnostics = Vec<Diagnostic>;
|
type EncodedDiagnostics = Vec<Diagnostic>;
|
||||||
|
|
||||||
@ -140,7 +142,7 @@ type EncodedDiagnostics = Vec<Diagnostic>;
|
|||||||
struct SourceFileIndex(u32);
|
struct SourceFileIndex(u32);
|
||||||
|
|
||||||
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, Encodable, Decodable)]
|
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, Encodable, Decodable)]
|
||||||
struct AbsoluteBytePos(u32);
|
pub struct AbsoluteBytePos(u32);
|
||||||
|
|
||||||
impl AbsoluteBytePos {
|
impl AbsoluteBytePos {
|
||||||
fn new(pos: usize) -> AbsoluteBytePos {
|
fn new(pos: usize) -> AbsoluteBytePos {
|
||||||
@@ -284,7 +286,7 @@ impl<'sess> OnDiskCache<'sess> {
         // Do this *before* we clone 'latest_foreign_def_path_hashes', since
         // loading existing queries may cause us to create new DepNodes, which
         // may in turn end up invoking `store_foreign_def_id_hash`
-        tcx.dep_graph.exec_cache_promotions(tcx);
+        tcx.queries.exec_cache_promotions(tcx);
 
         let latest_foreign_def_path_hashes = self.latest_foreign_def_path_hashes.lock().clone();
         let hygiene_encode_context = HygieneEncodeContext::default();
@@ -307,22 +309,7 @@ impl<'sess> OnDiskCache<'sess> {
         tcx.sess.time("encode_query_results", || -> FileEncodeResult {
             let enc = &mut encoder;
             let qri = &mut query_result_index;
-
-            macro_rules! encode_queries {
-                ($($query:ident,)*) => {
-                    $(
-                        encode_query_results::<ty::query::queries::$query<'_>>(
-                            tcx,
-                            enc,
-                            qri
-                        )?;
-                    )*
-                }
-            }
-
-            rustc_cached_queries!(encode_queries!);
-
-            Ok(())
+            tcx.queries.encode_query_results(tcx, enc, qri)
         })?;
 
         // Encode diagnostics.
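Encoding the query results now goes through the `QueryEngine` trait object, but the on-disk scheme is unchanged: each cached result is written into the cache buffer and its starting byte offset is recorded in `query_result_index`, which has the shape `Vec<(SerializedDepNodeIndex, AbsoluteBytePos)>`. A rough sketch of that position-index idea follows, assuming a toy length-prefixed encoding rather than rustc's real serializer.

// Sketch: record where each entry starts so it can be decoded lazily later.
use std::convert::TryInto;

fn encode_results(results: &[(u32, &str)]) -> (Vec<u8>, Vec<(u32, usize)>) {
    let mut buf = Vec::new();
    let mut index = Vec::new();
    for (dep_node_index, value) in results {
        // Remember the absolute byte position of this entry before writing it.
        index.push((*dep_node_index, buf.len()));
        let bytes = value.as_bytes();
        buf.extend_from_slice(&(bytes.len() as u32).to_le_bytes());
        buf.extend_from_slice(bytes);
    }
    (buf, index)
}

fn decode_at(buf: &[u8], pos: usize) -> String {
    let len = u32::from_le_bytes(buf[pos..pos + 4].try_into().unwrap()) as usize;
    String::from_utf8(buf[pos + 4..pos + 4 + len].to_vec()).unwrap()
}

fn main() {
    let (buf, index) = encode_results(&[(0, "first"), (1, "second")]);
    let (_, pos) = index[1];
    assert_eq!(decode_at(&buf, pos), "second");
}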
@ -515,7 +502,7 @@ impl<'sess> OnDiskCache<'sess> {
|
|||||||
|
|
||||||
/// Returns the cached query result if there is something in the cache for
|
/// Returns the cached query result if there is something in the cache for
|
||||||
/// the given `SerializedDepNodeIndex`; otherwise returns `None`.
|
/// the given `SerializedDepNodeIndex`; otherwise returns `None`.
|
||||||
crate fn try_load_query_result<'tcx, T>(
|
pub fn try_load_query_result<'tcx, T>(
|
||||||
&self,
|
&self,
|
||||||
tcx: TyCtxt<'tcx>,
|
tcx: TyCtxt<'tcx>,
|
||||||
dep_node_index: SerializedDepNodeIndex,
|
dep_node_index: SerializedDepNodeIndex,
|
||||||
@ -678,7 +665,7 @@ impl<'sess> OnDiskCache<'sess> {
|
|||||||
/// A decoder that can read from the incremental compilation cache. It is similar to the one
|
/// A decoder that can read from the incremental compilation cache. It is similar to the one
|
||||||
/// we use for crate metadata decoding in that it can rebase spans and eventually
|
/// we use for crate metadata decoding in that it can rebase spans and eventually
|
||||||
/// will also handle things that contain `Ty` instances.
|
/// will also handle things that contain `Ty` instances.
|
||||||
crate struct CacheDecoder<'a, 'tcx> {
|
pub struct CacheDecoder<'a, 'tcx> {
|
||||||
tcx: TyCtxt<'tcx>,
|
tcx: TyCtxt<'tcx>,
|
||||||
opaque: opaque::Decoder<'a>,
|
opaque: opaque::Decoder<'a>,
|
||||||
source_map: &'a SourceMap,
|
source_map: &'a SourceMap,
|
||||||
@ -918,7 +905,6 @@ impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for DefId {
|
|||||||
// which means that the definition with this hash is guaranteed to
|
// which means that the definition with this hash is guaranteed to
|
||||||
// still exist in the current compilation session.
|
// still exist in the current compilation session.
|
||||||
Ok(d.tcx()
|
Ok(d.tcx()
|
||||||
.queries
|
|
||||||
.on_disk_cache
|
.on_disk_cache
|
||||||
.as_ref()
|
.as_ref()
|
||||||
.unwrap()
|
.unwrap()
|
||||||
@ -973,7 +959,7 @@ impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for &'tcx [Span] {
|
|||||||
|
|
||||||
//- ENCODING -------------------------------------------------------------------
|
//- ENCODING -------------------------------------------------------------------
|
||||||
|
|
||||||
trait OpaqueEncoder: Encoder {
|
pub trait OpaqueEncoder: Encoder {
|
||||||
fn position(&self) -> usize;
|
fn position(&self) -> usize;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -985,7 +971,7 @@ impl OpaqueEncoder for FileEncoder {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// An encoder that can write to the incremental compilation cache.
|
/// An encoder that can write to the incremental compilation cache.
|
||||||
struct CacheEncoder<'a, 'tcx, E: OpaqueEncoder> {
|
pub struct CacheEncoder<'a, 'tcx, E: OpaqueEncoder> {
|
||||||
tcx: TyCtxt<'tcx>,
|
tcx: TyCtxt<'tcx>,
|
||||||
encoder: &'a mut E,
|
encoder: &'a mut E,
|
||||||
type_shorthands: FxHashMap<Ty<'tcx>, usize>,
|
type_shorthands: FxHashMap<Ty<'tcx>, usize>,
|
||||||
@ -1230,18 +1216,19 @@ impl<'a> Decodable<opaque::Decoder<'a>> for IntEncodedWithFixedSize {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn encode_query_results<'a, 'tcx, Q>(
|
pub fn encode_query_results<'a, 'tcx, CTX, Q>(
|
||||||
tcx: TyCtxt<'tcx>,
|
tcx: CTX,
|
||||||
encoder: &mut CacheEncoder<'a, 'tcx, FileEncoder>,
|
encoder: &mut CacheEncoder<'a, 'tcx, FileEncoder>,
|
||||||
query_result_index: &mut EncodedQueryResultIndex,
|
query_result_index: &mut EncodedQueryResultIndex,
|
||||||
) -> FileEncodeResult
|
) -> FileEncodeResult
|
||||||
where
|
where
|
||||||
Q: super::QueryDescription<TyCtxt<'tcx>> + super::QueryAccessors<TyCtxt<'tcx>>,
|
CTX: QueryContext + 'tcx,
|
||||||
|
Q: super::QueryDescription<CTX> + super::QueryAccessors<CTX>,
|
||||||
Q::Value: Encodable<CacheEncoder<'a, 'tcx, FileEncoder>>,
|
Q::Value: Encodable<CacheEncoder<'a, 'tcx, FileEncoder>>,
|
||||||
{
|
{
|
||||||
let _timer = tcx
|
let _timer = tcx
|
||||||
.sess
|
.dep_context()
|
||||||
.prof
|
.profiler()
|
||||||
.extra_verbose_generic_activity("encode_query_results_for", std::any::type_name::<Q>());
|
.extra_verbose_generic_activity("encode_query_results_for", std::any::type_name::<Q>());
|
||||||
|
|
||||||
assert!(Q::query_state(tcx).all_inactive());
|
assert!(Q::query_state(tcx).all_inactive());
|
||||||
|
@ -1,613 +0,0 @@
|
|||||||
//! The implementation of the query system itself. This defines the macros that
|
|
||||||
//! generate the actual methods on tcx which find and execute the provider,
|
|
||||||
//! manage the caches, and so forth.
|
|
||||||
|
|
||||||
use crate::dep_graph::DepGraph;
|
|
||||||
use crate::ty::query::Query;
|
|
||||||
use crate::ty::tls::{self, ImplicitCtxt};
|
|
||||||
use crate::ty::{self, TyCtxt};
|
|
||||||
use rustc_query_system::query::QueryContext;
|
|
||||||
use rustc_query_system::query::{CycleError, QueryJobId, QueryJobInfo};
|
|
||||||
|
|
||||||
use rustc_data_structures::fx::FxHashMap;
|
|
||||||
use rustc_data_structures::sync::Lock;
|
|
||||||
use rustc_data_structures::thin_vec::ThinVec;
|
|
||||||
use rustc_errors::{struct_span_err, Diagnostic, DiagnosticBuilder, Handler, Level};
|
|
||||||
use rustc_span::def_id::DefId;
|
|
||||||
use rustc_span::Span;
|
|
||||||
|
|
||||||
impl QueryContext for TyCtxt<'tcx> {
|
|
||||||
type Query = Query<'tcx>;
|
|
||||||
|
|
||||||
fn incremental_verify_ich(&self) -> bool {
|
|
||||||
self.sess.opts.debugging_opts.incremental_verify_ich
|
|
||||||
}
|
|
||||||
fn verbose(&self) -> bool {
|
|
||||||
self.sess.verbose()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn def_path_str(&self, def_id: DefId) -> String {
|
|
||||||
TyCtxt::def_path_str(*self, def_id)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn dep_graph(&self) -> &DepGraph {
|
|
||||||
&self.dep_graph
|
|
||||||
}
|
|
||||||
|
|
||||||
fn current_query_job(&self) -> Option<QueryJobId<Self::DepKind>> {
|
|
||||||
tls::with_related_context(*self, |icx| icx.query)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn try_collect_active_jobs(
|
|
||||||
&self,
|
|
||||||
) -> Option<FxHashMap<QueryJobId<Self::DepKind>, QueryJobInfo<Self::DepKind, Self::Query>>>
|
|
||||||
{
|
|
||||||
self.queries.try_collect_active_jobs()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Executes a job by changing the `ImplicitCtxt` to point to the
|
|
||||||
/// new query job while it executes. It returns the diagnostics
|
|
||||||
/// captured during execution and the actual result.
|
|
||||||
#[inline(always)]
|
|
||||||
fn start_query<R>(
|
|
||||||
&self,
|
|
||||||
token: QueryJobId<Self::DepKind>,
|
|
||||||
diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
|
|
||||||
compute: impl FnOnce(Self) -> R,
|
|
||||||
) -> R {
|
|
||||||
// The `TyCtxt` stored in TLS has the same global interner lifetime
|
|
||||||
// as `self`, so we use `with_related_context` to relate the 'tcx lifetimes
|
|
||||||
// when accessing the `ImplicitCtxt`.
|
|
||||||
tls::with_related_context(*self, move |current_icx| {
|
|
||||||
// Update the `ImplicitCtxt` to point to our new query job.
|
|
||||||
let new_icx = ImplicitCtxt {
|
|
||||||
tcx: *self,
|
|
||||||
query: Some(token),
|
|
||||||
diagnostics,
|
|
||||||
layout_depth: current_icx.layout_depth,
|
|
||||||
task_deps: current_icx.task_deps,
|
|
||||||
};
|
|
||||||
|
|
||||||
// Use the `ImplicitCtxt` while we execute the query.
|
|
||||||
tls::enter_context(&new_icx, |_| {
|
|
||||||
rustc_data_structures::stack::ensure_sufficient_stack(|| compute(*self))
|
|
||||||
})
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<'tcx> TyCtxt<'tcx> {
|
|
||||||
#[inline(never)]
|
|
||||||
#[cold]
|
|
||||||
pub(super) fn report_cycle(
|
|
||||||
self,
|
|
||||||
CycleError { usage, cycle: stack }: CycleError<Query<'tcx>>,
|
|
||||||
) -> DiagnosticBuilder<'tcx> {
|
|
||||||
assert!(!stack.is_empty());
|
|
||||||
|
|
||||||
let fix_span = |span: Span, query: &Query<'tcx>| {
|
|
||||||
self.sess.source_map().guess_head_span(query.default_span(self, span))
|
|
||||||
};
|
|
||||||
|
|
||||||
// Disable naming impls with types in this path, since that
|
|
||||||
// sometimes cycles itself, leading to extra cycle errors.
|
|
||||||
// (And cycle errors around impls tend to occur during the
|
|
||||||
// collect/coherence phases anyhow.)
|
|
||||||
ty::print::with_forced_impl_filename_line(|| {
|
|
||||||
let span = fix_span(stack[1 % stack.len()].span, &stack[0].query);
|
|
||||||
let mut err = struct_span_err!(
|
|
||||||
self.sess,
|
|
||||||
span,
|
|
||||||
E0391,
|
|
||||||
"cycle detected when {}",
|
|
||||||
stack[0].query.describe(self)
|
|
||||||
);
|
|
||||||
|
|
||||||
for i in 1..stack.len() {
|
|
||||||
let query = &stack[i].query;
|
|
||||||
let span = fix_span(stack[(i + 1) % stack.len()].span, query);
|
|
||||||
err.span_note(span, &format!("...which requires {}...", query.describe(self)));
|
|
||||||
}
|
|
||||||
|
|
||||||
err.note(&format!(
|
|
||||||
"...which again requires {}, completing the cycle",
|
|
||||||
stack[0].query.describe(self)
|
|
||||||
));
|
|
||||||
|
|
||||||
if let Some((span, query)) = usage {
|
|
||||||
err.span_note(
|
|
||||||
fix_span(span, &query),
|
|
||||||
&format!("cycle used when {}", query.describe(self)),
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
err
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn try_print_query_stack(handler: &Handler, num_frames: Option<usize>) {
|
|
||||||
eprintln!("query stack during panic:");
|
|
||||||
|
|
||||||
// Be careful relying on global state here: this code is called from
|
|
||||||
// a panic hook, which means that the global `Handler` may be in a weird
|
|
||||||
// state if it was responsible for triggering the panic.
|
|
||||||
let mut i = 0;
|
|
||||||
ty::tls::with_context_opt(|icx| {
|
|
||||||
if let Some(icx) = icx {
|
|
||||||
let query_map = icx.tcx.queries.try_collect_active_jobs();
|
|
||||||
|
|
||||||
let mut current_query = icx.query;
|
|
||||||
|
|
||||||
while let Some(query) = current_query {
|
|
||||||
if Some(i) == num_frames {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
let query_info =
|
|
||||||
if let Some(info) = query_map.as_ref().and_then(|map| map.get(&query)) {
|
|
||||||
info
|
|
||||||
} else {
|
|
||||||
break;
|
|
||||||
};
|
|
||||||
let mut diag = Diagnostic::new(
|
|
||||||
Level::FailureNote,
|
|
||||||
&format!(
|
|
||||||
"#{} [{}] {}",
|
|
||||||
i,
|
|
||||||
query_info.info.query.name(),
|
|
||||||
query_info.info.query.describe(icx.tcx)
|
|
||||||
),
|
|
||||||
);
|
|
||||||
diag.span =
|
|
||||||
icx.tcx.sess.source_map().guess_head_span(query_info.info.span).into();
|
|
||||||
handler.force_print_diagnostic(diag);
|
|
||||||
|
|
||||||
current_query = query_info.job.parent;
|
|
||||||
i += 1;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
if num_frames == None || num_frames >= Some(i) {
|
|
||||||
eprintln!("end of query stack");
|
|
||||||
} else {
|
|
||||||
eprintln!("we're just showing a limited slice of the query stack");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
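The `try_print_query_stack` code above walks the chain of active query jobs through their `parent` links, printing one frame per job until it reaches the root or the requested frame limit. A rough standalone sketch of that traversal shape follows; `Job` and the job map are stand-ins, not rustc's types.

// Sketch: walk a parent-linked chain of jobs, stopping at a frame limit.
use std::collections::HashMap;

struct Job {
    name: &'static str,
    parent: Option<u32>,
}

fn print_stack(jobs: &HashMap<u32, Job>, start: Option<u32>, num_frames: Option<usize>) -> usize {
    let mut i = 0;
    let mut current = start;
    while let Some(id) = current {
        // Stop once we have printed the requested number of frames.
        if Some(i) == num_frames {
            break;
        }
        let job = match jobs.get(&id) {
            Some(job) => job,
            None => break,
        };
        eprintln!("#{} {}", i, job.name);
        current = job.parent;
        i += 1;
    }
    i
}

fn main() {
    let mut jobs = HashMap::new();
    jobs.insert(2, Job { name: "typeck", parent: Some(1) });
    jobs.insert(1, Job { name: "mir_built", parent: Some(0) });
    jobs.insert(0, Job { name: "optimized_mir", parent: None });
    assert_eq!(print_stack(&jobs, Some(2), None), 3);
}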
macro_rules! handle_cycle_error {
|
|
||||||
([][$tcx: expr, $error:expr]) => {{
|
|
||||||
$tcx.report_cycle($error).emit();
|
|
||||||
Value::from_cycle_error($tcx)
|
|
||||||
}};
|
|
||||||
([fatal_cycle $($rest:tt)*][$tcx:expr, $error:expr]) => {{
|
|
||||||
$tcx.report_cycle($error).emit();
|
|
||||||
$tcx.sess.abort_if_errors();
|
|
||||||
unreachable!()
|
|
||||||
}};
|
|
||||||
([cycle_delay_bug $($rest:tt)*][$tcx:expr, $error:expr]) => {{
|
|
||||||
$tcx.report_cycle($error).delay_as_bug();
|
|
||||||
Value::from_cycle_error($tcx)
|
|
||||||
}};
|
|
||||||
([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*][$($args:tt)*]) => {
|
|
||||||
handle_cycle_error!([$($($modifiers)*)*][$($args)*])
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
macro_rules! is_anon {
|
|
||||||
([]) => {{
|
|
||||||
false
|
|
||||||
}};
|
|
||||||
([anon $($rest:tt)*]) => {{
|
|
||||||
true
|
|
||||||
}};
|
|
||||||
([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*]) => {
|
|
||||||
is_anon!([$($($modifiers)*)*])
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
macro_rules! is_eval_always {
|
|
||||||
([]) => {{
|
|
||||||
false
|
|
||||||
}};
|
|
||||||
([eval_always $($rest:tt)*]) => {{
|
|
||||||
true
|
|
||||||
}};
|
|
||||||
([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*]) => {
|
|
||||||
is_eval_always!([$($($modifiers)*)*])
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
macro_rules! query_storage {
|
|
||||||
([][$K:ty, $V:ty]) => {
|
|
||||||
<<$K as Key>::CacheSelector as CacheSelector<$K, $V>>::Cache
|
|
||||||
};
|
|
||||||
([storage($ty:ty) $($rest:tt)*][$K:ty, $V:ty]) => {
|
|
||||||
<$ty as CacheSelector<$K, $V>>::Cache
|
|
||||||
};
|
|
||||||
([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*][$($args:tt)*]) => {
|
|
||||||
query_storage!([$($($modifiers)*)*][$($args)*])
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
macro_rules! hash_result {
|
|
||||||
([][$hcx:expr, $result:expr]) => {{
|
|
||||||
dep_graph::hash_result($hcx, &$result)
|
|
||||||
}};
|
|
||||||
([no_hash $($rest:tt)*][$hcx:expr, $result:expr]) => {{
|
|
||||||
None
|
|
||||||
}};
|
|
||||||
([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*][$($args:tt)*]) => {
|
|
||||||
hash_result!([$($($modifiers)*)*][$($args)*])
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
macro_rules! query_helper_param_ty {
|
|
||||||
(DefId) => { impl IntoQueryParam<DefId> };
|
|
||||||
($K:ty) => { $K };
|
|
||||||
}
|
|
||||||
|
|
||||||
macro_rules! define_queries {
|
|
||||||
(<$tcx:tt>
|
|
||||||
$($(#[$attr:meta])*
|
|
||||||
[$($modifiers:tt)*] fn $name:ident($($K:tt)*) -> $V:ty,)*) => {
|
|
||||||
|
|
||||||
use std::mem;
|
|
||||||
use crate::{
|
|
||||||
rustc_data_structures::stable_hasher::HashStable,
|
|
||||||
rustc_data_structures::stable_hasher::StableHasher,
|
|
||||||
ich::StableHashingContext
|
|
||||||
};
|
|
||||||
|
|
||||||
define_queries_struct! {
|
|
||||||
tcx: $tcx,
|
|
||||||
input: ($(([$($modifiers)*] [$($attr)*] [$name]))*)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[allow(nonstandard_style)]
|
|
||||||
#[derive(Clone, Debug)]
|
|
||||||
pub enum Query<$tcx> {
|
|
||||||
$($(#[$attr])* $name($($K)*)),*
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<$tcx> Query<$tcx> {
|
|
||||||
pub fn name(&self) -> &'static str {
|
|
||||||
match *self {
|
|
||||||
$(Query::$name(_) => stringify!($name),)*
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn describe(&self, tcx: TyCtxt<$tcx>) -> String {
|
|
||||||
let (r, name) = match *self {
|
|
||||||
$(Query::$name(key) => {
|
|
||||||
(queries::$name::describe(tcx, key), stringify!($name))
|
|
||||||
})*
|
|
||||||
};
|
|
||||||
if tcx.sess.verbose() {
|
|
||||||
format!("{} [{}]", r, name)
|
|
||||||
} else {
|
|
||||||
r
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// FIXME(eddyb) Get more valid `Span`s on queries.
|
|
||||||
pub fn default_span(&self, tcx: TyCtxt<$tcx>, span: Span) -> Span {
|
|
||||||
if !span.is_dummy() {
|
|
||||||
return span;
|
|
||||||
}
|
|
||||||
// The `def_span` query is used to calculate `default_span`,
|
|
||||||
// so exit to avoid infinite recursion.
|
|
||||||
if let Query::def_span(..) = *self {
|
|
||||||
return span
|
|
||||||
}
|
|
||||||
match *self {
|
|
||||||
$(Query::$name(key) => key.default_span(tcx),)*
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<'a, $tcx> HashStable<StableHashingContext<'a>> for Query<$tcx> {
|
|
||||||
fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
|
|
||||||
mem::discriminant(self).hash_stable(hcx, hasher);
|
|
||||||
match *self {
|
|
||||||
$(Query::$name(key) => key.hash_stable(hcx, hasher),)*
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[allow(nonstandard_style)]
|
|
||||||
pub mod queries {
|
|
||||||
use std::marker::PhantomData;
|
|
||||||
|
|
||||||
$(pub struct $name<$tcx> {
|
|
||||||
data: PhantomData<&$tcx ()>
|
|
||||||
})*
|
|
||||||
}
|
|
||||||
|
|
||||||
// HACK(eddyb) this is like the `impl QueryConfig for queries::$name`
|
|
||||||
// below, but using type aliases instead of associated types, to bypass
|
|
||||||
// the limitations around normalizing under HRTB - for example, this:
|
|
||||||
// `for<'tcx> fn(...) -> <queries::$name<'tcx> as QueryConfig<TyCtxt<'tcx>>>::Value`
|
|
||||||
// doesn't currently normalize to `for<'tcx> fn(...) -> query_values::$name<'tcx>`.
|
|
||||||
// This is primarily used by the `provide!` macro in `rustc_metadata`.
|
|
||||||
#[allow(nonstandard_style, unused_lifetimes)]
|
|
||||||
pub mod query_keys {
|
|
||||||
use super::*;
|
|
||||||
|
|
||||||
$(pub type $name<$tcx> = $($K)*;)*
|
|
||||||
}
|
|
||||||
#[allow(nonstandard_style, unused_lifetimes)]
|
|
||||||
pub mod query_values {
|
|
||||||
use super::*;
|
|
||||||
|
|
||||||
$(pub type $name<$tcx> = $V;)*
|
|
||||||
}
|
|
||||||
#[allow(nonstandard_style, unused_lifetimes)]
|
|
||||||
pub mod query_storage {
|
|
||||||
use super::*;
|
|
||||||
|
|
||||||
$(pub type $name<$tcx> = query_storage!([$($modifiers)*][$($K)*, $V]);)*
|
|
||||||
}
|
|
||||||
#[allow(nonstandard_style, unused_lifetimes)]
|
|
||||||
pub mod query_stored {
|
|
||||||
use super::*;
|
|
||||||
|
|
||||||
$(pub type $name<$tcx> = <query_storage::$name<$tcx> as QueryStorage>::Stored;)*
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Default)]
|
|
||||||
pub struct QueryCaches<$tcx> {
|
|
||||||
$($(#[$attr])* $name: QueryCacheStore<query_storage::$name<$tcx>>,)*
|
|
||||||
}
|
|
||||||
|
|
||||||
$(impl<$tcx> QueryConfig for queries::$name<$tcx> {
|
|
||||||
type Key = $($K)*;
|
|
||||||
type Value = $V;
|
|
||||||
type Stored = query_stored::$name<$tcx>;
|
|
||||||
const NAME: &'static str = stringify!($name);
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<$tcx> QueryAccessors<TyCtxt<$tcx>> for queries::$name<$tcx> {
|
|
||||||
const ANON: bool = is_anon!([$($modifiers)*]);
|
|
||||||
const EVAL_ALWAYS: bool = is_eval_always!([$($modifiers)*]);
|
|
||||||
const DEP_KIND: dep_graph::DepKind = dep_graph::DepKind::$name;
|
|
||||||
|
|
||||||
type Cache = query_storage::$name<$tcx>;
|
|
||||||
|
|
||||||
#[inline(always)]
|
|
||||||
fn query_state<'a>(tcx: TyCtxt<$tcx>) -> &'a QueryState<crate::dep_graph::DepKind, Query<$tcx>, Self::Key> {
|
|
||||||
&tcx.queries.$name
|
|
||||||
}
|
|
||||||
|
|
||||||
#[inline(always)]
|
|
||||||
fn query_cache<'a>(tcx: TyCtxt<$tcx>) -> &'a QueryCacheStore<Self::Cache>
|
|
||||||
where 'tcx:'a
|
|
||||||
{
|
|
||||||
&tcx.query_caches.$name
|
|
||||||
}
|
|
||||||
|
|
||||||
#[inline]
|
|
||||||
fn compute(tcx: TyCtxt<'tcx>, key: Self::Key) -> Self::Value {
|
|
||||||
let provider = tcx.queries.providers.get(key.query_crate())
|
|
||||||
// HACK(eddyb) it's possible crates may be loaded after
|
|
||||||
// the query engine is created, and because crate loading
|
|
||||||
// is not yet integrated with the query engine, such crates
|
|
||||||
// would be missing appropriate entries in `providers`.
|
|
||||||
.unwrap_or(&tcx.queries.fallback_extern_providers)
|
|
||||||
.$name;
|
|
||||||
provider(tcx, key)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn hash_result(
|
|
||||||
_hcx: &mut StableHashingContext<'_>,
|
|
||||||
_result: &Self::Value
|
|
||||||
) -> Option<Fingerprint> {
|
|
||||||
hash_result!([$($modifiers)*][_hcx, _result])
|
|
||||||
}
|
|
||||||
|
|
||||||
fn handle_cycle_error(
|
|
||||||
tcx: TyCtxt<'tcx>,
|
|
||||||
error: CycleError<Query<'tcx>>
|
|
||||||
) -> Self::Value {
|
|
||||||
handle_cycle_error!([$($modifiers)*][tcx, error])
|
|
||||||
}
|
|
||||||
})*
|
|
||||||
|
|
||||||
#[derive(Copy, Clone)]
|
|
||||||
pub struct TyCtxtEnsure<'tcx> {
|
|
||||||
pub tcx: TyCtxt<'tcx>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl TyCtxtEnsure<$tcx> {
|
|
||||||
$($(#[$attr])*
|
|
||||||
#[inline(always)]
|
|
||||||
pub fn $name(self, key: query_helper_param_ty!($($K)*)) {
|
|
||||||
let key = key.into_query_param();
|
|
||||||
let cached = try_get_cached(self.tcx, &self.tcx.query_caches.$name, &key, |_| {});
|
|
||||||
|
|
||||||
let lookup = match cached {
|
|
||||||
Ok(()) => return,
|
|
||||||
Err(lookup) => lookup,
|
|
||||||
};
|
|
||||||
|
|
||||||
get_query::<queries::$name<'_>, _>(self.tcx, DUMMY_SP, key, lookup, QueryMode::Ensure);
|
|
||||||
})*
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Copy, Clone)]
|
|
||||||
pub struct TyCtxtAt<'tcx> {
|
|
||||||
pub tcx: TyCtxt<'tcx>,
|
|
||||||
pub span: Span,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Deref for TyCtxtAt<'tcx> {
|
|
||||||
type Target = TyCtxt<'tcx>;
|
|
||||||
#[inline(always)]
|
|
||||||
fn deref(&self) -> &Self::Target {
|
|
||||||
&self.tcx
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl TyCtxt<$tcx> {
|
|
||||||
/// Returns a transparent wrapper for `TyCtxt`, which ensures queries
|
|
||||||
/// are executed instead of just returning their results.
|
|
||||||
#[inline(always)]
|
|
||||||
pub fn ensure(self) -> TyCtxtEnsure<$tcx> {
|
|
||||||
TyCtxtEnsure {
|
|
||||||
tcx: self,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns a transparent wrapper for `TyCtxt` which uses
|
|
||||||
/// `span` as the location of queries performed through it.
|
|
||||||
#[inline(always)]
|
|
||||||
pub fn at(self, span: Span) -> TyCtxtAt<$tcx> {
|
|
||||||
TyCtxtAt {
|
|
||||||
tcx: self,
|
|
||||||
span
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
$($(#[$attr])*
|
|
||||||
#[inline(always)]
|
|
||||||
#[must_use]
|
|
||||||
pub fn $name(self, key: query_helper_param_ty!($($K)*)) -> query_stored::$name<$tcx>
|
|
||||||
{
|
|
||||||
self.at(DUMMY_SP).$name(key)
|
|
||||||
})*
|
|
||||||
|
|
||||||
/// All self-profiling events generated by the query engine use
|
|
||||||
/// virtual `StringId`s for their `event_id`. This method makes all
|
|
||||||
/// those virtual `StringId`s point to actual strings.
|
|
||||||
///
|
|
||||||
/// If we are recording only summary data, the ids will point to
|
|
||||||
/// just the query names. If we are recording query keys too, we
|
|
||||||
/// allocate the corresponding strings here.
|
|
||||||
pub fn alloc_self_profile_query_strings(self) {
|
|
||||||
use crate::ty::query::profiling_support::{
|
|
||||||
alloc_self_profile_query_strings_for_query_cache,
|
|
||||||
QueryKeyStringCache,
|
|
||||||
};
|
|
||||||
|
|
||||||
if !self.prof.enabled() {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut string_cache = QueryKeyStringCache::new();
|
|
||||||
|
|
||||||
$({
|
|
||||||
alloc_self_profile_query_strings_for_query_cache(
|
|
||||||
self,
|
|
||||||
stringify!($name),
|
|
||||||
&self.query_caches.$name,
|
|
||||||
&mut string_cache,
|
|
||||||
);
|
|
||||||
})*
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl TyCtxtAt<$tcx> {
|
|
||||||
$($(#[$attr])*
|
|
||||||
#[inline(always)]
|
|
||||||
pub fn $name(self, key: query_helper_param_ty!($($K)*)) -> query_stored::$name<$tcx>
|
|
||||||
{
|
|
||||||
let key = key.into_query_param();
|
|
||||||
let cached = try_get_cached(self.tcx, &self.tcx.query_caches.$name, &key, |value| {
|
|
||||||
value.clone()
|
|
||||||
});
|
|
||||||
|
|
||||||
let lookup = match cached {
|
|
||||||
Ok(value) => return value,
|
|
||||||
Err(lookup) => lookup,
|
|
||||||
};
|
|
||||||
|
|
||||||
get_query::<queries::$name<'_>, _>(self.tcx, self.span, key, lookup, QueryMode::Get).unwrap()
|
|
||||||
})*
|
|
||||||
}
|
|
||||||
|
|
||||||
define_provider_struct! {
|
|
||||||
tcx: $tcx,
|
|
||||||
input: ($(([$($modifiers)*] [$name] [$($K)*] [$V]))*)
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Copy for Providers {}
|
|
||||||
impl Clone for Providers {
|
|
||||||
fn clone(&self) -> Self { *self }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// FIXME(eddyb) this macro (and others?) use `$tcx` and `'tcx` interchangeably.
|
|
||||||
// We should either not take `$tcx` at all and use `'tcx` everywhere, or use
|
|
||||||
// `$tcx` everywhere (even if that isn't necessary due to lack of hygiene).
|
|
||||||
macro_rules! define_queries_struct {
|
|
||||||
(tcx: $tcx:tt,
|
|
||||||
input: ($(([$($modifiers:tt)*] [$($attr:tt)*] [$name:ident]))*)) => {
|
|
||||||
pub struct Queries<$tcx> {
|
|
||||||
/// This provides access to the incremental compilation on-disk cache for query results.
|
|
||||||
/// Do not access this directly. It is only meant to be used by
|
|
||||||
/// `DepGraph::try_mark_green()` and the query infrastructure.
|
|
||||||
/// This is `None` if we are not incremental compilation mode
|
|
||||||
pub(crate) on_disk_cache: Option<OnDiskCache<'tcx>>,
|
|
||||||
|
|
||||||
providers: IndexVec<CrateNum, Providers>,
|
|
||||||
fallback_extern_providers: Box<Providers>,
|
|
||||||
|
|
||||||
$($(#[$attr])* $name: QueryState<
|
|
||||||
crate::dep_graph::DepKind,
|
|
||||||
Query<$tcx>,
|
|
||||||
query_keys::$name<$tcx>,
|
|
||||||
>,)*
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<$tcx> Queries<$tcx> {
|
|
||||||
pub(crate) fn new(
|
|
||||||
providers: IndexVec<CrateNum, Providers>,
|
|
||||||
fallback_extern_providers: Providers,
|
|
||||||
on_disk_cache: Option<OnDiskCache<'tcx>>,
|
|
||||||
) -> Self {
|
|
||||||
Queries {
|
|
||||||
providers,
|
|
||||||
fallback_extern_providers: Box::new(fallback_extern_providers),
|
|
||||||
on_disk_cache,
|
|
||||||
$($name: Default::default()),*
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(crate) fn try_collect_active_jobs(
|
|
||||||
&self
|
|
||||||
) -> Option<FxHashMap<QueryJobId<crate::dep_graph::DepKind>, QueryJobInfo<crate::dep_graph::DepKind, <TyCtxt<$tcx> as QueryContext>::Query>>> {
|
|
||||||
let mut jobs = FxHashMap::default();
|
|
||||||
|
|
||||||
$(
|
|
||||||
self.$name.try_collect_active_jobs(
|
|
||||||
<queries::$name<'tcx> as QueryAccessors<TyCtxt<'tcx>>>::DEP_KIND,
|
|
||||||
Query::$name,
|
|
||||||
&mut jobs,
|
|
||||||
)?;
|
|
||||||
)*
|
|
||||||
|
|
||||||
Some(jobs)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
macro_rules! define_provider_struct {
|
|
||||||
(tcx: $tcx:tt,
|
|
||||||
input: ($(([$($modifiers:tt)*] [$name:ident] [$K:ty] [$R:ty]))*)) => {
|
|
||||||
pub struct Providers {
|
|
||||||
$(pub $name: for<$tcx> fn(TyCtxt<$tcx>, $K) -> $R,)*
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Default for Providers {
|
|
||||||
fn default() -> Self {
|
|
||||||
$(fn $name<$tcx>(_: TyCtxt<$tcx>, key: $K) -> $R {
|
|
||||||
bug!("`tcx.{}({:?})` unsupported by its crate",
|
|
||||||
stringify!($name), key);
|
|
||||||
})*
|
|
||||||
Providers { $($name),* }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
}
|
|
compiler/rustc_query_impl/Cargo.toml (new file)
@@ -0,0 +1,27 @@
+[package]
+authors = ["The Rust Project Developers"]
+name = "rustc_query_impl"
+version = "0.0.0"
+edition = "2018"
+
+[lib]
+doctest = false
+
+[dependencies]
+measureme = "9.0.0"
+rustc-rayon-core = "0.3.0"
+tracing = "0.1"
+rustc_ast = { path = "../rustc_ast" }
+rustc_attr = { path = "../rustc_attr" }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_errors = { path = "../rustc_errors" }
+rustc_feature = { path = "../rustc_feature" }
+rustc_hir = { path = "../rustc_hir" }
+rustc_index = { path = "../rustc_index" }
+rustc_macros = { path = "../rustc_macros" }
+rustc_middle = { path = "../rustc_middle" }
+rustc_query_system = { path = "../rustc_query_system" }
+rustc_span = { path = "../rustc_span" }
+rustc_serialize = { path = "../rustc_serialize" }
+rustc_session = { path = "../rustc_session" }
+rustc_target = { path = "../rustc_target" }
@@ -1,20 +1,17 @@
 //! Defines the set of legal keys that can be used in queries.
 
-use crate::infer::canonical::Canonical;
-use crate::mir;
-use crate::ty::fast_reject::SimplifiedType;
-use crate::ty::subst::{GenericArg, SubstsRef};
-use crate::ty::{self, Ty, TyCtxt};
 use rustc_hir::def_id::{CrateNum, DefId, LocalDefId, LOCAL_CRATE};
-use rustc_query_system::query::DefaultCacheSelector;
+use rustc_middle::infer::canonical::Canonical;
+use rustc_middle::mir;
+use rustc_middle::ty::fast_reject::SimplifiedType;
+use rustc_middle::ty::subst::{GenericArg, SubstsRef};
+use rustc_middle::ty::{self, Ty, TyCtxt};
 use rustc_span::symbol::{Ident, Symbol};
 use rustc_span::{Span, DUMMY_SP};
 
 /// The `Key` trait controls what types can legally be used as the key
 /// for a query.
 pub trait Key {
-    type CacheSelector;
-
     /// Given an instance of this key, what crate is it referring to?
     /// This is used to find the provider.
     fn query_crate(&self) -> CrateNum;
@@ -25,8 +22,6 @@ pub trait Key {
 }
 
 impl<'tcx> Key for ty::InstanceDef<'tcx> {
-    type CacheSelector = DefaultCacheSelector;
-
     fn query_crate(&self) -> CrateNum {
         LOCAL_CRATE
     }
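As the doc comment says, `Key::query_crate` tells the query system which crate a key refers to, and that crate number is what selects the `Providers` table used to compute the query. A simplified sketch of that lookup follows, using stand-in types rather than rustc's; the tuple key, `Providers` struct, and `run_query` helper here are illustrative assumptions.

// Sketch: the key's crate index picks the provider set, the query name picks
// the function pointer, mirroring how `query_crate` is used to find providers.
use std::collections::HashMap;

trait Key {
    fn query_crate(&self) -> u32;
}

impl Key for (u32, &'static str) {
    fn query_crate(&self) -> u32 {
        self.0
    }
}

struct Providers {
    type_of: fn(&'static str) -> String,
}

fn run_query(providers: &HashMap<u32, Providers>, key: (u32, &'static str)) -> String {
    let p = &providers[&key.query_crate()];
    (p.type_of)(key.1)
}

fn main() {
    let mut providers = HashMap::new();
    providers.insert(0, Providers { type_of: |name| format!("local type of {}", name) });
    providers.insert(1, Providers { type_of: |name| format!("extern type of {}", name) });
    assert_eq!(run_query(&providers, (1, "Foo")), "extern type of Foo");
}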
@ -37,8 +32,6 @@ impl<'tcx> Key for ty::InstanceDef<'tcx> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<'tcx> Key for ty::Instance<'tcx> {
|
impl<'tcx> Key for ty::Instance<'tcx> {
|
||||||
type CacheSelector = DefaultCacheSelector;
|
|
||||||
|
|
||||||
fn query_crate(&self) -> CrateNum {
|
fn query_crate(&self) -> CrateNum {
|
||||||
LOCAL_CRATE
|
LOCAL_CRATE
|
||||||
}
|
}
|
||||||
@ -49,8 +42,6 @@ impl<'tcx> Key for ty::Instance<'tcx> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<'tcx> Key for mir::interpret::GlobalId<'tcx> {
|
impl<'tcx> Key for mir::interpret::GlobalId<'tcx> {
|
||||||
type CacheSelector = DefaultCacheSelector;
|
|
||||||
|
|
||||||
fn query_crate(&self) -> CrateNum {
|
fn query_crate(&self) -> CrateNum {
|
||||||
self.instance.query_crate()
|
self.instance.query_crate()
|
||||||
}
|
}
|
||||||
@ -61,8 +52,6 @@ impl<'tcx> Key for mir::interpret::GlobalId<'tcx> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<'tcx> Key for mir::interpret::LitToConstInput<'tcx> {
|
impl<'tcx> Key for mir::interpret::LitToConstInput<'tcx> {
|
||||||
type CacheSelector = DefaultCacheSelector;
|
|
||||||
|
|
||||||
fn query_crate(&self) -> CrateNum {
|
fn query_crate(&self) -> CrateNum {
|
||||||
LOCAL_CRATE
|
LOCAL_CRATE
|
||||||
}
|
}
|
||||||
@ -73,8 +62,6 @@ impl<'tcx> Key for mir::interpret::LitToConstInput<'tcx> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl Key for CrateNum {
|
impl Key for CrateNum {
|
||||||
type CacheSelector = DefaultCacheSelector;
|
|
||||||
|
|
||||||
fn query_crate(&self) -> CrateNum {
|
fn query_crate(&self) -> CrateNum {
|
||||||
*self
|
*self
|
||||||
}
|
}
|
||||||
@ -84,8 +71,6 @@ impl Key for CrateNum {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl Key for LocalDefId {
|
impl Key for LocalDefId {
|
||||||
type CacheSelector = DefaultCacheSelector;
|
|
||||||
|
|
||||||
fn query_crate(&self) -> CrateNum {
|
fn query_crate(&self) -> CrateNum {
|
||||||
self.to_def_id().query_crate()
|
self.to_def_id().query_crate()
|
||||||
}
|
}
|
||||||
@ -95,8 +80,6 @@ impl Key for LocalDefId {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl Key for DefId {
|
impl Key for DefId {
|
||||||
type CacheSelector = DefaultCacheSelector;
|
|
||||||
|
|
||||||
fn query_crate(&self) -> CrateNum {
|
fn query_crate(&self) -> CrateNum {
|
||||||
self.krate
|
self.krate
|
||||||
}
|
}
|
||||||
@ -106,8 +89,6 @@ impl Key for DefId {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl Key for ty::WithOptConstParam<LocalDefId> {
|
impl Key for ty::WithOptConstParam<LocalDefId> {
|
||||||
type CacheSelector = DefaultCacheSelector;
|
|
||||||
|
|
||||||
fn query_crate(&self) -> CrateNum {
|
fn query_crate(&self) -> CrateNum {
|
||||||
self.did.query_crate()
|
self.did.query_crate()
|
||||||
}
|
}
|
||||||
@ -117,8 +98,6 @@ impl Key for ty::WithOptConstParam<LocalDefId> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl Key for (DefId, DefId) {
|
impl Key for (DefId, DefId) {
|
||||||
type CacheSelector = DefaultCacheSelector;
|
|
||||||
|
|
||||||
fn query_crate(&self) -> CrateNum {
|
fn query_crate(&self) -> CrateNum {
|
||||||
self.0.krate
|
self.0.krate
|
||||||
}
|
}
|
||||||
@ -128,8 +107,6 @@ impl Key for (DefId, DefId) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl Key for (ty::Instance<'tcx>, LocalDefId) {
|
impl Key for (ty::Instance<'tcx>, LocalDefId) {
|
||||||
type CacheSelector = DefaultCacheSelector;
|
|
||||||
|
|
||||||
fn query_crate(&self) -> CrateNum {
|
fn query_crate(&self) -> CrateNum {
|
||||||
self.0.query_crate()
|
self.0.query_crate()
|
||||||
}
|
}
|
||||||
@ -139,8 +116,6 @@ impl Key for (ty::Instance<'tcx>, LocalDefId) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl Key for (DefId, LocalDefId) {
|
impl Key for (DefId, LocalDefId) {
|
||||||
type CacheSelector = DefaultCacheSelector;
|
|
||||||
|
|
||||||
fn query_crate(&self) -> CrateNum {
|
fn query_crate(&self) -> CrateNum {
|
||||||
self.0.krate
|
self.0.krate
|
||||||
}
|
}
|
||||||
@ -150,8 +125,6 @@ impl Key for (DefId, LocalDefId) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl Key for (LocalDefId, DefId) {
|
impl Key for (LocalDefId, DefId) {
|
||||||
type CacheSelector = DefaultCacheSelector;
|
|
||||||
|
|
||||||
fn query_crate(&self) -> CrateNum {
|
fn query_crate(&self) -> CrateNum {
|
||||||
LOCAL_CRATE
|
LOCAL_CRATE
|
||||||
}
|
}
|
||||||
@ -161,8 +134,6 @@ impl Key for (LocalDefId, DefId) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl Key for (DefId, Option<Ident>) {
|
impl Key for (DefId, Option<Ident>) {
|
||||||
type CacheSelector = DefaultCacheSelector;
|
|
||||||
|
|
||||||
fn query_crate(&self) -> CrateNum {
|
fn query_crate(&self) -> CrateNum {
|
||||||
self.0.krate
|
self.0.krate
|
||||||
}
|
}
|
||||||
@ -172,8 +143,6 @@ impl Key for (DefId, Option<Ident>) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl Key for (DefId, LocalDefId, Ident) {
|
impl Key for (DefId, LocalDefId, Ident) {
|
||||||
type CacheSelector = DefaultCacheSelector;
|
|
||||||
|
|
||||||
fn query_crate(&self) -> CrateNum {
|
fn query_crate(&self) -> CrateNum {
|
||||||
self.0.krate
|
self.0.krate
|
||||||
}
|
}
|
||||||
@ -183,8 +152,6 @@ impl Key for (DefId, LocalDefId, Ident) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl Key for (CrateNum, DefId) {
|
impl Key for (CrateNum, DefId) {
|
||||||
type CacheSelector = DefaultCacheSelector;
|
|
||||||
|
|
||||||
fn query_crate(&self) -> CrateNum {
|
fn query_crate(&self) -> CrateNum {
|
||||||
self.0
|
self.0
|
||||||
}
|
}
|
||||||
@ -194,8 +161,6 @@ impl Key for (CrateNum, DefId) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl Key for (DefId, SimplifiedType) {
|
impl Key for (DefId, SimplifiedType) {
|
||||||
type CacheSelector = DefaultCacheSelector;
|
|
||||||
|
|
||||||
fn query_crate(&self) -> CrateNum {
|
fn query_crate(&self) -> CrateNum {
|
||||||
self.0.krate
|
self.0.krate
|
||||||
}
|
}
|
||||||
@ -205,8 +170,6 @@ impl Key for (DefId, SimplifiedType) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<'tcx> Key for SubstsRef<'tcx> {
|
impl<'tcx> Key for SubstsRef<'tcx> {
|
||||||
type CacheSelector = DefaultCacheSelector;
|
|
||||||
|
|
||||||
fn query_crate(&self) -> CrateNum {
|
fn query_crate(&self) -> CrateNum {
|
||||||
LOCAL_CRATE
|
LOCAL_CRATE
|
||||||
}
|
}
|
||||||
@ -216,8 +179,6 @@ impl<'tcx> Key for SubstsRef<'tcx> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<'tcx> Key for (DefId, SubstsRef<'tcx>) {
|
impl<'tcx> Key for (DefId, SubstsRef<'tcx>) {
|
||||||
type CacheSelector = DefaultCacheSelector;
|
|
||||||
|
|
||||||
fn query_crate(&self) -> CrateNum {
|
fn query_crate(&self) -> CrateNum {
|
||||||
self.0.krate
|
self.0.krate
|
||||||
}
|
}
|
||||||
@ -232,8 +193,6 @@ impl<'tcx> Key
|
|||||||
(ty::WithOptConstParam<DefId>, SubstsRef<'tcx>),
|
(ty::WithOptConstParam<DefId>, SubstsRef<'tcx>),
|
||||||
)
|
)
|
||||||
{
|
{
|
||||||
type CacheSelector = DefaultCacheSelector;
|
|
||||||
|
|
||||||
fn query_crate(&self) -> CrateNum {
|
fn query_crate(&self) -> CrateNum {
|
||||||
(self.0).0.did.krate
|
(self.0).0.did.krate
|
||||||
}
|
}
|
||||||
@ -243,8 +202,6 @@ impl<'tcx> Key
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<'tcx> Key for (LocalDefId, DefId, SubstsRef<'tcx>) {
|
impl<'tcx> Key for (LocalDefId, DefId, SubstsRef<'tcx>) {
|
||||||
type CacheSelector = DefaultCacheSelector;
|
|
||||||
|
|
||||||
fn query_crate(&self) -> CrateNum {
|
fn query_crate(&self) -> CrateNum {
|
||||||
LOCAL_CRATE
|
LOCAL_CRATE
|
||||||
}
|
}
|
||||||
@ -254,8 +211,6 @@ impl<'tcx> Key for (LocalDefId, DefId, SubstsRef<'tcx>) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<'tcx> Key for (ty::ParamEnv<'tcx>, ty::PolyTraitRef<'tcx>) {
|
impl<'tcx> Key for (ty::ParamEnv<'tcx>, ty::PolyTraitRef<'tcx>) {
|
||||||
type CacheSelector = DefaultCacheSelector;
|
|
||||||
|
|
||||||
fn query_crate(&self) -> CrateNum {
|
fn query_crate(&self) -> CrateNum {
|
||||||
self.1.def_id().krate
|
self.1.def_id().krate
|
||||||
}
|
}
|
||||||
@ -265,8 +220,6 @@ impl<'tcx> Key for (ty::ParamEnv<'tcx>, ty::PolyTraitRef<'tcx>) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<'tcx> Key for (&'tcx ty::Const<'tcx>, mir::Field) {
|
impl<'tcx> Key for (&'tcx ty::Const<'tcx>, mir::Field) {
|
||||||
type CacheSelector = DefaultCacheSelector;
|
|
||||||
|
|
||||||
fn query_crate(&self) -> CrateNum {
|
fn query_crate(&self) -> CrateNum {
|
||||||
LOCAL_CRATE
|
LOCAL_CRATE
|
||||||
}
|
}
|
||||||
@ -276,8 +229,6 @@ impl<'tcx> Key for (&'tcx ty::Const<'tcx>, mir::Field) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<'tcx> Key for ty::PolyTraitRef<'tcx> {
|
impl<'tcx> Key for ty::PolyTraitRef<'tcx> {
|
||||||
type CacheSelector = DefaultCacheSelector;
|
|
||||||
|
|
||||||
fn query_crate(&self) -> CrateNum {
|
fn query_crate(&self) -> CrateNum {
|
||||||
self.def_id().krate
|
self.def_id().krate
|
||||||
}
|
}
|
||||||
@ -287,8 +238,6 @@ impl<'tcx> Key for ty::PolyTraitRef<'tcx> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<'tcx> Key for GenericArg<'tcx> {
|
impl<'tcx> Key for GenericArg<'tcx> {
|
||||||
type CacheSelector = DefaultCacheSelector;
|
|
||||||
|
|
||||||
fn query_crate(&self) -> CrateNum {
|
fn query_crate(&self) -> CrateNum {
|
||||||
LOCAL_CRATE
|
LOCAL_CRATE
|
||||||
}
|
}
|
||||||
@ -298,8 +247,6 @@ impl<'tcx> Key for GenericArg<'tcx> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<'tcx> Key for &'tcx ty::Const<'tcx> {
|
impl<'tcx> Key for &'tcx ty::Const<'tcx> {
|
||||||
type CacheSelector = DefaultCacheSelector;
|
|
||||||
|
|
||||||
fn query_crate(&self) -> CrateNum {
|
fn query_crate(&self) -> CrateNum {
|
||||||
LOCAL_CRATE
|
LOCAL_CRATE
|
||||||
}
|
}
|
||||||
@ -309,8 +256,6 @@ impl<'tcx> Key for &'tcx ty::Const<'tcx> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<'tcx> Key for Ty<'tcx> {
|
impl<'tcx> Key for Ty<'tcx> {
|
||||||
type CacheSelector = DefaultCacheSelector;
|
|
||||||
|
|
||||||
fn query_crate(&self) -> CrateNum {
|
fn query_crate(&self) -> CrateNum {
|
||||||
LOCAL_CRATE
|
LOCAL_CRATE
|
||||||
}
|
}
|
||||||
@ -320,8 +265,6 @@ impl<'tcx> Key for Ty<'tcx> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<'tcx> Key for &'tcx ty::List<ty::Predicate<'tcx>> {
|
impl<'tcx> Key for &'tcx ty::List<ty::Predicate<'tcx>> {
|
||||||
type CacheSelector = DefaultCacheSelector;
|
|
||||||
|
|
||||||
fn query_crate(&self) -> CrateNum {
|
fn query_crate(&self) -> CrateNum {
|
||||||
LOCAL_CRATE
|
LOCAL_CRATE
|
||||||
}
|
}
|
||||||
@ -331,8 +274,6 @@ impl<'tcx> Key for &'tcx ty::List<ty::Predicate<'tcx>> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<'tcx> Key for ty::ParamEnv<'tcx> {
|
impl<'tcx> Key for ty::ParamEnv<'tcx> {
|
||||||
type CacheSelector = DefaultCacheSelector;
|
|
||||||
|
|
||||||
fn query_crate(&self) -> CrateNum {
|
fn query_crate(&self) -> CrateNum {
|
||||||
LOCAL_CRATE
|
LOCAL_CRATE
|
||||||
}
|
}
|
||||||
@ -342,8 +283,6 @@ impl<'tcx> Key for ty::ParamEnv<'tcx> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<'tcx, T: Key> Key for ty::ParamEnvAnd<'tcx, T> {
|
impl<'tcx, T: Key> Key for ty::ParamEnvAnd<'tcx, T> {
|
||||||
type CacheSelector = DefaultCacheSelector;
|
|
||||||
|
|
||||||
fn query_crate(&self) -> CrateNum {
|
fn query_crate(&self) -> CrateNum {
|
||||||
self.value.query_crate()
|
self.value.query_crate()
|
||||||
}
|
}
|
||||||
@ -353,8 +292,6 @@ impl<'tcx, T: Key> Key for ty::ParamEnvAnd<'tcx, T> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl Key for Symbol {
|
impl Key for Symbol {
|
||||||
type CacheSelector = DefaultCacheSelector;
|
|
||||||
|
|
||||||
fn query_crate(&self) -> CrateNum {
|
fn query_crate(&self) -> CrateNum {
|
||||||
LOCAL_CRATE
|
LOCAL_CRATE
|
||||||
}
|
}
|
||||||
@ -366,8 +303,6 @@ impl Key for Symbol {
|
|||||||
/// Canonical query goals correspond to abstract trait operations that
|
/// Canonical query goals correspond to abstract trait operations that
|
||||||
/// are not tied to any crate in particular.
|
/// are not tied to any crate in particular.
|
||||||
impl<'tcx, T> Key for Canonical<'tcx, T> {
|
impl<'tcx, T> Key for Canonical<'tcx, T> {
|
||||||
type CacheSelector = DefaultCacheSelector;
|
|
||||||
|
|
||||||
fn query_crate(&self) -> CrateNum {
|
fn query_crate(&self) -> CrateNum {
|
||||||
LOCAL_CRATE
|
LOCAL_CRATE
|
||||||
}
|
}
|
||||||
@ -378,8 +313,6 @@ impl<'tcx, T> Key for Canonical<'tcx, T> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl Key for (Symbol, u32, u32) {
|
impl Key for (Symbol, u32, u32) {
|
||||||
type CacheSelector = DefaultCacheSelector;
|
|
||||||
|
|
||||||
fn query_crate(&self) -> CrateNum {
|
fn query_crate(&self) -> CrateNum {
|
||||||
LOCAL_CRATE
|
LOCAL_CRATE
|
||||||
}
|
}
|
||||||
@ -390,8 +323,6 @@ impl Key for (Symbol, u32, u32) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<'tcx> Key for (DefId, Ty<'tcx>, SubstsRef<'tcx>, ty::ParamEnv<'tcx>) {
|
impl<'tcx> Key for (DefId, Ty<'tcx>, SubstsRef<'tcx>, ty::ParamEnv<'tcx>) {
|
||||||
type CacheSelector = DefaultCacheSelector;
|
|
||||||
|
|
||||||
fn query_crate(&self) -> CrateNum {
|
fn query_crate(&self) -> CrateNum {
|
||||||
LOCAL_CRATE
|
LOCAL_CRATE
|
||||||
}
|
}
|
compiler/rustc_query_impl/src/lib.rs (new file)
@@ -0,0 +1,65 @@
+//! Support for serializing the dep-graph and reloading it.
+
+#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
+#![feature(in_band_lifetimes)]
+#![feature(exhaustive_patterns)]
+#![feature(nll)]
+#![feature(min_specialization)]
+#![feature(crate_visibility_modifier)]
+#![feature(once_cell)]
+#![feature(rustc_attrs)]
+#![feature(never_type)]
+#![recursion_limit = "256"]
+
+#[macro_use]
+extern crate rustc_middle;
+#[macro_use]
+extern crate tracing;
+
+use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_errors::{Diagnostic, Handler, Level};
+use rustc_hir::def_id::CrateNum;
+use rustc_index::vec::IndexVec;
+use rustc_middle::dep_graph;
+use rustc_middle::ich::StableHashingContext;
+use rustc_middle::ty::query::{query_keys, query_storage, query_stored, query_values};
+use rustc_middle::ty::query::{Providers, QueryEngine};
+use rustc_middle::ty::TyCtxt;
+use rustc_serialize::opaque;
+use rustc_span::{Span, DUMMY_SP};
+use std::mem;
+
+#[macro_use]
+mod plumbing;
+pub use plumbing::QueryCtxt;
+use plumbing::QueryStruct;
+use rustc_query_system::query::*;
+
+mod stats;
+pub use self::stats::print_stats;
+
+mod keys;
+use keys::Key;
+
+mod values;
+use self::values::Value;
+
+use rustc_query_system::query::QueryAccessors;
+pub use rustc_query_system::query::QueryConfig;
+pub(crate) use rustc_query_system::query::QueryDescription;
+
+use rustc_middle::ty::query::on_disk_cache;
+
+mod profiling_support;
+pub use self::profiling_support::alloc_self_profile_query_strings;
+
+rustc_query_append! { [define_queries!][<'tcx>] }
+
+impl<'tcx> Queries<'tcx> {
+    // Force codegen in the dyn-trait transformation in this crate.
+    pub fn as_dyn(&'tcx self) -> &'tcx dyn QueryEngine<'tcx> {
+        self
+    }
+}
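`Queries::as_dyn` exists, per its comment, to force the concrete-to-`dyn QueryEngine` conversion to be code-generated in this crate rather than in every caller. A minimal sketch of that pattern follows under assumed names (`Engine`, `ConcreteEngine`, `drive` are illustrative, not rustc items).

// Sketch: the defining crate exposes an explicit unsizing method, so downstream
// code only ever handles `&dyn Engine` and the vtable conversion is compiled here.
trait Engine {
    fn run(&self, name: &str) -> String;
}

struct ConcreteEngine {
    prefix: String,
}

impl Engine for ConcreteEngine {
    fn run(&self, name: &str) -> String {
        format!("{}::{}", self.prefix, name)
    }
}

impl ConcreteEngine {
    // The unsizing coercion happens here, once, in the defining crate.
    pub fn as_dyn(&self) -> &dyn Engine {
        self
    }
}

// A downstream user that only depends on the trait object.
fn drive(engine: &dyn Engine) -> String {
    engine.run("type_of")
}

fn main() {
    let engine = ConcreteEngine { prefix: "queries".into() };
    assert_eq!(drive(engine.as_dyn()), "queries::type_of");
}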
728
compiler/rustc_query_impl/src/plumbing.rs
Normal file
728
compiler/rustc_query_impl/src/plumbing.rs
Normal file
@ -0,0 +1,728 @@
|
|||||||
|
//! The implementation of the query system itself. This defines the macros that
//! generate the actual methods on tcx which find and execute the provider,
//! manage the caches, and so forth.

use super::{queries, Query};
use rustc_middle::dep_graph::{DepKind, DepNode, DepNodeExt, DepNodeIndex, SerializedDepNodeIndex};
use rustc_middle::ty::query::on_disk_cache;
use rustc_middle::ty::tls::{self, ImplicitCtxt};
use rustc_middle::ty::{self, TyCtxt};
use rustc_query_system::dep_graph::HasDepContext;
use rustc_query_system::query::{CycleError, QueryJobId, QueryJobInfo};
use rustc_query_system::query::{QueryContext, QueryDescription};

use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::sync::Lock;
use rustc_data_structures::thin_vec::ThinVec;
use rustc_errors::{struct_span_err, Diagnostic, DiagnosticBuilder};
use rustc_serialize::opaque;
use rustc_span::def_id::{DefId, LocalDefId};
use rustc_span::Span;

#[derive(Copy, Clone)]
pub struct QueryCtxt<'tcx> {
    pub tcx: TyCtxt<'tcx>,
    pub queries: &'tcx super::Queries<'tcx>,
}

impl<'tcx> std::ops::Deref for QueryCtxt<'tcx> {
    type Target = TyCtxt<'tcx>;

    fn deref(&self) -> &Self::Target {
        &self.tcx
    }
}

impl HasDepContext for QueryCtxt<'tcx> {
    type DepKind = rustc_middle::dep_graph::DepKind;
    type StableHashingContext = rustc_middle::ich::StableHashingContext<'tcx>;
    type DepContext = TyCtxt<'tcx>;

    #[inline]
    fn dep_context(&self) -> &Self::DepContext {
        &self.tcx
    }
}

impl QueryContext for QueryCtxt<'tcx> {
    type Query = Query<'tcx>;

    fn incremental_verify_ich(&self) -> bool {
        self.sess.opts.debugging_opts.incremental_verify_ich
    }
    fn verbose(&self) -> bool {
        self.sess.verbose()
    }

    fn def_path_str(&self, def_id: DefId) -> String {
        self.tcx.def_path_str(def_id)
    }

    fn current_query_job(&self) -> Option<QueryJobId<Self::DepKind>> {
        tls::with_related_context(**self, |icx| icx.query)
    }

    fn try_collect_active_jobs(
        &self,
    ) -> Option<FxHashMap<QueryJobId<Self::DepKind>, QueryJobInfo<Self::DepKind, Self::Query>>>
    {
        self.queries.try_collect_active_jobs()
    }

    fn try_load_from_on_disk_cache(&self, dep_node: &DepNode) {
        let cb = &super::QUERY_CALLBACKS[dep_node.kind as usize];
        (cb.try_load_from_on_disk_cache)(*self, dep_node)
    }

    fn try_force_from_dep_node(&self, dep_node: &DepNode) -> bool {
        // FIXME: This match is just a workaround for incremental bugs and should
        // be removed. https://github.com/rust-lang/rust/issues/62649 is one such
        // bug that must be fixed before removing this.
        match dep_node.kind {
            DepKind::hir_owner | DepKind::hir_owner_nodes => {
                if let Some(def_id) = dep_node.extract_def_id(**self) {
                    let def_id = def_id.expect_local();
                    let hir_id = self.tcx.hir().local_def_id_to_hir_id(def_id);
                    if def_id != hir_id.owner {
                        // This `DefPath` does not have a
                        // corresponding `DepNode` (e.g. a
                        // struct field), and the `DefPath`
                        // collided with the `DefPath` of a
                        // proper item that existed in the
                        // previous compilation session.
                        //
                        // Since the given `DefPath` does not
                        // denote the item that previously
                        // existed, we just fail to mark green.
                        return false;
                    }
                } else {
                    // If the node does not exist anymore, we
                    // just fail to mark green.
                    return false;
                }
            }
            _ => {
                // For other kinds of nodes it's OK to be
                // forced.
            }
        }

        debug!("try_force_from_dep_node({:?}) --- trying to force", dep_node);

        // We must avoid ever having to call `force_from_dep_node()` for a
        // `DepNode::codegen_unit`:
        // Since we cannot reconstruct the query key of a `DepNode::codegen_unit`, we
        // would always end up having to evaluate the first caller of the
        // `codegen_unit` query that *is* reconstructible. This might very well be
        // the `compile_codegen_unit` query, thus re-codegenning the whole CGU just
        // to re-trigger calling the `codegen_unit` query with the right key. At
        // that point we would already have re-done all the work we are trying to
        // avoid doing in the first place.
        // The solution is simple: Just explicitly call the `codegen_unit` query for
        // each CGU, right after partitioning. This way `try_mark_green` will always
        // hit the cache instead of having to go through `force_from_dep_node`.
        // This assertion makes sure, we actually keep applying the solution above.
        debug_assert!(
            dep_node.kind != DepKind::codegen_unit,
            "calling force_from_dep_node() on DepKind::codegen_unit"
        );

        let cb = &super::QUERY_CALLBACKS[dep_node.kind as usize];
        (cb.force_from_dep_node)(*self, dep_node)
    }

    fn has_errors_or_delayed_span_bugs(&self) -> bool {
        self.sess.has_errors_or_delayed_span_bugs()
    }

    fn diagnostic(&self) -> &rustc_errors::Handler {
        self.sess.diagnostic()
    }

    // Interactions with on_disk_cache
    fn load_diagnostics(&self, prev_dep_node_index: SerializedDepNodeIndex) -> Vec<Diagnostic> {
        self.on_disk_cache
            .as_ref()
            .map(|c| c.load_diagnostics(**self, prev_dep_node_index))
            .unwrap_or_default()
    }

    fn store_diagnostics(&self, dep_node_index: DepNodeIndex, diagnostics: ThinVec<Diagnostic>) {
        if let Some(c) = self.on_disk_cache.as_ref() {
            c.store_diagnostics(dep_node_index, diagnostics)
        }
    }

    fn store_diagnostics_for_anon_node(
        &self,
        dep_node_index: DepNodeIndex,
        diagnostics: ThinVec<Diagnostic>,
    ) {
        if let Some(c) = self.on_disk_cache.as_ref() {
            c.store_diagnostics_for_anon_node(dep_node_index, diagnostics)
        }
    }

    /// Executes a job by changing the `ImplicitCtxt` to point to the
    /// new query job while it executes. It returns the diagnostics
    /// captured during execution and the actual result.
    #[inline(always)]
    fn start_query<R>(
        &self,
        token: QueryJobId<Self::DepKind>,
        diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
        compute: impl FnOnce() -> R,
    ) -> R {
        // The `TyCtxt` stored in TLS has the same global interner lifetime
        // as `self`, so we use `with_related_context` to relate the 'tcx lifetimes
        // when accessing the `ImplicitCtxt`.
        tls::with_related_context(**self, move |current_icx| {
            // Update the `ImplicitCtxt` to point to our new query job.
            let new_icx = ImplicitCtxt {
                tcx: **self,
                query: Some(token),
                diagnostics,
                layout_depth: current_icx.layout_depth,
                task_deps: current_icx.task_deps,
            };

            // Use the `ImplicitCtxt` while we execute the query.
            tls::enter_context(&new_icx, |_| {
                rustc_data_structures::stack::ensure_sufficient_stack(compute)
            })
        })
    }
}

impl<'tcx> QueryCtxt<'tcx> {
    #[inline(never)]
    #[cold]
    pub(super) fn report_cycle(
        self,
        CycleError { usage, cycle: stack }: CycleError<Query<'tcx>>,
    ) -> DiagnosticBuilder<'tcx> {
        assert!(!stack.is_empty());

        let fix_span = |span: Span, query: &Query<'tcx>| {
            self.sess.source_map().guess_head_span(query.default_span(*self, span))
        };

        // Disable naming impls with types in this path, since that
        // sometimes cycles itself, leading to extra cycle errors.
        // (And cycle errors around impls tend to occur during the
        // collect/coherence phases anyhow.)
        ty::print::with_forced_impl_filename_line(|| {
            let span = fix_span(stack[1 % stack.len()].span, &stack[0].query);
            let mut err = struct_span_err!(
                self.sess,
                span,
                E0391,
                "cycle detected when {}",
                stack[0].query.describe(self)
            );

            for i in 1..stack.len() {
                let query = &stack[i].query;
                let span = fix_span(stack[(i + 1) % stack.len()].span, query);
                err.span_note(span, &format!("...which requires {}...", query.describe(self)));
            }

            err.note(&format!(
                "...which again requires {}, completing the cycle",
                stack[0].query.describe(self)
            ));

            if let Some((span, query)) = usage {
                err.span_note(
                    fix_span(span, &query),
                    &format!("cycle used when {}", query.describe(self)),
                );
            }

            err
        })
    }

    pub(super) fn encode_query_results(
        self,
        encoder: &mut on_disk_cache::CacheEncoder<'a, 'tcx, opaque::FileEncoder>,
        query_result_index: &mut on_disk_cache::EncodedQueryResultIndex,
    ) -> opaque::FileEncodeResult {
        macro_rules! encode_queries {
            ($($query:ident,)*) => {
                $(
                    on_disk_cache::encode_query_results::<_, super::queries::$query<'_>>(
                        self,
                        encoder,
                        query_result_index
                    )?;
                )*
            }
        }

        rustc_cached_queries!(encode_queries!);

        Ok(())
    }
}

/// This struct stores metadata about each Query.
///
/// Information is retrieved by indexing the `QUERIES` array using the integer value
/// of the `DepKind`. Overall, this allows to implement `QueryContext` using this manual
/// jump table instead of large matches.
pub struct QueryStruct {
    /// The red/green evaluation system will try to mark a specific DepNode in the
    /// dependency graph as green by recursively trying to mark the dependencies of
    /// that `DepNode` as green. While doing so, it will sometimes encounter a `DepNode`
    /// where we don't know if it is red or green and we therefore actually have
    /// to recompute its value in order to find out. Since the only piece of
    /// information that we have at that point is the `DepNode` we are trying to
    /// re-evaluate, we need some way to re-run a query from just that. This is what
    /// `force_from_dep_node()` implements.
    ///
    /// In the general case, a `DepNode` consists of a `DepKind` and an opaque
    /// GUID/fingerprint that will uniquely identify the node. This GUID/fingerprint
    /// is usually constructed by computing a stable hash of the query-key that the
    /// `DepNode` corresponds to. Consequently, it is not in general possible to go
    /// back from hash to query-key (since hash functions are not reversible). For
    /// this reason `force_from_dep_node()` is expected to fail from time to time
    /// because we just cannot find out, from the `DepNode` alone, what the
    /// corresponding query-key is and therefore cannot re-run the query.
    ///
    /// The system deals with this case letting `try_mark_green` fail which forces
    /// the root query to be re-evaluated.
    ///
    /// Now, if `force_from_dep_node()` would always fail, it would be pretty useless.
    /// Fortunately, we can use some contextual information that will allow us to
    /// reconstruct query-keys for certain kinds of `DepNode`s. In particular, we
    /// enforce by construction that the GUID/fingerprint of certain `DepNode`s is a
    /// valid `DefPathHash`. Since we also always build a huge table that maps every
    /// `DefPathHash` in the current codebase to the corresponding `DefId`, we have
    /// everything we need to re-run the query.
    ///
    /// Take the `mir_promoted` query as an example. Like many other queries, it
    /// just has a single parameter: the `DefId` of the item it will compute the
    /// validated MIR for. Now, when we call `force_from_dep_node()` on a `DepNode`
    /// with kind `MirValidated`, we know that the GUID/fingerprint of the `DepNode`
    /// is actually a `DefPathHash`, and can therefore just look up the corresponding
    /// `DefId` in `tcx.def_path_hash_to_def_id`.
    ///
    /// When you implement a new query, it will likely have a corresponding new
    /// `DepKind`, and you'll have to support it here in `force_from_dep_node()`. As
    /// a rule of thumb, if your query takes a `DefId` or `LocalDefId` as sole parameter,
    /// then `force_from_dep_node()` should not fail for it. Otherwise, you can just
    /// add it to the "We don't have enough information to reconstruct..." group in
    /// the match below.
    pub(crate) force_from_dep_node: fn(tcx: QueryCtxt<'_>, dep_node: &DepNode) -> bool,

    /// Invoke a query to put the on-disk cached value in memory.
    pub(crate) try_load_from_on_disk_cache: fn(QueryCtxt<'_>, &DepNode),
}
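// Editorial sketch, not part of this commit: for a query keyed by a plain `DefId`,
// the per-query callback generated in `query_callbacks` below reduces to roughly this
// shape. `type_of` stands in for any `DefId`-keyed query, and the real code recovers
// the key through `DepNodeParams::recover` rather than calling `extract_def_id` directly.
fn force_type_of_sketch(tcx: QueryCtxt<'_>, dep_node: &DepNode) -> bool {
    // The fingerprint of such a `DepNode` is a `DefPathHash`, so the key is recoverable;
    // anonymous or non-reconstructible nodes bail out with `false` instead.
    if let Some(def_id) = dep_node.extract_def_id(*tcx) {
        force_query::<queries::type_of<'_>, _>(tcx, def_id, DUMMY_SP, *dep_node);
        true
    } else {
        false
    }
}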

macro_rules! handle_cycle_error {
    ([][$tcx: expr, $error:expr]) => {{
        $tcx.report_cycle($error).emit();
        Value::from_cycle_error($tcx)
    }};
    ([fatal_cycle $($rest:tt)*][$tcx:expr, $error:expr]) => {{
        $tcx.report_cycle($error).emit();
        $tcx.sess.abort_if_errors();
        unreachable!()
    }};
    ([cycle_delay_bug $($rest:tt)*][$tcx:expr, $error:expr]) => {{
        $tcx.report_cycle($error).delay_as_bug();
        Value::from_cycle_error($tcx)
    }};
    ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*][$($args:tt)*]) => {
        handle_cycle_error!([$($($modifiers)*)*][$($args)*])
    };
}

macro_rules! is_anon {
    ([]) => {{
        false
    }};
    ([anon $($rest:tt)*]) => {{
        true
    }};
    ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*]) => {
        is_anon!([$($($modifiers)*)*])
    };
}

macro_rules! is_eval_always {
    ([]) => {{
        false
    }};
    ([eval_always $($rest:tt)*]) => {{
        true
    }};
    ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*]) => {
        is_eval_always!([$($($modifiers)*)*])
    };
}

macro_rules! hash_result {
    ([][$hcx:expr, $result:expr]) => {{
        dep_graph::hash_result($hcx, &$result)
    }};
    ([no_hash $($rest:tt)*][$hcx:expr, $result:expr]) => {{
        None
    }};
    ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*][$($args:tt)*]) => {
        hash_result!([$($($modifiers)*)*][$($args)*])
    };
}
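// Editorial sketch, not part of this commit: the helper macros above all use the same
// trick: match the interesting modifier at the head of the list, otherwise peel one
// modifier and recurse. A free-standing miniature of the pattern:
macro_rules! has_flag {
    ([]) => { false };
    ([flag $($rest:tt)*]) => { true };
    ([$other:ident $($rest:tt)*]) => { has_flag!([$($rest)*]) };
}
const WITH_FLAG: bool = has_flag!([anon flag]); // `anon` is peeled off, `flag` is found
const WITHOUT_FLAG: bool = has_flag!([anon]);   // the list is exhausted without `flag`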

macro_rules! define_queries {
    (<$tcx:tt>
     $($(#[$attr:meta])*
        [$($modifiers:tt)*] fn $name:ident($($K:tt)*) -> $V:ty,)*) => {

        define_queries_struct! {
            tcx: $tcx,
            input: ($(([$($modifiers)*] [$($attr)*] [$name]))*)
        }

        #[allow(nonstandard_style)]
        #[derive(Clone, Debug)]
        pub enum Query<$tcx> {
            $($(#[$attr])* $name(query_keys::$name<$tcx>)),*
        }

        impl<$tcx> Query<$tcx> {
            pub fn name(&self) -> &'static str {
                match *self {
                    $(Query::$name(_) => stringify!($name),)*
                }
            }

            pub(crate) fn describe(&self, tcx: QueryCtxt<$tcx>) -> String {
                let (r, name) = match *self {
                    $(Query::$name(key) => {
                        (queries::$name::describe(tcx, key), stringify!($name))
                    })*
                };
                if tcx.sess.verbose() {
                    format!("{} [{}]", r, name)
                } else {
                    r
                }
            }

            // FIXME(eddyb) Get more valid `Span`s on queries.
            pub fn default_span(&self, tcx: TyCtxt<$tcx>, span: Span) -> Span {
                if !span.is_dummy() {
                    return span;
                }
                // The `def_span` query is used to calculate `default_span`,
                // so exit to avoid infinite recursion.
                if let Query::def_span(..) = *self {
                    return span
                }
                match *self {
                    $(Query::$name(key) => key.default_span(tcx),)*
                }
            }
        }

        impl<'a, $tcx> HashStable<StableHashingContext<'a>> for Query<$tcx> {
            fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
                mem::discriminant(self).hash_stable(hcx, hasher);
                match *self {
                    $(Query::$name(key) => key.hash_stable(hcx, hasher),)*
                }
            }
        }

        #[allow(nonstandard_style)]
        pub mod queries {
            use std::marker::PhantomData;

            $(pub struct $name<$tcx> {
                data: PhantomData<&$tcx ()>
            })*
        }

        $(impl<$tcx> QueryConfig for queries::$name<$tcx> {
            type Key = query_keys::$name<$tcx>;
            type Value = query_values::$name<$tcx>;
            type Stored = query_stored::$name<$tcx>;
            const NAME: &'static str = stringify!($name);
        }

        impl<$tcx> QueryAccessors<QueryCtxt<$tcx>> for queries::$name<$tcx> {
            const ANON: bool = is_anon!([$($modifiers)*]);
            const EVAL_ALWAYS: bool = is_eval_always!([$($modifiers)*]);
            const DEP_KIND: dep_graph::DepKind = dep_graph::DepKind::$name;

            type Cache = query_storage::$name<$tcx>;

            #[inline(always)]
            fn query_state<'a>(tcx: QueryCtxt<$tcx>) -> &'a QueryState<crate::dep_graph::DepKind, Query<$tcx>, Self::Key> {
                &tcx.queries.$name
            }

            #[inline(always)]
            fn query_cache<'a>(tcx: QueryCtxt<$tcx>) -> &'a QueryCacheStore<Self::Cache>
                where 'tcx:'a
            {
                &tcx.query_caches.$name
            }

            #[inline]
            fn compute(tcx: QueryCtxt<'tcx>, key: Self::Key) -> Self::Value {
                let provider = tcx.queries.providers.get(key.query_crate())
                    // HACK(eddyb) it's possible crates may be loaded after
                    // the query engine is created, and because crate loading
                    // is not yet integrated with the query engine, such crates
                    // would be missing appropriate entries in `providers`.
                    .unwrap_or(&tcx.queries.fallback_extern_providers)
                    .$name;
                provider(*tcx, key)
            }

            fn hash_result(
                _hcx: &mut StableHashingContext<'_>,
                _result: &Self::Value
            ) -> Option<Fingerprint> {
                hash_result!([$($modifiers)*][_hcx, _result])
            }

            fn handle_cycle_error(
                tcx: QueryCtxt<'tcx>,
                error: CycleError<Query<'tcx>>
            ) -> Self::Value {
                handle_cycle_error!([$($modifiers)*][tcx, error])
            }
        })*

        #[allow(non_upper_case_globals)]
        pub mod query_callbacks {
            use super::*;
            use rustc_middle::dep_graph::DepNode;
            use rustc_middle::ty::query::query_keys;
            use rustc_query_system::dep_graph::DepNodeParams;
            use rustc_query_system::query::{force_query, QueryDescription};

            // We use this for most things when incr. comp. is turned off.
            pub const Null: QueryStruct = QueryStruct {
                force_from_dep_node: |_, dep_node| bug!("force_from_dep_node: encountered {:?}", dep_node),
                try_load_from_on_disk_cache: |_, _| {},
            };

            pub const TraitSelect: QueryStruct = QueryStruct {
                force_from_dep_node: |_, _| false,
                try_load_from_on_disk_cache: |_, _| {},
            };

            pub const CompileCodegenUnit: QueryStruct = QueryStruct {
                force_from_dep_node: |_, _| false,
                try_load_from_on_disk_cache: |_, _| {},
            };

            $(pub const $name: QueryStruct = {
                const is_anon: bool = is_anon!([$($modifiers)*]);

                #[inline(always)]
                fn can_reconstruct_query_key() -> bool {
                    <query_keys::$name<'_> as DepNodeParams<TyCtxt<'_>>>
                        ::can_reconstruct_query_key()
                }

                fn recover<'tcx>(tcx: TyCtxt<'tcx>, dep_node: &DepNode) -> Option<query_keys::$name<'tcx>> {
                    <query_keys::$name<'_> as DepNodeParams<TyCtxt<'_>>>::recover(tcx, dep_node)
                }

                fn force_from_dep_node(tcx: QueryCtxt<'_>, dep_node: &DepNode) -> bool {
                    if is_anon {
                        return false;
                    }

                    if !can_reconstruct_query_key() {
                        return false;
                    }

                    if let Some(key) = recover(*tcx, dep_node) {
                        force_query::<queries::$name<'_>, _>(tcx, key, DUMMY_SP, *dep_node);
                        return true;
                    }

                    false
                }

                fn try_load_from_on_disk_cache(tcx: QueryCtxt<'_>, dep_node: &DepNode) {
                    if is_anon {
                        return
                    }

                    if !can_reconstruct_query_key() {
                        return
                    }

                    debug_assert!(tcx.dep_graph
                        .node_color(dep_node)
                        .map(|c| c.is_green())
                        .unwrap_or(false));

                    let key = recover(*tcx, dep_node).unwrap_or_else(|| panic!("Failed to recover key for {:?} with hash {}", dep_node, dep_node.hash));
                    if queries::$name::cache_on_disk(tcx, &key, None) {
                        let _ = tcx.$name(key);
                    }
                }

                QueryStruct {
                    force_from_dep_node,
                    try_load_from_on_disk_cache,
                }
            };)*
        }

        static QUERY_CALLBACKS: &[QueryStruct] = &make_dep_kind_array!(query_callbacks);
    }
}

// FIXME(eddyb) this macro (and others?) use `$tcx` and `'tcx` interchangeably.
// We should either not take `$tcx` at all and use `'tcx` everywhere, or use
// `$tcx` everywhere (even if that isn't necessary due to lack of hygiene).
macro_rules! define_queries_struct {
    (tcx: $tcx:tt,
     input: ($(([$($modifiers:tt)*] [$($attr:tt)*] [$name:ident]))*)) => {
        pub struct Queries<$tcx> {
            providers: IndexVec<CrateNum, Providers>,
            fallback_extern_providers: Box<Providers>,

            $($(#[$attr])* $name: QueryState<
                crate::dep_graph::DepKind,
                Query<$tcx>,
                query_keys::$name<$tcx>,
            >,)*
        }

        impl<$tcx> Queries<$tcx> {
            pub fn new(
                providers: IndexVec<CrateNum, Providers>,
                fallback_extern_providers: Providers,
            ) -> Self {
                Queries {
                    providers,
                    fallback_extern_providers: Box::new(fallback_extern_providers),
                    $($name: Default::default()),*
                }
            }

            pub(crate) fn try_collect_active_jobs(
                &self
            ) -> Option<FxHashMap<QueryJobId<crate::dep_graph::DepKind>, QueryJobInfo<crate::dep_graph::DepKind, Query<$tcx>>>> {
                let mut jobs = FxHashMap::default();

                $(
                    self.$name.try_collect_active_jobs(
                        <queries::$name<'tcx> as QueryAccessors<QueryCtxt<'tcx>>>::DEP_KIND,
                        Query::$name,
                        &mut jobs,
                    )?;
                )*

                Some(jobs)
            }
        }

        impl QueryEngine<'tcx> for Queries<'tcx> {
            unsafe fn deadlock(&'tcx self, _tcx: TyCtxt<'tcx>, _registry: &rustc_rayon_core::Registry) {
                #[cfg(parallel_compiler)]
                {
                    let tcx = QueryCtxt { tcx: _tcx, queries: self };
                    rustc_query_system::query::deadlock(tcx, _registry)
                }
            }

            fn encode_query_results(
                &'tcx self,
                tcx: TyCtxt<'tcx>,
                encoder: &mut on_disk_cache::CacheEncoder<'a, 'tcx, opaque::FileEncoder>,
                query_result_index: &mut on_disk_cache::EncodedQueryResultIndex,
            ) -> opaque::FileEncodeResult {
                let tcx = QueryCtxt { tcx, queries: self };
                tcx.encode_query_results(encoder, query_result_index)
            }

            fn exec_cache_promotions(&'tcx self, tcx: TyCtxt<'tcx>) {
                let tcx = QueryCtxt { tcx, queries: self };
                tcx.dep_graph.exec_cache_promotions(tcx)
            }

            fn try_mark_green(&'tcx self, tcx: TyCtxt<'tcx>, dep_node: &dep_graph::DepNode) -> bool {
                let qcx = QueryCtxt { tcx, queries: self };
                tcx.dep_graph.try_mark_green(qcx, dep_node).is_some()
            }

            fn try_print_query_stack(
                &'tcx self,
                tcx: TyCtxt<'tcx>,
                query: Option<QueryJobId<dep_graph::DepKind>>,
                handler: &Handler,
                num_frames: Option<usize>,
            ) -> usize {
                let query_map = self.try_collect_active_jobs();

                let mut current_query = query;
                let mut i = 0;

                while let Some(query) = current_query {
                    if Some(i) == num_frames {
                        break;
                    }
                    let query_info = if let Some(info) = query_map.as_ref().and_then(|map| map.get(&query))
                    {
                        info
                    } else {
                        break;
                    };
                    let mut diag = Diagnostic::new(
                        Level::FailureNote,
                        &format!(
                            "#{} [{}] {}",
                            i,
                            query_info.info.query.name(),
                            query_info.info.query.describe(QueryCtxt { tcx, queries: self })
                        ),
                    );
                    diag.span = tcx.sess.source_map().guess_head_span(query_info.info.span).into();
                    handler.force_print_diagnostic(diag);

                    current_query = query_info.job.parent;
                    i += 1;
                }

                i
            }

            $($(#[$attr])*
            #[inline(always)]
            fn $name(
                &'tcx self,
                tcx: TyCtxt<$tcx>,
                span: Span,
                key: query_keys::$name<$tcx>,
                lookup: QueryLookup,
                mode: QueryMode,
            ) -> Option<query_stored::$name<$tcx>> {
                let qcx = QueryCtxt { tcx, queries: self };
                get_query::<queries::$name<$tcx>, _>(qcx, span, key, lookup, mode)
            })*
        }
    };
}

fn describe_as_module(def_id: LocalDefId, tcx: TyCtxt<'_>) -> String {
    if def_id.is_top_level_module() {
        "top-level module".to_string()
    } else {
        format!("module `{}`", tcx.def_path_str(def_id.to_def_id()))
    }
}

rustc_query_description! {}
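// Editorial sketch, not part of this commit: read together with the `QueryEngine` impl
// above, a single query call now flows roughly like this (`type_of` again stands in for
// an arbitrary query; the `TyCtxt`-side wrapper lives in rustc_middle and is not shown
// in this diff):
//
//   tcx.type_of(key)
//     -> dynamic dispatch through `&dyn QueryEngine`, into the generated
//        `Queries::type_of` method
//     -> wraps `tcx` and the concrete `Queries` into a `QueryCtxt`
//     -> `get_query::<queries::type_of<'tcx>, _>(qcx, span, key, lookup, mode)` in
//        rustc_query_system handles caching and dep-graph bookkeeping, and eventually
//        calls `queries::type_of::compute`, which looks up the provider.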

@ -1,32 +1,31 @@
-use crate::ty::context::TyCtxt;
-use crate::ty::WithOptConstParam;
 use measureme::{StringComponent, StringId};
 use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::profiling::SelfProfiler;
 use rustc_hir::def_id::{CrateNum, DefId, DefIndex, LocalDefId, CRATE_DEF_INDEX, LOCAL_CRATE};
 use rustc_hir::definitions::DefPathData;
+use rustc_middle::ty::{TyCtxt, WithOptConstParam};
 use rustc_query_system::query::{QueryCache, QueryCacheStore};
 use std::fmt::Debug;
 use std::io::Write;

-pub struct QueryKeyStringCache {
+struct QueryKeyStringCache {
     def_id_cache: FxHashMap<DefId, StringId>,
 }

 impl QueryKeyStringCache {
-    pub fn new() -> QueryKeyStringCache {
+    fn new() -> QueryKeyStringCache {
         QueryKeyStringCache { def_id_cache: Default::default() }
     }
 }

-pub struct QueryKeyStringBuilder<'p, 'c, 'tcx> {
+struct QueryKeyStringBuilder<'p, 'c, 'tcx> {
     profiler: &'p SelfProfiler,
     tcx: TyCtxt<'tcx>,
     string_cache: &'c mut QueryKeyStringCache,
 }

 impl<'p, 'c, 'tcx> QueryKeyStringBuilder<'p, 'c, 'tcx> {
-    pub fn new(
+    fn new(
         profiler: &'p SelfProfiler,
         tcx: TyCtxt<'tcx>,
         string_cache: &'c mut QueryKeyStringCache,
@ -98,7 +97,7 @@ impl<'p, 'c, 'tcx> QueryKeyStringBuilder<'p, 'c, 'tcx> {
     }
 }

-pub trait IntoSelfProfilingString {
+trait IntoSelfProfilingString {
     fn to_self_profile_string(&self, builder: &mut QueryKeyStringBuilder<'_, '_, '_>) -> StringId;
 }

@ -123,7 +122,7 @@ impl<T: SpecIntoSelfProfilingString> IntoSelfProfilingString for T {
 }

 #[rustc_specialization_trait]
-pub trait SpecIntoSelfProfilingString: Debug {
+trait SpecIntoSelfProfilingString: Debug {
     fn spec_to_self_profile_string(
         &self,
         builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
@ -227,7 +226,7 @@ where
 /// Allocate the self-profiling query strings for a single query cache. This
 /// method is called from `alloc_self_profile_query_strings` which knows all
 /// the queries via macro magic.
-pub(super) fn alloc_self_profile_query_strings_for_query_cache<'tcx, C>(
+fn alloc_self_profile_query_strings_for_query_cache<'tcx, C>(
     tcx: TyCtxt<'tcx>,
     query_name: &'static str,
     query_cache: &QueryCacheStore<C>,
@ -287,3 +286,35 @@ pub(super) fn alloc_self_profile_query_strings_for_query_cache<'tcx, C>(
         }
     });
 }

+/// All self-profiling events generated by the query engine use
+/// virtual `StringId`s for their `event_id`. This method makes all
+/// those virtual `StringId`s point to actual strings.
+///
+/// If we are recording only summary data, the ids will point to
+/// just the query names. If we are recording query keys too, we
+/// allocate the corresponding strings here.
+pub fn alloc_self_profile_query_strings(tcx: TyCtxt<'tcx>) {
+    if !tcx.prof.enabled() {
+        return;
+    }
+
+    let mut string_cache = QueryKeyStringCache::new();
+
+    macro_rules! alloc_once {
+        (<$tcx:tt>
+         $($(#[$attr:meta])* [$($modifiers:tt)*] fn $name:ident($K:ty) -> $V:ty,)*
+        ) => {
+            $({
+                alloc_self_profile_query_strings_for_query_cache(
+                    tcx,
+                    stringify!($name),
+                    &tcx.query_caches.$name,
+                    &mut string_cache,
+                );
+            })*
+        }
+    }
+
+    rustc_query_append! { [alloc_once!][<'tcx>] }
+}
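// Editorial sketch, not part of this commit: the new `alloc_self_profile_query_strings`
// entry point is intended to be called once per session, after the queries have run, by
// whichever driver code owns the `TyCtxt`. The exact call site is assumed here and is
// not shown in this diff:
//
//     rustc_query_impl::alloc_self_profile_query_strings(tcx);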

@ -1,7 +1,7 @@
-use crate::ty::query::queries;
-use crate::ty::TyCtxt;
 use rustc_hir::def_id::{DefId, LOCAL_CRATE};
-use rustc_query_system::query::{QueryAccessors, QueryCache, QueryCacheStore};
+use rustc_middle::ty::query::query_storage;
+use rustc_middle::ty::TyCtxt;
+use rustc_query_system::query::{QueryCache, QueryCacheStore};

 use std::any::type_name;
 use std::mem;
@ -125,7 +125,7 @@ macro_rules! print_stats {

         $(
             queries.push(stats::<
-                <queries::$name<'_> as QueryAccessors<TyCtxt<'_>>>::Cache,
+                query_storage::$name<'_>,
             >(
                 stringify!($name),
                 &tcx.query_caches.$name,
@ -1,18 +1,19 @@
-use crate::ty::{self, AdtSizedConstraint, Ty, TyCtxt, TyS};
+use super::QueryCtxt;
+use rustc_middle::ty::{self, AdtSizedConstraint, Ty, TyS};

 pub(super) trait Value<'tcx>: Sized {
-    fn from_cycle_error(tcx: TyCtxt<'tcx>) -> Self;
+    fn from_cycle_error(tcx: QueryCtxt<'tcx>) -> Self;
 }

 impl<'tcx, T> Value<'tcx> for T {
-    default fn from_cycle_error(tcx: TyCtxt<'tcx>) -> T {
+    default fn from_cycle_error(tcx: QueryCtxt<'tcx>) -> T {
         tcx.sess.abort_if_errors();
         bug!("Value::from_cycle_error called without errors");
     }
 }

 impl<'tcx> Value<'tcx> for &'_ TyS<'_> {
-    fn from_cycle_error(tcx: TyCtxt<'tcx>) -> Self {
+    fn from_cycle_error(tcx: QueryCtxt<'tcx>) -> Self {
         // SAFETY: This is never called when `Self` is not `Ty<'tcx>`.
         // FIXME: Represent the above fact in the trait system somehow.
         unsafe { std::mem::transmute::<Ty<'tcx>, Ty<'_>>(tcx.ty_error()) }
@ -20,19 +21,19 @@ impl<'tcx> Value<'tcx> for &'_ TyS<'_> {
 }

 impl<'tcx> Value<'tcx> for ty::SymbolName<'_> {
-    fn from_cycle_error(tcx: TyCtxt<'tcx>) -> Self {
+    fn from_cycle_error(tcx: QueryCtxt<'tcx>) -> Self {
         // SAFETY: This is never called when `Self` is not `SymbolName<'tcx>`.
         // FIXME: Represent the above fact in the trait system somehow.
         unsafe {
             std::mem::transmute::<ty::SymbolName<'tcx>, ty::SymbolName<'_>>(ty::SymbolName::new(
-                tcx, "<error>",
+                *tcx, "<error>",
             ))
         }
     }
 }

 impl<'tcx> Value<'tcx> for AdtSizedConstraint<'_> {
-    fn from_cycle_error(tcx: TyCtxt<'tcx>) -> Self {
+    fn from_cycle_error(tcx: QueryCtxt<'tcx>) -> Self {
         // SAFETY: This is never called when `Self` is not `AdtSizedConstraint<'tcx>`.
         // FIXME: Represent the above fact in the trait system somehow.
         unsafe {
@ -1,7 +1,6 @@
 //! Cache for candidate selection.

-use crate::dep_graph::DepNodeIndex;
-use crate::query::QueryContext;
+use crate::dep_graph::{DepContext, DepNodeIndex};

 use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::sync::HashMapExt;
@ -28,7 +27,7 @@ impl<Key, Value> Cache<Key, Value> {
 }

 impl<Key: Eq + Hash, Value: Clone> Cache<Key, Value> {
-    pub fn get<CTX: QueryContext>(&self, key: &Key, tcx: CTX) -> Option<Value> {
+    pub fn get<CTX: DepContext>(&self, key: &Key, tcx: CTX) -> Option<Value> {
         Some(self.hashmap.borrow().get(key)?.get(tcx))
     }

@ -55,7 +54,7 @@ impl<T: Clone> WithDepNode<T> {
         WithDepNode { dep_node, cached_value }
     }

-    pub fn get<CTX: QueryContext>(&self, tcx: CTX) -> T {
+    pub fn get<CTX: DepContext>(&self, tcx: CTX) -> T {
         tcx.dep_graph().read_index(self.dep_node);
         self.cached_value.clone()
     }
@ -79,7 +79,7 @@ impl<K: DepKind> DepNode<K> {

     pub fn construct<Ctxt, Key>(tcx: Ctxt, kind: K, arg: &Key) -> DepNode<K>
     where
-        Ctxt: crate::query::QueryContext<DepKind = K>,
+        Ctxt: super::DepContext<DepKind = K>,
         Key: DepNodeParams<Ctxt>,
     {
         let hash = arg.to_fingerprint(tcx);
@ -23,7 +23,8 @@ use super::debug::EdgeFilter;
 use super::prev::PreviousDepGraph;
 use super::query::DepGraphQuery;
 use super::serialized::SerializedDepNodeIndex;
-use super::{DepContext, DepKind, DepNode, WorkProductId};
+use super::{DepContext, DepKind, DepNode, HasDepContext, WorkProductId};
+use crate::query::QueryContext;

 #[derive(Clone)]
 pub struct DepGraph<K: DepKind> {
@ -235,7 +236,7 @@ impl<K: DepKind> DepGraph<K> {
     /// `arg` parameter.
     ///
     /// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/incremental-compilation.html
-    pub fn with_task<Ctxt: DepContext<DepKind = K>, A, R>(
+    pub fn with_task<Ctxt: HasDepContext<DepKind = K>, A, R>(
         &self,
         key: DepNode<K>,
         cx: Ctxt,
@ -261,7 +262,7 @@ impl<K: DepKind> DepGraph<K> {
         )
     }

-    fn with_task_impl<Ctxt: DepContext<DepKind = K>, A, R>(
+    fn with_task_impl<Ctxt: HasDepContext<DepKind = K>, A, R>(
         &self,
         key: DepNode<K>,
         cx: Ctxt,
@ -271,14 +272,15 @@ impl<K: DepKind> DepGraph<K> {
         hash_result: impl FnOnce(&mut Ctxt::StableHashingContext, &R) -> Option<Fingerprint>,
     ) -> (R, DepNodeIndex) {
         if let Some(ref data) = self.data {
+            let dcx = cx.dep_context();
             let task_deps = create_task(key).map(Lock::new);
             let result = K::with_deps(task_deps.as_ref(), || task(cx, arg));
             let edges = task_deps.map_or_else(|| smallvec![], |lock| lock.into_inner().reads);

-            let mut hcx = cx.create_stable_hashing_context();
+            let mut hcx = dcx.create_stable_hashing_context();
             let current_fingerprint = hash_result(&mut hcx, &result);

-            let print_status = cfg!(debug_assertions) && cx.debug_dep_tasks();
+            let print_status = cfg!(debug_assertions) && dcx.debug_dep_tasks();

             // Intern the new `DepNode`.
             let dep_node_index = if let Some(prev_index) = data.previous.node_to_index_opt(&key) {
@ -408,7 +410,7 @@ impl<K: DepKind> DepGraph<K> {

     /// Executes something within an "eval-always" task which is a task
     /// that runs whenever anything changes.
-    pub fn with_eval_always_task<Ctxt: DepContext<DepKind = K>, A, R>(
+    pub fn with_eval_always_task<Ctxt: HasDepContext<DepKind = K>, A, R>(
         &self,
         key: DepNode<K>,
         cx: Ctxt,
@ -585,7 +587,7 @@ impl<K: DepKind> DepGraph<K> {
     /// A node will have an index, when it's already been marked green, or when we can mark it
     /// green. This function will mark the current task as a reader of the specified node, when
     /// a node index can be found for that node.
-    pub fn try_mark_green_and_read<Ctxt: DepContext<DepKind = K>>(
+    pub fn try_mark_green_and_read<Ctxt: QueryContext<DepKind = K>>(
         &self,
         tcx: Ctxt,
         dep_node: &DepNode<K>,
@ -597,7 +599,7 @@ impl<K: DepKind> DepGraph<K> {
         })
     }

-    pub fn try_mark_green<Ctxt: DepContext<DepKind = K>>(
+    pub fn try_mark_green<Ctxt: QueryContext<DepKind = K>>(
         &self,
         tcx: Ctxt,
         dep_node: &DepNode<K>,
@ -625,7 +627,7 @@ impl<K: DepKind> DepGraph<K> {
     }

     /// Try to mark a dep-node which existed in the previous compilation session as green.
-    fn try_mark_previous_green<Ctxt: DepContext<DepKind = K>>(
+    fn try_mark_previous_green<Ctxt: QueryContext<DepKind = K>>(
         &self,
         tcx: Ctxt,
         data: &DepGraphData<K>,
@ -809,7 +811,7 @@ impl<K: DepKind> DepGraph<K> {
     /// This may be called concurrently on multiple threads for the same dep node.
     #[cold]
     #[inline(never)]
-    fn emit_diagnostics<Ctxt: DepContext<DepKind = K>>(
+    fn emit_diagnostics<Ctxt: QueryContext<DepKind = K>>(
         &self,
         tcx: Ctxt,
         data: &DepGraphData<K>,
@ -874,7 +876,8 @@ impl<K: DepKind> DepGraph<K> {
     //
     // This method will only load queries that will end up in the disk cache.
     // Other queries will not be executed.
-    pub fn exec_cache_promotions<Ctxt: DepContext<DepKind = K>>(&self, tcx: Ctxt) {
+    pub fn exec_cache_promotions<Ctxt: QueryContext<DepKind = K>>(&self, qcx: Ctxt) {
+        let tcx = qcx.dep_context();
         let _prof_timer = tcx.profiler().generic_activity("incr_comp_query_cache_promotion");

         let data = self.data.as_ref().unwrap();
@ -882,7 +885,7 @@ impl<K: DepKind> DepGraph<K> {
             match data.colors.get(prev_index) {
                 Some(DepNodeColor::Green(_)) => {
                     let dep_node = data.previous.index_to_node(prev_index);
-                    tcx.try_load_from_on_disk_cache(&dep_node);
+                    qcx.try_load_from_on_disk_cache(&dep_node);
                 }
                 None | Some(DepNodeColor::Red) => {
                     // We can skip red nodes because a node can only be marked
@ -13,8 +13,6 @@ pub use serialized::{SerializedDepGraph, SerializedDepNodeIndex};

 use rustc_data_structures::profiling::SelfProfilerRef;
 use rustc_data_structures::sync::Lock;
-use rustc_data_structures::thin_vec::ThinVec;
-use rustc_errors::Diagnostic;

 use std::fmt;
 use std::hash::Hash;
@ -29,37 +27,36 @@ pub trait DepContext: Copy {
     fn debug_dep_tasks(&self) -> bool;
     fn debug_dep_node(&self) -> bool;

-    /// Try to force a dep node to execute and see if it's green.
-    fn try_force_from_dep_node(&self, dep_node: &DepNode<Self::DepKind>) -> bool;
+    /// Access the DepGraph.
+    fn dep_graph(&self) -> &DepGraph<Self::DepKind>;

     fn register_reused_dep_node(&self, dep_node: &DepNode<Self::DepKind>);

-    /// Return whether the current session is tainted by errors.
-    fn has_errors_or_delayed_span_bugs(&self) -> bool;
-
-    /// Return the diagnostic handler.
-    fn diagnostic(&self) -> &rustc_errors::Handler;
-
-    /// Load data from the on-disk cache.
-    fn try_load_from_on_disk_cache(&self, dep_node: &DepNode<Self::DepKind>);
-
-    /// Load diagnostics associated to the node in the previous session.
-    fn load_diagnostics(&self, prev_dep_node_index: SerializedDepNodeIndex) -> Vec<Diagnostic>;
-
-    /// Register diagnostics for the given node, for use in next session.
-    fn store_diagnostics(&self, dep_node_index: DepNodeIndex, diagnostics: ThinVec<Diagnostic>);
-
-    /// Register diagnostics for the given node, for use in next session.
-    fn store_diagnostics_for_anon_node(
-        &self,
-        dep_node_index: DepNodeIndex,
-        diagnostics: ThinVec<Diagnostic>,
-    );
-
     /// Access the profiler.
     fn profiler(&self) -> &SelfProfilerRef;
 }

+pub trait HasDepContext: Copy {
+    type DepKind: self::DepKind;
+    type StableHashingContext;
+    type DepContext: self::DepContext<
+        DepKind = Self::DepKind,
+        StableHashingContext = Self::StableHashingContext,
+    >;
+
+    fn dep_context(&self) -> &Self::DepContext;
+}
+
+impl<T: DepContext> HasDepContext for T {
+    type DepKind = T::DepKind;
+    type StableHashingContext = T::StableHashingContext;
+    type DepContext = Self;
+
+    fn dep_context(&self) -> &Self::DepContext {
+        self
+    }
+}
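// Editorial sketch, not part of this commit: with the blanket impl above, any plain
// `DepContext` (e.g. `TyCtxt`) is trivially its own dep-context, while a wrapper type
// can forward to an inner context the way `QueryCtxt` does in rustc_query_impl. The
// `Wrapper` type below is illustrative only.
#[derive(Copy, Clone)]
struct Wrapper<C: DepContext> {
    inner: C,
}

impl<C: DepContext> HasDepContext for Wrapper<C> {
    type DepKind = C::DepKind;
    type StableHashingContext = C::StableHashingContext;
    type DepContext = C;

    fn dep_context(&self) -> &Self::DepContext {
        &self.inner
    }
}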
|
||||||
|
|
||||||
/// Describe the different families of dependency nodes.
|
/// Describe the different families of dependency nodes.
|
||||||
pub trait DepKind: Copy + fmt::Debug + Eq + Hash {
|
pub trait DepKind: Copy + fmt::Debug + Eq + Hash {
|
||||||
const NULL: Self;
|
const NULL: Self;
|
||||||
|
@ -33,9 +33,9 @@ pub(crate) struct QueryVtable<CTX: QueryContext, K, V> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<CTX: QueryContext, K, V> QueryVtable<CTX, K, V> {
|
impl<CTX: QueryContext, K, V> QueryVtable<CTX, K, V> {
|
||||||
pub(crate) fn to_dep_node(&self, tcx: CTX, key: &K) -> DepNode<CTX::DepKind>
|
pub(crate) fn to_dep_node(&self, tcx: CTX::DepContext, key: &K) -> DepNode<CTX::DepKind>
|
||||||
where
|
where
|
||||||
K: crate::dep_graph::DepNodeParams<CTX>,
|
K: crate::dep_graph::DepNodeParams<CTX::DepContext>,
|
||||||
{
|
{
|
||||||
DepNode::construct(tcx, self.dep_kind, key)
|
DepNode::construct(tcx, self.dep_kind, key)
|
||||||
}
|
}
|
||||||
@ -80,13 +80,6 @@ pub trait QueryAccessors<CTX: QueryContext>: QueryConfig {
|
|||||||
where
|
where
|
||||||
CTX: 'a;
|
CTX: 'a;
|
||||||
|
|
||||||
fn to_dep_node(tcx: CTX, key: &Self::Key) -> DepNode<CTX::DepKind>
|
|
||||||
where
|
|
||||||
Self::Key: crate::dep_graph::DepNodeParams<CTX>,
|
|
||||||
{
|
|
||||||
DepNode::construct(tcx, Self::DEP_KIND, key)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Don't use this method to compute query results, instead use the methods on TyCtxt
|
// Don't use this method to compute query results, instead use the methods on TyCtxt
|
||||||
fn compute(tcx: CTX, key: Self::Key) -> Self::Value;
|
fn compute(tcx: CTX, key: Self::Key) -> Self::Value;
|
||||||
|
|
||||||
|
@ -10,7 +10,8 @@ use std::num::NonZeroU32;
|
|||||||
|
|
||||||
#[cfg(parallel_compiler)]
|
#[cfg(parallel_compiler)]
|
||||||
use {
|
use {
|
||||||
super::QueryContext,
|
crate::dep_graph::DepContext,
|
||||||
|
crate::query::QueryContext,
|
||||||
parking_lot::{Condvar, Mutex},
|
parking_lot::{Condvar, Mutex},
|
||||||
rustc_data_structures::fx::FxHashSet,
|
rustc_data_structures::fx::FxHashSet,
|
||||||
rustc_data_structures::stable_hasher::{HashStable, StableHasher},
|
rustc_data_structures::stable_hasher::{HashStable, StableHasher},
|
||||||
@ -432,7 +433,7 @@ where
|
|||||||
{
|
{
|
||||||
// Deterministically pick an entry point
|
// Deterministically pick an entry point
|
||||||
// FIXME: Sort this instead
|
// FIXME: Sort this instead
|
||||||
let mut hcx = tcx.create_stable_hashing_context();
|
let mut hcx = tcx.dep_context().create_stable_hashing_context();
|
||||||
queries
|
queries
|
||||||
.iter()
|
.iter()
|
||||||
.min_by_key(|v| {
|
.min_by_key(|v| {
|
||||||
|
@ -14,7 +14,7 @@ pub use self::caches::{
|
|||||||
mod config;
|
mod config;
|
||||||
pub use self::config::{QueryAccessors, QueryConfig, QueryDescription};
|
pub use self::config::{QueryAccessors, QueryConfig, QueryDescription};
|
||||||
|
|
||||||
use crate::dep_graph::{DepContext, DepGraph};
|
use crate::dep_graph::{DepNode, DepNodeIndex, HasDepContext, SerializedDepNodeIndex};
|
||||||
use crate::query::job::QueryMap;
|
use crate::query::job::QueryMap;
|
||||||
|
|
||||||
use rustc_data_structures::stable_hasher::HashStable;
|
use rustc_data_structures::stable_hasher::HashStable;
|
||||||
@ -23,7 +23,7 @@ use rustc_data_structures::thin_vec::ThinVec;
|
|||||||
use rustc_errors::Diagnostic;
|
use rustc_errors::Diagnostic;
|
||||||
use rustc_span::def_id::DefId;
|
use rustc_span::def_id::DefId;
|
||||||
|
|
||||||
pub trait QueryContext: DepContext {
|
pub trait QueryContext: HasDepContext {
|
||||||
type Query: Clone + HashStable<Self::StableHashingContext>;
|
type Query: Clone + HashStable<Self::StableHashingContext>;
|
||||||
|
|
||||||
fn incremental_verify_ich(&self) -> bool;
|
fn incremental_verify_ich(&self) -> bool;
|
||||||
@ -32,14 +32,36 @@ pub trait QueryContext: DepContext {
|
|||||||
/// Get string representation from DefPath.
|
/// Get string representation from DefPath.
|
||||||
fn def_path_str(&self, def_id: DefId) -> String;
|
fn def_path_str(&self, def_id: DefId) -> String;
|
||||||
|
|
||||||
/// Access the DepGraph.
|
|
||||||
fn dep_graph(&self) -> &DepGraph<Self::DepKind>;
|
|
||||||
|
|
||||||
/// Get the query information from the TLS context.
|
/// Get the query information from the TLS context.
|
||||||
fn current_query_job(&self) -> Option<QueryJobId<Self::DepKind>>;
|
fn current_query_job(&self) -> Option<QueryJobId<Self::DepKind>>;
|
||||||
|
|
||||||
fn try_collect_active_jobs(&self) -> Option<QueryMap<Self::DepKind, Self::Query>>;
|
fn try_collect_active_jobs(&self) -> Option<QueryMap<Self::DepKind, Self::Query>>;
|
||||||
|
|
||||||
|
/// Load data from the on-disk cache.
|
||||||
|
fn try_load_from_on_disk_cache(&self, dep_node: &DepNode<Self::DepKind>);
|
||||||
|
|
||||||
|
/// Try to force a dep node to execute and see if it's green.
|
||||||
|
fn try_force_from_dep_node(&self, dep_node: &DepNode<Self::DepKind>) -> bool;
|
||||||
|
|
||||||
|
/// Return whether the current session is tainted by errors.
|
||||||
|
fn has_errors_or_delayed_span_bugs(&self) -> bool;
|
||||||
|
|
||||||
|
/// Return the diagnostic handler.
|
||||||
|
fn diagnostic(&self) -> &rustc_errors::Handler;
|
||||||
|
|
||||||
|
/// Load diagnostics associated to the node in the previous session.
|
||||||
|
fn load_diagnostics(&self, prev_dep_node_index: SerializedDepNodeIndex) -> Vec<Diagnostic>;
|
||||||
|
|
||||||
|
/// Register diagnostics for the given node, for use in next session.
|
||||||
|
fn store_diagnostics(&self, dep_node_index: DepNodeIndex, diagnostics: ThinVec<Diagnostic>);
|
||||||
|
|
||||||
|
/// Register diagnostics for the given node, for use in next session.
|
||||||
|
fn store_diagnostics_for_anon_node(
|
||||||
|
&self,
|
||||||
|
dep_node_index: DepNodeIndex,
|
||||||
|
diagnostics: ThinVec<Diagnostic>,
|
||||||
|
);
|
||||||
|
|
||||||
/// Executes a job by changing the `ImplicitCtxt` to point to the
|
/// Executes a job by changing the `ImplicitCtxt` to point to the
|
||||||
/// new query job while it executes. It returns the diagnostics
|
/// new query job while it executes. It returns the diagnostics
|
||||||
/// captured during execution and the actual result.
|
/// captured during execution and the actual result.
|
||||||
@ -47,6 +69,6 @@ pub trait QueryContext: DepContext {
|
|||||||
&self,
|
&self,
|
||||||
token: QueryJobId<Self::DepKind>,
|
token: QueryJobId<Self::DepKind>,
|
||||||
diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
|
diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
|
||||||
compute: impl FnOnce(Self) -> R,
|
compute: impl FnOnce() -> R,
|
||||||
) -> R;
|
) -> R;
|
||||||
}
|
}
|
||||||
|
@@ -2,7 +2,7 @@
 //! generate the actual methods on tcx which find and execute the provider,
 //! manage the caches, and so forth.

-use crate::dep_graph::{DepKind, DepNode};
+use crate::dep_graph::{DepContext, DepKind, DepNode};
 use crate::dep_graph::{DepNodeIndex, SerializedDepNodeIndex};
 use crate::query::caches::QueryCache;
 use crate::query::config::{QueryDescription, QueryVtable, QueryVtableExt};
@@ -204,7 +204,7 @@ where
         // in another thread has completed. Record how long we wait in the
         // self-profiler.
         let _query_blocked_prof_timer = if cfg!(parallel_compiler) {
-            Some(tcx.profiler().query_blocked())
+            Some(tcx.dep_context().profiler().query_blocked())
         } else {
             None
         };
@@ -266,8 +266,8 @@ where
         let cached = cache
             .cache
             .lookup(cache, &key, |value, index| {
-                if unlikely!(tcx.profiler().enabled()) {
-                    tcx.profiler().query_cache_hit(index.into());
+                if unlikely!(tcx.dep_context().profiler().enabled()) {
+                    tcx.dep_context().profiler().query_cache_hit(index.into());
                 }
                 #[cfg(debug_assertions)]
                 {
@@ -395,7 +395,7 @@ pub fn try_get_cached<'a, CTX, C, R, OnHit>(
 ) -> Result<R, QueryLookup>
 where
     C: QueryCache,
-    CTX: QueryContext,
+    CTX: DepContext,
     OnHit: FnOnce(&C::Stored) -> R,
 {
     cache.cache.lookup(cache, &key, |value, index| {
@@ -422,7 +422,7 @@ fn try_execute_query<CTX, C>(
 ) -> C::Stored
 where
     C: QueryCache,
-    C::Key: crate::dep_graph::DepNodeParams<CTX>,
+    C::Key: crate::dep_graph::DepNodeParams<CTX::DepContext>,
     CTX: QueryContext,
 {
     let job = match JobOwner::<'_, CTX::DepKind, CTX::Query, C>::try_start(
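The recurring bound change in these hunks, `DepNodeParams<CTX>` becoming `DepNodeParams<CTX::DepContext>`, means a query key only has to know how to describe itself to the dep-graph side of the context. The following is a hedged standalone sketch of that "bound on an associated type" pattern; the `*Like` traits and the fingerprint arithmetic are hypothetical stand-ins, not the real definitions.

use std::fmt::Debug;

trait DepContextLike: Copy {}

trait QueryContextLike: Copy {
    type DepContext: DepContextLike;
    fn dep_context(&self) -> Self::DepContext;
}

/// Analogue of `DepNodeParams<Ctxt>`: how a query key turns itself into a
/// dep-node fingerprint, given only the dep context.
trait DepNodeParamsLike<Ctxt: DepContextLike>: Debug {
    fn to_fingerprint(&self, tcx: Ctxt) -> u64;
}

/// Generic plumbing: the key is only required to work with `CTX::DepContext`,
/// so it never needs to see the full query machinery.
fn to_dep_node<CTX, K>(tcx: CTX, key: &K) -> u64
where
    CTX: QueryContextLike,
    K: DepNodeParamsLike<CTX::DepContext>,
{
    key.to_fingerprint(tcx.dep_context())
}

// A toy instantiation.
#[derive(Clone, Copy)]
struct DepCtx;
impl DepContextLike for DepCtx {}

#[derive(Clone, Copy)]
struct QueryCtx;
impl QueryContextLike for QueryCtx {
    type DepContext = DepCtx;
    fn dep_context(&self) -> DepCtx {
        DepCtx
    }
}

#[derive(Debug)]
struct DefId(u32);
impl DepNodeParamsLike<DepCtx> for DefId {
    fn to_fingerprint(&self, _tcx: DepCtx) -> u64 {
        u64::from(self.0).wrapping_mul(0x9e37_79b9)
    }
}

fn main() {
    let node = to_dep_node(QueryCtx, &DefId(7));
    println!("dep node fingerprint: {node:#x}");
}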
@@ -432,30 +432,32 @@ where
        TryGetJob::Cycle(result) => return result,
        #[cfg(parallel_compiler)]
        TryGetJob::JobCompleted((v, index)) => {
-            tcx.dep_graph().read_index(index);
+            tcx.dep_context().dep_graph().read_index(index);
            return v;
        }
    };

    // Fast path for when incr. comp. is off. `to_dep_node` is
    // expensive for some `DepKind`s.
-    if !tcx.dep_graph().is_fully_enabled() {
+    if !tcx.dep_context().dep_graph().is_fully_enabled() {
        let null_dep_node = DepNode::new_no_params(DepKind::NULL);
        return force_query_with_job(tcx, key, job, null_dep_node, query).0;
    }

    if query.anon {
-        let prof_timer = tcx.profiler().query_provider();
+        let prof_timer = tcx.dep_context().profiler().query_provider();

        let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
-            tcx.start_query(job.id, diagnostics, |tcx| {
-                tcx.dep_graph().with_anon_task(query.dep_kind, || query.compute(tcx, key))
+            tcx.start_query(job.id, diagnostics, || {
+                tcx.dep_context()
+                    .dep_graph()
+                    .with_anon_task(query.dep_kind, || query.compute(tcx, key))
            })
        });

        prof_timer.finish_with_query_invocation_id(dep_node_index.into());

-        tcx.dep_graph().read_index(dep_node_index);
+        tcx.dep_context().dep_graph().read_index(dep_node_index);

        if unlikely!(!diagnostics.is_empty()) {
            tcx.store_diagnostics_for_anon_node(dep_node_index, diagnostics);
@@ -464,14 +466,14 @@ where
        return job.complete(result, dep_node_index);
    }

-    let dep_node = query.to_dep_node(tcx, &key);
+    let dep_node = query.to_dep_node(*tcx.dep_context(), &key);

    if !query.eval_always {
        // The diagnostics for this query will be
        // promoted to the current session during
        // `try_mark_green()`, so we can ignore them here.
-        let loaded = tcx.start_query(job.id, None, |tcx| {
-            let marked = tcx.dep_graph().try_mark_green_and_read(tcx, &dep_node);
+        let loaded = tcx.start_query(job.id, None, || {
+            let marked = tcx.dep_context().dep_graph().try_mark_green_and_read(tcx, &dep_node);
            marked.map(|(prev_dep_node_index, dep_node_index)| {
                (
                    load_from_disk_and_cache_in_memory(
@@ -492,7 +494,7 @@ where
    }

    let (result, dep_node_index) = force_query_with_job(tcx, key, job, dep_node, query);
-    tcx.dep_graph().read_index(dep_node_index);
+    tcx.dep_context().dep_graph().read_index(dep_node_index);
    result
}

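The anonymous branch above runs the provider inside `with_anon_task` and, only if something was actually emitted, attaches the captured diagnostics to the fresh dep node. Below is a rough standalone sketch of that flow; `DiagnosticStore`, string diagnostics, and the fixed node index are simplified stand-ins rather than the real machinery.

use std::cell::RefCell;

// Hypothetical stand-ins: a node index and a diagnostics sink keyed by node.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct DepNodeIndex(u32);

#[derive(Default)]
struct DiagnosticStore {
    per_node: RefCell<Vec<(DepNodeIndex, Vec<String>)>>,
}

impl DiagnosticStore {
    /// Analogue of `store_diagnostics_for_anon_node`.
    fn store_for_anon_node(&self, index: DepNodeIndex, diags: Vec<String>) {
        self.per_node.borrow_mut().push((index, diags));
    }
}

/// Analogue of `with_diagnostics`: run `f` with a buffer for whatever it
/// emits, then hand both the result and the buffer back to the caller.
fn with_diagnostics<R>(f: impl FnOnce(&RefCell<Vec<String>>) -> R) -> (R, Vec<String>) {
    let buf = RefCell::new(Vec::new());
    let result = f(&buf);
    (result, buf.into_inner())
}

/// Analogue of `with_anon_task`: execute the provider and make up a fresh
/// dep-node index for whatever it produced.
fn with_anon_task<R>(compute: impl FnOnce() -> R) -> (R, DepNodeIndex) {
    (compute(), DepNodeIndex(0))
}

fn main() {
    let store = DiagnosticStore::default();

    let ((result, dep_node_index), diagnostics) = with_diagnostics(|diags| {
        with_anon_task(|| {
            diags.borrow_mut().push("unused variable `x`".to_string());
            21 * 2 // the "query result"
        })
    });

    // Only pay for the store if something was actually emitted.
    if !diagnostics.is_empty() {
        store.store_for_anon_node(dep_node_index, diagnostics);
    }
    println!("anon query produced {result}");
}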
@@ -510,11 +512,11 @@ where
    // Note this function can be called concurrently from the same query
    // We must ensure that this is handled correctly.

-    debug_assert!(tcx.dep_graph().is_green(dep_node));
+    debug_assert!(tcx.dep_context().dep_graph().is_green(dep_node));

    // First we try to load the result from the on-disk cache.
    let result = if query.cache_on_disk(tcx, &key, None) {
-        let prof_timer = tcx.profiler().incr_cache_loading();
+        let prof_timer = tcx.dep_context().profiler().incr_cache_loading();
        let result = query.try_load_from_disk(tcx, prev_dep_node_index);
        prof_timer.finish_with_query_invocation_id(dep_node_index.into());

@@ -536,10 +538,10 @@ where
    } else {
        // We could not load a result from the on-disk cache, so
        // recompute.
-        let prof_timer = tcx.profiler().query_provider();
+        let prof_timer = tcx.dep_context().profiler().query_provider();

        // The dep-graph for this computation is already in-place.
-        let result = tcx.dep_graph().with_ignore(|| query.compute(tcx, key));
+        let result = tcx.dep_context().dep_graph().with_ignore(|| query.compute(tcx, key));

        prof_timer.finish_with_query_invocation_id(dep_node_index.into());

@@ -549,7 +551,7 @@ where
    // If `-Zincremental-verify-ich` is specified, re-hash results from
    // the cache and make sure that they have the expected fingerprint.
    if unlikely!(tcx.incremental_verify_ich()) {
-        incremental_verify_ich(tcx, &result, dep_node, dep_node_index, query);
+        incremental_verify_ich(*tcx.dep_context(), &result, dep_node, dep_node_index, query);
    }

    result
@@ -558,7 +560,7 @@ where
#[inline(never)]
#[cold]
fn incremental_verify_ich<CTX, K, V: Debug>(
-    tcx: CTX,
+    tcx: CTX::DepContext,
    result: &V,
    dep_node: &DepNode<CTX::DepKind>,
    dep_node_index: DepNodeIndex,
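The green-node path in these hunks follows a simple policy: prefer the serialized result from the previous session's on-disk cache, recompute with dependency tracking suppressed only if that fails, and optionally re-hash the result to check the fingerprint (the `-Zincremental-verify-ich` case). A simplified, hypothetical version of that decision, with string values standing in for query results:

use std::collections::HashMap;

// Hypothetical stand-ins for the on-disk cache and a query provider.
struct OnDiskCache {
    saved: HashMap<u32, String>,
}

impl OnDiskCache {
    fn try_load(&self, prev_index: u32) -> Option<String> {
        self.saved.get(&prev_index).cloned()
    }
}

fn compute_fresh(key: u32) -> String {
    format!("value-for-{key}")
}

fn fingerprint(value: &str) -> u64 {
    value.bytes().fold(0u64, |acc, b| acc.wrapping_mul(31).wrapping_add(b as u64))
}

/// Sketch of the "green node" path: prefer the cached result, recompute only
/// as a fallback, and optionally verify that both routes agree.
fn load_or_recompute(cache: &OnDiskCache, prev_index: u32, key: u32, verify: bool) -> String {
    let result = match cache.try_load(prev_index) {
        Some(cached) => cached,
        // Nothing on disk: recompute. In rustc this runs with dep tracking
        // ignored because the node's edges already exist in the graph.
        None => compute_fresh(key),
    };

    if verify {
        // Analogue of `-Zincremental-verify-ich`: the reloaded value must hash
        // to the same fingerprint as a fresh computation.
        assert_eq!(fingerprint(&result), fingerprint(&compute_fresh(key)));
    }
    result
}

fn main() {
    let mut saved = HashMap::new();
    saved.insert(3_u32, "value-for-7".to_string());
    let cache = OnDiskCache { saved };

    // Cache hit for (prev_index = 3, key = 7); miss for anything else.
    assert_eq!(load_or_recompute(&cache, 3, 7, true), "value-for-7");
    assert_eq!(load_or_recompute(&cache, 4, 9, true), "value-for-9");
    println!("both paths verified");
}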
@@ -601,7 +603,7 @@ where
    // 2. Two distinct query keys get mapped to the same `DepNode`
    //    (see for example #48923).
    assert!(
-        !tcx.dep_graph().dep_node_exists(&dep_node),
+        !tcx.dep_context().dep_graph().dep_node_exists(&dep_node),
        "forcing query with already existing `DepNode`\n\
         - query-key: {:?}\n\
         - dep-node: {:?}",
@@ -609,12 +611,12 @@ where
        dep_node
    );

-    let prof_timer = tcx.profiler().query_provider();
+    let prof_timer = tcx.dep_context().profiler().query_provider();

    let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
-        tcx.start_query(job.id, diagnostics, |tcx| {
+        tcx.start_query(job.id, diagnostics, || {
            if query.eval_always {
-                tcx.dep_graph().with_eval_always_task(
+                tcx.dep_context().dep_graph().with_eval_always_task(
                    dep_node,
                    tcx,
                    key,
@@ -622,7 +624,13 @@ where
                    query.hash_result,
                )
            } else {
-                tcx.dep_graph().with_task(dep_node, tcx, key, query.compute, query.hash_result)
+                tcx.dep_context().dep_graph().with_task(
+                    dep_node,
+                    tcx,
+                    key,
+                    query.compute,
+                    query.hash_result,
+                )
            }
        })
    });
@@ -651,7 +659,7 @@ fn get_query_impl<CTX, C>(
where
    CTX: QueryContext,
    C: QueryCache,
-    C::Key: crate::dep_graph::DepNodeParams<CTX>,
+    C::Key: crate::dep_graph::DepNodeParams<CTX::DepContext>,
{
    try_execute_query(tcx, state, cache, span, key, lookup, query)
}
@@ -667,7 +675,7 @@ where
#[inline(never)]
fn ensure_must_run<CTX, K, V>(tcx: CTX, key: &K, query: &QueryVtable<CTX, K, V>) -> bool
where
-    K: crate::dep_graph::DepNodeParams<CTX>,
+    K: crate::dep_graph::DepNodeParams<CTX::DepContext>,
    CTX: QueryContext,
{
    if query.eval_always {
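When a query is finally forced, the provider runs either as an `eval_always` task or as a regular tracked task; both hash the result, but only the tracked task records the reads it performed, since an `eval_always` node is re-executed every session anyway. A toy sketch of that distinction follows; all names and the edge-recording scheme are hypothetical simplifications.

use std::cell::RefCell;

#[derive(Default)]
struct DepGraph {
    edges: RefCell<Vec<(String, String)>>,
    tracking: RefCell<Option<String>>,
}

impl DepGraph {
    /// Called by providers whenever they consult one of their inputs.
    fn read(&self, input: &str) {
        let tracking = self.tracking.borrow();
        if let Some(current) = tracking.as_ref() {
            self.edges.borrow_mut().push((current.clone(), input.to_string()));
        }
    }

    /// Regular tracked task: reads become edges, and the result is hashed so
    /// the next session can tell whether this node changed.
    fn with_task(&self, node: &str, compute: impl FnOnce() -> String) -> (String, u64) {
        *self.tracking.borrow_mut() = Some(node.to_string());
        let result = compute();
        *self.tracking.borrow_mut() = None;
        let fp = fingerprint(&result);
        (result, fp)
    }

    /// `eval_always` task: still hashed, but no edges are recorded because
    /// the node is unconditionally re-executed every session.
    fn with_eval_always_task(&self, _node: &str, compute: impl FnOnce() -> String) -> (String, u64) {
        let result = compute();
        let fp = fingerprint(&result);
        (result, fp)
    }
}

fn fingerprint(s: &str) -> u64 {
    s.bytes().fold(0, |acc, b| acc.wrapping_mul(131).wrapping_add(b as u64))
}

fn main() {
    let graph = DepGraph::default();

    let (_layout, fp1) = graph.with_task("layout_of(Foo)", || {
        graph.read("type_of(Foo)"); // recorded as an edge of layout_of(Foo)
        "Layout { size: 8 }".to_string()
    });

    let (_hash, fp2) = graph.with_eval_always_task("crate_hash", || {
        graph.read("hir_crate"); // ignored: nothing is being tracked
        "0xabad1dea".to_string()
    });

    assert_eq!(graph.edges.borrow().len(), 1);
    println!("fingerprints: {fp1:#x} / {fp2:#x}");
}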
@@ -677,9 +685,9 @@ where
    // Ensuring an anonymous query makes no sense
    assert!(!query.anon);

-    let dep_node = query.to_dep_node(tcx, key);
+    let dep_node = query.to_dep_node(*tcx.dep_context(), key);

-    match tcx.dep_graph().try_mark_green_and_read(tcx, &dep_node) {
+    match tcx.dep_context().dep_graph().try_mark_green_and_read(tcx, &dep_node) {
        None => {
            // A None return from `try_mark_green_and_read` means that this is either
            // a new dep node or that the dep node has already been marked red.
@@ -690,7 +698,7 @@ where
            true
        }
        Some((_, dep_node_index)) => {
-            tcx.profiler().query_cache_hit(dep_node_index.into());
+            tcx.dep_context().profiler().query_cache_hit(dep_node_index.into());
            false
        }
    }
@@ -707,14 +715,14 @@ fn force_query_impl<CTX, C>(
    query: &QueryVtable<CTX, C::Key, C::Value>,
) where
    C: QueryCache,
-    C::Key: crate::dep_graph::DepNodeParams<CTX>,
+    C::Key: crate::dep_graph::DepNodeParams<CTX::DepContext>,
    CTX: QueryContext,
{
    // We may be concurrently trying both execute and force a query.
    // Ensure that only one of them runs the query.
    let cached = cache.cache.lookup(cache, &key, |_, index| {
-        if unlikely!(tcx.profiler().enabled()) {
-            tcx.profiler().query_cache_hit(index.into());
+        if unlikely!(tcx.dep_context().profiler().enabled()) {
+            tcx.dep_context().profiler().query_cache_hit(index.into());
        }
        #[cfg(debug_assertions)]
        {
@@ -752,7 +760,7 @@ pub fn get_query<Q, CTX>(
) -> Option<Q::Stored>
where
    Q: QueryDescription<CTX>,
-    Q::Key: crate::dep_graph::DepNodeParams<CTX>,
+    Q::Key: crate::dep_graph::DepNodeParams<CTX::DepContext>,
    CTX: QueryContext,
{
    let query = &Q::VTABLE;
@@ -771,7 +779,7 @@ where
pub fn force_query<Q, CTX>(tcx: CTX, key: Q::Key, span: Span, dep_node: DepNode<CTX::DepKind>)
where
    Q: QueryDescription<CTX>,
-    Q::Key: crate::dep_graph::DepNodeParams<CTX>,
+    Q::Key: crate::dep_graph::DepNodeParams<CTX::DepContext>,
    CTX: QueryContext,
{
    force_query_impl(tcx, Q::query_state(tcx), Q::query_cache(tcx), key, span, dep_node, &Q::VTABLE)
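`ensure_must_run` captures the "ensure" flavour of a query: the caller wants the side effects, not the value, so the provider only needs to run when the node cannot be marked green from the previous session (or when it is `eval_always`). A small self-contained sketch of that decision, with made-up types in place of the real dep graph:

use std::collections::HashMap;

// Hypothetical stand-ins for dep-graph state carried over from a previous session.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct DepNode(u32);

struct DepGraph {
    /// `true` means the node can be marked green (its inputs are unchanged).
    prev_session: HashMap<DepNode, bool>,
}

impl DepGraph {
    /// Analogue of `try_mark_green_and_read`: `Some(..)` if the node could be
    /// recolored green from the previous graph, `None` otherwise.
    fn try_mark_green(&self, node: DepNode) -> Option<u32> {
        match self.prev_session.get(&node) {
            Some(true) => Some(node.0), // reuse the previous index
            _ => None,
        }
    }
}

/// Analogue of `ensure_must_run`: callers of `ensure()` only care about side
/// effects, so the provider needs to run only when green-marking fails.
fn ensure_must_run(graph: &DepGraph, node: DepNode, eval_always: bool) -> bool {
    if eval_always {
        return true; // `eval_always` queries can never be reused
    }
    match graph.try_mark_green(node) {
        None => true,          // new or red node: must execute
        Some(_index) => false, // green: a cache hit, nothing to do
    }
}

fn main() {
    let mut prev_session = HashMap::new();
    prev_session.insert(DepNode(1), true);  // unchanged since last session
    prev_session.insert(DepNode(2), false); // inputs changed
    let graph = DepGraph { prev_session };

    assert!(!ensure_must_run(&graph, DepNode(1), false)); // green, skip
    assert!(ensure_must_run(&graph, DepNode(2), false));  // red, run it
    assert!(ensure_must_run(&graph, DepNode(3), false));  // unknown, run it
    assert!(ensure_must_run(&graph, DepNode(1), true));   // eval_always, run it
    println!("ensure checks passed");
}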
@@ -11,10 +11,8 @@
 extern crate rustc_driver;
 extern crate rustc_errors;
 extern crate rustc_interface;
-extern crate rustc_middle;

 use rustc_interface::interface;
-use rustc_middle::ty::TyCtxt;
 use rustc_tools_util::VersionInfo;

 use std::borrow::Cow;
@@ -168,7 +166,7 @@ fn report_clippy_ice(info: &panic::PanicInfo<'_>, bug_report_url: &str) {

    let num_frames = if backtrace { None } else { Some(2) };

-    TyCtxt::try_print_query_stack(&handler, num_frames);
+    interface::try_print_query_stack(&handler, num_frames);
}

fn toolchain_path(home: Option<String>, toolchain: Option<String>) -> Option<PathBuf> {
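With the query engine moved out of rustc_middle, Clippy's ICE hook now reaches the query-stack printer through rustc_interface rather than through `TyCtxt`. Below is a rough, self-contained sketch of the same panic-hook idea in plain Rust; `try_print_query_stack` here is a local stand-in for the real entry point, not its actual implementation.

use std::panic;

/// Hypothetical stand-in for the query-stack printer: rustc walks the active
/// query jobs, here we just print a fixed trace up to `num_frames` entries.
fn try_print_query_stack(num_frames: Option<usize>) {
    let frames = ["typeck(foo)", "mir_built(foo)", "optimized_mir(foo)"];
    let limit = num_frames.unwrap_or(frames.len());
    for frame in frames.iter().take(limit) {
        eprintln!("query stack: {frame}");
    }
}

fn main() {
    panic::set_hook(Box::new(|info| {
        eprintln!("internal error: {info}");
        // Mirror the driver's behaviour: print the full stack only when a
        // backtrace was requested, otherwise keep it to a couple of frames.
        let backtrace = std::env::var("RUST_BACKTRACE").is_ok();
        let num_frames = if backtrace { None } else { Some(2) };
        try_print_query_stack(num_frames);
    }));

    // Trigger the hook once so the example has visible output.
    let _ = panic::catch_unwind(|| panic!("simulated ICE"));
    println!("driver kept running after reporting the ICE");
}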