Some cleanup

parent 49560e9c49
commit ab168e69ac
@@ -83,17 +83,12 @@ impl QueryContext for QueryCtxt<'_> {
         &self,
         token: QueryJobId<Self::DepKind>,
         diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
-        read_allowed: bool,
         compute: impl FnOnce() -> R,
     ) -> R {
         // The `TyCtxt` stored in TLS has the same global interner lifetime
         // as `self`, so we use `with_related_context` to relate the 'tcx lifetimes
         // when accessing the `ImplicitCtxt`.
         tls::with_related_context(**self, move |current_icx| {
-            let mut old_read_allowed = false;
-            if let Some(task_deps) = current_icx.task_deps {
-                old_read_allowed = std::mem::replace(&mut task_deps.lock().read_allowed, read_allowed);
-            }
             // Update the `ImplicitCtxt` to point to our new query job.
             let new_icx = ImplicitCtxt {
                 tcx: **self,
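Aside: the lines removed above follow a common save-and-restore pattern: stash the old flag with `std::mem::replace`, run the wrapped computation, then write the old value back (the restore is visible in the next hunk). A minimal, self-contained sketch of that pattern using only std types; `Mutex` stands in for rustc's `Lock`, and the names here are illustrative, not rustc's:

use std::sync::Mutex;

struct TaskDeps {
    read_allowed: bool,
}

// Save the old flag, install the new one, run the computation, then restore.
// This mirrors only the shape of the removed code, not rustc's actual types.
fn with_read_allowed<R>(
    deps: &Mutex<TaskDeps>,
    read_allowed: bool,
    compute: impl FnOnce() -> R,
) -> R {
    let old = std::mem::replace(&mut deps.lock().unwrap().read_allowed, read_allowed);
    let result = compute();
    deps.lock().unwrap().read_allowed = old;
    result
}

fn main() {
    let deps = Mutex::new(TaskDeps { read_allowed: true });
    let answer = with_read_allowed(&deps, false, || 2 + 2);
    assert_eq!(answer, 4);
    assert!(deps.lock().unwrap().read_allowed); // old value restored
}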
@@ -104,14 +99,9 @@ impl QueryContext for QueryCtxt<'_> {
             };
 
             // Use the `ImplicitCtxt` while we execute the query.
-            let res = tls::enter_context(&new_icx, |_| {
+            tls::enter_context(&new_icx, |_| {
                 rustc_data_structures::stack::ensure_sufficient_stack(compute)
-            });
-
-            if let Some(task_deps) = new_icx.task_deps {
-                task_deps.lock().read_allowed = old_read_allowed;
-            }
-            res
+            })
         })
     }
 }
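For orientation, this is roughly how the method reads once both hunks are applied. The enclosing `fn` line is outside the hunks, so its exact form (written here as `fn start_query<R>`) is inferred from the call sites and the `-> R` return type, and the remaining `ImplicitCtxt` fields are elided because the diff does not touch them:

    fn start_query<R>(
        &self,
        token: QueryJobId<Self::DepKind>,
        diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
        compute: impl FnOnce() -> R,
    ) -> R {
        // The `TyCtxt` stored in TLS has the same global interner lifetime
        // as `self`, so we use `with_related_context` to relate the 'tcx lifetimes
        // when accessing the `ImplicitCtxt`.
        tls::with_related_context(**self, move |current_icx| {
            // Update the `ImplicitCtxt` to point to our new query job.
            let new_icx = ImplicitCtxt {
                tcx: **self,
                // ...remaining fields unchanged and elided by the diff...
            };

            // Use the `ImplicitCtxt` while we execute the query.
            tls::enter_context(&new_icx, |_| {
                rustc_data_structures::stack::ensure_sufficient_stack(compute)
            })
        })
    }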
@@ -142,7 +142,6 @@ pub trait QueryContext: HasDepContext {
         &self,
         token: QueryJobId<Self::DepKind>,
         diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
-        read_allowed: bool,
         compute: impl FnOnce() -> R,
     ) -> R;
 }
@@ -2,6 +2,7 @@
 //! generate the actual methods on tcx which find and execute the provider,
 //! manage the caches, and so forth.
 
+use crate::dep_graph::DepKind;
 use crate::dep_graph::{DepContext, DepNode, DepNodeIndex, DepNodeParams, TaskDeps};
 use crate::query::caches::QueryCache;
 use crate::query::config::{QueryDescription, QueryVtable};
@@ -9,7 +10,6 @@ use crate::query::job::{
     report_cycle, QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryShardJobId,
 };
 use crate::query::{QueryContext, QueryMap, QuerySideEffects, QueryStackFrame};
-use crate::dep_graph::DepKind;
 use rustc_data_structures::fingerprint::Fingerprint;
 use rustc_data_structures::fx::{FxHashMap, FxHasher};
 #[cfg(parallel_compiler)]
@@ -440,7 +440,7 @@ where
     // Fast path for when incr. comp. is off.
     if !dep_graph.is_fully_enabled() {
         let prof_timer = tcx.dep_context().profiler().query_provider();
-        let result = tcx.start_query(job_id, None, true, || query.compute(*tcx.dep_context(), key));
+        let result = tcx.start_query(job_id, None, || query.compute(*tcx.dep_context(), key));
         let dep_node_index = dep_graph.next_virtual_depnode_index();
         prof_timer.finish_with_query_invocation_id(dep_node_index.into());
         return (result, dep_node_index);
@@ -453,7 +453,7 @@ where
 
     // The diagnostics for this query will be promoted to the current session during
     // `try_mark_green()`, so we can ignore them here.
-    if let Some(ret) = tcx.start_query(job_id, None, false, || {
+    if let Some(ret) = tcx.start_query(job_id, None, || {
         try_load_from_disk_and_cache_in_memory(tcx, &key, &dep_node, query)
     }) {
         return ret;
@@ -463,7 +463,7 @@ where
     let prof_timer = tcx.dep_context().profiler().query_provider();
     let diagnostics = Lock::new(ThinVec::new());
 
-    let (result, dep_node_index) = tcx.start_query(job_id, Some(&diagnostics), true, || {
+    let (result, dep_node_index) = tcx.start_query(job_id, Some(&diagnostics), || {
         if query.anon {
             return dep_graph.with_anon_task(*tcx.dep_context(), query.dep_kind, || {
                 query.compute(*tcx.dep_context(), key)
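Taken together, the three call-site hunks converge on the same shape: the boolean argument is gone and the closure now follows `diagnostics` directly. A condensed view assembled from the `+` lines above, with the surrounding control flow elided:

// Fast path, incremental compilation disabled: no diagnostics collected.
let result = tcx.start_query(job_id, None, || query.compute(*tcx.dep_context(), key));

// Try-mark-green path: diagnostics are promoted later, so none are collected here.
if let Some(ret) = tcx.start_query(job_id, None, || {
    try_load_from_disk_and_cache_in_memory(tcx, &key, &dep_node, query)
}) { /* return ret */ }

// Full execution path: diagnostics collected into `Lock<ThinVec<Diagnostic>>`.
let (result, dep_node_index) = tcx.start_query(job_id, Some(&diagnostics), || {
    /* anonymous-task or normal compute, as in the hunk above */
});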