Auto merge of #108375 - Zoxc:query-inline, r=cjgillot
Add inlining attributes for query system functions. These only have a single caller, but don't always get inlined.
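As a minimal sketch of the pattern this commit applies (the names below are invented for illustration and are not rustc code): a tiny single-caller helper gets an explicit inline hint so it folds into its caller, while a large cold path is deliberately kept out of line.

```rust
use std::collections::HashMap;

// Trivial single-caller helper: without the hint it may still be emitted as a
// separate, non-inlined function, adding call overhead on the hot path.
#[inline(always)]
fn lookup_cached(key: u32, cache: &HashMap<u32, u64>) -> Option<u64> {
    cache.get(&key).copied()
}

// Large cold path: keeping it out of line avoids bloating the caller.
#[inline(never)]
fn compute_slow(key: u32) -> u64 {
    (0..=u64::from(key)).sum()
}

// The single caller of both helpers, analogous to a query entry point.
fn get(key: u32, cache: &mut HashMap<u32, u64>) -> u64 {
    if let Some(v) = lookup_cached(key, cache) {
        return v;
    }
    let v = compute_slow(key);
    cache.insert(key, v);
    v
}

fn main() {
    let mut cache = HashMap::new();
    assert_eq!(get(4, &mut cache), 10);
    assert_eq!(get(4, &mut cache), 10); // second call hits the cache
}
```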
commit 43ee4d15bf
@@ -1012,6 +1012,7 @@ impl<'tcx> TyCtxt<'tcx> {
 
     /// Note that this is *untracked* and should only be used within the query
     /// system if the result is otherwise tracked through queries
+    #[inline]
     pub fn cstore_untracked(self) -> MappedReadGuard<'tcx, CrateStoreDyn> {
         ReadGuard::map(self.untracked.cstore.read(), |c| &**c)
     }
@@ -124,9 +124,7 @@ impl QueryContext for QueryCtxt<'_> {
             };
 
             // Use the `ImplicitCtxt` while we execute the query.
-            tls::enter_context(&new_icx, || {
-                rustc_data_structures::stack::ensure_sufficient_stack(compute)
-            })
+            tls::enter_context(&new_icx, compute)
         })
     }
 
@@ -279,6 +279,7 @@ impl<K: DepKind> DepGraph<K> {
     /// `arg` parameter.
     ///
     /// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/incremental-compilation.html
+    #[inline(always)]
     pub fn with_task<Ctxt: HasDepContext<DepKind = K>, A: Debug, R>(
         &self,
         key: DepNode<K>,
@@ -298,6 +299,7 @@ impl<K: DepKind> DepGraph<K> {
         }
     }
 
+    #[inline(always)]
     fn with_task_impl<Ctxt: HasDepContext<DepKind = K>, A: Debug, R>(
         &self,
         key: DepNode<K>,
@@ -598,6 +600,7 @@ impl<K: DepKind> DepGraph<K> {
         self.data.is_some() && self.dep_node_index_of_opt(dep_node).is_some()
     }
 
+    #[inline]
     pub fn prev_fingerprint_of(&self, dep_node: &DepNode<K>) -> Option<Fingerprint> {
         self.data.as_ref().unwrap().previous.fingerprint_of(dep_node)
     }
@@ -1127,6 +1130,7 @@ impl<K: DepKind> CurrentDepGraph<K> {
 
     /// Writes the node to the current dep-graph and allocates a `DepNodeIndex` for it.
     /// Assumes that this is a node that has no equivalent in the previous dep-graph.
+    #[inline(always)]
     fn intern_new_node(
         &self,
         profiler: &SelfProfilerRef,
@@ -1365,6 +1369,7 @@ impl DepNodeColorMap {
         }
     }
 
+    #[inline]
     fn insert(&self, index: SerializedDepNodeIndex, color: DepNodeColor) {
         self.values[index].store(
             match color {
@@ -15,6 +15,7 @@ use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::profiling::TimingGuard;
 #[cfg(parallel_compiler)]
 use rustc_data_structures::sharded::Sharded;
+use rustc_data_structures::stack::ensure_sufficient_stack;
 use rustc_data_structures::sync::Lock;
 use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed, FatalError};
 use rustc_session::Session;
@@ -188,12 +189,12 @@ where
         #[cfg(not(parallel_compiler))]
         let mut state_lock = state.active.lock();
         let lock = &mut *state_lock;
+        let current_job_id = qcx.current_query_job();
 
         match lock.entry(key) {
             Entry::Vacant(entry) => {
                 let id = qcx.next_job_id();
-                let job = qcx.current_query_job();
-                let job = QueryJob::new(id, span, job);
+                let job = QueryJob::new(id, span, current_job_id);
 
                 let key = *entry.key();
                 entry.insert(QueryResult::Started(job));
@@ -212,7 +213,7 @@ where
                 // so we just return the error.
                 return TryGetJob::Cycle(id.find_cycle_in_stack(
                     qcx.try_collect_active_jobs().unwrap(),
-                    &qcx.current_query_job(),
+                    &current_job_id,
                     span,
                 ));
             }
@@ -230,7 +231,7 @@ where
 
             // With parallel queries we might just have to wait on some other
             // thread.
-            let result = latch.wait_on(qcx.current_query_job(), span);
+            let result = latch.wait_on(current_job_id, span);
 
             match result {
                 Ok(()) => TryGetJob::JobCompleted(query_blocked_prof_timer),
@@ -346,10 +347,9 @@ where
     }
 }
 
+#[inline(never)]
 fn try_execute_query<Q, Qcx>(
     qcx: Qcx,
-    state: &QueryState<Q::Key, Qcx::DepKind>,
-    cache: &Q::Cache,
     span: Span,
     key: Q::Key,
     dep_node: Option<DepNode<Qcx::DepKind>>,
@@ -358,9 +358,11 @@ where
     Q: QueryConfig<Qcx>,
     Qcx: QueryContext,
 {
+    let state = Q::query_state(qcx);
     match JobOwner::<'_, Q::Key, Qcx::DepKind>::try_start(&qcx, state, span, key) {
         TryGetJob::NotYetStarted(job) => {
             let (result, dep_node_index) = execute_job::<Q, Qcx>(qcx, key, dep_node, job.id);
+            let cache = Q::query_cache(qcx);
             if Q::FEEDABLE {
                 // We should not compute queries that also got a value via feeding.
                 // This can't happen, as query feeding adds the very dependencies to the fed query
@@ -381,7 +383,7 @@ where
         }
         #[cfg(parallel_compiler)]
         TryGetJob::JobCompleted(query_blocked_prof_timer) => {
-            let Some((v, index)) = cache.lookup(&key) else {
+            let Some((v, index)) = Q::query_cache(qcx).lookup(&key) else {
                 panic!("value must be in cache after waiting")
             };
 
@@ -393,6 +395,7 @@ where
     }
 }
 
+#[inline(always)]
 fn execute_job<Q, Qcx>(
     qcx: Qcx,
     key: Q::Key,
@@ -478,6 +481,7 @@ where
     (result, dep_node_index)
 }
 
+#[inline(always)]
 fn try_load_from_disk_and_cache_in_memory<Q, Qcx>(
     qcx: Qcx,
     key: &Q::Key,
@@ -568,6 +572,7 @@ where
     Some((result, dep_node_index))
 }
 
+#[inline]
 #[instrument(skip(tcx, result, hash_result), level = "debug")]
 pub(crate) fn incremental_verify_ich<Tcx, V: Debug>(
     tcx: Tcx,
@@ -722,6 +727,7 @@ pub enum QueryMode {
     Ensure,
 }
 
+#[inline(always)]
 pub fn get_query<Q, Qcx, D>(qcx: Qcx, span: Span, key: Q::Key, mode: QueryMode) -> Option<Q::Value>
 where
     D: DepKind,
@@ -739,14 +745,8 @@ where
         None
     };
 
-    let (result, dep_node_index) = try_execute_query::<Q, Qcx>(
-        qcx,
-        Q::query_state(qcx),
-        Q::query_cache(qcx),
-        span,
-        key,
-        dep_node,
-    );
+    let (result, dep_node_index) =
+        ensure_sufficient_stack(|| try_execute_query::<Q, Qcx>(qcx, span, key, dep_node));
     if let Some(dep_node_index) = dep_node_index {
         qcx.dep_context().dep_graph().read_index(dep_node_index)
     }
@@ -762,14 +762,12 @@ where
 {
     // We may be concurrently trying both execute and force a query.
     // Ensure that only one of them runs the query.
-    let cache = Q::query_cache(qcx);
-    if let Some((_, index)) = cache.lookup(&key) {
+    if let Some((_, index)) = Q::query_cache(qcx).lookup(&key) {
         qcx.dep_context().profiler().query_cache_hit(index.into());
        return;
     }
 
-    let state = Q::query_state(qcx);
     debug_assert!(!Q::ANON);
 
-    try_execute_query::<Q, _>(qcx, state, cache, DUMMY_SP, key, Some(dep_node));
+    ensure_sufficient_stack(|| try_execute_query::<Q, _>(qcx, DUMMY_SP, key, Some(dep_node)));
 }
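Besides the inline attributes, the last two hunks move the `ensure_sufficient_stack` guard out of the per-query TLS closure and into the outer entry points (`get_query` and its force counterpart). A hedged, self-contained sketch of that shape follows; the `ensure_sufficient_stack` here is only a stand-in stub, while the real helper lives in `rustc_data_structures::stack` and grows the stack when it is close to running out.

```rust
// Stand-in for rustc_data_structures::stack::ensure_sufficient_stack.
// The real helper allocates a fresh stack segment if needed; this stub
// simply runs the closure so the example is self-contained.
fn ensure_sufficient_stack<R>(f: impl FnOnce() -> R) -> R {
    f()
}

// Deeply recursive work, analogous to nested query execution.
fn try_execute(depth: u32) -> u32 {
    if depth == 0 { 0 } else { 1 + try_execute(depth - 1) }
}

// Public entry point, analogous to get_query: the guard is applied once here,
// instead of once per nested query invocation.
fn get(depth: u32) -> u32 {
    ensure_sufficient_stack(|| try_execute(depth))
}

fn main() {
    assert_eq!(get(1000), 1000);
}
```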