Auto merge of #77871 - Julian-Wollersberger:less-query-context, r=oli-obk
Make fewer types generic over QueryContext

While trying to refactor `rustc_query_system::query::QueryContext` to make it dyn-safe, I noticed some smaller things:

* `QueryConfig` doesn't need to be generic over `QueryContext`.
* ~~The `kind` field on `QueryJobId` is unused.~~
* Some where clauses are unnecessary.
* Many types in `job.rs` were generic over `QueryContext` but only needed `QueryContext::Query`. If `handle_cycle_error()` could be refactored to not take `error: CycleError<CTX::Query>`, all those bounds could be removed as well.

Changing `find_cycle_in_stack()` in `job.rs` to not take a `tcx` argument is the only functional change here. Everything else is just updating type signatures (aka compile-error driven development ^^).

~~Currently there is a weird bug where memory usage suddenly skyrockets when running UI tests. I'll investigate that tomorrow. A perf run probably won't make sense before that is fixed.~~ EDIT: `kind` actually is used by `Eq`, and re-adding it fixed the memory issue.
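For readers skimming the diff, here is a minimal, hedged sketch of the pattern the PR applies, using simplified stand-in types (illustrative names, not the actual rustc definitions): data types that used to carry a full `CTX: QueryContext` bound become generic over only the dep-kind `D` and query description `Q` they actually store, and the associated types are spelled out at the boundary where a real context is still needed.

```rust
use std::collections::HashMap;
use std::hash::Hash;

// Only code that really needs the whole context keeps this bound.
pub trait QueryContext {
    type DepKind: Copy + Eq + Hash;
    type Query: Clone;
}

// After the refactor: plain data types name just the pieces they hold.
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
pub struct QueryJobId<D> {
    pub shard: u16,
    pub kind: D,
}

#[derive(Clone)]
pub struct QueryJobInfo<D, Q> {
    pub id: QueryJobId<D>,
    pub query: Q,
}

// The map of active jobs no longer mentions `CTX` at all.
pub type QueryMap<D, Q> = HashMap<QueryJobId<D>, QueryJobInfo<D, Q>>;

// Context users spell out the associated types at the boundary.
pub fn collect_jobs<CTX: QueryContext>(_cx: &CTX) -> QueryMap<CTX::DepKind, CTX::Query> {
    QueryMap::default()
}
```

This mirrors the shape of the `QueryMap<D, Q>` and `QueryJobInfo<D, Q>` changes in the diff below; the real types carry more fields and bounds.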
Commit: 500ddc5efd
@@ -40,7 +40,8 @@ impl QueryContext for TyCtxt<'tcx> {
  fn try_collect_active_jobs(
  &self,
- ) -> Option<FxHashMap<QueryJobId<Self::DepKind>, QueryJobInfo<Self>>> {
+ ) -> Option<FxHashMap<QueryJobId<Self::DepKind>, QueryJobInfo<Self::DepKind, Self::Query>>>
+ {
  self.queries.try_collect_active_jobs()
  }

@@ -353,7 +354,7 @@ macro_rules! define_queries_inner {
  $(pub type $name<$tcx> = $V;)*
  }

- $(impl<$tcx> QueryConfig<TyCtxt<$tcx>> for queries::$name<$tcx> {
+ $(impl<$tcx> QueryConfig for queries::$name<$tcx> {
  type Key = $($K)*;
  type Value = $V;
  type Stored = <
@@ -372,7 +373,7 @@ macro_rules! define_queries_inner {
  type Cache = query_storage!([$($modifiers)*][$($K)*, $V]);

  #[inline(always)]
- fn query_state<'a>(tcx: TyCtxt<$tcx>) -> &'a QueryState<TyCtxt<$tcx>, Self::Cache> {
+ fn query_state<'a>(tcx: TyCtxt<$tcx>) -> &'a QueryState<crate::dep_graph::DepKind, <TyCtxt<$tcx> as QueryContext>::Query, Self::Cache> {
  &tcx.queries.$name
  }

@@ -454,7 +455,7 @@ macro_rules! define_queries_inner {
  #[inline(always)]
  #[must_use]
  pub fn $name(self, key: query_helper_param_ty!($($K)*))
- -> <queries::$name<$tcx> as QueryConfig<TyCtxt<$tcx>>>::Stored
+ -> <queries::$name<$tcx> as QueryConfig>::Stored
  {
  self.at(DUMMY_SP).$name(key.into_query_param())
  })*
@@ -493,7 +494,7 @@ macro_rules! define_queries_inner {
  $($(#[$attr])*
  #[inline(always)]
  pub fn $name(self, key: query_helper_param_ty!($($K)*))
- -> <queries::$name<$tcx> as QueryConfig<TyCtxt<$tcx>>>::Stored
+ -> <queries::$name<$tcx> as QueryConfig>::Stored
  {
  get_query::<queries::$name<'_>, _>(self.tcx, self.span, key.into_query_param())
  })*
@@ -527,7 +528,8 @@ macro_rules! define_queries_struct {
  fallback_extern_providers: Box<Providers>,

  $($(#[$attr])* $name: QueryState<
- TyCtxt<$tcx>,
+ crate::dep_graph::DepKind,
+ <TyCtxt<$tcx> as QueryContext>::Query,
  <queries::$name<$tcx> as QueryAccessors<TyCtxt<'tcx>>>::Cache,
  >,)*
  }
@@ -548,7 +550,7 @@ macro_rules! define_queries_struct {

  pub(crate) fn try_collect_active_jobs(
  &self
- ) -> Option<FxHashMap<QueryJobId<crate::dep_graph::DepKind>, QueryJobInfo<TyCtxt<'tcx>>>> {
+ ) -> Option<FxHashMap<QueryJobId<crate::dep_graph::DepKind>, QueryJobInfo<crate::dep_graph::DepKind, <TyCtxt<$tcx> as QueryContext>::Query>>> {
  let mut jobs = FxHashMap::default();

  $(
@@ -5,8 +5,7 @@ use rustc_data_structures::fx::FxHashMap;
  use rustc_data_structures::profiling::SelfProfiler;
  use rustc_hir::def_id::{CrateNum, DefId, DefIndex, LocalDefId, CRATE_DEF_INDEX, LOCAL_CRATE};
  use rustc_hir::definitions::DefPathData;
- use rustc_query_system::query::QueryCache;
- use rustc_query_system::query::QueryState;
+ use rustc_query_system::query::{QueryCache, QueryContext, QueryState};
  use std::fmt::Debug;
  use std::io::Write;

@@ -231,7 +230,7 @@ where
  pub(super) fn alloc_self_profile_query_strings_for_query_cache<'tcx, C>(
  tcx: TyCtxt<'tcx>,
  query_name: &'static str,
- query_state: &QueryState<TyCtxt<'tcx>, C>,
+ query_state: &QueryState<crate::dep_graph::DepKind, <TyCtxt<'tcx> as QueryContext>::Query, C>,
  string_cache: &mut QueryKeyStringCache,
  ) where
  C: QueryCache,
@@ -1,11 +1,10 @@
  use crate::ty::query::queries;
  use crate::ty::TyCtxt;
  use rustc_hir::def_id::{DefId, LOCAL_CRATE};
- use rustc_query_system::query::QueryCache;
- use rustc_query_system::query::QueryState;
- use rustc_query_system::query::{QueryAccessors, QueryContext};
+ use rustc_query_system::query::{QueryAccessors, QueryCache, QueryContext, QueryState};

  use std::any::type_name;
+ use std::hash::Hash;
  use std::mem;
  #[cfg(debug_assertions)]
  use std::sync::atomic::Ordering;
@@ -38,10 +37,12 @@ struct QueryStats {
  local_def_id_keys: Option<usize>,
  }

- fn stats<CTX: QueryContext, C: QueryCache>(
- name: &'static str,
- map: &QueryState<CTX, C>,
- ) -> QueryStats {
+ fn stats<D, Q, C>(name: &'static str, map: &QueryState<D, Q, C>) -> QueryStats
+ where
+ D: Copy + Clone + Eq + Hash,
+ Q: Clone,
+ C: QueryCache,
+ {
  let mut stats = QueryStats {
  name,
  #[cfg(debug_assertions)]
@@ -127,7 +128,8 @@ macro_rules! print_stats {

  $($(
  queries.push(stats::<
- TyCtxt<'_>,
+ crate::dep_graph::DepKind,
+ <TyCtxt<'_> as QueryContext>::Query,
  <queries::$name<'_> as QueryAccessors<TyCtxt<'_>>>::Cache,
  >(
  stringify!($name),
@@ -1,12 +1,12 @@
  use crate::dep_graph::DepNodeIndex;
  use crate::query::plumbing::{QueryLookup, QueryState};
- use crate::query::QueryContext;

  use rustc_arena::TypedArena;
  use rustc_data_structures::fx::FxHashMap;
  use rustc_data_structures::sharded::Sharded;
  use rustc_data_structures::sync::WorkerLocal;
  use std::default::Default;
+ use std::fmt::Debug;
  use std::hash::Hash;
  use std::marker::PhantomData;

@@ -24,16 +24,16 @@ pub trait QueryStorage: Default {
  }

  pub trait QueryCache: QueryStorage {
- type Key: Hash;
+ type Key: Hash + Eq + Clone + Debug;
  type Sharded: Default;

  /// Checks if the query is already computed and in the cache.
  /// It returns the shard index and a lock guard to the shard,
  /// which will be used if the query is not in the cache and we need
  /// to compute it.
- fn lookup<CTX: QueryContext, R, OnHit, OnMiss>(
+ fn lookup<D, Q, R, OnHit, OnMiss>(
  &self,
- state: &QueryState<CTX, Self>,
+ state: &QueryState<D, Q, Self>,
  key: Self::Key,
  // `on_hit` can be called while holding a lock to the query state shard.
  on_hit: OnHit,
@@ -41,7 +41,7 @@ pub trait QueryCache: QueryStorage {
  ) -> R
  where
  OnHit: FnOnce(&Self::Stored, DepNodeIndex) -> R,
- OnMiss: FnOnce(Self::Key, QueryLookup<'_, CTX, Self::Key, Self::Sharded>) -> R;
+ OnMiss: FnOnce(Self::Key, QueryLookup<'_, D, Q, Self::Key, Self::Sharded>) -> R;

  fn complete(
  &self,
@@ -86,21 +86,25 @@ impl<K: Eq + Hash, V: Clone> QueryStorage for DefaultCache<K, V> {
  }
  }

- impl<K: Eq + Hash, V: Clone> QueryCache for DefaultCache<K, V> {
+ impl<K, V> QueryCache for DefaultCache<K, V>
+ where
+ K: Eq + Hash + Clone + Debug,
+ V: Clone,
+ {
  type Key = K;
  type Sharded = FxHashMap<K, (V, DepNodeIndex)>;

  #[inline(always)]
- fn lookup<CTX: QueryContext, R, OnHit, OnMiss>(
+ fn lookup<D, Q, R, OnHit, OnMiss>(
  &self,
- state: &QueryState<CTX, Self>,
+ state: &QueryState<D, Q, Self>,
  key: K,
  on_hit: OnHit,
  on_miss: OnMiss,
  ) -> R
  where
  OnHit: FnOnce(&V, DepNodeIndex) -> R,
- OnMiss: FnOnce(K, QueryLookup<'_, CTX, K, Self::Sharded>) -> R,
+ OnMiss: FnOnce(K, QueryLookup<'_, D, Q, K, Self::Sharded>) -> R,
  {
  let mut lookup = state.get_lookup(&key);
  let lock = &mut *lookup.lock;
@@ -164,21 +168,24 @@ impl<'tcx, K: Eq + Hash, V: 'tcx> QueryStorage for ArenaCache<'tcx, K, V> {
  }
  }

- impl<'tcx, K: Eq + Hash, V: 'tcx> QueryCache for ArenaCache<'tcx, K, V> {
+ impl<'tcx, K, V: 'tcx> QueryCache for ArenaCache<'tcx, K, V>
+ where
+ K: Eq + Hash + Clone + Debug,
+ {
  type Key = K;
  type Sharded = FxHashMap<K, &'tcx (V, DepNodeIndex)>;

  #[inline(always)]
- fn lookup<CTX: QueryContext, R, OnHit, OnMiss>(
+ fn lookup<D, Q, R, OnHit, OnMiss>(
  &self,
- state: &QueryState<CTX, Self>,
+ state: &QueryState<D, Q, Self>,
  key: K,
  on_hit: OnHit,
  on_miss: OnMiss,
  ) -> R
  where
  OnHit: FnOnce(&&'tcx V, DepNodeIndex) -> R,
- OnMiss: FnOnce(K, QueryLookup<'_, CTX, K, Self::Sharded>) -> R,
+ OnMiss: FnOnce(K, QueryLookup<'_, D, Q, K, Self::Sharded>) -> R,
  {
  let mut lookup = state.get_lookup(&key);
  let lock = &mut *lookup.lock;
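A hedged sketch of the shape the cache trait takes after this change, with simplified stand-ins rather than the real `caches.rs` API: `lookup` is generic over the dep-kind `D` and query type `Q` of the state it receives instead of a whole `CTX: QueryContext`, and the key bounds move onto the associated `Key` type so callers stop repeating them.

```rust
use std::collections::HashMap;
use std::fmt::Debug;
use std::hash::Hash;

// Simplified stand-in for the sharded query state; only its type parameters matter here.
pub struct QueryState<D, Q, C> {
    pub cache: C,
    pub active: Vec<(D, Q)>,
}

pub trait QueryCache: Sized {
    // Bounds live on the associated type, so users stop spelling out `C::Key: Eq + Hash + ...`.
    type Key: Hash + Eq + Clone + Debug;
    type Value;

    // No `CTX: QueryContext` parameter: only `D` and `Q` from the state are named.
    fn lookup<D, Q>(&self, state: &QueryState<D, Q, Self>, key: &Self::Key) -> Option<&Self::Value>;
}

pub struct DefaultCache<K, V> {
    map: HashMap<K, V>,
}

impl<K, V> QueryCache for DefaultCache<K, V>
where
    K: Hash + Eq + Clone + Debug,
{
    type Key = K;
    type Value = V;

    fn lookup<D, Q>(&self, _state: &QueryState<D, Q, Self>, key: &K) -> Option<&V> {
        self.map.get(key)
    }
}
```

The real trait additionally threads the shard lock and the hit/miss callbacks shown in the hunks above; the point of the sketch is only the change in generic parameters.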
@@ -12,9 +12,7 @@ use std::borrow::Cow;
  use std::fmt::Debug;
  use std::hash::Hash;

- // The parameter `CTX` is required in librustc_middle:
- // implementations may need to access the `'tcx` lifetime in `CTX = TyCtxt<'tcx>`.
- pub trait QueryConfig<CTX> {
+ pub trait QueryConfig {
  const NAME: &'static str;
  const CATEGORY: ProfileCategory;

@@ -70,7 +68,7 @@ impl<CTX: QueryContext, K, V> QueryVtable<CTX, K, V> {
  }
  }

- pub trait QueryAccessors<CTX: QueryContext>: QueryConfig<CTX> {
+ pub trait QueryAccessors<CTX: QueryContext>: QueryConfig {
  const ANON: bool;
  const EVAL_ALWAYS: bool;
  const DEP_KIND: CTX::DepKind;
@@ -78,7 +76,7 @@ pub trait QueryAccessors<CTX: QueryContext>: QueryConfig<CTX> {
  type Cache: QueryCache<Key = Self::Key, Stored = Self::Stored, Value = Self::Value>;

  // Don't use this method to access query results, instead use the methods on TyCtxt
- fn query_state<'a>(tcx: CTX) -> &'a QueryState<CTX, Self::Cache>;
+ fn query_state<'a>(tcx: CTX) -> &'a QueryState<CTX::DepKind, CTX::Query, Self::Cache>;

  fn to_dep_node(tcx: CTX, key: &Self::Key) -> DepNode<CTX::DepKind>
  where
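A small hedged illustration of the `QueryConfig` change above (a trimmed-down, hypothetical shape, not the complete rustc traits; `CATEGORY` and the dep-node plumbing are omitted): the trait drops its `CTX` parameter entirely because none of its items mention the context, and only `QueryAccessors` keeps the `CTX` bound for the parts that genuinely need it.

```rust
// Hypothetical, trimmed-down shape after the change.
pub trait QueryConfig {
    const NAME: &'static str;

    type Key;
    type Value;
    type Stored: Clone;
}

// The context-dependent parts stay behind the `CTX` bound, layered on top.
pub trait QueryContext {
    type DepKind;
    type Query;
}

pub trait QueryAccessors<CTX: QueryContext>: QueryConfig {
    const DEP_KIND: CTX::DepKind;
}
```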
@@ -1,16 +1,16 @@
- use crate::dep_graph::{DepContext, DepKind};
  use crate::query::plumbing::CycleError;
- use crate::query::QueryContext;

  use rustc_data_structures::fx::FxHashMap;
  use rustc_span::Span;

  use std::convert::TryFrom;
+ use std::hash::Hash;
  use std::marker::PhantomData;
  use std::num::NonZeroU32;

  #[cfg(parallel_compiler)]
  use {
+ super::QueryContext,
  parking_lot::{Condvar, Mutex},
  rustc_data_structures::fx::FxHashSet,
  rustc_data_structures::stable_hasher::{HashStable, StableHasher},
@@ -31,7 +31,7 @@ pub struct QueryInfo<Q> {
  pub query: Q,
  }

- type QueryMap<CTX> = FxHashMap<QueryJobId<<CTX as DepContext>::DepKind>, QueryJobInfo<CTX>>;
+ pub(crate) type QueryMap<D, Q> = FxHashMap<QueryJobId<D>, QueryJobInfo<D, Q>>;

  /// A value uniquely identifiying an active query job within a shard in the query cache.
  #[derive(Copy, Clone, Eq, PartialEq, Hash)]
@@ -39,71 +39,75 @@ pub struct QueryShardJobId(pub NonZeroU32);

  /// A value uniquely identifiying an active query job.
  #[derive(Copy, Clone, Eq, PartialEq, Hash)]
- pub struct QueryJobId<K> {
+ pub struct QueryJobId<D> {
  /// Which job within a shard is this
  pub job: QueryShardJobId,

  /// In which shard is this job
  pub shard: u16,

- /// What kind of query this job is
- pub kind: K,
+ /// What kind of query this job is.
+ pub kind: D,
  }

- impl<K: DepKind> QueryJobId<K> {
- pub fn new(job: QueryShardJobId, shard: usize, kind: K) -> Self {
+ impl<D> QueryJobId<D>
+ where
+ D: Copy + Clone + Eq + Hash,
+ {
+ pub fn new(job: QueryShardJobId, shard: usize, kind: D) -> Self {
  QueryJobId { job, shard: u16::try_from(shard).unwrap(), kind }
  }

- fn query<CTX: QueryContext<DepKind = K>>(self, map: &QueryMap<CTX>) -> CTX::Query {
+ fn query<Q: Clone>(self, map: &QueryMap<D, Q>) -> Q {
  map.get(&self).unwrap().info.query.clone()
  }

  #[cfg(parallel_compiler)]
- fn span<CTX: QueryContext<DepKind = K>>(self, map: &QueryMap<CTX>) -> Span {
+ fn span<Q: Clone>(self, map: &QueryMap<D, Q>) -> Span {
  map.get(&self).unwrap().job.span
  }

  #[cfg(parallel_compiler)]
- fn parent<CTX: QueryContext<DepKind = K>>(self, map: &QueryMap<CTX>) -> Option<QueryJobId<K>> {
+ fn parent<Q: Clone>(self, map: &QueryMap<D, Q>) -> Option<QueryJobId<D>> {
  map.get(&self).unwrap().job.parent
  }

  #[cfg(parallel_compiler)]
- fn latch<'a, CTX: QueryContext<DepKind = K>>(
- self,
- map: &'a QueryMap<CTX>,
- ) -> Option<&'a QueryLatch<CTX>> {
+ fn latch<'a, Q: Clone>(self, map: &'a QueryMap<D, Q>) -> Option<&'a QueryLatch<D, Q>> {
  map.get(&self).unwrap().job.latch.as_ref()
  }
  }

- pub struct QueryJobInfo<CTX: QueryContext> {
- pub info: QueryInfo<CTX::Query>,
- pub job: QueryJob<CTX>,
+ pub struct QueryJobInfo<D, Q> {
+ pub info: QueryInfo<Q>,
+ pub job: QueryJob<D, Q>,
  }

  /// Represents an active query job.
  #[derive(Clone)]
- pub struct QueryJob<CTX: QueryContext> {
+ pub struct QueryJob<D, Q> {
  pub id: QueryShardJobId,

  /// The span corresponding to the reason for which this query was required.
  pub span: Span,

  /// The parent query job which created this job and is implicitly waiting on it.
- pub parent: Option<QueryJobId<CTX::DepKind>>,
+ pub parent: Option<QueryJobId<D>>,

  /// The latch that is used to wait on this job.
  #[cfg(parallel_compiler)]
- latch: Option<QueryLatch<CTX>>,
+ latch: Option<QueryLatch<D, Q>>,

- dummy: PhantomData<QueryLatch<CTX>>,
+ dummy: PhantomData<QueryLatch<D, Q>>,
  }

- impl<CTX: QueryContext> QueryJob<CTX> {
+ impl<D, Q> QueryJob<D, Q>
+ where
+ D: Copy + Clone + Eq + Hash,
+ Q: Clone,
+ {
  /// Creates a new query job.
- pub fn new(id: QueryShardJobId, span: Span, parent: Option<QueryJobId<CTX::DepKind>>) -> Self {
+ pub fn new(id: QueryShardJobId, span: Span, parent: Option<QueryJobId<D>>) -> Self {
  QueryJob {
  id,
  span,
@@ -115,7 +119,7 @@ impl<CTX: QueryContext> QueryJob<CTX> {
  }

  #[cfg(parallel_compiler)]
- pub(super) fn latch(&mut self, _id: QueryJobId<CTX::DepKind>) -> QueryLatch<CTX> {
+ pub(super) fn latch(&mut self, _id: QueryJobId<D>) -> QueryLatch<D, Q> {
  if self.latch.is_none() {
  self.latch = Some(QueryLatch::new());
  }
@@ -123,7 +127,7 @@ impl<CTX: QueryContext> QueryJob<CTX> {
  }

  #[cfg(not(parallel_compiler))]
- pub(super) fn latch(&mut self, id: QueryJobId<CTX::DepKind>) -> QueryLatch<CTX> {
+ pub(super) fn latch(&mut self, id: QueryJobId<D>) -> QueryLatch<D, Q> {
  QueryLatch { id, dummy: PhantomData }
  }

@@ -143,19 +147,26 @@ impl<CTX: QueryContext> QueryJob<CTX> {

  #[cfg(not(parallel_compiler))]
  #[derive(Clone)]
- pub(super) struct QueryLatch<CTX: QueryContext> {
- id: QueryJobId<CTX::DepKind>,
- dummy: PhantomData<CTX>,
+ pub(super) struct QueryLatch<D, Q> {
+ id: QueryJobId<D>,
+ dummy: PhantomData<Q>,
  }

  #[cfg(not(parallel_compiler))]
- impl<CTX: QueryContext> QueryLatch<CTX> {
- pub(super) fn find_cycle_in_stack(&self, tcx: CTX, span: Span) -> CycleError<CTX::Query> {
- let query_map = tcx.try_collect_active_jobs().unwrap();
-
- // Get the current executing query (waiter) and find the waitee amongst its parents
- let mut current_job = tcx.current_query_job();
+ impl<D, Q> QueryLatch<D, Q>
+ where
+ D: Copy + Clone + Eq + Hash,
+ Q: Clone,
+ {
+ pub(super) fn find_cycle_in_stack(
+ &self,
+ query_map: QueryMap<D, Q>,
+ current_job: &Option<QueryJobId<D>>,
+ span: Span,
+ ) -> CycleError<Q> {
+ // Find the waitee amongst `current_job` parents
  let mut cycle = Vec::new();
+ let mut current_job = Option::clone(current_job);

  while let Some(job) = current_job {
  let info = query_map.get(&job).unwrap();
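The only functional change of the PR is visible in this hunk: `find_cycle_in_stack` no longer reaches into `tcx` itself; the caller collects the active-job map and the current job up front and passes them in as plain data. A hedged, self-contained sketch of that calling convention (placeholder types and a simplified body, not the real signatures or cycle detection):

```rust
use std::collections::HashMap;
use std::hash::Hash;

// Placeholder stand-ins for QueryJobId, the query description, and the job map.
pub type QueryMap<D, Q> = HashMap<D, (Q, Option<D>)>; // id -> (query, parent id)

pub struct CycleError<Q> {
    pub stack: Vec<Q>,
}

// After the change, the function is pure data manipulation: it walks the parent chain
// of `current_job` inside the already-collected `query_map` and records the stack.
pub fn find_cycle_in_stack<D, Q>(query_map: &QueryMap<D, Q>, current_job: Option<D>) -> CycleError<Q>
where
    D: Copy + Eq + Hash,
    Q: Clone,
{
    let mut stack = Vec::new();
    let mut cursor = current_job;
    while let Some(id) = cursor {
        match query_map.get(&id) {
            Some((query, parent)) => {
                stack.push(query.clone());
                cursor = *parent;
            }
            None => break,
        }
    }
    CycleError { stack }
}

// The caller does the context-dependent work first, roughly mirroring the new call in the
// diff: `latch.find_cycle_in_stack(tcx.try_collect_active_jobs().unwrap(), &tcx.current_query_job(), span)`.
pub fn report_cycle(query_map: &QueryMap<u32, String>, current: Option<u32>) -> CycleError<String> {
    find_cycle_in_stack(query_map, current)
}
```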
@@ -186,15 +197,15 @@ impl<CTX: QueryContext> QueryLatch<CTX> {
  }

  #[cfg(parallel_compiler)]
- struct QueryWaiter<CTX: QueryContext> {
- query: Option<QueryJobId<CTX::DepKind>>,
+ struct QueryWaiter<D, Q> {
+ query: Option<QueryJobId<D>>,
  condvar: Condvar,
  span: Span,
- cycle: Lock<Option<CycleError<CTX::Query>>>,
+ cycle: Lock<Option<CycleError<Q>>>,
  }

  #[cfg(parallel_compiler)]
- impl<CTX: QueryContext> QueryWaiter<CTX> {
+ impl<D, Q> QueryWaiter<D, Q> {
  fn notify(&self, registry: &rayon_core::Registry) {
  rayon_core::mark_unblocked(registry);
  self.condvar.notify_one();
@@ -202,19 +213,19 @@ impl<CTX: QueryContext> QueryWaiter<CTX> {
  }

  #[cfg(parallel_compiler)]
- struct QueryLatchInfo<CTX: QueryContext> {
+ struct QueryLatchInfo<D, Q> {
  complete: bool,
- waiters: Vec<Lrc<QueryWaiter<CTX>>>,
+ waiters: Vec<Lrc<QueryWaiter<D, Q>>>,
  }

  #[cfg(parallel_compiler)]
  #[derive(Clone)]
- pub(super) struct QueryLatch<CTX: QueryContext> {
- info: Lrc<Mutex<QueryLatchInfo<CTX>>>,
+ pub(super) struct QueryLatch<D, Q> {
+ info: Lrc<Mutex<QueryLatchInfo<D, Q>>>,
  }

  #[cfg(parallel_compiler)]
- impl<CTX: QueryContext> QueryLatch<CTX> {
+ impl<D: Eq + Hash, Q: Clone> QueryLatch<D, Q> {
  fn new() -> Self {
  QueryLatch {
  info: Lrc::new(Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() })),
@@ -223,10 +234,13 @@ impl<CTX: QueryContext> QueryLatch<CTX> {
  }

  #[cfg(parallel_compiler)]
- impl<CTX: QueryContext> QueryLatch<CTX> {
+ impl<D, Q> QueryLatch<D, Q> {
  /// Awaits for the query job to complete.
- pub(super) fn wait_on(&self, tcx: CTX, span: Span) -> Result<(), CycleError<CTX::Query>> {
- let query = tcx.current_query_job();
+ pub(super) fn wait_on(
+ &self,
+ query: Option<QueryJobId<D>>,
+ span: Span,
+ ) -> Result<(), CycleError<Q>> {
  let waiter =
  Lrc::new(QueryWaiter { query, span, cycle: Lock::new(None), condvar: Condvar::new() });
  self.wait_on_inner(&waiter);
@@ -239,12 +253,9 @@ impl<CTX: QueryContext> QueryLatch<CTX> {
  Some(cycle) => Err(cycle),
  }
  }
- }

- #[cfg(parallel_compiler)]
- impl<CTX: QueryContext> QueryLatch<CTX> {
  /// Awaits the caller on this latch by blocking the current thread.
- fn wait_on_inner(&self, waiter: &Lrc<QueryWaiter<CTX>>) {
+ fn wait_on_inner(&self, waiter: &Lrc<QueryWaiter<D, Q>>) {
  let mut info = self.info.lock();
  if !info.complete {
  // We push the waiter on to the `waiters` list. It can be accessed inside
@@ -278,7 +289,7 @@ impl<CTX: QueryContext> QueryLatch<CTX> {

  /// Removes a single waiter from the list of waiters.
  /// This is used to break query cycles.
- fn extract_waiter(&self, waiter: usize) -> Lrc<QueryWaiter<CTX>> {
+ fn extract_waiter(&self, waiter: usize) -> Lrc<QueryWaiter<D, Q>> {
  let mut info = self.info.lock();
  debug_assert!(!info.complete);
  // Remove the waiter from the list of waiters
@@ -288,7 +299,7 @@ impl<CTX: QueryContext> QueryLatch<CTX> {

  /// A resumable waiter of a query. The usize is the index into waiters in the query's latch
  #[cfg(parallel_compiler)]
- type Waiter<K> = (QueryJobId<K>, usize);
+ type Waiter<D> = (QueryJobId<D>, usize);

  /// Visits all the non-resumable and resumable waiters of a query.
  /// Only waiters in a query are visited.
@@ -300,13 +311,15 @@ type Waiter<K> = (QueryJobId<K>, usize);
  /// required information to resume the waiter.
  /// If all `visit` calls returns None, this function also returns None.
  #[cfg(parallel_compiler)]
- fn visit_waiters<CTX: QueryContext, F>(
- query_map: &QueryMap<CTX>,
- query: QueryJobId<CTX::DepKind>,
+ fn visit_waiters<D, Q, F>(
+ query_map: &QueryMap<D, Q>,
+ query: QueryJobId<D>,
  mut visit: F,
- ) -> Option<Option<Waiter<CTX::DepKind>>>
+ ) -> Option<Option<Waiter<D>>>
  where
- F: FnMut(Span, QueryJobId<CTX::DepKind>) -> Option<Option<Waiter<CTX::DepKind>>>,
+ D: Copy + Clone + Eq + Hash,
+ Q: Clone,
+ F: FnMut(Span, QueryJobId<D>) -> Option<Option<Waiter<D>>>,
  {
  // Visit the parent query which is a non-resumable waiter since it's on the same stack
  if let Some(parent) = query.parent(query_map) {
@@ -335,13 +348,17 @@ where
  /// If a cycle is detected, this initial value is replaced with the span causing
  /// the cycle.
  #[cfg(parallel_compiler)]
- fn cycle_check<CTX: QueryContext>(
- query_map: &QueryMap<CTX>,
- query: QueryJobId<CTX::DepKind>,
+ fn cycle_check<D, Q>(
+ query_map: &QueryMap<D, Q>,
+ query: QueryJobId<D>,
  span: Span,
- stack: &mut Vec<(Span, QueryJobId<CTX::DepKind>)>,
- visited: &mut FxHashSet<QueryJobId<CTX::DepKind>>,
- ) -> Option<Option<Waiter<CTX::DepKind>>> {
+ stack: &mut Vec<(Span, QueryJobId<D>)>,
+ visited: &mut FxHashSet<QueryJobId<D>>,
+ ) -> Option<Option<Waiter<D>>>
+ where
+ D: Copy + Clone + Eq + Hash,
+ Q: Clone,
+ {
  if !visited.insert(query) {
  return if let Some(p) = stack.iter().position(|q| q.1 == query) {
  // We detected a query cycle, fix up the initial span and return Some
@@ -376,11 +393,15 @@ fn cycle_check<CTX: QueryContext>(
  /// from `query` without going through any of the queries in `visited`.
  /// This is achieved with a depth first search.
  #[cfg(parallel_compiler)]
- fn connected_to_root<CTX: QueryContext>(
- query_map: &QueryMap<CTX>,
- query: QueryJobId<CTX::DepKind>,
- visited: &mut FxHashSet<QueryJobId<CTX::DepKind>>,
- ) -> bool {
+ fn connected_to_root<D, Q>(
+ query_map: &QueryMap<D, Q>,
+ query: QueryJobId<D>,
+ visited: &mut FxHashSet<QueryJobId<D>>,
+ ) -> bool
+ where
+ D: Copy + Clone + Eq + Hash,
+ Q: Clone,
+ {
  // We already visited this or we're deliberately ignoring it
  if !visited.insert(query) {
  return false;
@@ -399,7 +420,12 @@ fn connected_to_root<CTX: QueryContext>(

  // Deterministically pick an query from a list
  #[cfg(parallel_compiler)]
- fn pick_query<'a, CTX, T, F>(query_map: &QueryMap<CTX>, tcx: CTX, queries: &'a [T], f: F) -> &'a T
+ fn pick_query<'a, CTX, T, F>(
+ query_map: &QueryMap<CTX::DepKind, CTX::Query>,
+ tcx: CTX,
+ queries: &'a [T],
+ f: F,
+ ) -> &'a T
  where
  CTX: QueryContext,
  F: Fn(&T) -> (Span, QueryJobId<CTX::DepKind>),
@@ -429,9 +455,9 @@ where
  /// the function returns false.
  #[cfg(parallel_compiler)]
  fn remove_cycle<CTX: QueryContext>(
- query_map: &QueryMap<CTX>,
+ query_map: &QueryMap<CTX::DepKind, CTX::Query>,
  jobs: &mut Vec<QueryJobId<CTX::DepKind>>,
- wakelist: &mut Vec<Lrc<QueryWaiter<CTX>>>,
+ wakelist: &mut Vec<Lrc<QueryWaiter<CTX::DepKind, CTX::Query>>>,
  tcx: CTX,
  ) -> bool {
  let mut visited = FxHashSet::default();
@@ -15,8 +15,8 @@ mod config;
  pub use self::config::{QueryAccessors, QueryConfig, QueryDescription};

  use crate::dep_graph::{DepContext, DepGraph};
+ use crate::query::job::QueryMap;

- use rustc_data_structures::fx::FxHashMap;
  use rustc_data_structures::stable_hasher::HashStable;
  use rustc_data_structures::sync::Lock;
  use rustc_data_structures::thin_vec::ThinVec;
@@ -38,9 +38,7 @@ pub trait QueryContext: DepContext {
  /// Get the query information from the TLS context.
  fn current_query_job(&self) -> Option<QueryJobId<Self::DepKind>>;

- fn try_collect_active_jobs(
- &self,
- ) -> Option<FxHashMap<QueryJobId<Self::DepKind>, QueryJobInfo<Self>>>;
+ fn try_collect_active_jobs(&self) -> Option<QueryMap<Self::DepKind, Self::Query>>;

  /// Executes a job by changing the `ImplicitCtxt` to point to the
  /// new query job while it executes. It returns the diagnostics
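A hedged sketch of the resulting trait surface, heavily simplified (the real `QueryContext` has more methods, supertraits, and bounds, and the real job types are `QueryJobId<D>` / `QueryJobInfo<D, Q>`): `try_collect_active_jobs` now returns a `QueryMap` alias built from the trait's own associated types, which keeps the signature free of context-parameterized job types.

```rust
use std::collections::HashMap;

// Placeholder job-id/info types for illustration only.
pub type QueryMap<D, Q> = HashMap<u64, (D, Q)>;

pub trait QueryContext {
    type DepKind;
    type Query: Clone;

    // Placeholder return type; the real method returns Option<QueryJobId<Self::DepKind>>.
    fn current_query_job(&self) -> Option<u64>;

    // The whole active-job snapshot is produced in one place and handed around as data.
    fn try_collect_active_jobs(&self) -> Option<QueryMap<Self::DepKind, Self::Query>>;
}
```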
@@ -7,7 +7,7 @@ use crate::dep_graph::{DepNodeIndex, SerializedDepNodeIndex};
  use crate::query::caches::QueryCache;
  use crate::query::config::{QueryDescription, QueryVtable, QueryVtableExt};
  use crate::query::job::{QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryShardJobId};
- use crate::query::QueryContext;
+ use crate::query::{QueryContext, QueryMap};

  #[cfg(not(parallel_compiler))]
  use rustc_data_structures::cold_path;
@@ -20,8 +20,6 @@ use rustc_errors::{Diagnostic, FatalError};
  use rustc_span::source_map::DUMMY_SP;
  use rustc_span::Span;
  use std::collections::hash_map::Entry;
- use std::convert::TryFrom;
- use std::fmt::Debug;
  use std::hash::{Hash, Hasher};
  use std::mem;
  use std::num::NonZeroU32;
@@ -29,33 +27,33 @@ use std::ptr;
  #[cfg(debug_assertions)]
  use std::sync::atomic::{AtomicUsize, Ordering};

- pub(super) struct QueryStateShard<CTX: QueryContext, K, C> {
+ pub(super) struct QueryStateShard<D, Q, K, C> {
  pub(super) cache: C,
- active: FxHashMap<K, QueryResult<CTX>>,
+ active: FxHashMap<K, QueryResult<D, Q>>,

  /// Used to generate unique ids for active jobs.
  jobs: u32,
  }

- impl<CTX: QueryContext, K, C: Default> Default for QueryStateShard<CTX, K, C> {
- fn default() -> QueryStateShard<CTX, K, C> {
+ impl<D, Q, K, C: Default> Default for QueryStateShard<D, Q, K, C> {
+ fn default() -> QueryStateShard<D, Q, K, C> {
  QueryStateShard { cache: Default::default(), active: Default::default(), jobs: 0 }
  }
  }

- pub struct QueryState<CTX: QueryContext, C: QueryCache> {
+ pub struct QueryState<D, Q, C: QueryCache> {
  cache: C,
- shards: Sharded<QueryStateShard<CTX, C::Key, C::Sharded>>,
+ shards: Sharded<QueryStateShard<D, Q, C::Key, C::Sharded>>,
  #[cfg(debug_assertions)]
  pub cache_hits: AtomicUsize,
  }

- impl<CTX: QueryContext, C: QueryCache> QueryState<CTX, C> {
+ impl<D, Q, C: QueryCache> QueryState<D, Q, C> {
  #[inline]
  pub(super) fn get_lookup<'tcx>(
  &'tcx self,
  key: &C::Key,
- ) -> QueryLookup<'tcx, CTX, C::Key, C::Sharded> {
+ ) -> QueryLookup<'tcx, D, Q, C::Key, C::Sharded> {
  // We compute the key's hash once and then use it for both the
  // shard lookup and the hashmap lookup. This relies on the fact
  // that both of them use `FxHasher`.
@@ -70,16 +68,21 @@ impl<CTX: QueryContext, C: QueryCache> QueryState<CTX, C> {
  }

  /// Indicates the state of a query for a given key in a query map.
- enum QueryResult<CTX: QueryContext> {
+ enum QueryResult<D, Q> {
  /// An already executing query. The query job can be used to await for its completion.
- Started(QueryJob<CTX>),
+ Started(QueryJob<D, Q>),

  /// The query panicked. Queries trying to wait on this will raise a fatal error which will
  /// silently panic.
  Poisoned,
  }

- impl<CTX: QueryContext, C: QueryCache> QueryState<CTX, C> {
+ impl<D, Q, C> QueryState<D, Q, C>
+ where
+ D: Copy + Clone + Eq + Hash,
+ Q: Clone,
+ C: QueryCache,
+ {
  #[inline(always)]
  pub fn iter_results<R>(
  &self,
@@ -98,13 +101,10 @@ impl<CTX: QueryContext, C: QueryCache> QueryState<CTX, C> {

  pub fn try_collect_active_jobs(
  &self,
- kind: CTX::DepKind,
- make_query: fn(C::Key) -> CTX::Query,
- jobs: &mut FxHashMap<QueryJobId<CTX::DepKind>, QueryJobInfo<CTX>>,
- ) -> Option<()>
- where
- C::Key: Clone,
- {
+ kind: D,
+ make_query: fn(C::Key) -> Q,
+ jobs: &mut QueryMap<D, Q>,
+ ) -> Option<()> {
  // We use try_lock_shards here since we are called from the
  // deadlock handler, and this shouldn't be locked.
  let shards = self.shards.try_lock_shards()?;
@@ -112,8 +112,7 @@ impl<CTX: QueryContext, C: QueryCache> QueryState<CTX, C> {
  jobs.extend(shards.flat_map(|(shard_id, shard)| {
  shard.active.iter().filter_map(move |(k, v)| {
  if let QueryResult::Started(ref job) = *v {
- let id =
- QueryJobId { job: job.id, shard: u16::try_from(shard_id).unwrap(), kind };
+ let id = QueryJobId::new(job.id, shard_id, kind);
  let info = QueryInfo { span: job.span, query: make_query(k.clone()) };
  Some((id, QueryJobInfo { info, job: job.clone() }))
  } else {
@@ -126,8 +125,8 @@ impl<CTX: QueryContext, C: QueryCache> QueryState<CTX, C> {
  }
  }

- impl<CTX: QueryContext, C: QueryCache> Default for QueryState<CTX, C> {
- fn default() -> QueryState<CTX, C> {
+ impl<D, Q, C: QueryCache> Default for QueryState<D, Q, C> {
+ fn default() -> QueryState<D, Q, C> {
  QueryState {
  cache: C::default(),
  shards: Default::default(),
@@ -138,28 +137,30 @@ impl<CTX: QueryContext, C: QueryCache> Default for QueryState<CTX, C> {
  }

  /// Values used when checking a query cache which can be reused on a cache-miss to execute the query.
- pub struct QueryLookup<'tcx, CTX: QueryContext, K, C> {
+ pub struct QueryLookup<'tcx, D, Q, K, C> {
  pub(super) key_hash: u64,
  shard: usize,
- pub(super) lock: LockGuard<'tcx, QueryStateShard<CTX, K, C>>,
+ pub(super) lock: LockGuard<'tcx, QueryStateShard<D, Q, K, C>>,
  }

  /// A type representing the responsibility to execute the job in the `job` field.
  /// This will poison the relevant query if dropped.
- struct JobOwner<'tcx, CTX: QueryContext, C>
+ struct JobOwner<'tcx, D, Q, C>
  where
+ D: Copy + Clone + Eq + Hash,
+ Q: Clone,
  C: QueryCache,
- C::Key: Eq + Hash + Clone + Debug,
  {
- state: &'tcx QueryState<CTX, C>,
+ state: &'tcx QueryState<D, Q, C>,
  key: C::Key,
- id: QueryJobId<CTX::DepKind>,
+ id: QueryJobId<D>,
  }

- impl<'tcx, CTX: QueryContext, C> JobOwner<'tcx, CTX, C>
+ impl<'tcx, D, Q, C> JobOwner<'tcx, D, Q, C>
  where
+ D: Copy + Clone + Eq + Hash,
+ Q: Clone,
  C: QueryCache,
- C::Key: Eq + Hash + Clone + Debug,
  {
  /// Either gets a `JobOwner` corresponding the query, allowing us to
  /// start executing the query, or returns with the result of the query.
@@ -170,14 +171,14 @@ where
  /// This function is inlined because that results in a noticeable speed-up
  /// for some compile-time benchmarks.
  #[inline(always)]
- fn try_start<'a, 'b>(
+ fn try_start<'a, 'b, CTX>(
  tcx: CTX,
- state: &'b QueryState<CTX, C>,
+ state: &'b QueryState<CTX::DepKind, CTX::Query, C>,
  span: Span,
  key: &C::Key,
- mut lookup: QueryLookup<'a, CTX, C::Key, C::Sharded>,
+ mut lookup: QueryLookup<'a, CTX::DepKind, CTX::Query, C::Key, C::Sharded>,
  query: &QueryVtable<CTX, C::Key, C::Value>,
- ) -> TryGetJob<'b, CTX, C>
+ ) -> TryGetJob<'b, CTX::DepKind, CTX::Query, C>
  where
  CTX: QueryContext,
  {
@@ -229,7 +230,12 @@ where
  // so we just return the error.
  #[cfg(not(parallel_compiler))]
  return TryGetJob::Cycle(cold_path(|| {
- let value = query.handle_cycle_error(tcx, latch.find_cycle_in_stack(tcx, span));
+ let error: CycleError<CTX::Query> = latch.find_cycle_in_stack(
+ tcx.try_collect_active_jobs().unwrap(),
+ &tcx.current_query_job(),
+ span,
+ );
+ let value = query.handle_cycle_error(tcx, error);
  state.cache.store_nocache(value)
  }));

@@ -237,7 +243,7 @@ where
  // thread.
  #[cfg(parallel_compiler)]
  {
- let result = latch.wait_on(tcx, span);
+ let result = latch.wait_on(tcx.current_query_job(), span);

  if let Err(cycle) = result {
  let value = query.handle_cycle_error(tcx, cycle);
@@ -297,9 +303,11 @@ where
  (result, diagnostics.into_inner())
  }

- impl<'tcx, CTX: QueryContext, C: QueryCache> Drop for JobOwner<'tcx, CTX, C>
+ impl<'tcx, D, Q, C> Drop for JobOwner<'tcx, D, Q, C>
  where
- C::Key: Eq + Hash + Clone + Debug,
+ D: Copy + Clone + Eq + Hash,
+ Q: Clone,
+ C: QueryCache,
  {
  #[inline(never)]
  #[cold]
@@ -330,12 +338,14 @@ pub struct CycleError<Q> {
  }

  /// The result of `try_start`.
- enum TryGetJob<'tcx, CTX: QueryContext, C: QueryCache>
+ enum TryGetJob<'tcx, D, Q, C>
  where
- C::Key: Eq + Hash + Clone + Debug,
+ D: Copy + Clone + Eq + Hash,
+ Q: Clone,
+ C: QueryCache,
  {
  /// The query is not yet started. Contains a guard to the cache eventually used to start it.
- NotYetStarted(JobOwner<'tcx, CTX, C>),
+ NotYetStarted(JobOwner<'tcx, D, Q, C>),

  /// The query was already completed.
  /// Returns the result of the query and its dep-node index
@@ -354,7 +364,7 @@ where
  #[inline(always)]
  fn try_get_cached<CTX, C, R, OnHit, OnMiss>(
  tcx: CTX,
- state: &QueryState<CTX, C>,
+ state: &QueryState<CTX::DepKind, CTX::Query, C>,
  key: C::Key,
  // `on_hit` can be called while holding a lock to the query cache
  on_hit: OnHit,
@@ -364,7 +374,7 @@ where
  C: QueryCache,
  CTX: QueryContext,
  OnHit: FnOnce(&C::Stored, DepNodeIndex) -> R,
- OnMiss: FnOnce(C::Key, QueryLookup<'_, CTX, C::Key, C::Sharded>) -> R,
+ OnMiss: FnOnce(C::Key, QueryLookup<'_, CTX::DepKind, CTX::Query, C::Key, C::Sharded>) -> R,
  {
  state.cache.lookup(
  state,
@@ -386,19 +396,20 @@ where
  #[inline(always)]
  fn try_execute_query<CTX, C>(
  tcx: CTX,
- state: &QueryState<CTX, C>,
+ state: &QueryState<CTX::DepKind, CTX::Query, C>,
  span: Span,
  key: C::Key,
- lookup: QueryLookup<'_, CTX, C::Key, C::Sharded>,
+ lookup: QueryLookup<'_, CTX::DepKind, CTX::Query, C::Key, C::Sharded>,
  query: &QueryVtable<CTX, C::Key, C::Value>,
  ) -> C::Stored
  where
  C: QueryCache,
- C::Key: Eq + Clone + Debug + crate::dep_graph::DepNodeParams<CTX>,
- C::Stored: Clone,
+ C::Key: crate::dep_graph::DepNodeParams<CTX>,
  CTX: QueryContext,
  {
- let job = match JobOwner::try_start(tcx, state, span, &key, lookup, query) {
+ let job = match JobOwner::<'_, CTX::DepKind, CTX::Query, C>::try_start(
+ tcx, state, span, &key, lookup, query,
+ ) {
  TryGetJob::NotYetStarted(job) => job,
  TryGetJob::Cycle(result) => return result,
  #[cfg(parallel_compiler)]
@@ -559,14 +570,12 @@ fn incremental_verify_ich<CTX, K, V>(
  fn force_query_with_job<C, CTX>(
  tcx: CTX,
  key: C::Key,
- job: JobOwner<'_, CTX, C>,
+ job: JobOwner<'_, CTX::DepKind, CTX::Query, C>,
  dep_node: DepNode<CTX::DepKind>,
  query: &QueryVtable<CTX, C::Key, C::Value>,
  ) -> (C::Stored, DepNodeIndex)
  where
  C: QueryCache,
- C::Key: Eq + Clone + Debug,
- C::Stored: Clone,
  CTX: QueryContext,
  {
  // If the following assertion triggers, it can have two reasons:
@@ -617,7 +626,7 @@ where
  #[inline(never)]
  fn get_query_impl<CTX, C>(
  tcx: CTX,
- state: &QueryState<CTX, C>,
+ state: &QueryState<CTX::DepKind, CTX::Query, C>,
  span: Span,
  key: C::Key,
  query: &QueryVtable<CTX, C::Key, C::Value>,
@@ -625,8 +634,7 @@ fn get_query_impl<CTX, C>(
  where
  CTX: QueryContext,
  C: QueryCache,
- C::Key: Eq + Clone + crate::dep_graph::DepNodeParams<CTX>,
- C::Stored: Clone,
+ C::Key: crate::dep_graph::DepNodeParams<CTX>,
  {
  try_get_cached(
  tcx,
@@ -650,12 +658,12 @@ where
  #[inline(never)]
  fn ensure_query_impl<CTX, C>(
  tcx: CTX,
- state: &QueryState<CTX, C>,
+ state: &QueryState<CTX::DepKind, CTX::Query, C>,
  key: C::Key,
  query: &QueryVtable<CTX, C::Key, C::Value>,
  ) where
  C: QueryCache,
- C::Key: Eq + Clone + crate::dep_graph::DepNodeParams<CTX>,
+ C::Key: crate::dep_graph::DepNodeParams<CTX>,
  CTX: QueryContext,
  {
  if query.eval_always {
@@ -687,14 +695,14 @@ fn ensure_query_impl<CTX, C>(
  #[inline(never)]
  fn force_query_impl<CTX, C>(
  tcx: CTX,
- state: &QueryState<CTX, C>,
+ state: &QueryState<CTX::DepKind, CTX::Query, C>,
  key: C::Key,
  span: Span,
  dep_node: DepNode<CTX::DepKind>,
  query: &QueryVtable<CTX, C::Key, C::Value>,
  ) where
  C: QueryCache,
- C::Key: Eq + Clone + crate::dep_graph::DepNodeParams<CTX>,
+ C::Key: crate::dep_graph::DepNodeParams<CTX>,
  CTX: QueryContext,
  {
  // We may be concurrently trying both execute and force a query.
@@ -708,7 +716,9 @@ fn force_query_impl<CTX, C>(
  // Cache hit, do nothing
  },
  |key, lookup| {
- let job = match JobOwner::try_start(tcx, state, span, &key, lookup, query) {
+ let job = match JobOwner::<'_, CTX::DepKind, CTX::Query, C>::try_start(
+ tcx, state, span, &key, lookup, query,
+ ) {
  TryGetJob::NotYetStarted(job) => job,
  TryGetJob::Cycle(_) => return,
  #[cfg(parallel_compiler)]