Rename Ctxt and CTX to Tcx and Qcx

This makes the naming consistent and makes it clear which context is used.
Nilstrieb 2022-11-05 21:04:19 +01:00
parent 16558bd267
commit 6d26ea86da
7 changed files with 109 additions and 109 deletions
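For readers skimming the diff below: after this change, `Tcx` is the conventional name for type parameters bounded by `DepContext` (the `TyCtxt`-like side), while `Qcx` is used for parameters bounded by `QueryContext`. The following is a minimal, self-contained sketch of that convention only; the traits, the `Dummy*` types, and the `describe*` functions are invented stand-ins, not the actual `rustc_query_system` definitions.

```rust
// Illustrative stand-ins only; the real traits live in rustc_query_system
// and carry many more methods and bounds.
trait DepContext: Copy {
    fn session_name(self) -> &'static str;
}

trait QueryContext: Copy {
    type DepContext: DepContext;
    fn dep_context(self) -> Self::DepContext;
}

// `Tcx` names parameters bounded by `DepContext`...
fn describe<Tcx: DepContext>(tcx: Tcx) -> String {
    format!("dep context for {}", tcx.session_name())
}

// ...while `Qcx` names parameters bounded by `QueryContext`.
fn describe_query<Qcx: QueryContext>(qcx: Qcx) -> String {
    // A query context can always reach its underlying dep context.
    describe(qcx.dep_context())
}

#[derive(Copy, Clone)]
struct DummyTcx;

impl DepContext for DummyTcx {
    fn session_name(self) -> &'static str {
        "dummy"
    }
}

#[derive(Copy, Clone)]
struct DummyQcx;

impl QueryContext for DummyQcx {
    type DepContext = DummyTcx;
    fn dep_context(self) -> DummyTcx {
        DummyTcx
    }
}

fn main() {
    // Prints "dep context for dummy".
    println!("{}", describe_query(DummyQcx));
}
```

The same relationship shows up throughout the hunks below: `Qcx`-generic code reaches the `Tcx` side through `qcx.dep_context()` and `Qcx::DepContext`.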

View File

@@ -26,7 +26,7 @@ impl<Key, Value> Cache<Key, Value> {
}
impl<Key: Eq + Hash, Value: Clone> Cache<Key, Value> {
pub fn get<CTX: DepContext>(&self, key: &Key, tcx: CTX) -> Option<Value> {
pub fn get<Tcx: DepContext>(&self, key: &Key, tcx: Tcx) -> Option<Value> {
Some(self.hashmap.borrow().get(key)?.get(tcx))
}
@@ -46,7 +46,7 @@ impl<T: Clone> WithDepNode<T> {
WithDepNode { dep_node, cached_value }
}
pub fn get<CTX: DepContext>(&self, tcx: CTX) -> T {
pub fn get<Tcx: DepContext>(&self, tcx: Tcx) -> T {
tcx.dep_graph().read_index(self.dep_node);
self.cached_value.clone()
}

View File

@@ -61,18 +61,18 @@ impl<K: DepKind> DepNode<K> {
/// Creates a new, parameterless DepNode. This method will assert
/// that the DepNode corresponding to the given DepKind actually
/// does not require any parameters.
pub fn new_no_params<Ctxt>(tcx: Ctxt, kind: K) -> DepNode<K>
pub fn new_no_params<Tcx>(tcx: Tcx, kind: K) -> DepNode<K>
where
Ctxt: super::DepContext<DepKind = K>,
Tcx: super::DepContext<DepKind = K>,
{
debug_assert_eq!(tcx.fingerprint_style(kind), FingerprintStyle::Unit);
DepNode { kind, hash: Fingerprint::ZERO.into() }
}
pub fn construct<Ctxt, Key>(tcx: Ctxt, kind: K, arg: &Key) -> DepNode<K>
pub fn construct<Tcx, Key>(tcx: Tcx, kind: K, arg: &Key) -> DepNode<K>
where
Ctxt: super::DepContext<DepKind = K>,
Key: DepNodeParams<Ctxt>,
Tcx: super::DepContext<DepKind = K>,
Key: DepNodeParams<Tcx>,
{
let hash = arg.to_fingerprint(tcx);
let dep_node = DepNode { kind, hash: hash.into() };
@@ -93,9 +93,9 @@ impl<K: DepKind> DepNode<K> {
/// Construct a DepNode from the given DepKind and DefPathHash. This
/// method will assert that the given DepKind actually requires a
/// single DefId/DefPathHash parameter.
pub fn from_def_path_hash<Ctxt>(tcx: Ctxt, def_path_hash: DefPathHash, kind: K) -> Self
pub fn from_def_path_hash<Tcx>(tcx: Tcx, def_path_hash: DefPathHash, kind: K) -> Self
where
Ctxt: super::DepContext<DepKind = K>,
Tcx: super::DepContext<DepKind = K>,
{
debug_assert!(tcx.fingerprint_style(kind) == FingerprintStyle::DefPathHash);
DepNode { kind, hash: def_path_hash.0.into() }
@@ -108,18 +108,18 @@ impl<K: DepKind> fmt::Debug for DepNode<K> {
}
}
pub trait DepNodeParams<Ctxt: DepContext>: fmt::Debug + Sized {
pub trait DepNodeParams<Tcx: DepContext>: fmt::Debug + Sized {
fn fingerprint_style() -> FingerprintStyle;
/// This method turns the parameters of a DepNodeConstructor into an opaque
/// Fingerprint to be used in DepNode.
/// Not all DepNodeParams support being turned into a Fingerprint (they
/// don't need to if the corresponding DepNode is anonymous).
fn to_fingerprint(&self, _: Ctxt) -> Fingerprint {
fn to_fingerprint(&self, _: Tcx) -> Fingerprint {
panic!("Not implemented. Accidentally called on anonymous node?")
}
fn to_debug_str(&self, _: Ctxt) -> String {
fn to_debug_str(&self, _: Tcx) -> String {
format!("{:?}", self)
}
@@ -129,10 +129,10 @@ pub trait DepNodeParams<Ctxt: DepContext>: fmt::Debug + Sized {
/// `fingerprint_style()` is not `FingerprintStyle::Opaque`.
/// It is always valid to return `None` here, in which case incremental
/// compilation will treat the query as having changed instead of forcing it.
fn recover(tcx: Ctxt, dep_node: &DepNode<Ctxt::DepKind>) -> Option<Self>;
fn recover(tcx: Tcx, dep_node: &DepNode<Tcx::DepKind>) -> Option<Self>;
}
impl<Ctxt: DepContext, T> DepNodeParams<Ctxt> for T
impl<Tcx: DepContext, T> DepNodeParams<Tcx> for T
where
T: for<'a> HashStable<StableHashingContext<'a>> + fmt::Debug,
{
@@ -142,7 +142,7 @@ where
}
#[inline(always)]
default fn to_fingerprint(&self, tcx: Ctxt) -> Fingerprint {
default fn to_fingerprint(&self, tcx: Tcx) -> Fingerprint {
tcx.with_stable_hashing_context(|mut hcx| {
let mut hasher = StableHasher::new();
self.hash_stable(&mut hcx, &mut hasher);
@@ -151,12 +151,12 @@ where
}
#[inline(always)]
default fn to_debug_str(&self, _: Ctxt) -> String {
default fn to_debug_str(&self, _: Tcx) -> String {
format!("{:?}", *self)
}
#[inline(always)]
default fn recover(_: Ctxt, _: &DepNode<Ctxt::DepKind>) -> Option<Self> {
default fn recover(_: Tcx, _: &DepNode<Tcx::DepKind>) -> Option<Self> {
None
}
}
@@ -166,7 +166,7 @@ where
/// Information is retrieved by indexing the `DEP_KINDS` array using the integer value
/// of the `DepKind`. Overall, this allows to implement `DepContext` using this manual
/// jump table instead of large matches.
pub struct DepKindStruct<CTX: DepContext> {
pub struct DepKindStruct<Tcx: DepContext> {
/// Anonymous queries cannot be replayed from one compiler invocation to the next.
/// When their result is needed, it is recomputed. They are useful for fine-grained
/// dependency tracking, and caching within one compiler invocation.
@@ -216,10 +216,10 @@ pub struct DepKindStruct<CTX: DepContext> {
/// with kind `MirValidated`, we know that the GUID/fingerprint of the `DepNode`
/// is actually a `DefPathHash`, and can therefore just look up the corresponding
/// `DefId` in `tcx.def_path_hash_to_def_id`.
pub force_from_dep_node: Option<fn(tcx: CTX, dep_node: DepNode<CTX::DepKind>) -> bool>,
pub force_from_dep_node: Option<fn(tcx: Tcx, dep_node: DepNode<Tcx::DepKind>) -> bool>,
/// Invoke a query to put the on-disk cached value in memory.
pub try_load_from_on_disk_cache: Option<fn(CTX, DepNode<CTX::DepKind>)>,
pub try_load_from_on_disk_cache: Option<fn(Tcx, DepNode<Tcx::DepKind>)>,
}
/// A "work product" corresponds to a `.o` (or other) file that we

View File

@@ -377,9 +377,9 @@ impl<K: DepKind> DepGraph<K> {
/// Executes something within an "anonymous" task, that is, a task the
/// `DepNode` of which is determined by the list of inputs it read from.
pub fn with_anon_task<Ctxt: DepContext<DepKind = K>, OP, R>(
pub fn with_anon_task<Tcx: DepContext<DepKind = K>, OP, R>(
&self,
cx: Ctxt,
cx: Tcx,
dep_kind: K,
op: OP,
) -> (R, DepNodeIndex)
@@ -571,9 +571,9 @@ impl<K: DepKind> DepGraph<K> {
/// A node will have an index, when it's already been marked green, or when we can mark it
/// green. This function will mark the current task as a reader of the specified node, when
/// a node index can be found for that node.
pub fn try_mark_green<Ctxt: QueryContext<DepKind = K>>(
pub fn try_mark_green<Qcx: QueryContext<DepKind = K>>(
&self,
qcx: Ctxt,
qcx: Qcx,
dep_node: &DepNode<K>,
) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
debug_assert!(!qcx.dep_context().is_eval_always(dep_node.kind));
@@ -599,9 +599,9 @@ impl<K: DepKind> DepGraph<K> {
}
#[instrument(skip(self, qcx, data, parent_dep_node_index), level = "debug")]
fn try_mark_parent_green<Ctxt: QueryContext<DepKind = K>>(
fn try_mark_parent_green<Qcx: QueryContext<DepKind = K>>(
&self,
qcx: Ctxt,
qcx: Qcx,
data: &DepGraphData<K>,
parent_dep_node_index: SerializedDepNodeIndex,
dep_node: &DepNode<K>,
@@ -687,9 +687,9 @@ impl<K: DepKind> DepGraph<K> {
/// Try to mark a dep-node which existed in the previous compilation session as green.
#[instrument(skip(self, qcx, data, prev_dep_node_index), level = "debug")]
fn try_mark_previous_green<Ctxt: QueryContext<DepKind = K>>(
fn try_mark_previous_green<Qcx: QueryContext<DepKind = K>>(
&self,
qcx: Ctxt,
qcx: Qcx,
data: &DepGraphData<K>,
prev_dep_node_index: SerializedDepNodeIndex,
dep_node: &DepNode<K>,
@@ -755,9 +755,9 @@ impl<K: DepKind> DepGraph<K> {
/// This may be called concurrently on multiple threads for the same dep node.
#[cold]
#[inline(never)]
fn emit_side_effects<Ctxt: QueryContext<DepKind = K>>(
fn emit_side_effects<Qcx: QueryContext<DepKind = K>>(
&self,
qcx: Ctxt,
qcx: Qcx,
data: &DepGraphData<K>,
dep_node_index: DepNodeIndex,
side_effects: QuerySideEffects,
@@ -799,7 +799,7 @@ impl<K: DepKind> DepGraph<K> {
//
// This method will only load queries that will end up in the disk cache.
// Other queries will not be executed.
pub fn exec_cache_promotions<Ctxt: DepContext<DepKind = K>>(&self, tcx: Ctxt) {
pub fn exec_cache_promotions<Tcx: DepContext<DepKind = K>>(&self, tcx: Tcx) {
let _prof_timer = tcx.profiler().generic_activity("incr_comp_query_cache_promotion");
let data = self.data.as_ref().unwrap();

View File

@@ -11,7 +11,7 @@ use rustc_data_structures::fingerprint::Fingerprint;
use std::fmt::Debug;
use std::hash::Hash;
pub trait QueryConfig<CTX: QueryContext> {
pub trait QueryConfig<Qcx: QueryContext> {
const NAME: &'static str;
type Key: Eq + Hash + Clone + Debug;
@@ -21,47 +21,47 @@ pub trait QueryConfig<CTX: QueryContext> {
type Cache: QueryCache<Key = Self::Key, Stored = Self::Stored, Value = Self::Value>;
// Don't use this method to access query results, instead use the methods on TyCtxt
fn query_state<'a>(tcx: CTX) -> &'a QueryState<Self::Key>
fn query_state<'a>(tcx: Qcx) -> &'a QueryState<Self::Key>
where
CTX: 'a;
Qcx: 'a;
// Don't use this method to access query results, instead use the methods on TyCtxt
fn query_cache<'a>(tcx: CTX) -> &'a Self::Cache
fn query_cache<'a>(tcx: Qcx) -> &'a Self::Cache
where
CTX: 'a;
Qcx: 'a;
// Don't use this method to compute query results, instead use the methods on TyCtxt
fn make_vtable(tcx: CTX, key: &Self::Key) -> QueryVTable<CTX, Self::Key, Self::Value>;
fn make_vtable(tcx: Qcx, key: &Self::Key) -> QueryVTable<Qcx, Self::Key, Self::Value>;
fn cache_on_disk(tcx: CTX::DepContext, key: &Self::Key) -> bool;
fn cache_on_disk(tcx: Qcx::DepContext, key: &Self::Key) -> bool;
// Don't use this method to compute query results, instead use the methods on TyCtxt
fn execute_query(tcx: CTX::DepContext, k: Self::Key) -> Self::Stored;
fn execute_query(tcx: Qcx::DepContext, k: Self::Key) -> Self::Stored;
}
#[derive(Copy, Clone)]
pub struct QueryVTable<CTX: QueryContext, K, V> {
pub struct QueryVTable<Qcx: QueryContext, K, V> {
pub anon: bool,
pub dep_kind: CTX::DepKind,
pub dep_kind: Qcx::DepKind,
pub eval_always: bool,
pub depth_limit: bool,
pub compute: fn(CTX::DepContext, K) -> V,
pub compute: fn(Qcx::DepContext, K) -> V,
pub hash_result: Option<fn(&mut StableHashingContext<'_>, &V) -> Fingerprint>,
pub handle_cycle_error: HandleCycleError,
// NOTE: this is also `None` if `cache_on_disk()` returns false, not just if it's unsupported by the query
pub try_load_from_disk: Option<fn(CTX, SerializedDepNodeIndex) -> Option<V>>,
pub try_load_from_disk: Option<fn(Qcx, SerializedDepNodeIndex) -> Option<V>>,
}
impl<CTX: QueryContext, K, V> QueryVTable<CTX, K, V> {
pub(crate) fn to_dep_node(&self, tcx: CTX::DepContext, key: &K) -> DepNode<CTX::DepKind>
impl<Qcx: QueryContext, K, V> QueryVTable<Qcx, K, V> {
pub(crate) fn to_dep_node(&self, tcx: Qcx::DepContext, key: &K) -> DepNode<Qcx::DepKind>
where
K: crate::dep_graph::DepNodeParams<CTX::DepContext>,
K: crate::dep_graph::DepNodeParams<Qcx::DepContext>,
{
DepNode::construct(tcx, self.dep_kind, key)
}
pub(crate) fn compute(&self, tcx: CTX::DepContext, key: K) -> V {
pub(crate) fn compute(&self, tcx: Qcx::DepContext, key: K) -> V {
(self.compute)(tcx, key)
}
}

View File

@@ -596,8 +596,8 @@ pub(crate) fn report_cycle<'a>(
cycle_diag.into_diagnostic(&sess.parse_sess.span_diagnostic)
}
pub fn print_query_stack<CTX: QueryContext>(
qcx: CTX,
pub fn print_query_stack<Qcx: QueryContext>(
qcx: Qcx,
mut current_query: Option<QueryJobId>,
handler: &Handler,
num_frames: Option<usize>,

View File

@@ -62,10 +62,10 @@ where
}
}
pub fn try_collect_active_jobs<CTX: Copy>(
pub fn try_collect_active_jobs<Qcx: Copy>(
&self,
qcx: CTX,
make_query: fn(CTX, K) -> QueryStackFrame,
qcx: Qcx,
make_query: fn(Qcx, K) -> QueryStackFrame,
jobs: &mut QueryMap,
) -> Option<()> {
#[cfg(parallel_compiler)]
@@ -119,15 +119,15 @@ where
#[cold]
#[inline(never)]
fn mk_cycle<CTX, V, R>(
qcx: CTX,
fn mk_cycle<Qcx, V, R>(
qcx: Qcx,
cycle_error: CycleError,
handler: HandleCycleError,
cache: &dyn crate::query::QueryStorage<Value = V, Stored = R>,
) -> R
where
CTX: QueryContext,
V: std::fmt::Debug + Value<CTX::DepContext>,
Qcx: QueryContext,
V: std::fmt::Debug + Value<Qcx::DepContext>,
R: Clone,
{
let error = report_cycle(qcx.dep_context().sess(), &cycle_error);
@@ -135,15 +135,15 @@ where
cache.store_nocache(value)
}
fn handle_cycle_error<CTX, V>(
tcx: CTX,
fn handle_cycle_error<Tcx, V>(
tcx: Tcx,
cycle_error: &CycleError,
mut error: DiagnosticBuilder<'_, ErrorGuaranteed>,
handler: HandleCycleError,
) -> V
where
CTX: DepContext,
V: Value<CTX>,
Tcx: DepContext,
V: Value<Tcx>,
{
use HandleCycleError::*;
match handler {
@@ -176,14 +176,14 @@ where
/// This function is inlined because that results in a noticeable speed-up
/// for some compile-time benchmarks.
#[inline(always)]
fn try_start<'b, CTX>(
qcx: &'b CTX,
fn try_start<'b, Qcx>(
qcx: &'b Qcx,
state: &'b QueryState<K>,
span: Span,
key: K,
) -> TryGetJob<'b, K>
where
CTX: QueryContext,
Qcx: QueryContext,
{
#[cfg(parallel_compiler)]
let mut state_lock = state.active.get_shard_by_value(&key).lock();
@@ -335,8 +335,8 @@ where
/// which will be used if the query is not in the cache and we need
/// to compute it.
#[inline]
pub fn try_get_cached<'a, CTX, C, R, OnHit>(
tcx: CTX,
pub fn try_get_cached<'a, Tcx, C, R, OnHit>(
tcx: Tcx,
cache: &'a C,
key: &C::Key,
// `on_hit` can be called while holding a lock to the query cache
@@ -344,7 +344,7 @@ pub fn try_get_cached<'a, CTX, C, R, OnHit>(
) -> Result<R, ()>
where
C: QueryCache,
CTX: DepContext,
Tcx: DepContext,
OnHit: FnOnce(&C::Stored) -> R,
{
cache.lookup(&key, |value, index| {
@@ -356,20 +356,20 @@ where
})
}
fn try_execute_query<CTX, C>(
qcx: CTX,
fn try_execute_query<Qcx, C>(
qcx: Qcx,
state: &QueryState<C::Key>,
cache: &C,
span: Span,
key: C::Key,
dep_node: Option<DepNode<CTX::DepKind>>,
query: &QueryVTable<CTX, C::Key, C::Value>,
dep_node: Option<DepNode<Qcx::DepKind>>,
query: &QueryVTable<Qcx, C::Key, C::Value>,
) -> (C::Stored, Option<DepNodeIndex>)
where
C: QueryCache,
C::Key: Clone + DepNodeParams<CTX::DepContext>,
C::Value: Value<CTX::DepContext>,
CTX: QueryContext,
C::Key: Clone + DepNodeParams<Qcx::DepContext>,
C::Value: Value<Qcx::DepContext>,
Qcx: QueryContext,
{
match JobOwner::<'_, C::Key>::try_start(&qcx, state, span, key.clone()) {
TryGetJob::NotYetStarted(job) => {
@@ -397,17 +397,17 @@ where
}
}
fn execute_job<CTX, K, V>(
qcx: CTX,
fn execute_job<Qcx, K, V>(
qcx: Qcx,
key: K,
mut dep_node_opt: Option<DepNode<CTX::DepKind>>,
query: &QueryVTable<CTX, K, V>,
mut dep_node_opt: Option<DepNode<Qcx::DepKind>>,
query: &QueryVTable<Qcx, K, V>,
job_id: QueryJobId,
) -> (V, DepNodeIndex)
where
K: Clone + DepNodeParams<CTX::DepContext>,
K: Clone + DepNodeParams<Qcx::DepContext>,
V: Debug,
CTX: QueryContext,
Qcx: QueryContext,
{
let dep_graph = qcx.dep_context().dep_graph();
@@ -470,15 +470,15 @@ where
(result, dep_node_index)
}
fn try_load_from_disk_and_cache_in_memory<CTX, K, V>(
qcx: CTX,
fn try_load_from_disk_and_cache_in_memory<Qcx, K, V>(
qcx: Qcx,
key: &K,
dep_node: &DepNode<CTX::DepKind>,
query: &QueryVTable<CTX, K, V>,
dep_node: &DepNode<Qcx::DepKind>,
query: &QueryVTable<Qcx, K, V>,
) -> Option<(V, DepNodeIndex)>
where
K: Clone,
CTX: QueryContext,
Qcx: QueryContext,
V: Debug,
{
// Note this function can be called concurrently from the same query
@@ -564,13 +564,13 @@ where
}
#[instrument(skip(qcx, result, query), level = "debug")]
fn incremental_verify_ich<CTX, K, V: Debug>(
qcx: CTX::DepContext,
fn incremental_verify_ich<Qcx, K, V: Debug>(
qcx: Qcx::DepContext,
result: &V,
dep_node: &DepNode<CTX::DepKind>,
query: &QueryVTable<CTX, K, V>,
dep_node: &DepNode<Qcx::DepKind>,
query: &QueryVTable<Qcx, K, V>,
) where
CTX: QueryContext,
Qcx: QueryContext,
{
assert!(
qcx.dep_graph().is_green(dep_node),
@@ -676,14 +676,14 @@ fn incremental_verify_ich_failed(sess: &Session, dep_node: DebugArg<'_>, result:
///
/// Note: The optimization is only available during incr. comp.
#[inline(never)]
fn ensure_must_run<CTX, K, V>(
qcx: CTX,
fn ensure_must_run<Qcx, K, V>(
qcx: Qcx,
key: &K,
query: &QueryVTable<CTX, K, V>,
) -> (bool, Option<DepNode<CTX::DepKind>>)
query: &QueryVTable<Qcx, K, V>,
) -> (bool, Option<DepNode<Qcx::DepKind>>)
where
K: crate::dep_graph::DepNodeParams<CTX::DepContext>,
CTX: QueryContext,
K: crate::dep_graph::DepNodeParams<Qcx::DepContext>,
Qcx: QueryContext,
{
if query.eval_always {
return (true, None);
@@ -719,12 +719,12 @@ pub enum QueryMode {
Ensure,
}
pub fn get_query<Q, CTX>(qcx: CTX, span: Span, key: Q::Key, mode: QueryMode) -> Option<Q::Stored>
pub fn get_query<Q, Qcx>(qcx: Qcx, span: Span, key: Q::Key, mode: QueryMode) -> Option<Q::Stored>
where
Q: QueryConfig<CTX>,
Q::Key: DepNodeParams<CTX::DepContext>,
Q::Value: Value<CTX::DepContext>,
CTX: QueryContext,
Q: QueryConfig<Qcx>,
Q::Key: DepNodeParams<Qcx::DepContext>,
Q::Value: Value<Qcx::DepContext>,
Qcx: QueryContext,
{
let query = Q::make_vtable(qcx, &key);
let dep_node = if let QueryMode::Ensure = mode {
@@ -752,12 +752,12 @@ where
Some(result)
}
pub fn force_query<Q, CTX>(qcx: CTX, key: Q::Key, dep_node: DepNode<CTX::DepKind>)
pub fn force_query<Q, Qcx>(qcx: Qcx, key: Q::Key, dep_node: DepNode<Qcx::DepKind>)
where
Q: QueryConfig<CTX>,
Q::Key: DepNodeParams<CTX::DepContext>,
Q::Value: Value<CTX::DepContext>,
CTX: QueryContext,
Q: QueryConfig<Qcx>,
Q::Key: DepNodeParams<Qcx::DepContext>,
Q::Value: Value<Qcx::DepContext>,
Qcx: QueryContext,
{
// We may be concurrently trying both execute and force a query.
// Ensure that only one of them runs the query.
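The plumbing hunks above follow one pattern throughout: the driver functions are generic over `Qcx: QueryContext`, while everything that only needs dependency tracking or caching is expressed against `Qcx::DepContext`, i.e. the `Tcx` side. Here is a hedged sketch of that split; `QueryVTable` is reduced to a single field, the cache is a plain `HashMap`, and `get_or_compute`, `DummyTcx`, and `DummyQcx` are invented names, not the real plumbing.

```rust
use std::collections::HashMap;
use std::hash::Hash;

// Illustrative stand-ins for the real contexts.
trait DepContext: Copy {}

trait QueryContext: Copy {
    type DepContext: DepContext;
    fn dep_context(self) -> Self::DepContext;
}

// Simplified analogue of `QueryVTable<Qcx, K, V>`: the compute function
// only ever sees the dep context (`Tcx` side), never the full `Qcx`.
struct QueryVTable<Qcx: QueryContext, K, V> {
    compute: fn(Qcx::DepContext, K) -> V,
}

// Driver side: generic over `Qcx`, but it hands only `qcx.dep_context()`
// down to the compute function, and the cache needs no context at all.
fn get_or_compute<Qcx: QueryContext, K: Clone + Eq + Hash, V: Clone>(
    qcx: Qcx,
    cache: &mut HashMap<K, V>,
    key: K,
    vtable: &QueryVTable<Qcx, K, V>,
) -> V {
    if let Some(hit) = cache.get(&key) {
        return hit.clone();
    }
    let value = (vtable.compute)(qcx.dep_context(), key.clone());
    cache.insert(key, value.clone());
    value
}

#[derive(Copy, Clone)]
struct DummyTcx;

impl DepContext for DummyTcx {}

#[derive(Copy, Clone)]
struct DummyQcx;

impl QueryContext for DummyQcx {
    type DepContext = DummyTcx;
    fn dep_context(self) -> DummyTcx {
        DummyTcx
    }
}

fn main() {
    let vtable: QueryVTable<DummyQcx, u32, u32> =
        QueryVTable { compute: |_tcx, n| n * 2 };
    let mut cache = HashMap::new();
    // Prints 42 (computed), then 42 again (cache hit).
    println!("{}", get_or_compute(DummyQcx, &mut cache, 21, &vtable));
    println!("{}", get_or_compute(DummyQcx, &mut cache, 21, &vtable));
}
```

The only point here is the signatures: `compute` receives the dep context, and the `Qcx`-generic driver obtains it via `dep_context()`, just as the `execute_job` and `try_execute_query` hunks above do.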

View File

@@ -1,12 +1,12 @@
use crate::dep_graph::DepContext;
use crate::query::QueryInfo;
pub trait Value<CTX: DepContext>: Sized {
fn from_cycle_error(tcx: CTX, cycle: &[QueryInfo]) -> Self;
pub trait Value<Tcx: DepContext>: Sized {
fn from_cycle_error(tcx: Tcx, cycle: &[QueryInfo]) -> Self;
}
impl<CTX: DepContext, T> Value<CTX> for T {
default fn from_cycle_error(tcx: CTX, _: &[QueryInfo]) -> T {
impl<Tcx: DepContext, T> Value<Tcx> for T {
default fn from_cycle_error(tcx: Tcx, _: &[QueryInfo]) -> T {
tcx.sess().abort_if_errors();
// Ideally we would use `bug!` here. But bug! is only defined in rustc_middle, and it's
// non-trivial to define it earlier.