Auto merge of #130821 - lcnr:nalgebra-hang-2, r=compiler-errors

add caching to most type folders, rm region uniquification

Fixes the new minimization of the hang in nalgebra and nalgebra itself :3

This is a bit iffy, especially the cache in `TypeRelating`. I believe all the caches are correct, but they definitely add some non-local complexity in places. The first commit removes region uniquification, reintroducing the ICE from https://github.com/rust-lang/trait-system-refactor-initiative/issues/27. This does not affect coherence, and I would like to fix the ICE by introducing OR-region constraints.

r? `@compiler-errors`
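
To make the idea concrete, here is a minimal, self-contained sketch of the caching pattern this PR adds (all types and names below are invented for illustration and are not the actual rustc types): the cache is keyed on the ambient variance together with the pair of types being related, and a cache hit skips the structural recursion entirely.

```rust
use std::collections::HashSet;

#[derive(Clone, PartialEq, Eq, Hash, Debug)]
enum Ty {
    Unit,
    Vec(Box<Ty>),
}

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
enum Variance {
    Covariant,
    Invariant,
}

// Toy stand-in for `TypeRelating`: the cache is keyed on the ambient variance
// plus the two types being related, and a hit short-circuits the recursion.
struct Relate {
    ambient_variance: Variance,
    cache: HashSet<(Variance, Ty, Ty)>,
}

impl Relate {
    fn tys(&mut self, a: &Ty, b: &Ty) -> Result<(), ()> {
        if self.cache.contains(&(self.ambient_variance, a.clone(), b.clone())) {
            // Already related this pair under this variance: skip recursing.
            return Ok(());
        }
        match (a, b) {
            (Ty::Vec(a_inner), Ty::Vec(b_inner)) => self.tys(a_inner, b_inner)?,
            (Ty::Unit, Ty::Unit) => {}
            _ => return Err(()),
        }
        self.cache.insert((self.ambient_variance, a.clone(), b.clone()));
        Ok(())
    }
}

fn main() {
    let mut relate = Relate { ambient_variance: Variance::Covariant, cache: HashSet::new() };
    let ty = Ty::Vec(Box::new(Ty::Vec(Box::new(Ty::Unit))));
    assert!(relate.tys(&ty, &ty).is_ok());
    // The second call is answered from the cache without walking the type again.
    assert!(relate.tys(&ty, &ty).is_ok());
}
```

In the real `TypeRelating` the cache is a `DelayedSet` (see the new `delayed_map.rs` below), so small types never pay the hashing cost.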
bors 2024-10-02 19:21:44 +00:00
commit 18b1161ec9
12 changed files with 443 additions and 134 deletions


@ -36,10 +36,15 @@ use crate::traits::{Obligation, PredicateObligation};
#[derive(Clone)]
pub struct CombineFields<'infcx, 'tcx> {
pub infcx: &'infcx InferCtxt<'tcx>,
// Immutable fields
pub trace: TypeTrace<'tcx>,
pub param_env: ty::ParamEnv<'tcx>,
pub goals: Vec<Goal<'tcx, ty::Predicate<'tcx>>>,
pub define_opaque_types: DefineOpaqueTypes,
// Mutable fields
//
// Adding any additional field likely requires
// changes to the cache of `TypeRelating`.
pub goals: Vec<Goal<'tcx, ty::Predicate<'tcx>>>,
}
impl<'infcx, 'tcx> CombineFields<'infcx, 'tcx> {


@ -4,6 +4,7 @@ use rustc_middle::ty::relate::{
};
use rustc_middle::ty::{self, Ty, TyCtxt, TyVar};
use rustc_span::Span;
use rustc_type_ir::data_structures::DelayedSet;
use tracing::{debug, instrument};
use super::combine::CombineFields;
@ -13,9 +14,38 @@ use crate::infer::{DefineOpaqueTypes, InferCtxt, SubregionOrigin};
/// Enforce that `a` is equal to or a subtype of `b`.
pub struct TypeRelating<'combine, 'a, 'tcx> {
// Immutable except for the `InferCtxt` and the
// resulting nested `goals`.
fields: &'combine mut CombineFields<'a, 'tcx>,
// Immutable field.
structurally_relate_aliases: StructurallyRelateAliases,
// Mutable field.
ambient_variance: ty::Variance,
/// The cache only tracks the `ambient_variance` as it's the
/// only field which is mutable and which meaningfully changes
/// the result when relating types.
///
/// The cache does not track whether the state of the
/// `InferCtxt` has been changed or whether we've added any
/// obligations to `self.fields.goals`. Whether a goal is added
/// once or multiple times is not really meaningful.
///
/// Changes in the inference state may delay some type inference to
/// the next fulfillment loop. Given that this loop is already
/// necessary, this is also not a meaningful change. Consider
/// the following three relations:
/// ```text
/// Vec<?0> sub Vec<?1>
/// ?0 eq u32
/// Vec<?0> sub Vec<?1>
/// ```
/// Without a cache, the second `Vec<?0> sub Vec<?1>` would eagerly
/// constrain `?1` to `u32`. When using the cache entry from the
/// first time we've related these types, this only happens when
/// later proving the `Subtype(?0, ?1)` goal from the first relation.
cache: DelayedSet<(ty::Variance, Ty<'tcx>, Ty<'tcx>)>,
}
impl<'combine, 'infcx, 'tcx> TypeRelating<'combine, 'infcx, 'tcx> {
@ -24,7 +54,12 @@ impl<'combine, 'infcx, 'tcx> TypeRelating<'combine, 'infcx, 'tcx> {
structurally_relate_aliases: StructurallyRelateAliases,
ambient_variance: ty::Variance,
) -> TypeRelating<'combine, 'infcx, 'tcx> {
TypeRelating { fields: f, structurally_relate_aliases, ambient_variance }
TypeRelating {
fields: f,
structurally_relate_aliases,
ambient_variance,
cache: Default::default(),
}
}
}
@ -78,6 +113,10 @@ impl<'tcx> TypeRelation<TyCtxt<'tcx>> for TypeRelating<'_, '_, 'tcx> {
let a = infcx.shallow_resolve(a);
let b = infcx.shallow_resolve(b);
if self.cache.contains(&(self.ambient_variance, a, b)) {
return Ok(a);
}
match (a.kind(), b.kind()) {
(&ty::Infer(TyVar(a_id)), &ty::Infer(TyVar(b_id))) => {
match self.ambient_variance {
@ -160,6 +199,8 @@ impl<'tcx> TypeRelation<TyCtxt<'tcx>> for TypeRelating<'_, '_, 'tcx> {
}
}
assert!(self.cache.insert((self.ambient_variance, a, b)));
Ok(a)
}


@ -2,6 +2,7 @@ use rustc_middle::bug;
use rustc_middle::ty::fold::{FallibleTypeFolder, TypeFolder, TypeSuperFoldable};
use rustc_middle::ty::visit::TypeVisitableExt;
use rustc_middle::ty::{self, Const, InferConst, Ty, TyCtxt, TypeFoldable};
use rustc_type_ir::data_structures::DelayedMap;
use super::{FixupError, FixupResult, InferCtxt};
@ -15,12 +16,15 @@ use super::{FixupError, FixupResult, InferCtxt};
/// points for correctness.
pub struct OpportunisticVarResolver<'a, 'tcx> {
infcx: &'a InferCtxt<'tcx>,
/// We're able to use a cache here as the folder does
/// not have any mutable state.
cache: DelayedMap<Ty<'tcx>, Ty<'tcx>>,
}
impl<'a, 'tcx> OpportunisticVarResolver<'a, 'tcx> {
#[inline]
pub fn new(infcx: &'a InferCtxt<'tcx>) -> Self {
OpportunisticVarResolver { infcx }
OpportunisticVarResolver { infcx, cache: Default::default() }
}
}
@ -33,9 +37,13 @@ impl<'a, 'tcx> TypeFolder<TyCtxt<'tcx>> for OpportunisticVarResolver<'a, 'tcx> {
fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
if !t.has_non_region_infer() {
t // micro-optimize -- if there is nothing in this type that this fold affects...
} else if let Some(&ty) = self.cache.get(&t) {
return ty;
} else {
let t = self.infcx.shallow_resolve(t);
t.super_fold_with(self)
let shallow = self.infcx.shallow_resolve(t);
let res = shallow.super_fold_with(self);
assert!(self.cache.insert(t, res));
res
}
}


@ -1,5 +1,6 @@
use rustc_data_structures::fx::FxIndexMap;
use rustc_hir::def_id::DefId;
use rustc_type_ir::data_structures::DelayedMap;
pub use rustc_type_ir::fold::{
FallibleTypeFolder, TypeFoldable, TypeFolder, TypeSuperFoldable, shift_region, shift_vars,
};
@ -131,12 +132,20 @@ impl<'a, 'tcx> TypeFolder<TyCtxt<'tcx>> for RegionFolder<'a, 'tcx> {
///////////////////////////////////////////////////////////////////////////
// Bound vars replacer
/// A delegate used when instantiating bound vars.
///
/// Any implementation must make sure that each bound variable always
/// gets mapped to the same result. `BoundVarReplacer` caches by using
/// a `DelayedMap` which does not cache the first few types it encounters.
pub trait BoundVarReplacerDelegate<'tcx> {
fn replace_region(&mut self, br: ty::BoundRegion) -> ty::Region<'tcx>;
fn replace_ty(&mut self, bt: ty::BoundTy) -> Ty<'tcx>;
fn replace_const(&mut self, bv: ty::BoundVar) -> ty::Const<'tcx>;
}
/// A simple delegate taking 3 mutable functions. The used functions must
/// always return the same result for each bound variable, no matter how
/// frequently they are called.
pub struct FnMutDelegate<'a, 'tcx> {
pub regions: &'a mut (dyn FnMut(ty::BoundRegion) -> ty::Region<'tcx> + 'a),
pub types: &'a mut (dyn FnMut(ty::BoundTy) -> Ty<'tcx> + 'a),
@ -164,11 +173,15 @@ struct BoundVarReplacer<'tcx, D> {
current_index: ty::DebruijnIndex,
delegate: D,
/// This cache only tracks the `DebruijnIndex` and assumes that it does not matter
/// for the delegate how often its methods get used.
cache: DelayedMap<(ty::DebruijnIndex, Ty<'tcx>), Ty<'tcx>>,
}
impl<'tcx, D: BoundVarReplacerDelegate<'tcx>> BoundVarReplacer<'tcx, D> {
fn new(tcx: TyCtxt<'tcx>, delegate: D) -> Self {
BoundVarReplacer { tcx, current_index: ty::INNERMOST, delegate }
BoundVarReplacer { tcx, current_index: ty::INNERMOST, delegate, cache: Default::default() }
}
}
@ -197,8 +210,17 @@ where
debug_assert!(!ty.has_vars_bound_above(ty::INNERMOST));
ty::fold::shift_vars(self.tcx, ty, self.current_index.as_u32())
}
_ if t.has_vars_bound_at_or_above(self.current_index) => t.super_fold_with(self),
_ => t,
_ => {
if !t.has_vars_bound_at_or_above(self.current_index) {
t
} else if let Some(&t) = self.cache.get(&(self.current_index, t)) {
t
} else {
let res = t.super_fold_with(self);
assert!(self.cache.insert((self.current_index, t), res));
res
}
}
}
}


@ -1,5 +1,6 @@
use std::cmp::Ordering;
use rustc_type_ir::data_structures::HashMap;
use rustc_type_ir::fold::{TypeFoldable, TypeFolder, TypeSuperFoldable};
use rustc_type_ir::inherent::*;
use rustc_type_ir::visit::TypeVisitableExt;
@ -41,11 +42,20 @@ pub enum CanonicalizeMode {
pub struct Canonicalizer<'a, D: SolverDelegate<Interner = I>, I: Interner> {
delegate: &'a D,
// Immutable field.
canonicalize_mode: CanonicalizeMode,
// Mutable fields.
variables: &'a mut Vec<I::GenericArg>,
primitive_var_infos: Vec<CanonicalVarInfo<I>>,
variable_lookup_table: HashMap<I::GenericArg, usize>,
binder_index: ty::DebruijnIndex,
/// We only use the debruijn index during lookup. We don't need to
/// track the `variables` as each generic arg only results in a single
/// bound variable regardless of how many times it is encountered.
cache: HashMap<(ty::DebruijnIndex, I::Ty), I::Ty>,
}
impl<'a, D: SolverDelegate<Interner = I>, I: Interner> Canonicalizer<'a, D, I> {
@ -60,12 +70,14 @@ impl<'a, D: SolverDelegate<Interner = I>, I: Interner> Canonicalizer<'a, D, I> {
canonicalize_mode,
variables,
variable_lookup_table: Default::default(),
primitive_var_infos: Vec::new(),
binder_index: ty::INNERMOST,
cache: Default::default(),
};
let value = value.fold_with(&mut canonicalizer);
// FIXME: Restore these assertions. Should we uplift type flags?
assert!(!value.has_infer(), "unexpected infer in {value:?}");
assert!(!value.has_placeholders(), "unexpected placeholders in {value:?}");
@ -75,6 +87,37 @@ impl<'a, D: SolverDelegate<Interner = I>, I: Interner> Canonicalizer<'a, D, I> {
Canonical { defining_opaque_types, max_universe, variables, value }
}
fn get_or_insert_bound_var(
&mut self,
arg: impl Into<I::GenericArg>,
canonical_var_info: CanonicalVarInfo<I>,
) -> ty::BoundVar {
// FIXME: 16 is made up and arbitrary. We should look at some
// perf data here.
let arg = arg.into();
let idx = if self.variables.len() > 16 {
if self.variable_lookup_table.is_empty() {
self.variable_lookup_table.extend(self.variables.iter().copied().zip(0..));
}
*self.variable_lookup_table.entry(arg).or_insert_with(|| {
let var = self.variables.len();
self.variables.push(arg);
self.primitive_var_infos.push(canonical_var_info);
var
})
} else {
self.variables.iter().position(|&v| v == arg).unwrap_or_else(|| {
let var = self.variables.len();
self.variables.push(arg);
self.primitive_var_infos.push(canonical_var_info);
var
})
};
ty::BoundVar::from(idx)
}
fn finalize(self) -> (ty::UniverseIndex, I::CanonicalVars) {
let mut var_infos = self.primitive_var_infos;
// See the rustc-dev-guide section about how we deal with universes
@ -124,8 +167,8 @@ impl<'a, D: SolverDelegate<Interner = I>, I: Interner> Canonicalizer<'a, D, I> {
// - var_infos: [E0, U1, E2, U1, E1, E6, U6], curr_compressed_uv: 2, next_orig_uv: 6
// - var_infos: [E0, U1, E1, U1, E1, E3, U3], curr_compressed_uv: 2, next_orig_uv: -
//
// This algorithm runs in `O(n²)` where `n` is the number of different universe
// indices in the input. This should be fine as `n` is expected to be small.
// This algorithm runs in `O(mn)` where `n` is the number of different universes and
// `m` the number of variables. This should be fine as both are expected to be small.
let mut curr_compressed_uv = ty::UniverseIndex::ROOT;
let mut existential_in_new_uv = None;
let mut next_orig_uv = Some(ty::UniverseIndex::ROOT);
@ -185,14 +228,16 @@ impl<'a, D: SolverDelegate<Interner = I>, I: Interner> Canonicalizer<'a, D, I> {
for var in var_infos.iter_mut() {
// We simply put all regions from the input into the highest
// compressed universe, so we only deal with them at the end.
if !var.is_region() && is_existential == var.is_existential() {
update_uv(var, orig_uv, is_existential)
if !var.is_region() {
if is_existential == var.is_existential() {
update_uv(var, orig_uv, is_existential)
}
}
}
}
}
// We uniquify regions and always put them into their own universe
// We put all regions into a separate universe.
let mut first_region = true;
for var in var_infos.iter_mut() {
if var.is_region() {
@ -208,93 +253,8 @@ impl<'a, D: SolverDelegate<Interner = I>, I: Interner> Canonicalizer<'a, D, I> {
let var_infos = self.delegate.cx().mk_canonical_var_infos(&var_infos);
(curr_compressed_uv, var_infos)
}
}
impl<D: SolverDelegate<Interner = I>, I: Interner> TypeFolder<I> for Canonicalizer<'_, D, I> {
fn cx(&self) -> I {
self.delegate.cx()
}
fn fold_binder<T>(&mut self, t: ty::Binder<I, T>) -> ty::Binder<I, T>
where
T: TypeFoldable<I>,
{
self.binder_index.shift_in(1);
let t = t.super_fold_with(self);
self.binder_index.shift_out(1);
t
}
fn fold_region(&mut self, r: I::Region) -> I::Region {
let kind = match r.kind() {
ty::ReBound(..) => return r,
// We may encounter `ReStatic` in item signatures or the hidden type
// of an opaque. `ReErased` should only be encountered in the hidden
// type of an opaque for regions that are ignored for the purposes of
// captures.
//
// FIXME: We should investigate the perf implications of not uniquifying
// `ReErased`. We may be able to short-circuit registering region
// obligations if we encounter a `ReErased` on one side, for example.
ty::ReStatic | ty::ReErased | ty::ReError(_) => match self.canonicalize_mode {
CanonicalizeMode::Input => CanonicalVarKind::Region(ty::UniverseIndex::ROOT),
CanonicalizeMode::Response { .. } => return r,
},
ty::ReEarlyParam(_) | ty::ReLateParam(_) => match self.canonicalize_mode {
CanonicalizeMode::Input => CanonicalVarKind::Region(ty::UniverseIndex::ROOT),
CanonicalizeMode::Response { .. } => {
panic!("unexpected region in response: {r:?}")
}
},
ty::RePlaceholder(placeholder) => match self.canonicalize_mode {
// We canonicalize placeholder regions as existentials in query inputs.
CanonicalizeMode::Input => CanonicalVarKind::Region(ty::UniverseIndex::ROOT),
CanonicalizeMode::Response { max_input_universe } => {
// If we have a placeholder region inside of a query, it must be from
// a new universe.
if max_input_universe.can_name(placeholder.universe()) {
panic!("new placeholder in universe {max_input_universe:?}: {r:?}");
}
CanonicalVarKind::PlaceholderRegion(placeholder)
}
},
ty::ReVar(vid) => {
assert_eq!(
self.delegate.opportunistic_resolve_lt_var(vid),
r,
"region vid should have been resolved fully before canonicalization"
);
match self.canonicalize_mode {
CanonicalizeMode::Input => CanonicalVarKind::Region(ty::UniverseIndex::ROOT),
CanonicalizeMode::Response { .. } => {
CanonicalVarKind::Region(self.delegate.universe_of_lt(vid).unwrap())
}
}
}
};
let existing_bound_var = match self.canonicalize_mode {
CanonicalizeMode::Input => None,
CanonicalizeMode::Response { .. } => {
self.variables.iter().position(|&v| v == r.into()).map(ty::BoundVar::from)
}
};
let var = existing_bound_var.unwrap_or_else(|| {
let var = ty::BoundVar::from(self.variables.len());
self.variables.push(r.into());
self.primitive_var_infos.push(CanonicalVarInfo { kind });
var
});
Region::new_anon_bound(self.cx(), self.binder_index, var)
}
fn fold_ty(&mut self, t: I::Ty) -> I::Ty {
fn cached_fold_ty(&mut self, t: I::Ty) -> I::Ty {
let kind = match t.kind() {
ty::Infer(i) => match i {
ty::TyVar(vid) => {
@ -368,20 +328,98 @@ impl<D: SolverDelegate<Interner = I>, I: Interner> TypeFolder<I> for Canonicaliz
| ty::Tuple(_)
| ty::Alias(_, _)
| ty::Bound(_, _)
| ty::Error(_) => return t.super_fold_with(self),
| ty::Error(_) => {
return t.super_fold_with(self);
}
};
let var = ty::BoundVar::from(
self.variables.iter().position(|&v| v == t.into()).unwrap_or_else(|| {
let var = self.variables.len();
self.variables.push(t.into());
self.primitive_var_infos.push(CanonicalVarInfo { kind });
var
}),
);
let var = self.get_or_insert_bound_var(t, CanonicalVarInfo { kind });
Ty::new_anon_bound(self.cx(), self.binder_index, var)
}
}
impl<D: SolverDelegate<Interner = I>, I: Interner> TypeFolder<I> for Canonicalizer<'_, D, I> {
fn cx(&self) -> I {
self.delegate.cx()
}
fn fold_binder<T>(&mut self, t: ty::Binder<I, T>) -> ty::Binder<I, T>
where
T: TypeFoldable<I>,
{
self.binder_index.shift_in(1);
let t = t.super_fold_with(self);
self.binder_index.shift_out(1);
t
}
fn fold_region(&mut self, r: I::Region) -> I::Region {
let kind = match r.kind() {
ty::ReBound(..) => return r,
// We may encounter `ReStatic` in item signatures or the hidden type
// of an opaque. `ReErased` should only be encountered in the hidden
// type of an opaque for regions that are ignored for the purposes of
// captures.
//
// FIXME: We should investigate the perf implications of not uniquifying
// `ReErased`. We may be able to short-circuit registering region
// obligations if we encounter a `ReErased` on one side, for example.
ty::ReStatic | ty::ReErased | ty::ReError(_) => match self.canonicalize_mode {
CanonicalizeMode::Input => CanonicalVarKind::Region(ty::UniverseIndex::ROOT),
CanonicalizeMode::Response { .. } => return r,
},
ty::ReEarlyParam(_) | ty::ReLateParam(_) => match self.canonicalize_mode {
CanonicalizeMode::Input => CanonicalVarKind::Region(ty::UniverseIndex::ROOT),
CanonicalizeMode::Response { .. } => {
panic!("unexpected region in response: {r:?}")
}
},
ty::RePlaceholder(placeholder) => match self.canonicalize_mode {
// We canonicalize placeholder regions as existentials in query inputs.
CanonicalizeMode::Input => CanonicalVarKind::Region(ty::UniverseIndex::ROOT),
CanonicalizeMode::Response { max_input_universe } => {
// If we have a placeholder region inside of a query, it must be from
// a new universe.
if max_input_universe.can_name(placeholder.universe()) {
panic!("new placeholder in universe {max_input_universe:?}: {r:?}");
}
CanonicalVarKind::PlaceholderRegion(placeholder)
}
},
ty::ReVar(vid) => {
assert_eq!(
self.delegate.opportunistic_resolve_lt_var(vid),
r,
"region vid should have been resolved fully before canonicalization"
);
match self.canonicalize_mode {
CanonicalizeMode::Input => CanonicalVarKind::Region(ty::UniverseIndex::ROOT),
CanonicalizeMode::Response { .. } => {
CanonicalVarKind::Region(self.delegate.universe_of_lt(vid).unwrap())
}
}
}
};
let var = self.get_or_insert_bound_var(r, CanonicalVarInfo { kind });
Region::new_anon_bound(self.cx(), self.binder_index, var)
}
fn fold_ty(&mut self, t: I::Ty) -> I::Ty {
if let Some(&ty) = self.cache.get(&(self.binder_index, t)) {
ty
} else {
let res = self.cached_fold_ty(t);
assert!(self.cache.insert((self.binder_index, t), res).is_none());
res
}
}
fn fold_const(&mut self, c: I::Const) -> I::Const {
let kind = match c.kind() {
@ -419,14 +457,7 @@ impl<D: SolverDelegate<Interner = I>, I: Interner> TypeFolder<I> for Canonicaliz
| ty::ConstKind::Expr(_) => return c.super_fold_with(self),
};
let var = ty::BoundVar::from(
self.variables.iter().position(|&v| v == c.into()).unwrap_or_else(|| {
let var = self.variables.len();
self.variables.push(c.into());
self.primitive_var_infos.push(CanonicalVarInfo { kind });
var
}),
);
let var = self.get_or_insert_bound_var(c, CanonicalVarInfo { kind });
Const::new_anon_bound(self.cx(), self.binder_index, var)
}


@ -1,3 +1,4 @@
use rustc_type_ir::data_structures::DelayedMap;
use rustc_type_ir::fold::{TypeFoldable, TypeFolder, TypeSuperFoldable};
use rustc_type_ir::inherent::*;
use rustc_type_ir::visit::TypeVisitableExt;
@ -15,11 +16,14 @@ where
I: Interner,
{
delegate: &'a D,
/// We're able to use a cache here as the folder does not have any
/// mutable state.
cache: DelayedMap<I::Ty, I::Ty>,
}
impl<'a, D: SolverDelegate> EagerResolver<'a, D> {
pub fn new(delegate: &'a D) -> Self {
EagerResolver { delegate }
EagerResolver { delegate, cache: Default::default() }
}
}
@ -42,7 +46,12 @@ impl<D: SolverDelegate<Interner = I>, I: Interner> TypeFolder<I> for EagerResolv
ty::Infer(ty::FloatVar(vid)) => self.delegate.opportunistic_resolve_float_var(vid),
_ => {
if t.has_infer() {
t.super_fold_with(self)
if let Some(&ty) = self.cache.get(&t) {
return ty;
}
let res = t.super_fold_with(self);
assert!(self.cache.insert(t, res));
res
} else {
t
}


@ -3,7 +3,7 @@ use std::ops::ControlFlow;
use derive_where::derive_where;
#[cfg(feature = "nightly")]
use rustc_macros::{HashStable_NoContext, TyDecodable, TyEncodable};
use rustc_type_ir::data_structures::ensure_sufficient_stack;
use rustc_type_ir::data_structures::{HashMap, HashSet, ensure_sufficient_stack};
use rustc_type_ir::fold::{TypeFoldable, TypeFolder, TypeSuperFoldable};
use rustc_type_ir::inherent::*;
use rustc_type_ir::relate::Relate;
@ -579,18 +579,16 @@ where
#[instrument(level = "trace", skip(self))]
pub(super) fn add_normalizes_to_goal(&mut self, mut goal: Goal<I, ty::NormalizesTo<I>>) {
goal.predicate = goal
.predicate
.fold_with(&mut ReplaceAliasWithInfer { ecx: self, param_env: goal.param_env });
goal.predicate =
goal.predicate.fold_with(&mut ReplaceAliasWithInfer::new(self, goal.param_env));
self.inspect.add_normalizes_to_goal(self.delegate, self.max_input_universe, goal);
self.nested_goals.normalizes_to_goals.push(goal);
}
#[instrument(level = "debug", skip(self))]
pub(super) fn add_goal(&mut self, source: GoalSource, mut goal: Goal<I, I::Predicate>) {
goal.predicate = goal
.predicate
.fold_with(&mut ReplaceAliasWithInfer { ecx: self, param_env: goal.param_env });
goal.predicate =
goal.predicate.fold_with(&mut ReplaceAliasWithInfer::new(self, goal.param_env));
self.inspect.add_goal(self.delegate, self.max_input_universe, source, goal);
self.nested_goals.goals.push((source, goal));
}
@ -654,6 +652,7 @@ where
term: I::Term,
universe_of_term: ty::UniverseIndex,
delegate: &'a D,
cache: HashSet<I::Ty>,
}
impl<D: SolverDelegate<Interner = I>, I: Interner> ContainsTermOrNotNameable<'_, D, I> {
@ -671,6 +670,10 @@ where
{
type Result = ControlFlow<()>;
fn visit_ty(&mut self, t: I::Ty) -> Self::Result {
if self.cache.contains(&t) {
return ControlFlow::Continue(());
}
match t.kind() {
ty::Infer(ty::TyVar(vid)) => {
if let ty::TermKind::Ty(term) = self.term.kind() {
@ -683,17 +686,18 @@ where
}
}
self.check_nameable(self.delegate.universe_of_ty(vid).unwrap())
self.check_nameable(self.delegate.universe_of_ty(vid).unwrap())?;
}
ty::Placeholder(p) => self.check_nameable(p.universe()),
ty::Placeholder(p) => self.check_nameable(p.universe())?,
_ => {
if t.has_non_region_infer() || t.has_placeholders() {
t.super_visit_with(self)
} else {
ControlFlow::Continue(())
t.super_visit_with(self)?
}
}
}
assert!(self.cache.insert(t));
ControlFlow::Continue(())
}
fn visit_const(&mut self, c: I::Const) -> Self::Result {
@ -728,6 +732,7 @@ where
delegate: self.delegate,
universe_of_term,
term: goal.predicate.term,
cache: Default::default(),
};
goal.predicate.alias.visit_with(&mut visitor).is_continue()
&& goal.param_env.visit_with(&mut visitor).is_continue()
@ -1017,6 +1022,17 @@ where
{
ecx: &'me mut EvalCtxt<'a, D>,
param_env: I::ParamEnv,
cache: HashMap<I::Ty, I::Ty>,
}
impl<'me, 'a, D, I> ReplaceAliasWithInfer<'me, 'a, D, I>
where
D: SolverDelegate<Interner = I>,
I: Interner,
{
fn new(ecx: &'me mut EvalCtxt<'a, D>, param_env: I::ParamEnv) -> Self {
ReplaceAliasWithInfer { ecx, param_env, cache: Default::default() }
}
}
impl<D, I> TypeFolder<I> for ReplaceAliasWithInfer<'_, '_, D, I>
@ -1043,7 +1059,17 @@ where
);
infer_ty
}
_ => ty.super_fold_with(self),
_ => {
if !ty.has_aliases() {
ty
} else if let Some(&entry) = self.cache.get(&ty) {
return entry;
} else {
let res = ty.super_fold_with(self);
assert!(self.cache.insert(ty, res).is_none());
res
}
}
}
}


@ -0,0 +1,92 @@
use std::hash::Hash;
use crate::data_structures::{HashMap, HashSet};
const CACHE_CUTOFF: u32 = 32;
/// A hashmap which only starts hashing after ignoring the first few inputs.
///
/// This is used in type folders as in nearly all cases caching is not worth it
/// as nearly all folded types are tiny. However, there are very rare incredibly
/// large types for which caching is necessary to avoid hangs.
#[derive(Debug)]
pub struct DelayedMap<K, V> {
cache: HashMap<K, V>,
count: u32,
}
impl<K, V> Default for DelayedMap<K, V> {
fn default() -> Self {
DelayedMap { cache: Default::default(), count: 0 }
}
}
impl<K: Hash + Eq, V> DelayedMap<K, V> {
#[inline(always)]
pub fn insert(&mut self, key: K, value: V) -> bool {
if self.count >= CACHE_CUTOFF {
self.cold_insert(key, value)
} else {
self.count += 1;
true
}
}
#[cold]
#[inline(never)]
fn cold_insert(&mut self, key: K, value: V) -> bool {
self.cache.insert(key, value).is_none()
}
#[inline(always)]
pub fn get(&self, key: &K) -> Option<&V> {
if self.cache.is_empty() { None } else { self.cold_get(key) }
}
#[cold]
#[inline(never)]
fn cold_get(&self, key: &K) -> Option<&V> {
self.cache.get(key)
}
}
#[derive(Debug)]
pub struct DelayedSet<T> {
cache: HashSet<T>,
count: u32,
}
impl<T> Default for DelayedSet<T> {
fn default() -> Self {
DelayedSet { cache: Default::default(), count: 0 }
}
}
impl<T: Hash + Eq> DelayedSet<T> {
#[inline(always)]
pub fn insert(&mut self, value: T) -> bool {
if self.count >= CACHE_CUTOFF {
self.cold_insert(value)
} else {
self.count += 1;
true
}
}
#[cold]
#[inline(never)]
fn cold_insert(&mut self, value: T) -> bool {
self.cache.insert(value)
}
#[inline(always)]
pub fn contains(&self, value: &T) -> bool {
!self.cache.is_empty() && self.cold_contains(value)
}
#[cold]
#[inline(never)]
fn cold_contains(&self, value: &T) -> bool {
self.cache.contains(value)
}
}
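
A short usage sketch of the type added above (illustrative only; `rustc_type_ir` is an internal compiler crate, so treat the import as hypothetical outside of rustc): the first `CACHE_CUTOFF` insertions are ignored entirely, so small folds avoid any hashing overhead and only genuinely large folds start memoizing.

```rust
// Illustrative only: `DelayedMap` lives in the internal `rustc_type_ir` crate.
use rustc_type_ir::data_structures::DelayedMap;

fn main() {
    let mut cache: DelayedMap<u32, u32> = Default::default();

    // The first `CACHE_CUTOFF` (32) insertions only bump a counter; nothing
    // is actually stored, so lookups still miss.
    for i in 0..32 {
        assert!(cache.insert(i, i * 2));
        assert!(cache.get(&i).is_none());
    }

    // From the 33rd insertion onwards, entries are hashed and stored for real.
    assert!(cache.insert(100, 200));
    assert_eq!(cache.get(&100), Some(&200));
}
```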


@ -6,6 +6,8 @@ pub use rustc_hash::{FxHashMap as HashMap, FxHashSet as HashSet};
pub type IndexMap<K, V> = indexmap::IndexMap<K, V, BuildHasherDefault<FxHasher>>;
pub type IndexSet<V> = indexmap::IndexSet<V, BuildHasherDefault<FxHasher>>;
mod delayed_map;
#[cfg(feature = "nightly")]
mod impl_ {
pub use rustc_data_structures::sso::{SsoHashMap, SsoHashSet};
@ -24,4 +26,5 @@ mod impl_ {
}
}
pub use delayed_map::{DelayedMap, DelayedSet};
pub use impl_::*;


@ -0,0 +1,30 @@
//@ check-pass
//@ revisions: ai ia ii
//@ compile-flags: -Znext-solver=coherence
// Regression test for nalgebra hang <https://github.com/rust-lang/rust/issues/130056>.
#![feature(lazy_type_alias)]
#![allow(incomplete_features)]
type Id<T: ?Sized> = T;
trait NotImplemented {}
struct W<'a, T: ?Sized, U: ?Sized>(&'a (), *const T, *const U);
trait Trait {
type Assoc: ?Sized;
}
impl<'a, T: ?Sized + Trait> Trait for W<'a, T, T> {
#[cfg(ai)]
type Assoc = W<'a, T::Assoc, Id<T::Assoc>>;
#[cfg(ia)]
type Assoc = W<'a, Id<T::Assoc>, T::Assoc>;
#[cfg(ii)]
type Assoc = W<'a, Id<T::Assoc>, Id<T::Assoc>>;
}
trait Overlap<T: ?Sized> {}
impl<'a, T: ?Sized> Overlap<T> for W<'a, T, T> {}
impl<T: ?Sized + Trait + NotImplemented> Overlap<T::Assoc> for T {}
fn main() {}


@ -1,6 +1,8 @@
//@ check-pass
//@ revisions: current next
//@[next] compile-flags: -Znext-solver
//@ revisions: ai_current ai_next ia_current ia_next ii_current ii_next
//@[ai_next] compile-flags: -Znext-solver
//@[ia_next] compile-flags: -Znext-solver
//@[ii_next] compile-flags: -Znext-solver
// Regression test for nalgebra hang <https://github.com/rust-lang/rust/issues/130056>.
@ -15,7 +17,12 @@ trait Trait {
type Assoc: ?Sized;
}
impl<T: ?Sized + Trait> Trait for W<T, T> {
#[cfg(any(ai_current, ai_next))]
type Assoc = W<T::Assoc, Id<T::Assoc>>;
#[cfg(any(ia_current, ia_next))]
type Assoc = W<Id<T::Assoc>, T::Assoc>;
#[cfg(any(ii_current, ii_next))]
type Assoc = W<Id<T::Assoc>, Id<T::Assoc>>;
}
trait Overlap<T: ?Sized> {}


@ -0,0 +1,35 @@
//@ check-pass
//@ revisions: current next
//@[next] compile-flags: -Znext-solver
// Regression test for nalgebra hang from
// https://github.com/rust-lang/rust/pull/130654#issuecomment-2365465354
trait HasAlias {}
struct Dummy;
trait DummyTrait {
type DummyType<T: HasAlias>;
}
impl DummyTrait for Dummy {
type DummyType<T: HasAlias> = T;
}
type AliasOf<T> = <Dummy as DummyTrait>::DummyType<T>;
struct Matrix<T, S>(T, S);
type OMatrix<T> = Matrix<T, AliasOf<T>>;
impl<T: HasAlias> HasAlias for OMatrix<T> {}
trait SimdValue {
type Element;
}
impl<T: HasAlias + SimdValue<Element: HasAlias>> SimdValue for OMatrix<T> {
type Element = OMatrix<T::Element>;
}
trait Unimplemented {}
pub trait MyFrom<T> {}
impl<T: Unimplemented> MyFrom<T> for T {}
impl<T: SimdValue<Element: HasAlias>> MyFrom<T> for OMatrix<T::Element> {}
fn main() {}