Rollup merge of #22785 - nikomatsakis:issue-21750-normalization-with-regions, r=pnkfelix

Two changes:

1. Make traits with assoc types invariant w/r/t their inputs.
2. Fully normalize parameter environments, including any region variables
   (which were being overlooked).

The former supports the latter, but also just seems like a reasonably good idea.

Fixes #21750
cc @edwardw
r? @pnkfelix

Commit 3a49c3bd9c
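The second change is easiest to see in code. Below is a hedged sketch (mine; the actual regression test for #21750 may differ) of the kind of signature whose parameter environment contains a projection involving a region, which normalization must now resolve rather than overlook:

```rust
// Hypothetical illustration, not the actual #21750 test case: the
// where-clause equates `T::Out` with `&'a u32`, so normalizing this
// function's parameter environment must substitute a type that
// mentions the region parameter `'a`.
trait Foo<'a> { type Out; }

fn use_projection<'a, T>(x: T::Out) -> T::Out
    where T: Foo<'a, Out = &'a u32>
{
    x
}

fn main() {
    struct Marker;
    impl<'a> Foo<'a> for Marker { type Out = &'a u32; }

    let n = 7;
    assert_eq!(*use_projection::<Marker>(&n), 7);
}
```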
@@ -265,13 +265,7 @@ pub trait Combine<'tcx> : Sized {
             Err(ty::terr_projection_name_mismatched(
                 expected_found(self, a.item_name, b.item_name)))
         } else {
-            // Note that the trait refs for the projection must be
-            // *equal*. This is because there is no inherent
-            // relationship between `<T as Foo>::Bar` and `<U as
-            // Foo>::Bar` that we can derive based on how `T` relates
-            // to `U`. Issue #21726 contains further discussion and
-            // in-depth examples.
-            let trait_ref = try!(self.equate().trait_refs(&*a.trait_ref, &*b.trait_ref));
+            let trait_ref = try!(self.trait_refs(&*a.trait_ref, &*b.trait_ref));
             Ok(ty::ProjectionTy { trait_ref: Rc::new(trait_ref), item_name: a.item_name })
         }
     }
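The comment removed above still carries the key fact; here is a minimal sketch (mine, not part of the commit) of why the two trait refs must be *equal* rather than merely subtype-related:

```rust
// A projection is fixed by its exact self type, so relating
// `<T as Foo>::Bar` to `<U as Foo>::Bar` requires `T == U`.
trait Foo { type Bar; }

impl Foo for u8 { type Bar = String; }
impl Foo for u16 { type Bar = Vec<u8>; }

fn main() {
    // `u8` and `u16` might be related by some conversion, yet their
    // projected `Bar` types share no relationship at all.
    let _a: <u8 as Foo>::Bar = String::new();
    let _b: <u16 as Foo>::Bar = Vec::new();
}
```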
@@ -52,9 +52,16 @@ fn overlap(selcx: &mut SelectionContext,
            b_def_id: ast::DefId)
            -> bool
 {
+    debug!("overlap(a_def_id={}, b_def_id={})",
+           a_def_id.repr(selcx.tcx()),
+           b_def_id.repr(selcx.tcx()));
+
     let (a_trait_ref, a_obligations) = impl_trait_ref_and_oblig(selcx, a_def_id);
     let (b_trait_ref, b_obligations) = impl_trait_ref_and_oblig(selcx, b_def_id);
 
+    debug!("overlap: a_trait_ref={}", a_trait_ref.repr(selcx.tcx()));
+    debug!("overlap: b_trait_ref={}", b_trait_ref.repr(selcx.tcx()));
+
     // Does `a <: b` hold? If not, no overlap.
     if let Err(_) = infer::mk_sub_poly_trait_refs(selcx.infcx(),
                                                   true,
@@ -64,10 +71,20 @@ fn overlap(selcx: &mut SelectionContext,
         return false;
     }
 
+    debug!("overlap: subtraitref check succeeded");
+
     // Are any of the obligations unsatisfiable? If so, no overlap.
-    a_obligations.iter()
-                 .chain(b_obligations.iter())
-                 .all(|o| selcx.evaluate_obligation(o))
+    let opt_failing_obligation =
+        a_obligations.iter()
+                     .chain(b_obligations.iter())
+                     .find(|o| !selcx.evaluate_obligation(o));
+
+    if let Some(failing_obligation) = opt_failing_obligation {
+        debug!("overlap: obligation unsatisfiable {}", failing_obligation.repr(selcx.tcx()));
+        return false;
+    }
+
+    true
 }
 
 /// Instantiate fresh variables for all bound parameters of the impl
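The shape of this refactor is worth noting: `.all(..)` discards which element failed, while `.find(..)` keeps it around for the debug log. A standalone sketch (names are mine) of the same pattern:

```rust
// Like the new `overlap`: find the first failing element instead of
// asking `.all(..)`, so the failure can be reported before bailing out.
fn all_satisfied(obligations: &[i32]) -> bool {
    let opt_failing = obligations.iter().find(|&&o| o <= 0);
    if let Some(failing) = opt_failing {
        println!("unsatisfiable obligation: {}", failing);
        return false;
    }
    true
}

fn main() {
    assert!(all_satisfied(&[1, 2, 3]));
    assert!(!all_satisfied(&[1, -2, 3]));
}
```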
@@ -18,7 +18,7 @@ pub use self::ObligationCauseCode::*;
 use middle::subst;
 use middle::ty::{self, HasProjectionTypes, Ty};
 use middle::ty_fold::TypeFoldable;
-use middle::infer::{self, InferCtxt};
+use middle::infer::{self, fixup_err_to_string, InferCtxt};
 use std::slice::Iter;
 use std::rc::Rc;
 use syntax::ast;
@@ -395,53 +395,64 @@ pub fn type_known_to_meet_builtin_bound<'a,'tcx>(infcx: &InferCtxt<'a,'tcx>,
     }
 }
 
+/// Normalizes the parameter environment, reporting errors if they occur.
 pub fn normalize_param_env_or_error<'a,'tcx>(unnormalized_env: ty::ParameterEnvironment<'a,'tcx>,
                                              cause: ObligationCause<'tcx>)
                                              -> ty::ParameterEnvironment<'a,'tcx>
 {
-    match normalize_param_env(&unnormalized_env, cause) {
-        Ok(p) => p,
-        Err(errors) => {
-            // I'm not wild about reporting errors here; I'd prefer to
-            // have the errors get reported at a defined place (e.g.,
-            // during typeck). Instead I have all parameter
-            // environments, in effect, going through this function
-            // and hence potentially reporting errors. This ensures of
-            // course that we never forget to normalize (the
-            // alternative seemed like it would involve a lot of
-            // manual invocations of this fn -- and then we'd have to
-            // deal with the errors at each of those sites).
-            //
-            // In any case, in practice, typeck constructs all the
-            // parameter environments once for every fn as it goes,
-            // and errors will get reported then; so after typeck we
-            // can be sure that no errors should occur.
-            let infcx = infer::new_infer_ctxt(unnormalized_env.tcx);
-            report_fulfillment_errors(&infcx, &errors);
+    // I'm not wild about reporting errors here; I'd prefer to
+    // have the errors get reported at a defined place (e.g.,
+    // during typeck). Instead I have all parameter
+    // environments, in effect, going through this function
+    // and hence potentially reporting errors. This ensures of
+    // course that we never forget to normalize (the
+    // alternative seemed like it would involve a lot of
+    // manual invocations of this fn -- and then we'd have to
+    // deal with the errors at each of those sites).
+    //
+    // In any case, in practice, typeck constructs all the
+    // parameter environments once for every fn as it goes,
+    // and errors will get reported then; so after typeck we
+    // can be sure that no errors should occur.
 
-            // Normalization failed? Use what they gave us, it's better than nothing.
-            unnormalized_env
-        }
-    }
-}
+    let tcx = unnormalized_env.tcx;
+    let span = cause.span;
+    let body_id = cause.body_id;
 
-pub fn normalize_param_env<'a,'tcx>(param_env: &ty::ParameterEnvironment<'a,'tcx>,
-                                    cause: ObligationCause<'tcx>)
-                                    -> Result<ty::ParameterEnvironment<'a,'tcx>,
-                                              Vec<FulfillmentError<'tcx>>>
-{
-    let tcx = param_env.tcx;
-
-    debug!("normalize_param_env(param_env={})",
-           param_env.repr(tcx));
+    debug!("normalize_param_env_or_error(unnormalized_env={})",
+           unnormalized_env.repr(tcx));
 
     let infcx = infer::new_infer_ctxt(tcx);
-    let predicates = try!(fully_normalize(&infcx, param_env, cause, &param_env.caller_bounds));
+    let predicates = match fully_normalize(&infcx, &unnormalized_env, cause,
+                                           &unnormalized_env.caller_bounds) {
+        Ok(predicates) => predicates,
+        Err(errors) => {
+            report_fulfillment_errors(&infcx, &errors);
+            return unnormalized_env; // an unnormalized env is better than nothing
+        }
+    };
 
-    debug!("normalize_param_env: predicates={}",
+    infcx.resolve_regions_and_report_errors(body_id);
+    let predicates = match infcx.fully_resolve(&predicates) {
+        Ok(predicates) => predicates,
+        Err(fixup_err) => {
+            // If we encounter a fixup error, it means that some type
+            // variable wound up unconstrained. I actually don't know
+            // if this can happen, and I certainly don't expect it to
+            // happen often, but if it did happen it probably
+            // represents a legitimate failure due to some kind of
+            // unconstrained variable, and it seems better not to ICE,
+            // all things considered.
+            let err_msg = fixup_err_to_string(fixup_err);
+            tcx.sess.span_err(span, &err_msg);
+            return unnormalized_env; // an unnormalized env is better than nothing
+        }
+    };
+
+    debug!("normalize_param_env_or_error: predicates={}",
            predicates.repr(tcx));
 
-    Ok(param_env.with_caller_bounds(predicates))
+    unnormalized_env.with_caller_bounds(predicates)
 }
 
 pub fn fully_normalize<'a,'tcx,T>(infcx: &InferCtxt<'a,'tcx>,
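The new control flow folds the old `normalize_param_env` into its caller: each fallible stage reports its own error and falls back to the unnormalized environment, instead of pushing a `Result` onto every caller. A schematic sketch (all names mine; the types are stand-ins for rustc's) of that structure:

```rust
// Stage 1 mirrors `fully_normalize`, stage 2 mirrors `fully_resolve`;
// both fall back to the input on error.
fn normalize_or_fallback(env: Vec<String>) -> Vec<String> {
    let predicates = match try_normalize(&env) {
        Ok(p) => p,
        Err(e) => {
            eprintln!("fulfillment error: {}", e);
            return env; // an unnormalized env is better than nothing
        }
    };
    match try_resolve(&predicates) {
        Ok(p) => p,
        Err(e) => {
            eprintln!("fixup error: {}", e);
            env // an unnormalized env is better than nothing
        }
    }
}

// Stand-ins for the real normalization and resolution steps.
fn try_normalize(env: &[String]) -> Result<Vec<String>, String> { Ok(env.to_vec()) }
fn try_resolve(env: &[String]) -> Result<Vec<String>, String> { Ok(env.to_vec()) }

fn main() {
    let env = vec!["T: Clone".to_string()];
    assert_eq!(normalize_or_fallback(env.clone()), env);
}
```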
@@ -453,8 +464,7 @@ pub fn fully_normalize<'a,'tcx,T>(infcx: &InferCtxt<'a,'tcx>,
 {
     let tcx = closure_typer.tcx();
 
-    debug!("normalize_param_env(value={})",
-           value.repr(tcx));
+    debug!("normalize_param_env(value={})", value.repr(tcx));
 
     let mut selcx = &mut SelectionContext::new(infcx, closure_typer);
     let mut fulfill_cx = FulfillmentContext::new();
@@ -468,8 +478,7 @@ pub fn fully_normalize<'a,'tcx,T>(infcx: &InferCtxt<'a,'tcx>,
     }
     try!(fulfill_cx.select_all_or_error(infcx, closure_typer));
     let resolved_value = infcx.resolve_type_vars_if_possible(&normalized_value);
-    debug!("normalize_param_env: resolved_value={}",
-           resolved_value.repr(tcx));
+    debug!("normalize_param_env: resolved_value={}", resolved_value.repr(tcx));
     Ok(resolved_value)
 }
 
@@ -203,6 +203,56 @@
 //! failure, but rather because the target type `Foo<Y>` is itself just
 //! not well-formed. Basically we get to assume well-formedness of all
 //! types involved before considering variance.
+//!
+//! ### Associated types
+//!
+//! Any trait with an associated type is invariant with respect to all
+//! of its inputs. To see why this makes sense, consider what
+//! subtyping for a trait reference means:
+//!
+//!     <T as Trait> <: <U as Trait>
+//!
+//! means that if I know that `T as Trait`, I also know that `U as
+//! Trait`. Moreover, if you think of it as dictionary passing style,
+//! it means that a dictionary for `<T as Trait>` is safe to use where
+//! a dictionary for `<U as Trait>` is expected.
+//!
+//! The problem is that when you can project types out from `<T as
+//! Trait>`, the relationship to types projected out of `<U as Trait>`
+//! is completely unknown unless `T == U` (see #21726 for more
+//! details). Making `Trait` invariant ensures that this is true.
+//!
+//! *Historical note: we used to preserve this invariant another way,
+//! by tweaking the subtyping rules and requiring that when a type `T`
+//! appeared as part of a projection, that was considered an invariant
+//! location, but this version does away with the need for those
+//! somewhat "special-case-feeling" rules.*
+//!
+//! Another related reason is that if we didn't make traits with
+//! associated types invariant, then projection would no longer be a
+//! function with a single result. Consider:
+//!
+//! ```
+//! trait Identity { type Out; fn foo(&self); }
+//! impl<T> Identity for T { type Out = T; ... }
+//! ```
+//!
+//! Now if I have `<&'static () as Identity>::Out`, this could be
+//! validly derived as `&'a ()` for any `'a`:
+//!
+//!     <&'a () as Identity> <: <&'static () as Identity>
+//!     if &'static () <: &'a ()   -- Identity is contravariant in Self
+//!     if 'static : 'a            -- Subtyping rules for regions
+//!
+//! This change, on the other hand, means that `<&'static () as
+//! Identity>::Out` is always `&'static ()` (which might then be
+//! upcast to `&'a ()`, separately). This was helpful in solving #21750.
 
 use self::VarianceTerm::*;
 use self::ParamKind::*;
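For reference, a runnable version of the doc comment's `Identity` example (the elided `...` body is filled in by me), showing that the projection is a function of the exact self type:

```rust
trait Identity { type Out; fn foo(&self); }
impl<T> Identity for T { type Out = T; fn foo(&self) {} }

fn main() {
    static UNIT: () = ();
    // With `Identity` invariant in `Self`, this projection is uniquely
    // `&'static ()`; a shorter lifetime is reached afterwards by
    // ordinary subtyping on the result, not by varying the projection.
    let out: <&'static () as Identity>::Out = &UNIT;
    let _shorter: &() = out;
}
```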
@@ -613,7 +663,18 @@ impl<'a, 'tcx, 'v> Visitor<'v> for ConstraintContext<'a, 'tcx> {
                                                    &method.fty.sig,
                                                    self.covariant);
                     }
-                    ty::TypeTraitItem(_) => {}
+                    ty::TypeTraitItem(ref data) => {
+                        // Any trait with an associated type is
+                        // invariant with respect to all of its
+                        // inputs. See the lengthy discussion in the
+                        // comment on this module.
+                        let projection_ty = ty::mk_projection(tcx,
+                                                              trait_def.trait_ref.clone(),
+                                                              data.name);
+                        self.add_constraints_from_ty(&trait_def.generics,
+                                                     projection_ty,
+                                                     self.invariant);
+                    }
                 }
             }
         }
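A user-level sketch (mine, not from the diff) of what this constraint means for variance inference: any type that embeds a projection out of `T` ends up invariant in `T`:

```rust
trait Lookup { type Out; }

struct UsesProjection<T: Lookup> {
    // Mentioning `<T as Lookup>::Out` makes `UsesProjection` invariant
    // in `T`, mirroring the invariance of traits with associated types.
    out: <T as Lookup>::Out,
}

// Example impl so the struct can be instantiated.
impl Lookup for u32 { type Out = String; }

fn main() {
    let _x = UsesProjection::<u32> { out: String::new() };
}
```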
@@ -893,7 +954,7 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> {
                                               trait_def.generics.types.as_slice(),
                                               trait_def.generics.regions.as_slice(),
                                               trait_ref.substs,
-                                              self.invariant);
+                                              variance);
             }
 
             ty::ty_trait(ref data) => {