mirror of https://github.com/rust-lang/rust.git (synced 2024-11-01 23:12:02 +00:00)
Remove *most* mentions of phantom fns and variance on traits. Leave some
comments and also leave the entries in the variance tables for now.
This commit is contained in: parent 628d715ff4, commit 38fdd50e0b
@ -276,12 +276,15 @@ macro_rules! impls{
#[unstable(feature = "core", reason = "deprecated")]
#[deprecated(since = "1.0.0", reason = "No longer needed")]
#[allow(deprecated)]
#[cfg(stage0)]
pub trait MarkerTrait : PhantomFn<Self,Self> { }
// ~~~~~ <-- FIXME(#22806)?
//
// Marker trait has been made invariant so as to avoid inf recursion,
// but we should ideally solve the underlying problem. That's a bit
// complicated.

/// `MarkerTrait` is deprecated and no longer needed.
#[unstable(feature = "core", reason = "deprecated")]
#[deprecated(since = "1.0.0", reason = "No longer needed")]
#[allow(deprecated)]
#[cfg(not(stage0))]
pub trait MarkerTrait { }

#[allow(deprecated)]
impl<T:?Sized> MarkerTrait for T { }
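As a hedged illustration of what this hunk means for downstream code (not part of the commit; the `MyMarker` name and the sample impl are invented): a user-defined marker trait no longer needs to extend `MarkerTrait` or mention `PhantomFn`; a plain empty trait suffices.

    #![allow(dead_code)]

    // Hypothetical downstream code, before and after this change.
    //
    // Before: marker traits were encouraged to extend the deprecated helper:
    // pub trait MyMarker: MarkerTrait { }
    //
    // After: an empty trait is enough.
    pub trait MyMarker { }

    impl MyMarker for u32 { }

    fn main() {
        // Nothing to run; the point is only that the definitions compile
        // without `MarkerTrait` or `PhantomFn`.
    }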
@ -290,7 +293,20 @@ impl<T:?Sized> MarkerTrait for T { }
#[lang="phantom_fn"]
#[unstable(feature = "core", reason = "deprecated")]
#[deprecated(since = "1.0.0", reason = "No longer needed")]
pub trait PhantomFn<A:?Sized,R:?Sized=()> { }
#[cfg(stage0)]
pub trait PhantomFn<A:?Sized,R:?Sized=()> {
}

/// `PhantomFn` is a deprecated marker trait that is no longer needed.
#[unstable(feature = "core", reason = "deprecated")]
#[deprecated(since = "1.0.0", reason = "No longer needed")]
#[cfg(not(stage0))]
pub trait PhantomFn<A:?Sized,R:?Sized=()> {
}

#[allow(deprecated)]
#[cfg(not(stage0))]
impl<A:?Sized,R:?Sized,T:?Sized> PhantomFn<A,R> for T { }

/// `PhantomData<T>` allows you to describe that a type acts as if it stores a value of type `T`,
/// even though it does not. This allows you to inform the compiler about certain safety properties
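The `PhantomData` doc comment above is cut off by the hunk boundary; the following is a minimal sketch of the usage it describes (the `MyVec` type and its field names are invented for illustration): a type that logically owns values of type `T` through a raw pointer declares that ownership with a `PhantomData<T>` field.

    #![allow(dead_code)]
    use std::marker::PhantomData;

    // Illustrative only: a container that owns its elements via a raw pointer.
    // The `PhantomData<T>` field tells the compiler that `MyVec<T>` acts as if
    // it stores values of type `T`, which feeds into drop checking and variance.
    struct MyVec<T> {
        ptr: *mut T,
        len: usize,
        cap: usize,
        _owns_t: PhantomData<T>,
    }

    fn main() {
        // Construct an empty instance just to show the definition compiles.
        let _v: MyVec<u8> = MyVec {
            ptr: std::ptr::null_mut(),
            len: 0,
            cap: 0,
            _owns_t: PhantomData,
        };
    }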
@ -321,7 +321,6 @@ lets_do_this! {
ExchangeHeapLangItem, "exchange_heap", exchange_heap;
OwnedBoxLangItem, "owned_box", owned_box;

PhantomFnItem, "phantom_fn", phantom_fn;
PhantomDataItem, "phantom_data", phantom_data;

// Deprecated:
@ -138,11 +138,10 @@ fn supertraits_reference_self<'tcx>(tcx: &ty::ctxt<'tcx>,
match predicate {
ty::Predicate::Trait(ref data) => {
// In the case of a trait predicate, we can skip the "self" type.
Some(data.def_id()) != tcx.lang_items.phantom_fn() &&
data.0.trait_ref.substs.types.get_slice(TypeSpace)
.iter()
.cloned()
.any(is_self)
data.0.trait_ref.substs.types.get_slice(TypeSpace)
.iter()
.cloned()
.any(is_self)
}
ty::Predicate::Projection(..) |
ty::Predicate::TypeOutlives(..) |
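For context on what `supertraits_reference_self` detects, here is a hedged, minimal sketch (the trait names are invented; it is not part of the commit): a trait whose supertrait bound mentions `Self` in its type parameters is exactly the pattern this check looks for, and such a trait cannot be used as a trait object.

    #![allow(dead_code)]

    // Illustration only; `Super` and `Sub` are made-up names.
    trait Super<T> { }

    // The supertrait bound references `Self` in its type parameters, which is
    // the situation `supertraits_reference_self` reports.
    trait Sub: Super<Self> { }

    // Consequently `Sub` is not object safe; a signature like the following
    // would be rejected by the compiler, so it is left commented out:
    // fn takes_object(_: &dyn Sub) { }

    fn main() { }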
@ -836,14 +836,6 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
ambiguous: false
};

// Check for the `PhantomFn` trait. This is really just a
// special annotation that is *always* considered to match, no
// matter what the type parameters are etc.
if self.tcx().lang_items.phantom_fn() == Some(obligation.predicate.def_id()) {
candidates.vec.push(PhantomFnCandidate);
return Ok(candidates);
}

// Other bounds. Consider both in-scope bounds from fn decl
// and applicable impls. There is a certain set of precedence rules here.
@ -117,15 +117,10 @@ impl<'ccx, 'tcx> CheckTypeWellFormedVisitor<'ccx, 'tcx> {
self.check_variances_for_type_defn(item, ast_generics);
}
ast::ItemTrait(_, ref ast_generics, _, ref items) => {
ast::ItemTrait(_, _, _, ref items) => {
let trait_predicates =
ty::lookup_predicates(ccx.tcx, local_def(item.id));
reject_non_type_param_bounds(
ccx.tcx,
item.span,
&trait_predicates);
self.check_variances(item, ast_generics, &trait_predicates,
self.tcx().lang_items.phantom_fn());
reject_non_type_param_bounds(ccx.tcx, item.span, &trait_predicates);
if ty::trait_has_default_impl(ccx.tcx, local_def(item.id)) {
if !items.is_empty() {
ccx.tcx.sess.span_err(
@ -287,30 +282,7 @@ impl<'ccx, 'tcx> CheckTypeWellFormedVisitor<'ccx, 'tcx> {
ast_generics: &ast::Generics)
{
let item_def_id = local_def(item.id);
let predicates = ty::lookup_predicates(self.tcx(), item_def_id);
self.check_variances(item,
ast_generics,
&predicates,
self.tcx().lang_items.phantom_data());
}

fn check_variances(&self,
item: &ast::Item,
ast_generics: &ast::Generics,
ty_predicates: &ty::GenericPredicates<'tcx>,
suggested_marker_id: Option<ast::DefId>)
{
let variance_lang_items = &[
self.tcx().lang_items.phantom_fn(),
self.tcx().lang_items.phantom_data(),
];

let item_def_id = local_def(item.id);
let is_lang_item = variance_lang_items.iter().any(|n| *n == Some(item_def_id));
if is_lang_item {
return;
}

let ty_predicates = ty::lookup_predicates(self.tcx(), item_def_id);
let variances = ty::item_variances(self.tcx(), item_def_id);

let mut constrained_parameters: HashSet<_> =
@ -331,7 +303,7 @@ impl<'ccx, 'tcx> CheckTypeWellFormedVisitor<'ccx, 'tcx> {
continue;
}
let span = self.ty_param_span(ast_generics, item, space, index);
self.report_bivariance(span, param_ty.name, suggested_marker_id);
self.report_bivariance(span, param_ty.name);
}

for (space, index, &variance) in variances.regions.iter_enumerated() {
@ -342,7 +314,7 @@ impl<'ccx, 'tcx> CheckTypeWellFormedVisitor<'ccx, 'tcx> {
assert_eq!(space, TypeSpace);
let span = ast_generics.lifetimes[index].lifetime.span;
let name = ast_generics.lifetimes[index].lifetime.name;
self.report_bivariance(span, name, suggested_marker_id);
self.report_bivariance(span, name);
}
}
@ -377,14 +349,14 @@ impl<'ccx, 'tcx> CheckTypeWellFormedVisitor<'ccx, 'tcx> {
fn report_bivariance(&self,
span: Span,
param_name: ast::Name,
suggested_marker_id: Option<ast::DefId>)
param_name: ast::Name)
{
self.tcx().sess.span_err(
span,
&format!("parameter `{}` is never used",
param_name.user_string(self.tcx())));

let suggested_marker_id = self.tcx().lang_items.phantom_data();
match suggested_marker_id {
Some(def_id) => {
self.tcx().sess.fileline_help(
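From the user's side, the situation `report_bivariance` complains about and the marker it suggests look roughly like the sketch below (the type names are invented; the exact diagnostic wording is whatever the `span_err`/`fileline_help` calls above emit):

    use std::marker::PhantomData;

    // A type parameter that is never used triggers the bivariance error above
    // ("parameter `T` is never used"):
    // struct Unused<T>;            // rejected by the check in this file

    // The suggested fix is the `phantom_data` lang item looked up above:
    struct Tagged<T> {
        _marker: PhantomData<T>,    // "uses" `T` without storing one
    }

    fn main() {
        let _t: Tagged<u32> = Tagged { _marker: PhantomData };
    }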
@ -18,34 +18,121 @@
//! defined on type `X`, we only consider the definition of the type `X`
//! and the definitions of any types it references.
//!
//! We only infer variance for type parameters found on *types*: structs,
//! enums, and traits. We do not infer variance for type parameters found
//! on fns or impls. This is because those things are not type definitions
//! and variance doesn't really make sense in that context.
//!
//! It is worth covering what variance means in each case. For structs and
//! enums, I think it is fairly straightforward. The variance of the type
//! We only infer variance for type parameters found on *data types*
//! like structs and enums. In these cases, there is a fairly straightforward
//! explanation for what variance means. The variance of the type
//! or lifetime parameters defines whether `T<A>` is a subtype of `T<B>`
//! (resp. `T<'a>` and `T<'b>`) based on the relationship of `A` and `B`
//! (resp. `'a` and `'b`). (FIXME #3598 -- we do not currently make use of
//! the variances we compute for type parameters.)
//! (resp. `'a` and `'b`).
//!
//! ### Variance on traits
//! We do not infer variance for type parameters found on traits, fns,
//! or impls. Variance on trait parameters can indeed make sense
//! (and we used to compute it) but it is actually rather subtle in
//! meaning and not that useful in practice, so we removed it. See the
//! addendum for some details. Variances on fn/impl parameters, otoh,
//! don't make sense because these parameters are instantiated and
//! then forgotten, they don't persist in types or compiled
//! byproducts.
//!
//! The meaning of variance for trait parameters is more subtle and worth
//! expanding upon. There are in fact two uses of the variance values we
//! compute.
//! ### The algorithm
//!
//! #### Trait variance and object types
//! The basic idea is quite straightforward. We iterate over the types
//! defined and, for each use of a type parameter X, accumulate a
//! constraint indicating that the variance of X must be valid for the
//! variance of that use site. We then iteratively refine the variance of
//! X until all constraints are met. There is *always* a sol'n, because at
//! the limit we can declare all type parameters to be invariant and all
//! constraints will be satisfied.
//!
//! The first is for object types. Just as with structs and enums, we can
//! decide the subtyping relationship between two object types `&Trait<A>`
//! and `&Trait<B>` based on the relationship of `A` and `B`. Note that
//! for object types we ignore the `Self` type parameter -- it is unknown,
//! and the nature of dynamic dispatch ensures that we will always call a
//! As a simple example, consider:
//!
//!     enum Option<A> { Some(A), None }
//!     enum OptionalFn<B> { Some(|B|), None }
//!     enum OptionalMap<C> { Some(|C| -> C), None }
//!
//! Here, we will generate the constraints:
//!
//!     1. V(A) <= +
//!     2. V(B) <= -
//!     3. V(C) <= +
//!     4. V(C) <= -
//!
//! These indicate that (1) the variance of A must be at most covariant;
//! (2) the variance of B must be at most contravariant; and (3, 4) the
//! variance of C must be at most covariant *and* contravariant. All of these
//! results are based on a variance lattice defined as follows:
//!
//!        *  Top (bivariant)
//!     -     +
//!        o  Bottom (invariant)
//!
//! Based on this lattice, the solution V(A)=+, V(B)=-, V(C)=o is the
//! optimal solution. Note that there is always a naive solution which
//! just declares all variables to be invariant.
//!
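To connect the constraint example above to source-level behaviour, here is a hedged, modern-Rust sketch (the `Producer`/`Consumer` names and functions are invented; it is not part of the commit): a parameter used only in output position ends up covariant and one used only in input position ends up contravariant, which is observable through lifetime subtyping.

    // Illustration only, not part of this commit.
    struct Producer<T> { make: fn() -> T }  // T in output position: covariant
    struct Consumer<T> { take: fn(T) }      // T in input position: contravariant

    // Covariance: `&'static str` is a subtype of `&'a str`, so a
    // `Producer<&'static str>` can be used as a `Producer<&'a str>`.
    fn shrink<'a>(p: Producer<&'static str>) -> Producer<&'a str> { p }

    // Contravariance: the direction flips for `Consumer`.
    fn widen<'a>(c: Consumer<&'a str>) -> Consumer<&'static str> { c }

    fn hello() -> &'static str { "hello" }
    fn print_str(s: &str) { println!("{}", s); }

    fn main() {
        let p = shrink(Producer { make: hello });
        let c = widen(Consumer { take: print_str });
        println!("{}", (p.make)());
        (c.take)("static string");
    }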
//! You may be wondering why fixed-point iteration is required. The reason
//! is that the variance of a use site may itself be a function of the
//! variance of other type parameters. In full generality, our constraints
//! take the form:
//!
//!     V(X) <= Term
//!     Term := + | - | * | o | V(X) | Term x Term
//!
//! Here the notation V(X) indicates the variance of a type/region
//! parameter `X` with respect to its defining class. `Term x Term`
//! represents the "variance transform" as defined in the paper:
//!
//! If the variance of a type variable `X` in type expression `E` is `V2`
//! and the definition-site variance of the [corresponding] type parameter
//! of a class `C` is `V1`, then the variance of `X` in the type expression
//! `C<E>` is `V3 = V1.xform(V2)`.
//!
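A hedged, self-contained sketch of the transform just quoted (the enum and `xform` method below mirror the rules as described above rather than quoting the compiler's own definition): covariant positions preserve the inner variance, contravariant positions flip it, and invariant or bivariant positions absorb it.

    // Illustration of `V3 = V1.xform(V2)`; not the compiler's own code.
    #[derive(Copy, Clone, Debug, PartialEq)]
    enum Variance { Covariant, Contravariant, Invariant, Bivariant }

    use self::Variance::*;

    impl Variance {
        fn xform(self, v2: Variance) -> Variance {
            match (self, v2) {
                (Covariant, v) => v,                          // covariant context keeps v2
                (Contravariant, Covariant) => Contravariant,  // contravariant context flips signs
                (Contravariant, Contravariant) => Covariant,
                (Contravariant, v) => v,                      // o and * are unchanged by flipping
                (Invariant, _) => Invariant,                  // invariant context absorbs everything
                (Bivariant, _) => Bivariant,                  // bivariant context: the use is irrelevant
            }
        }
    }

    fn main() {
        // The variance of `A` in `Consumer<Producer<A>>`, where `Consumer` is
        // contravariant and `Producer` is covariant, works out to contravariant:
        assert_eq!(Contravariant.xform(Covariant), Contravariant);
    }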
//! ### Constraints
//!
//! If I have a struct or enum with where clauses:
//!
//!     struct Foo<T:Bar> { ... }
//!
//! you might wonder whether the variance of `T` with respect to `Bar`
//! affects the variance of `T` with respect to `Foo`. I claim no. The
//! reason: assume that `T` is invariant w/r/t `Bar` but covariant w/r/t
//! `Foo`. And then we have a `Foo<X>` that is upcast to `Foo<Y>`, where
//! `X <: Y`. However, while `X : Bar`, `Y : Bar` does not hold. In that
//! case, the upcast will be illegal, but not because of a variance
//! failure, but rather because the target type `Foo<Y>` is itself just
//! not well-formed. Basically we get to assume well-formedness of all
//! types involved before considering variance.
//!
//! ### Addendum: Variance on traits
//!
//! As mentioned above, we used to permit variance on traits. This was
//! computed based on the appearance of trait type parameters in
//! method signatures and was used to represent the compatibility of
//! vtables in trait objects (and also "virtual" vtables or dictionary
//! in trait bounds). One complication was that variance for
//! associated types is less obvious, since they can be projected out
//! and put to myriad uses, so it's not clear when it is safe to allow
//! `X<A>::Bar` to vary (or indeed just what that means). Moreover (as
//! covered below) all inputs on any trait with an associated type had
//! to be invariant, limiting the applicability. Finally, the
//! annotations (`MarkerTrait`, `PhantomFn`) needed to ensure that all
//! trait type parameters had a variance were confusing and annoying
//! for little benefit.
//!
//! Just for historical reference, I am going to preserve some text indicating
//! how one could interpret variance and trait matching.
//!
//! #### Variance and object types
//!
//! Just as with structs and enums, we can decide the subtyping
//! relationship between two object types `&Trait<A>` and `&Trait<B>`
//! based on the relationship of `A` and `B`. Note that for object
//! types we ignore the `Self` type parameter -- it is unknown, and
//! the nature of dynamic dispatch ensures that we will always call a
//! function that is expecting the appropriate `Self` type. However, we
//! must be careful with the other type parameters, or else we could end
//! up calling a function that is expecting one type but provided another.
//! must be careful with the other type parameters, or else we could
//! end up calling a function that is expecting one type but provided
//! another.
//!
//! To see what I mean, consider a trait like so:
//!
@ -135,104 +222,24 @@
//!
//! These conditions are satisfied and so we are happy.
//!
//! ### The algorithm
//! #### Variance and associated types
//!
//! The basic idea is quite straightforward. We iterate over the types
//! defined and, for each use of a type parameter X, accumulate a
//! constraint indicating that the variance of X must be valid for the
//! variance of that use site. We then iteratively refine the variance of
//! X until all constraints are met. There is *always* a sol'n, because at
//! the limit we can declare all type parameters to be invariant and all
//! constraints will be satisfied.
//!
//! As a simple example, consider:
//!
//!     enum Option<A> { Some(A), None }
//!     enum OptionalFn<B> { Some(|B|), None }
//!     enum OptionalMap<C> { Some(|C| -> C), None }
//!
//! Here, we will generate the constraints:
//!
//!     1. V(A) <= +
//!     2. V(B) <= -
//!     3. V(C) <= +
//!     4. V(C) <= -
//!
//! These indicate that (1) the variance of A must be at most covariant;
//! (2) the variance of B must be at most contravariant; and (3, 4) the
//! variance of C must be at most covariant *and* contravariant. All of these
//! results are based on a variance lattice defined as follows:
//!
//!        *  Top (bivariant)
//!     -     +
//!        o  Bottom (invariant)
//!
//! Based on this lattice, the solution V(A)=+, V(B)=-, V(C)=o is the
//! optimal solution. Note that there is always a naive solution which
//! just declares all variables to be invariant.
//!
//! You may be wondering why fixed-point iteration is required. The reason
//! is that the variance of a use site may itself be a function of the
//! variance of other type parameters. In full generality, our constraints
//! take the form:
//!
//!     V(X) <= Term
//!     Term := + | - | * | o | V(X) | Term x Term
//!
//! Here the notation V(X) indicates the variance of a type/region
//! parameter `X` with respect to its defining class. `Term x Term`
//! represents the "variance transform" as defined in the paper:
//!
//! If the variance of a type variable `X` in type expression `E` is `V2`
//! and the definition-site variance of the [corresponding] type parameter
//! of a class `C` is `V1`, then the variance of `X` in the type expression
//! `C<E>` is `V3 = V1.xform(V2)`.
//!
//! ### Constraints
//!
//! If I have a struct or enum with where clauses:
//!
//!     struct Foo<T:Bar> { ... }
//!
//! you might wonder whether the variance of `T` with respect to `Bar`
//! affects the variance of `T` with respect to `Foo`. I claim no. The
//! reason: assume that `T` is invariant w/r/t `Bar` but covariant w/r/t
//! `Foo`. And then we have a `Foo<X>` that is upcast to `Foo<Y>`, where
//! `X <: Y`. However, while `X : Bar`, `Y : Bar` does not hold. In that
//! case, the upcast will be illegal, but not because of a variance
//! failure, but rather because the target type `Foo<Y>` is itself just
//! not well-formed. Basically we get to assume well-formedness of all
//! types involved before considering variance.
//!
//! ### Associated types
//!
//! Any trait with an associated type is invariant with respect to all
//! of its inputs. To see why this makes sense, consider what
//! subtyping for a trait reference means:
//! Traits with associated types -- or at minimum projection
//! expressions -- must be invariant with respect to all of their
//! inputs. To see why this makes sense, consider what subtyping for a
//! trait reference means:
//!
//!     <T as Trait> <: <U as Trait>
//!
//! means that if I know that `T as Trait`,
//! I also know that `U as
//! Trait`. Moreover, if you think of it as
//! dictionary passing style, it means that
//! a dictionary for `<T as Trait>` is safe
//! to use where a dictionary for `<U as
//! Trait>` is expected.
//! means that if I know that `T as Trait`, I also know that `U as
//! Trait`. Moreover, if you think of it as dictionary passing style,
//! it means that a dictionary for `<T as Trait>` is safe to use where
//! a dictionary for `<U as Trait>` is expected.
//!
//! The problem is that when you can
//! project types out from `<T as Trait>`,
//! the relationship to types projected out
//! of `<U as Trait>` is completely unknown
//! unless `T==U` (see #21726 for more
//! details). Making `Trait` invariant
//! ensures that this is true.
//!
//! *Historical note: we used to preserve this invariant another way,
//! by tweaking the subtyping rules and requiring that when a type `T`
//! appeared as part of a projection, that was considered an invariant
//! location, but this version does away with the need for those
//! somewhat "special-case-feeling" rules.*
//! The problem is that when you can project types out from `<T as
//! Trait>`, the relationship to types projected out of `<U as Trait>`
//! is completely unknown unless `T==U` (see #21726 for more
//! details). Making `Trait` invariant ensures that this is true.
//!
//! Another related reason is that if we didn't make traits with
//! associated types invariant, then projection is no longer a
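A hedged illustration of the projection concern described above (the `Project` trait and its impls are invented; it is not part of the commit): two types can be related while their projections are determined by completely independent impl clauses, so nothing forces the projected types to be related.

    // Illustration only, not part of this commit.
    trait Project { type Out; }

    // Each impl is free to choose an arbitrary, unrelated `Out`:
    impl Project for u8  { type Out = String; }
    impl Project for u16 { type Out = Vec<u32>; }

    // For lifetimes: `&'static str` is a subtype of `&'a str`, yet
    // `<&'static str as Project>::Out` and `<&'a str as Project>::Out` are
    // whatever the impl says, with no built-in relationship between them.
    impl<'a> Project for &'a str { type Out = &'a str; }

    fn main() {
        let _s: <u8 as Project>::Out = String::from("projections are per-impl");
    }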
@ -383,7 +390,6 @@ fn determine_parameters_to_be_inferred<'a, 'tcx>(tcx: &'a ty::ctxt<'tcx>,

fn lang_items(tcx: &ty::ctxt) -> Vec<(ast::NodeId,Vec<ty::Variance>)> {
let all = vec![
(tcx.lang_items.phantom_fn(), vec![ty::Contravariant, ty::Covariant]),
(tcx.lang_items.phantom_data(), vec![ty::Covariant]),
(tcx.lang_items.unsafe_cell_type(), vec![ty::Invariant]),
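The hard-wired variances in this table can be observed from user code; the following minimal sketch (function names invented, not part of the commit) shows `PhantomData<T>` behaving covariantly while `UnsafeCell<T>` stays invariant:

    use std::cell::UnsafeCell;
    use std::marker::PhantomData;

    // PhantomData<T> is covariant in T, matching the `ty::Covariant` entry above:
    fn phantom_is_covariant<'a>(p: PhantomData<&'static str>) -> PhantomData<&'a str> {
        p
    }

    // UnsafeCell<T> is invariant in T, matching the `ty::Invariant` entry above;
    // the analogous coercion is rejected, so it is left commented out:
    // fn unsafe_cell_is_not<'a>(c: UnsafeCell<&'static str>) -> UnsafeCell<&'a str> {
    //     c
    // }

    fn main() {
        let _p: PhantomData<&str> = phantom_is_covariant(PhantomData);
        let _c: UnsafeCell<i32> = UnsafeCell::new(0);
    }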
@ -520,6 +526,9 @@ impl<'a, 'tcx, 'v> Visitor<'v> for TermsContext<'a, 'tcx> {
self.add_inferreds_for_item(item.id, false, generics);
}
ast::ItemTrait(_, ref generics, _, _) => {
// Note: all inputs for traits are ultimately
// constrained to be invariant. See `visit_item` in
// the impl for `ConstraintContext` below.
self.add_inferreds_for_item(item.id, true, generics);
visit::walk_item(self, item);
}
@ -10,8 +10,8 @@
#![allow(dead_code)]

// This test was previously testing variance on traits.
// But now that it is removed, both cases error.
// Test that even when `T` is only used in contravariant position, it
// is treated as invariant.

trait Get<T> : 'static {
fn get(&self, t: T);

@ -10,6 +10,9 @@
#![allow(dead_code)]

// Test that even when `T` is only used in contravariant position, it
// is treated as invariant.

trait Get<T> {
fn get(&self, t: T);
}

@ -10,6 +10,9 @@
#![allow(dead_code)]

// Test that even when `Self` is only used in contravariant position, it
// is treated as invariant.

trait Get {
fn get(&self);
}

@ -10,6 +10,9 @@
#![allow(dead_code)]

// Test that even when `T` is only used in covariant position, it
// is treated as invariant.

trait Get<T> : 'static {
fn get(&self) -> T;
}

@ -10,6 +10,9 @@
#![allow(dead_code)]

// Test that even when `T` is only used in covariant position, it
// is treated as invariant.

trait Get<T> {
fn get(&self) -> T;
}

@ -10,6 +10,9 @@
#![allow(dead_code)]

// Test that even when `Self` is only used in covariant position, it
// is treated as invariant.

trait Get {
fn get() -> Self;
}
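As a hedged aside on what these tests exercise (the coercion sketch below is illustrative and is not one of the test files): because trait type parameters are treated as invariant, an object type such as `Get<&'a str>` cannot be used where `Get<&'static str>` is expected, even though `T` only appears in argument position.

    #![allow(dead_code)]

    trait Get<T> {
        fn get(&self, t: T);
    }

    // Invariance in action: if `T` were inferred as contravariant, the
    // following coercion would be accepted; since trait parameters are
    // invariant it is rejected, so it is left commented out:
    // fn widen<'a>(x: &'a dyn Get<&'a str>) -> &'a dyn Get<&'static str> {
    //     x
    // }

    fn main() { }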