mirror of https://github.com/rust-lang/rust.git

Auto merge of #132371 - workingjubilee:rollup-aqd86tm, r=workingjubilee

Rollup of 5 pull requests

Successful merges:
- #129383 (Remap impl-trait lifetimes on HIR instead of AST lowering)
- #132210 (rustdoc: make doctest span tweak a 2024 edition change)
- #132246 (Rename `rustc_abi::Abi` to `BackendRepr`)
- #132267 (force-recompile library changes on download-rustc="if-unchanged")
- #132344 (Merge `HostPolarity` and `BoundConstness`)

Failed merges:
- #132347 (Remove `ValueAnalysis` and `ValueAnalysisWrapper`.)

r? `@ghost`
`@rustbot` modify labels: rollup

commit 4add5e4211
@@ -6,9 +6,9 @@ mod abi {
 #[cfg(feature = "nightly")]
 use rustc_macros::HashStable_Generic;

-#[cfg(feature = "nightly")]
-use crate::{Abi, FieldsShape, TyAbiInterface, TyAndLayout};
 use crate::{Align, HasDataLayout, Size};
+#[cfg(feature = "nightly")]
+use crate::{BackendRepr, FieldsShape, TyAbiInterface, TyAndLayout};

 #[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
 #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]

@@ -128,11 +128,11 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
     where
         Ty: TyAbiInterface<'a, C> + Copy,
     {
-        match self.abi {
-            Abi::Uninhabited => Err(Heterogeneous),
+        match self.backend_repr {
+            BackendRepr::Uninhabited => Err(Heterogeneous),

             // The primitive for this algorithm.
-            Abi::Scalar(scalar) => {
+            BackendRepr::Scalar(scalar) => {
                 let kind = match scalar.primitive() {
                     abi::Int(..) | abi::Pointer(_) => RegKind::Integer,
                     abi::Float(_) => RegKind::Float,

@@ -140,7 +140,7 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
                 Ok(HomogeneousAggregate::Homogeneous(Reg { kind, size: self.size }))
             }

-            Abi::Vector { .. } => {
+            BackendRepr::Vector { .. } => {
                 assert!(!self.is_zst());
                 Ok(HomogeneousAggregate::Homogeneous(Reg {
                     kind: RegKind::Vector,

@@ -148,7 +148,7 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
                 }))
             }

-            Abi::ScalarPair(..) | Abi::Aggregate { sized: true } => {
+            BackendRepr::ScalarPair(..) | BackendRepr::Memory { sized: true } => {
                 // Helper for computing `homogeneous_aggregate`, allowing a custom
                 // starting offset (used below for handling variants).
                 let from_fields_at =

@@ -246,7 +246,7 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
                    Ok(result)
                }
            }
-            Abi::Aggregate { sized: false } => Err(Heterogeneous),
+            BackendRepr::Memory { sized: false } => Err(Heterogeneous),
        }
    }
}
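
Note on the hunk above: the renamed match still funnels every scalar into one of two register classes for the homogeneous-aggregate check. A standalone sketch of that classification step, using simplified stand-in types rather than the real rustc_abi ones:

    // Simplified stand-ins for rustc_abi's Primitive/RegKind (assumption: not the real API).
    enum Primitive { Int, Pointer, Float }
    #[derive(Debug)]
    enum RegKind { Integer, Float }

    fn reg_kind(p: Primitive) -> RegKind {
        match p {
            // Integers and pointers share the integer register class.
            Primitive::Int | Primitive::Pointer => RegKind::Integer,
            Primitive::Float => RegKind::Float,
        }
    }
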
@@ -6,7 +6,7 @@ use rustc_index::Idx;
 use tracing::debug;

 use crate::{
-    Abi, AbiAndPrefAlign, Align, FieldsShape, HasDataLayout, IndexSlice, IndexVec, Integer,
+    AbiAndPrefAlign, Align, BackendRepr, FieldsShape, HasDataLayout, IndexSlice, IndexVec, Integer,
     LayoutData, Niche, NonZeroUsize, Primitive, ReprOptions, Scalar, Size, StructKind, TagEncoding,
     Variants, WrappingRange,
 };

@@ -125,7 +125,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                 offsets: [Size::ZERO, b_offset].into(),
                 memory_index: [0, 1].into(),
             },
-            abi: Abi::ScalarPair(a, b),
+            backend_repr: BackendRepr::ScalarPair(a, b),
             largest_niche,
             align,
             size,

@@ -216,7 +216,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
         LayoutData {
             variants: Variants::Single { index: VariantIdx::new(0) },
             fields: FieldsShape::Primitive,
-            abi: Abi::Uninhabited,
+            backend_repr: BackendRepr::Uninhabited,
             largest_niche: None,
             align: dl.i8_align,
             size: Size::ZERO,

@@ -331,7 +331,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {

             if let Ok(common) = common_non_zst_abi_and_align {
                 // Discard valid range information and allow undef
-                let field_abi = field.abi.to_union();
+                let field_abi = field.backend_repr.to_union();

                 if let Some((common_abi, common_align)) = common {
                     if common_abi != field_abi {

@@ -340,7 +340,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                     } else {
                         // Fields with the same non-Aggregate ABI should also
                         // have the same alignment
-                        if !matches!(common_abi, Abi::Aggregate { .. }) {
+                        if !matches!(common_abi, BackendRepr::Memory { .. }) {
                             assert_eq!(
                                 common_align, field.align.abi,
                                 "non-Aggregate field with matching ABI but differing alignment"

@@ -369,11 +369,11 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
         // If all non-ZST fields have the same ABI, we may forward that ABI
         // for the union as a whole, unless otherwise inhibited.
         let abi = match common_non_zst_abi_and_align {
-            Err(AbiMismatch) | Ok(None) => Abi::Aggregate { sized: true },
+            Err(AbiMismatch) | Ok(None) => BackendRepr::Memory { sized: true },
             Ok(Some((abi, _))) => {
                 if abi.inherent_align(dl).map(|a| a.abi) != Some(align.abi) {
                     // Mismatched alignment (e.g. union is #[repr(packed)]): disable opt
-                    Abi::Aggregate { sized: true }
+                    BackendRepr::Memory { sized: true }
                 } else {
                     abi
                 }

@@ -387,7 +387,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
         Ok(LayoutData {
             variants: Variants::Single { index: only_variant_idx },
             fields: FieldsShape::Union(union_field_count),
-            abi,
+            backend_repr: abi,
             largest_niche: None,
             align,
             size: size.align_to(align.abi),

@@ -434,23 +434,23 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                 // Already doesn't have any niches
                 Scalar::Union { .. } => {}
             };
-            match &mut st.abi {
-                Abi::Uninhabited => {}
-                Abi::Scalar(scalar) => hide_niches(scalar),
-                Abi::ScalarPair(a, b) => {
+            match &mut st.backend_repr {
+                BackendRepr::Uninhabited => {}
+                BackendRepr::Scalar(scalar) => hide_niches(scalar),
+                BackendRepr::ScalarPair(a, b) => {
                     hide_niches(a);
                     hide_niches(b);
                 }
-                Abi::Vector { element, count: _ } => hide_niches(element),
-                Abi::Aggregate { sized: _ } => {}
+                BackendRepr::Vector { element, count: _ } => hide_niches(element),
+                BackendRepr::Memory { sized: _ } => {}
             }
             st.largest_niche = None;
             return Ok(st);
         }

         let (start, end) = scalar_valid_range;
-        match st.abi {
-            Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
+        match st.backend_repr {
+            BackendRepr::Scalar(ref mut scalar) | BackendRepr::ScalarPair(ref mut scalar, _) => {
                 // Enlarging validity ranges would result in missed
                 // optimizations, *not* wrongly assuming the inner
                 // value is valid. e.g. unions already enlarge validity ranges,

@@ -607,8 +607,8 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
             }

             // It can't be a Scalar or ScalarPair because the offset isn't 0.
-            if !layout.abi.is_uninhabited() {
-                layout.abi = Abi::Aggregate { sized: true };
+            if !layout.is_uninhabited() {
+                layout.backend_repr = BackendRepr::Memory { sized: true };
             }
             layout.size += this_offset;

@@ -627,26 +627,26 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
         let same_size = size == variant_layouts[largest_variant_index].size;
         let same_align = align == variant_layouts[largest_variant_index].align;

-        let abi = if variant_layouts.iter().all(|v| v.abi.is_uninhabited()) {
-            Abi::Uninhabited
+        let abi = if variant_layouts.iter().all(|v| v.is_uninhabited()) {
+            BackendRepr::Uninhabited
         } else if same_size && same_align && others_zst {
-            match variant_layouts[largest_variant_index].abi {
+            match variant_layouts[largest_variant_index].backend_repr {
                 // When the total alignment and size match, we can use the
                 // same ABI as the scalar variant with the reserved niche.
-                Abi::Scalar(_) => Abi::Scalar(niche_scalar),
-                Abi::ScalarPair(first, second) => {
+                BackendRepr::Scalar(_) => BackendRepr::Scalar(niche_scalar),
+                BackendRepr::ScalarPair(first, second) => {
                     // Only the niche is guaranteed to be initialised,
                     // so use union layouts for the other primitive.
                     if niche_offset == Size::ZERO {
-                        Abi::ScalarPair(niche_scalar, second.to_union())
+                        BackendRepr::ScalarPair(niche_scalar, second.to_union())
                     } else {
-                        Abi::ScalarPair(first.to_union(), niche_scalar)
+                        BackendRepr::ScalarPair(first.to_union(), niche_scalar)
                     }
                 }
-                _ => Abi::Aggregate { sized: true },
+                _ => BackendRepr::Memory { sized: true },
             }
         } else {
-            Abi::Aggregate { sized: true }
+            BackendRepr::Memory { sized: true }
         };

         let layout = LayoutData {

@@ -664,7 +664,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                 offsets: [niche_offset].into(),
                 memory_index: [0].into(),
             },
-            abi,
+            backend_repr: abi,
             largest_niche,
             size,
             align,

@@ -833,14 +833,14 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                 end: (max as u128 & tag_mask),
             },
         };
-        let mut abi = Abi::Aggregate { sized: true };
+        let mut abi = BackendRepr::Memory { sized: true };

-        if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
-            abi = Abi::Uninhabited;
+        if layout_variants.iter().all(|v| v.is_uninhabited()) {
+            abi = BackendRepr::Uninhabited;
         } else if tag.size(dl) == size {
             // Make sure we only use scalar layout when the enum is entirely its
             // own tag (i.e. it has no padding nor any non-ZST variant fields).
-            abi = Abi::Scalar(tag);
+            abi = BackendRepr::Scalar(tag);
         } else {
             // Try to use a ScalarPair for all tagged enums.
             // That's possible only if we can find a common primitive type for all variants.

@@ -864,8 +864,8 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                     break;
                 }
             };
-            let prim = match field.abi {
-                Abi::Scalar(scalar) => {
+            let prim = match field.backend_repr {
+                BackendRepr::Scalar(scalar) => {
                     common_prim_initialized_in_all_variants &=
                         matches!(scalar, Scalar::Initialized { .. });
                     scalar.primitive()

@@ -934,7 +934,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                 {
                     // We can use `ScalarPair` only when it matches our
                     // already computed layout (including `#[repr(C)]`).
-                    abi = pair.abi;
+                    abi = pair.backend_repr;
                 }
             }
         }

@@ -942,12 +942,14 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
         // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
         // variants to ensure they are consistent. This is because a downcast is
         // semantically a NOP, and thus should not affect layout.
-        if matches!(abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
+        if matches!(abi, BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..)) {
             for variant in &mut layout_variants {
                 // We only do this for variants with fields; the others are not accessed anyway.
                 // Also do not overwrite any already existing "clever" ABIs.
-                if variant.fields.count() > 0 && matches!(variant.abi, Abi::Aggregate { .. }) {
-                    variant.abi = abi;
+                if variant.fields.count() > 0
+                    && matches!(variant.backend_repr, BackendRepr::Memory { .. })
+                {
+                    variant.backend_repr = abi;
                     // Also need to bump up the size and alignment, so that the entire value fits
                     // in here.
                     variant.size = cmp::max(variant.size, size);

@@ -970,7 +972,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                 memory_index: [0].into(),
             },
             largest_niche,
-            abi,
+            backend_repr: abi,
             align,
             size,
             max_repr_align,

@@ -1252,7 +1254,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
         }
         let mut layout_of_single_non_zst_field = None;
         let sized = unsized_field.is_none();
-        let mut abi = Abi::Aggregate { sized };
+        let mut abi = BackendRepr::Memory { sized };

         let optimize_abi = !repr.inhibit_newtype_abi_optimization();

@@ -1270,16 +1272,16 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                 // Field fills the struct and it has a scalar or scalar pair ABI.
                 if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
                 {
-                    match field.abi {
+                    match field.backend_repr {
                         // For plain scalars, or vectors of them, we can't unpack
                         // newtypes for `#[repr(C)]`, as that affects C ABIs.
-                        Abi::Scalar(_) | Abi::Vector { .. } if optimize_abi => {
-                            abi = field.abi;
+                        BackendRepr::Scalar(_) | BackendRepr::Vector { .. } if optimize_abi => {
+                            abi = field.backend_repr;
                         }
                         // But scalar pairs are Rust-specific and get
                         // treated as aggregates by C ABIs anyway.
-                        Abi::ScalarPair(..) => {
-                            abi = field.abi;
+                        BackendRepr::ScalarPair(..) => {
+                            abi = field.backend_repr;
                         }
                         _ => {}
                     }

@@ -1288,8 +1290,8 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {

             // Two non-ZST fields, and they're both scalars.
             (Some((i, a)), Some((j, b)), None) => {
-                match (a.abi, b.abi) {
-                    (Abi::Scalar(a), Abi::Scalar(b)) => {
+                match (a.backend_repr, b.backend_repr) {
+                    (BackendRepr::Scalar(a), BackendRepr::Scalar(b)) => {
                         // Order by the memory placement, not source order.
                         let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
                             ((i, a), (j, b))

@@ -1315,7 +1317,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                         {
                             // We can use `ScalarPair` only when it matches our
                             // already computed layout (including `#[repr(C)]`).
-                            abi = pair.abi;
+                            abi = pair.backend_repr;
                         }
                     }
                     _ => {}

@@ -1325,8 +1327,8 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                 _ => {}
             }
         }
-        if fields.iter().any(|f| f.abi.is_uninhabited()) {
-            abi = Abi::Uninhabited;
+        if fields.iter().any(|f| f.is_uninhabited()) {
+            abi = BackendRepr::Uninhabited;
         }

         let unadjusted_abi_align = if repr.transparent() {

@@ -1344,7 +1346,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
         Ok(LayoutData {
             variants: Variants::Single { index: VariantIdx::new(0) },
             fields: FieldsShape::Arbitrary { offsets, memory_index },
-            abi,
+            backend_repr: abi,
             largest_niche,
             align,
             size,
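
The union hunks above forward one common field repr only when every non-ZST field agrees (after `to_union()` strips validity ranges) and alignments are compatible; any mismatch falls back to plain memory. A minimal sketch of that fold, with a simplified stand-in repr type:

    // Simplified sketch of the "common repr" fold for union fields; the real
    // calculator also checks inherent alignment and repr(packed).
    #[derive(Clone, Copy, PartialEq)]
    enum Repr { Scalar(u8 /* primitive tag */), Memory }

    fn union_repr(field_reprs: &[Repr]) -> Repr {
        let mut common = None;
        for &r in field_reprs {
            match common {
                None => common = Some(r),
                Some(c) if c == r => {}
                Some(_) => return Repr::Memory, // any mismatch: opaque blob
            }
        }
        common.unwrap_or(Repr::Memory)
    }
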
@@ -83,8 +83,8 @@ impl<'a> Layout<'a> {
         &self.0.0.variants
     }

-    pub fn abi(self) -> Abi {
-        self.0.0.abi
+    pub fn backend_repr(self) -> BackendRepr {
+        self.0.0.backend_repr
     }

     pub fn largest_niche(self) -> Option<Niche> {

@@ -114,7 +114,7 @@ impl<'a> Layout<'a> {
     pub fn is_pointer_like(self, data_layout: &TargetDataLayout) -> bool {
         self.size() == data_layout.pointer_size
             && self.align().abi == data_layout.pointer_align.abi
-            && matches!(self.abi(), Abi::Scalar(Scalar::Initialized { .. }))
+            && matches!(self.backend_repr(), BackendRepr::Scalar(Scalar::Initialized { .. }))
     }
 }

@@ -196,9 +196,9 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
         Ty: TyAbiInterface<'a, C>,
         C: HasDataLayout,
     {
-        match self.abi {
-            Abi::Scalar(scalar) => matches!(scalar.primitive(), Float(F32 | F64)),
-            Abi::Aggregate { .. } => {
+        match self.backend_repr {
+            BackendRepr::Scalar(scalar) => matches!(scalar.primitive(), Float(F32 | F64)),
+            BackendRepr::Memory { .. } => {
                 if self.fields.count() == 1 && self.fields.offset(0).bytes() == 0 {
                     self.field(cx, 0).is_single_fp_element(cx)
                 } else {
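
`is_pointer_like` above checks shape only: pointer-sized, pointer-aligned, and an initialized scalar repr. As a hedged illustration (64-bit target assumed): `&u8` passes, but so does `u64`, since nothing in the check requires an actual pointer type, while `(u32, u32)` fails because its repr is a `ScalarPair`, not a `Scalar`. A shape-only sketch:

    // Shape-only sketch of the predicate (sizes/alignments in bytes, 8-byte
    // pointers assumed); not the real rustc_abi signature.
    fn is_pointer_like(size: u64, align: u64, is_initialized_scalar: bool) -> bool {
        size == 8 && align == 8 && is_initialized_scalar
    }
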
@@ -1344,11 +1344,19 @@ impl AddressSpace {
     pub const DATA: Self = AddressSpace(0);
 }

-/// Describes how values of the type are passed by target ABIs,
-/// in terms of categories of C types there are ABI rules for.
+/// The way we represent values to the backend
+///
+/// Previously this was conflated with the "ABI" a type is given, as in the platform-specific ABI.
+/// In reality, this implies little about that, but is mostly used to describe the syntactic form
+/// emitted for the backend, as most backends handle SSA values and blobs of memory differently.
+/// The psABI may need consideration in doing so, but this enum does not constitute a promise for
+/// how the value will be lowered to the calling convention, in itself.
+///
+/// Generally, a codegen backend will prefer to handle smaller values as a scalar or short vector,
+/// and larger values will usually prefer to be represented as memory.
 #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
 #[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
-pub enum Abi {
+pub enum BackendRepr {
     Uninhabited,
     Scalar(Scalar),
     ScalarPair(Scalar, Scalar),

@@ -1356,19 +1364,23 @@ pub enum Abi {
         element: Scalar,
         count: u64,
     },
-    Aggregate {
+    // FIXME: I sometimes use memory, sometimes use an IR aggregate!
+    Memory {
         /// If true, the size is exact, otherwise it's only a lower bound.
         sized: bool,
     },
 }

-impl Abi {
+impl BackendRepr {
     /// Returns `true` if the layout corresponds to an unsized type.
     #[inline]
     pub fn is_unsized(&self) -> bool {
         match *self {
-            Abi::Uninhabited | Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
-            Abi::Aggregate { sized } => !sized,
+            BackendRepr::Uninhabited
+            | BackendRepr::Scalar(_)
+            | BackendRepr::ScalarPair(..)
+            | BackendRepr::Vector { .. } => false,
+            BackendRepr::Memory { sized } => !sized,
         }
     }
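
For orientation, a hedged consumer-side sketch of the renamed enum — the five shapes a backend has to handle, with `Scalar` left abstract (assumes `use rustc_abi::BackendRepr;` on a nightly toolchain):

    fn describe(repr: &BackendRepr) -> &'static str {
        match repr {
            BackendRepr::Uninhabited => "no valid values; such code paths are dead",
            BackendRepr::Scalar(_) => "a single SSA value",
            BackendRepr::ScalarPair(..) => "two SSA values, e.g. a wide pointer",
            BackendRepr::Vector { .. } => "a SIMD vector value",
            BackendRepr::Memory { sized: true } => "an opaque fixed-size blob of memory",
            BackendRepr::Memory { sized: false } => "memory with only a lower size bound",
        }
    }
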
@@ -1381,7 +1393,7 @@ impl Abi {
     #[inline]
     pub fn is_signed(&self) -> bool {
         match self {
-            Abi::Scalar(scal) => match scal.primitive() {
+            BackendRepr::Scalar(scal) => match scal.primitive() {
                 Primitive::Int(_, signed) => signed,
                 _ => false,
             },

@@ -1392,61 +1404,67 @@ impl Abi {
     /// Returns `true` if this is an uninhabited type
     #[inline]
     pub fn is_uninhabited(&self) -> bool {
-        matches!(*self, Abi::Uninhabited)
+        matches!(*self, BackendRepr::Uninhabited)
     }

     /// Returns `true` if this is a scalar type
     #[inline]
     pub fn is_scalar(&self) -> bool {
-        matches!(*self, Abi::Scalar(_))
+        matches!(*self, BackendRepr::Scalar(_))
     }

     /// Returns `true` if this is a bool
     #[inline]
     pub fn is_bool(&self) -> bool {
-        matches!(*self, Abi::Scalar(s) if s.is_bool())
+        matches!(*self, BackendRepr::Scalar(s) if s.is_bool())
     }

     /// Returns the fixed alignment of this ABI, if any is mandated.
     pub fn inherent_align<C: HasDataLayout>(&self, cx: &C) -> Option<AbiAndPrefAlign> {
         Some(match *self {
-            Abi::Scalar(s) => s.align(cx),
-            Abi::ScalarPair(s1, s2) => s1.align(cx).max(s2.align(cx)),
-            Abi::Vector { element, count } => {
+            BackendRepr::Scalar(s) => s.align(cx),
+            BackendRepr::ScalarPair(s1, s2) => s1.align(cx).max(s2.align(cx)),
+            BackendRepr::Vector { element, count } => {
                 cx.data_layout().vector_align(element.size(cx) * count)
             }
-            Abi::Uninhabited | Abi::Aggregate { .. } => return None,
+            BackendRepr::Uninhabited | BackendRepr::Memory { .. } => return None,
         })
     }

     /// Returns the fixed size of this ABI, if any is mandated.
     pub fn inherent_size<C: HasDataLayout>(&self, cx: &C) -> Option<Size> {
         Some(match *self {
-            Abi::Scalar(s) => {
+            BackendRepr::Scalar(s) => {
                 // No padding in scalars.
                 s.size(cx)
             }
-            Abi::ScalarPair(s1, s2) => {
+            BackendRepr::ScalarPair(s1, s2) => {
                 // May have some padding between the pair.
                 let field2_offset = s1.size(cx).align_to(s2.align(cx).abi);
                 (field2_offset + s2.size(cx)).align_to(self.inherent_align(cx)?.abi)
             }
-            Abi::Vector { element, count } => {
+            BackendRepr::Vector { element, count } => {
                 // No padding in vectors, except possibly for trailing padding
                 // to make the size a multiple of align (e.g. for vectors of size 3).
                 (element.size(cx) * count).align_to(self.inherent_align(cx)?.abi)
             }
-            Abi::Uninhabited | Abi::Aggregate { .. } => return None,
+            BackendRepr::Uninhabited | BackendRepr::Memory { .. } => return None,
         })
     }

     /// Discard validity range information and allow undef.
     pub fn to_union(&self) -> Self {
         match *self {
-            Abi::Scalar(s) => Abi::Scalar(s.to_union()),
-            Abi::ScalarPair(s1, s2) => Abi::ScalarPair(s1.to_union(), s2.to_union()),
-            Abi::Vector { element, count } => Abi::Vector { element: element.to_union(), count },
-            Abi::Uninhabited | Abi::Aggregate { .. } => Abi::Aggregate { sized: true },
+            BackendRepr::Scalar(s) => BackendRepr::Scalar(s.to_union()),
+            BackendRepr::ScalarPair(s1, s2) => {
+                BackendRepr::ScalarPair(s1.to_union(), s2.to_union())
+            }
+            BackendRepr::Vector { element, count } => {
+                BackendRepr::Vector { element: element.to_union(), count }
+            }
+            BackendRepr::Uninhabited | BackendRepr::Memory { .. } => {
+                BackendRepr::Memory { sized: true }
+            }
         }
     }
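
A worked instance of the `ScalarPair` arithmetic in `inherent_size` above, with sizes and alignments as plain byte counts: for an (i8, i32)-shaped pair, `field2_offset` = 1 rounded up to 4 = 4, and 4 + 4 = 8 is already a multiple of the pair alignment max(1, 4) = 4, so the mandated size is 8. A self-contained sketch:

    // Byte-count sketch of the ScalarPair size rule; `Size`/`Align` are
    // replaced by plain integers.
    fn align_to(offset: u64, align: u64) -> u64 {
        offset.div_ceil(align) * align
    }

    /// Each scalar given as (size, align) in bytes.
    fn scalar_pair_size((size1, align1): (u64, u64), (size2, align2): (u64, u64)) -> u64 {
        let field2_offset = align_to(size1, align2); // padding between the pair
        align_to(field2_offset + size2, align1.max(align2)) // trailing padding
    }

    fn main() {
        assert_eq!(scalar_pair_size((1, 1), (4, 4)), 8); // (i8, i32)-shaped pair
    }
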
@@ -1454,12 +1472,12 @@ impl Abi {
         match (self, other) {
             // Scalar, Vector, ScalarPair have `Scalar` in them where we ignore validity ranges.
             // We do *not* ignore the sign since it matters for some ABIs (e.g. s390x).
-            (Abi::Scalar(l), Abi::Scalar(r)) => l.primitive() == r.primitive(),
+            (BackendRepr::Scalar(l), BackendRepr::Scalar(r)) => l.primitive() == r.primitive(),
             (
-                Abi::Vector { element: element_l, count: count_l },
-                Abi::Vector { element: element_r, count: count_r },
+                BackendRepr::Vector { element: element_l, count: count_l },
+                BackendRepr::Vector { element: element_r, count: count_r },
             ) => element_l.primitive() == element_r.primitive() && count_l == count_r,
-            (Abi::ScalarPair(l1, l2), Abi::ScalarPair(r1, r2)) => {
+            (BackendRepr::ScalarPair(l1, l2), BackendRepr::ScalarPair(r1, r2)) => {
                 l1.primitive() == r1.primitive() && l2.primitive() == r2.primitive()
             }
             // Everything else must be strictly identical.
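
Concretely, the rule above means validity ranges are ignored but primitives are not: a `u32` scalar and a `NonZeroU32`-style scalar (same `Int` primitive, different valid range) compare equal up to validity, while `u32` vs `i32` differ because signedness is part of the primitive. A hedged standalone sketch:

    // Standalone sketch; the real comparison calls `Scalar::primitive()`,
    // which carries signedness but not the valid range.
    #[derive(Clone, Copy, PartialEq)]
    struct Prim { bits: u16, signed: bool }

    #[derive(Clone, Copy)]
    struct Scalar { prim: Prim, valid_min: u128 } // validity collapsed to one field

    fn eq_up_to_validity(l: Scalar, r: Scalar) -> bool {
        l.prim == r.prim // `valid_min` deliberately ignored
    }
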
@@ -1616,14 +1634,14 @@ pub struct LayoutData<FieldIdx: Idx, VariantIdx: Idx> {
     /// must be taken into account.
     pub variants: Variants<FieldIdx, VariantIdx>,

-    /// The `abi` defines how this data is passed between functions, and it defines
-    /// value restrictions via `valid_range`.
+    /// The `backend_repr` defines how this data will be represented to the codegen backend,
+    /// and encodes value restrictions via `valid_range`.
     ///
     /// Note that this is entirely orthogonal to the recursive structure defined by
     /// `variants` and `fields`; for example, `ManuallyDrop<Result<isize, isize>>` has
-    /// `Abi::ScalarPair`! So, even with non-`Aggregate` `abi`, `fields` and `variants`
+    /// `IrForm::ScalarPair`! So, even with non-`Memory` `backend_repr`, `fields` and `variants`
     /// have to be taken into account to find all fields of this layout.
-    pub abi: Abi,
+    pub backend_repr: BackendRepr,

     /// The leaf scalar with the largest number of invalid values
     /// (i.e. outside of its `valid_range`), if it exists.

@@ -1646,15 +1664,15 @@ pub struct LayoutData<FieldIdx: Idx, VariantIdx: Idx> {
 impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
     /// Returns `true` if this is an aggregate type (including a ScalarPair!)
     pub fn is_aggregate(&self) -> bool {
-        match self.abi {
-            Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector { .. } => false,
-            Abi::ScalarPair(..) | Abi::Aggregate { .. } => true,
+        match self.backend_repr {
+            BackendRepr::Uninhabited | BackendRepr::Scalar(_) | BackendRepr::Vector { .. } => false,
+            BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => true,
         }
     }

     /// Returns `true` if this is an uninhabited type
     pub fn is_uninhabited(&self) -> bool {
-        self.abi.is_uninhabited()
+        self.backend_repr.is_uninhabited()
     }

     pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self {

@@ -1664,7 +1682,7 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
         LayoutData {
             variants: Variants::Single { index: VariantIdx::new(0) },
             fields: FieldsShape::Primitive,
-            abi: Abi::Scalar(scalar),
+            backend_repr: BackendRepr::Scalar(scalar),
             largest_niche,
             size,
             align,

@@ -1686,7 +1704,7 @@ where
         let LayoutData {
             size,
             align,
-            abi,
+            backend_repr,
             fields,
             largest_niche,
             variants,

@@ -1696,7 +1714,7 @@ where
         f.debug_struct("Layout")
             .field("size", size)
             .field("align", align)
-            .field("abi", abi)
+            .field("abi", backend_repr)
             .field("fields", fields)
             .field("largest_niche", largest_niche)
             .field("variants", variants)

@@ -1732,12 +1750,12 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
     /// Returns `true` if the layout corresponds to an unsized type.
     #[inline]
     pub fn is_unsized(&self) -> bool {
-        self.abi.is_unsized()
+        self.backend_repr.is_unsized()
     }

     #[inline]
     pub fn is_sized(&self) -> bool {
-        self.abi.is_sized()
+        self.backend_repr.is_sized()
     }

     /// Returns `true` if the type is sized and a 1-ZST (meaning it has size 0 and alignment 1).

@@ -1750,10 +1768,12 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
     /// Note that this does *not* imply that the type is irrelevant for layout! It can still have
     /// non-trivial alignment constraints. You probably want to use `is_1zst` instead.
     pub fn is_zst(&self) -> bool {
-        match self.abi {
-            Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
-            Abi::Uninhabited => self.size.bytes() == 0,
-            Abi::Aggregate { sized } => sized && self.size.bytes() == 0,
+        match self.backend_repr {
+            BackendRepr::Scalar(_) | BackendRepr::ScalarPair(..) | BackendRepr::Vector { .. } => {
+                false
+            }
+            BackendRepr::Uninhabited => self.size.bytes() == 0,
+            BackendRepr::Memory { sized } => sized && self.size.bytes() == 0,
         }
     }

@@ -1768,8 +1788,8 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
         // 2nd point is quite hard to check though.
         self.size == other.size
             && self.is_sized() == other.is_sized()
-            && self.abi.eq_up_to_validity(&other.abi)
-            && self.abi.is_bool() == other.abi.is_bool()
+            && self.backend_repr.eq_up_to_validity(&other.backend_repr)
+            && self.backend_repr.is_bool() == other.backend_repr.is_bool()
             && self.align.abi == other.align.abi
             && self.max_repr_align == other.max_repr_align
             && self.unadjusted_abi_align == other.unadjusted_abi_align
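
The 1-ZST caveat in the `is_zst` doc above is easy to see with concrete types: `()` has size 0 and align 1 (a 1-ZST), while `[u16; 0]` has size 0 but align 2, so it is a ZST and not a 1-ZST. A sketch of the two predicates over plain byte counts:

    // Sketch over (size, align) byte counts; the real methods also consult
    // the repr, as the hunk above shows.
    fn is_zst(size: u64) -> bool { size == 0 }
    fn is_1zst(size: u64, align: u64) -> bool { size == 0 && align == 1 }

    fn main() {
        assert!(is_zst(0) && is_1zst(0, 1));  // like `()`
        assert!(is_zst(0) && !is_1zst(0, 2)); // like `[u16; 0]`, which has align 2
    }
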
@@ -45,16 +45,14 @@ use rustc_ast::ptr::P;
 use rustc_ast::{self as ast, *};
 use rustc_data_structures::captures::Captures;
 use rustc_data_structures::fingerprint::Fingerprint;
-use rustc_data_structures::fx::FxIndexSet;
 use rustc_data_structures::sorted_map::SortedMap;
 use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
 use rustc_data_structures::sync::Lrc;
 use rustc_errors::{DiagArgFromDisplay, DiagCtxtHandle, StashKey};
 use rustc_hir::def::{DefKind, LifetimeRes, Namespace, PartialRes, PerNS, Res};
-use rustc_hir::def_id::{CRATE_DEF_ID, LOCAL_CRATE, LocalDefId, LocalDefIdMap};
+use rustc_hir::def_id::{CRATE_DEF_ID, LOCAL_CRATE, LocalDefId};
 use rustc_hir::{
-    self as hir, ConstArg, GenericArg, HirId, ItemLocalMap, LangItem, MissingLifetimeKind,
-    ParamName, TraitCandidate,
+    self as hir, ConstArg, GenericArg, HirId, ItemLocalMap, LangItem, ParamName, TraitCandidate,
 };
 use rustc_index::{Idx, IndexSlice, IndexVec};
 use rustc_macros::extension;

@@ -83,7 +81,6 @@ mod expr;
 mod format;
 mod index;
 mod item;
-mod lifetime_collector;
 mod pat;
 mod path;

@@ -149,12 +146,6 @@ struct LoweringContext<'a, 'hir> {
     allow_async_iterator: Lrc<[Symbol]>,
     allow_for_await: Lrc<[Symbol]>,
     allow_async_fn_traits: Lrc<[Symbol]>,
-
-    /// Mapping from generics `def_id`s to TAIT generics `def_id`s.
-    /// For each captured lifetime (e.g., 'a), we create a new lifetime parameter that is a generic
-    /// defined on the TAIT, so we have type Foo<'a1> = ... and we establish a mapping in this
-    /// field from the original parameter 'a to the new parameter 'a1.
-    generics_def_id_map: Vec<LocalDefIdMap<LocalDefId>>,
 }

 impl<'a, 'hir> LoweringContext<'a, 'hir> {

@@ -199,7 +190,6 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
             // FIXME(gen_blocks): how does `closure_track_caller`/`async_fn_track_caller`
             // interact with `gen`/`async gen` blocks
             allow_async_iterator: [sym::gen_future, sym::async_iterator].into(),
-            generics_def_id_map: Default::default(),
         }
     }

@@ -528,54 +518,14 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {

     /// Given the id of some node in the AST, finds the `LocalDefId` associated with it by the name
     /// resolver (if any).
-    fn orig_opt_local_def_id(&self, node: NodeId) -> Option<LocalDefId> {
-        self.resolver.node_id_to_def_id.get(&node).copied()
-    }
-
-    /// Given the id of some node in the AST, finds the `LocalDefId` associated with it by the name
-    /// resolver (if any), after applying any remapping from `get_remapped_def_id`.
-    ///
-    /// For example, in a function like `fn foo<'a>(x: &'a u32)`,
-    /// invoking with the id from the `ast::Lifetime` node found inside
-    /// the `&'a u32` type would return the `LocalDefId` of the
-    /// `'a` parameter declared on `foo`.
-    ///
-    /// This function also applies remapping from `get_remapped_def_id`.
-    /// These are used when synthesizing opaque types from `-> impl Trait` return types and so forth.
-    /// For example, in a function like `fn foo<'a>() -> impl Debug + 'a`,
-    /// we would create an opaque type `type FooReturn<'a1> = impl Debug + 'a1`.
-    /// When lowering the `Debug + 'a` bounds, we add a remapping to map `'a` to `'a1`.
     fn opt_local_def_id(&self, node: NodeId) -> Option<LocalDefId> {
-        self.orig_opt_local_def_id(node).map(|local_def_id| self.get_remapped_def_id(local_def_id))
+        self.resolver.node_id_to_def_id.get(&node).copied()
     }

     fn local_def_id(&self, node: NodeId) -> LocalDefId {
         self.opt_local_def_id(node).unwrap_or_else(|| panic!("no entry for node id: `{node:?}`"))
     }

-    /// Get the previously recorded `to` local def id given the `from` local def id, obtained using
-    /// `generics_def_id_map` field.
-    fn get_remapped_def_id(&self, local_def_id: LocalDefId) -> LocalDefId {
-        // `generics_def_id_map` is a stack of mappings. As we go deeper in impl traits nesting we
-        // push new mappings, so we first need to get the latest (innermost) mappings, hence `iter().rev()`.
-        //
-        // Consider:
-        //
-        // `fn test<'a, 'b>() -> impl Trait<&'a u8, Ty = impl Sized + 'b> {}`
-        //
-        // We would end with a generics_def_id_map like:
-        //
-        // `[[fn#'b -> impl_trait#'b], [fn#'b -> impl_sized#'b]]`
-        //
-        // for the opaque type generated on `impl Sized + 'b`, we want the result to be: impl_sized#'b.
-        // So, if we were trying to find first from the start (outermost) would give the wrong result, impl_trait#'b.
-        self.generics_def_id_map
-            .iter()
-            .rev()
-            .find_map(|map| map.get(&local_def_id).copied())
-            .unwrap_or(local_def_id)
-    }
-
     /// Freshen the `LoweringContext` and ready it to lower a nested item.
     /// The lowered item is registered into `self.children`.
     ///
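
The removed `get_remapped_def_id` resolves through its stack innermost-first (`iter().rev()`), exactly as the `fn test<'a, 'b>` example in its comment requires. A self-contained sketch of that lookup order, with `u32` standing in for `LocalDefId`:

    use std::collections::HashMap;

    // Innermost-first lookup over a stack of remappings, as in the removed code.
    fn remapped(stack: &[HashMap<u32, u32>], id: u32) -> u32 {
        stack.iter().rev().find_map(|map| map.get(&id).copied()).unwrap_or(id)
    }

    fn main() {
        // Both maps remap the same `fn#'b` (1); the innermost entry must win.
        let outer = HashMap::from([(1, 10)]); // fn#'b -> impl_trait#'b
        let inner = HashMap::from([(1, 20)]); // fn#'b -> impl_sized#'b
        assert_eq!(remapped(&[outer, inner], 1), 20);
    }
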
@@ -647,27 +597,6 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
         result
     }

-    /// Installs the remapping `remap` in scope while `f` is being executed.
-    /// This causes references to the `LocalDefId` keys to be changed to
-    /// refer to the values instead.
-    ///
-    /// The remapping is used when one piece of AST expands to multiple
-    /// pieces of HIR. For example, the function `fn foo<'a>(...) -> impl Debug + 'a`,
-    /// expands to both a function definition (`foo`) and a TAIT for the return value,
-    /// both of which have a lifetime parameter `'a`. The remapping allows us to
-    /// rewrite the `'a` in the return value to refer to the
-    /// `'a` declared on the TAIT, instead of the function.
-    fn with_remapping<R>(
-        &mut self,
-        remap: LocalDefIdMap<LocalDefId>,
-        f: impl FnOnce(&mut Self) -> R,
-    ) -> R {
-        self.generics_def_id_map.push(remap);
-        let res = f(self);
-        self.generics_def_id_map.pop();
-        res
-    }
-
     fn make_owner_info(&mut self, node: hir::OwnerNode<'hir>) -> &'hir hir::OwnerInfo<'hir> {
         let attrs = std::mem::take(&mut self.attrs);
         let mut bodies = std::mem::take(&mut self.bodies);

@@ -1499,27 +1428,6 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
         // frequently opened issues show.
         let opaque_ty_span = self.mark_span_with_reason(DesugaringKind::OpaqueTy, span, None);

-        // Whether this opaque always captures lifetimes in scope.
-        // Right now, this is all RPITIT and TAITs, and when `lifetime_capture_rules_2024`
-        // is enabled. We don't check the span of the edition, since this is done
-        // on a per-opaque basis to account for nested opaques.
-        let always_capture_in_scope = match origin {
-            _ if self.tcx.features().lifetime_capture_rules_2024() => true,
-            hir::OpaqueTyOrigin::TyAlias { .. } => true,
-            hir::OpaqueTyOrigin::FnReturn { in_trait_or_impl, .. } => in_trait_or_impl.is_some(),
-            hir::OpaqueTyOrigin::AsyncFn { .. } => {
-                unreachable!("should be using `lower_coroutine_fn_ret_ty`")
-            }
-        };
-        let captured_lifetimes_to_duplicate = lifetime_collector::lifetimes_for_opaque(
-            self.resolver,
-            always_capture_in_scope,
-            opaque_ty_node_id,
-            bounds,
-            span,
-        );
-        debug!(?captured_lifetimes_to_duplicate);
-
         // Feature gate for RPITIT + use<..>
         match origin {
             rustc_hir::OpaqueTyOrigin::FnReturn { in_trait_or_impl: Some(_), .. } => {

@@ -1542,22 +1450,15 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
             _ => {}
         }

-        self.lower_opaque_inner(
-            opaque_ty_node_id,
-            origin,
-            captured_lifetimes_to_duplicate,
-            span,
-            opaque_ty_span,
-            |this| this.lower_param_bounds(bounds, itctx),
-        )
+        self.lower_opaque_inner(opaque_ty_node_id, origin, opaque_ty_span, |this| {
+            this.lower_param_bounds(bounds, itctx)
+        })
     }

     fn lower_opaque_inner(
         &mut self,
         opaque_ty_node_id: NodeId,
         origin: hir::OpaqueTyOrigin,
-        captured_lifetimes_to_duplicate: FxIndexSet<Lifetime>,
-        span: Span,
         opaque_ty_span: Span,
         lower_item_bounds: impl FnOnce(&mut Self) -> &'hir [hir::GenericBound<'hir>],
     ) -> hir::TyKind<'hir> {

@@ -1565,145 +1466,19 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
         let opaque_ty_hir_id = self.lower_node_id(opaque_ty_node_id);
         debug!(?opaque_ty_def_id, ?opaque_ty_hir_id);

-        // Map from captured (old) lifetime to synthetic (new) lifetime.
-        // Used to resolve lifetimes in the bounds of the opaque.
-        let mut captured_to_synthesized_mapping = LocalDefIdMap::default();
-        // List of (early-bound) synthetic lifetimes that are owned by the opaque.
-        // This is used to create the `hir::Generics` owned by the opaque.
-        let mut synthesized_lifetime_definitions = vec![];
-        // Pairs of lifetime arg (that resolves to the captured lifetime)
-        // and the def-id of the (early-bound) synthetic lifetime definition.
-        // This is used both to create generics for the `TyKind::OpaqueDef` that
-        // we return, and also as a captured lifetime mapping for RPITITs.
-        let mut synthesized_lifetime_args = vec![];
-
-        for lifetime in captured_lifetimes_to_duplicate {
-            let res = self.resolver.get_lifetime_res(lifetime.id).unwrap_or(LifetimeRes::Error);
-            let (old_def_id, missing_kind) = match res {
-                LifetimeRes::Param { param: old_def_id, binder: _ } => (old_def_id, None),
-
-                LifetimeRes::Fresh { param, kind, .. } => {
-                    debug_assert_eq!(lifetime.ident.name, kw::UnderscoreLifetime);
-                    if let Some(old_def_id) = self.orig_opt_local_def_id(param) {
-                        (old_def_id, Some(kind))
-                    } else {
-                        self.dcx()
-                            .span_delayed_bug(lifetime.ident.span, "no def-id for fresh lifetime");
-                        continue;
-                    }
-                }
-
-                // Opaques do not capture `'static`
-                LifetimeRes::Static { .. } | LifetimeRes::Error => {
-                    continue;
-                }
-
-                res => {
-                    let bug_msg = format!(
-                        "Unexpected lifetime resolution {:?} for {:?} at {:?}",
-                        res, lifetime.ident, lifetime.ident.span
-                    );
-                    span_bug!(lifetime.ident.span, "{}", bug_msg);
-                }
-            };
-
-            if captured_to_synthesized_mapping.get(&old_def_id).is_none() {
-                // Create a new lifetime parameter local to the opaque.
-                let duplicated_lifetime_node_id = self.next_node_id();
-                let duplicated_lifetime_def_id = self.create_def(
-                    opaque_ty_def_id,
-                    duplicated_lifetime_node_id,
-                    lifetime.ident.name,
-                    DefKind::LifetimeParam,
-                    self.lower_span(lifetime.ident.span),
-                );
-                captured_to_synthesized_mapping.insert(old_def_id, duplicated_lifetime_def_id);
-                // FIXME: Instead of doing this, we could move this whole loop
-                // into the `with_hir_id_owner`, then just directly construct
-                // the `hir::GenericParam` here.
-                synthesized_lifetime_definitions.push((
-                    duplicated_lifetime_node_id,
-                    duplicated_lifetime_def_id,
-                    self.lower_ident(lifetime.ident),
-                    missing_kind,
-                ));
-
-                // Now make an arg that we can use for the generic params of the opaque tykind.
-                let id = self.next_node_id();
-                let lifetime_arg = self.new_named_lifetime_with_res(id, lifetime.ident, res);
-                let duplicated_lifetime_def_id = self.local_def_id(duplicated_lifetime_node_id);
-                synthesized_lifetime_args.push((lifetime_arg, duplicated_lifetime_def_id))
-            }
-        }
-
         let opaque_ty_def = self.with_def_id_parent(opaque_ty_def_id, |this| {
-            // Install the remapping from old to new (if any). This makes sure that
-            // any lifetimes that would have resolved to the def-id of captured
-            // lifetimes are remapped to the new *synthetic* lifetimes of the opaque.
-            let bounds = this
-                .with_remapping(captured_to_synthesized_mapping, |this| lower_item_bounds(this));
-
-            let generic_params =
-                this.arena.alloc_from_iter(synthesized_lifetime_definitions.iter().map(
-                    |&(new_node_id, new_def_id, ident, missing_kind)| {
-                        let hir_id = this.lower_node_id(new_node_id);
-                        let (name, kind) = if ident.name == kw::UnderscoreLifetime {
-                            (
-                                hir::ParamName::Fresh,
-                                hir::LifetimeParamKind::Elided(
-                                    missing_kind.unwrap_or(MissingLifetimeKind::Underscore),
-                                ),
-                            )
-                        } else {
-                            (hir::ParamName::Plain(ident), hir::LifetimeParamKind::Explicit)
-                        };
-
-                        hir::GenericParam {
-                            hir_id,
-                            def_id: new_def_id,
-                            name,
-                            span: ident.span,
-                            pure_wrt_drop: false,
-                            kind: hir::GenericParamKind::Lifetime { kind },
-                            colon_span: None,
-                            source: hir::GenericParamSource::Generics,
-                        }
-                    },
-                ));
-            debug!("lower_async_fn_ret_ty: generic_params={:#?}", generic_params);
-
-            let lifetime_mapping = self.arena.alloc_slice(&synthesized_lifetime_args);
-
             trace!("registering opaque type with id {:#?}", opaque_ty_def_id);
+            let bounds = lower_item_bounds(this);
             let opaque_ty_def = hir::OpaqueTy {
                 hir_id: opaque_ty_hir_id,
                 def_id: opaque_ty_def_id,
-                generics: this.arena.alloc(hir::Generics {
-                    params: generic_params,
-                    predicates: &[],
-                    has_where_clause_predicates: false,
-                    where_clause_span: this.lower_span(span),
-                    span: this.lower_span(span),
-                }),
                 bounds,
                 origin,
-                lifetime_mapping,
                 span: this.lower_span(opaque_ty_span),
             };
             this.arena.alloc(opaque_ty_def)
         });

-        let generic_args = self.arena.alloc_from_iter(
-            synthesized_lifetime_args
-                .iter()
-                .map(|(lifetime, _)| hir::GenericArg::Lifetime(*lifetime)),
-        );
-
-        // Create the `Foo<...>` reference itself. Note that the `type
-        // Foo = impl Trait` is, internally, created as a child of the
-        // async fn, so the *type parameters* are inherited. It's
-        // only the lifetime parameters that we must supply.
-        hir::TyKind::OpaqueDef(opaque_ty_def, generic_args)
+        hir::TyKind::OpaqueDef(opaque_ty_def)
     }

     fn lower_precise_capturing_args(

@@ -1885,13 +1660,6 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
         let opaque_ty_span =
             self.mark_span_with_reason(DesugaringKind::Async, span, allowed_features);

-        let captured_lifetimes = self
-            .resolver
-            .extra_lifetime_params(opaque_ty_node_id)
-            .into_iter()
-            .map(|(ident, id, _)| Lifetime { id, ident })
-            .collect();
-
         let in_trait_or_impl = match fn_kind {
             FnDeclKind::Trait => Some(hir::RpitContext::Trait),
             FnDeclKind::Impl => Some(hir::RpitContext::TraitImpl),

@@ -1902,8 +1670,6 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
         let opaque_ty_ref = self.lower_opaque_inner(
             opaque_ty_node_id,
             hir::OpaqueTyOrigin::AsyncFn { parent: fn_def_id, in_trait_or_impl },
-            captured_lifetimes,
-            span,
             opaque_ty_span,
             |this| {
                 let bound = this.lower_coroutine_fn_output_type_to_bound(

@@ -2000,10 +1766,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
         res: LifetimeRes,
     ) -> &'hir hir::Lifetime {
         let res = match res {
-            LifetimeRes::Param { param, .. } => {
-                let param = self.get_remapped_def_id(param);
-                hir::LifetimeName::Param(param)
-            }
+            LifetimeRes::Param { param, .. } => hir::LifetimeName::Param(param),
             LifetimeRes::Fresh { param, .. } => {
                 let param = self.local_def_id(param);
                 hir::LifetimeName::Param(param)
@@ -1,151 +0,0 @@
-use rustc_ast::visit::{self, BoundKind, LifetimeCtxt, Visitor};
-use rustc_ast::{
-    GenericBound, GenericBounds, Lifetime, NodeId, PathSegment, PolyTraitRef, Ty, TyKind,
-};
-use rustc_data_structures::fx::FxIndexSet;
-use rustc_hir::def::{DefKind, LifetimeRes, Res};
-use rustc_middle::span_bug;
-use rustc_middle::ty::ResolverAstLowering;
-use rustc_span::Span;
-use rustc_span::symbol::{Ident, kw};
-
-use super::ResolverAstLoweringExt;
-
-struct LifetimeCollectVisitor<'ast> {
-    resolver: &'ast mut ResolverAstLowering,
-    always_capture_in_scope: bool,
-    current_binders: Vec<NodeId>,
-    collected_lifetimes: FxIndexSet<Lifetime>,
-}
-
-impl<'ast> LifetimeCollectVisitor<'ast> {
-    fn new(resolver: &'ast mut ResolverAstLowering, always_capture_in_scope: bool) -> Self {
-        Self {
-            resolver,
-            always_capture_in_scope,
-            current_binders: Vec::new(),
-            collected_lifetimes: FxIndexSet::default(),
-        }
-    }
-
-    fn visit_opaque(&mut self, opaque_ty_node_id: NodeId, bounds: &'ast GenericBounds, span: Span) {
-        // If we're edition 2024 or within a TAIT or RPITIT, *and* there is no
-        // `use<>` statement to override the default capture behavior, then
-        // capture all of the in-scope lifetimes.
-        if (self.always_capture_in_scope || span.at_least_rust_2024())
-            && bounds.iter().all(|bound| !matches!(bound, GenericBound::Use(..)))
-        {
-            for (ident, id, _) in self.resolver.extra_lifetime_params(opaque_ty_node_id) {
-                self.record_lifetime_use(Lifetime { id, ident });
-            }
-        }
-
-        // We also recurse on the bounds to make sure we capture all the lifetimes
-        // mentioned in the bounds. These may disagree with the `use<>` list, in which
-        // case we will error on these later. We will also recurse to visit any
-        // nested opaques, which may *implicitly* capture lifetimes.
-        for bound in bounds {
-            self.visit_param_bound(bound, BoundKind::Bound);
-        }
-    }
-
-    fn record_lifetime_use(&mut self, lifetime: Lifetime) {
-        match self.resolver.get_lifetime_res(lifetime.id).unwrap_or(LifetimeRes::Error) {
-            LifetimeRes::Param { binder, .. } | LifetimeRes::Fresh { binder, .. } => {
-                if !self.current_binders.contains(&binder) {
-                    self.collected_lifetimes.insert(lifetime);
-                }
-            }
-            LifetimeRes::Static { .. } | LifetimeRes::Error => {
-                self.collected_lifetimes.insert(lifetime);
-            }
-            LifetimeRes::Infer => {}
-            res => {
-                let bug_msg = format!(
-                    "Unexpected lifetime resolution {:?} for {:?} at {:?}",
-                    res, lifetime.ident, lifetime.ident.span
-                );
-                span_bug!(lifetime.ident.span, "{}", bug_msg);
-            }
-        }
-    }
-
-    /// This collect lifetimes that are elided, for nodes like `Foo<T>` where there are no explicit
-    /// lifetime nodes. Is equivalent to having "pseudo" nodes introduced for each of the node ids
-    /// in the list start..end.
-    fn record_elided_anchor(&mut self, node_id: NodeId, span: Span) {
-        if let Some(LifetimeRes::ElidedAnchor { start, end }) =
-            self.resolver.get_lifetime_res(node_id)
-        {
-            for i in start..end {
-                let lifetime = Lifetime { id: i, ident: Ident::new(kw::UnderscoreLifetime, span) };
-                self.record_lifetime_use(lifetime);
-            }
-        }
-    }
-}
-
-impl<'ast> Visitor<'ast> for LifetimeCollectVisitor<'ast> {
-    fn visit_lifetime(&mut self, lifetime: &'ast Lifetime, _: LifetimeCtxt) {
-        self.record_lifetime_use(*lifetime);
-    }
-
-    fn visit_path_segment(&mut self, path_segment: &'ast PathSegment) {
-        self.record_elided_anchor(path_segment.id, path_segment.ident.span);
-        visit::walk_path_segment(self, path_segment);
-    }
-
-    fn visit_poly_trait_ref(&mut self, t: &'ast PolyTraitRef) {
-        self.current_binders.push(t.trait_ref.ref_id);
-
-        visit::walk_poly_trait_ref(self, t);
-
-        self.current_binders.pop();
-    }
-
-    fn visit_ty(&mut self, t: &'ast Ty) {
-        match &t.kind {
-            TyKind::Path(None, _) => {
-                // We can sometimes encounter bare trait objects
-                // which are represented in AST as paths.
-                if let Some(partial_res) = self.resolver.get_partial_res(t.id)
-                    && let Some(Res::Def(DefKind::Trait | DefKind::TraitAlias, _)) =
-                        partial_res.full_res()
-                {
-                    self.current_binders.push(t.id);
-                    visit::walk_ty(self, t);
-                    self.current_binders.pop();
-                } else {
-                    visit::walk_ty(self, t);
-                }
-            }
-            TyKind::BareFn(_) => {
-                self.current_binders.push(t.id);
-                visit::walk_ty(self, t);
-                self.current_binders.pop();
-            }
-            TyKind::Ref(None, _) | TyKind::PinnedRef(None, _) => {
-                self.record_elided_anchor(t.id, t.span);
-                visit::walk_ty(self, t);
-            }
-            TyKind::ImplTrait(opaque_ty_node_id, bounds) => {
-                self.visit_opaque(*opaque_ty_node_id, bounds, t.span)
-            }
-            _ => {
-                visit::walk_ty(self, t);
-            }
-        }
-    }
-}
-
-pub(crate) fn lifetimes_for_opaque(
-    resolver: &mut ResolverAstLowering,
-    always_capture_in_scope: bool,
-    opaque_ty_node_id: NodeId,
-    bounds: &GenericBounds,
-    span: Span,
-) -> FxIndexSet<Lifetime> {
-    let mut visitor = LifetimeCollectVisitor::new(resolver, always_capture_in_scope);
-    visitor.visit_opaque(opaque_ty_node_id, bounds, span);
-    visitor.collected_lifetimes
-}
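
The deleted visitor's `visit_opaque` encodes the user-visible capture rules: under Rust 2024 (or for TAITs/RPITITs) all in-scope lifetimes are captured unless a `use<..>` bound overrides it. A hedged surface-level illustration of that difference (exact behavior depends on edition and toolchain):

    // Rust 2021: `'a` is not implicitly captured by the opaque return type.
    fn len_2021<'a>(s: &'a str) -> impl Sized {
        s.len()
    }

    // Rust 2024 captures in-scope lifetimes by default; an explicit `use<>`
    // bound opts back out (precise capturing, stable since Rust 1.82):
    // fn len_2024<'a>(s: &'a str) -> impl Sized + use<> { s.len() }
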
@@ -830,7 +830,7 @@ impl<'tcx> MirBorrowckCtxt<'_, '_, 'tcx> {
     ///
     /// [`OpaqueDef`]: hir::TyKind::OpaqueDef
     fn get_future_inner_return_ty(&self, hir_ty: &'tcx hir::Ty<'tcx>) -> &'tcx hir::Ty<'tcx> {
-        let hir::TyKind::OpaqueDef(opaque_ty, _) = hir_ty.kind else {
+        let hir::TyKind::OpaqueDef(opaque_ty) = hir_ty.kind else {
             span_bug!(
                 hir_ty.span,
                 "lowered return type of async fn is not OpaqueDef: {:?}",
@@ -193,7 +193,7 @@ fn make_local_place<'tcx>(
         );
     }
     let place = if is_ssa {
-        if let rustc_target::abi::Abi::ScalarPair(_, _) = layout.abi {
+        if let BackendRepr::ScalarPair(_, _) = layout.backend_repr {
             CPlace::new_var_pair(fx, local, layout)
         } else {
             CPlace::new_var(fx, local, layout)
@@ -78,19 +78,19 @@ impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
     fn get_abi_param(&self, tcx: TyCtxt<'tcx>) -> SmallVec<[AbiParam; 2]> {
         match self.mode {
             PassMode::Ignore => smallvec![],
-            PassMode::Direct(attrs) => match self.layout.abi {
-                Abi::Scalar(scalar) => smallvec![apply_arg_attrs_to_abi_param(
+            PassMode::Direct(attrs) => match self.layout.backend_repr {
+                BackendRepr::Scalar(scalar) => smallvec![apply_arg_attrs_to_abi_param(
                     AbiParam::new(scalar_to_clif_type(tcx, scalar)),
                     attrs
                 )],
-                Abi::Vector { .. } => {
+                BackendRepr::Vector { .. } => {
                     let vector_ty = crate::intrinsics::clif_vector_type(tcx, self.layout);
                     smallvec![AbiParam::new(vector_ty)]
                 }
-                _ => unreachable!("{:?}", self.layout.abi),
+                _ => unreachable!("{:?}", self.layout.backend_repr),
             },
-            PassMode::Pair(attrs_a, attrs_b) => match self.layout.abi {
-                Abi::ScalarPair(a, b) => {
+            PassMode::Pair(attrs_a, attrs_b) => match self.layout.backend_repr {
+                BackendRepr::ScalarPair(a, b) => {
                     let a = scalar_to_clif_type(tcx, a);
                     let b = scalar_to_clif_type(tcx, b);
                     smallvec![

@@ -98,7 +98,7 @@ impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
                         apply_arg_attrs_to_abi_param(AbiParam::new(b), attrs_b),
                     ]
                 }
-                _ => unreachable!("{:?}", self.layout.abi),
+                _ => unreachable!("{:?}", self.layout.backend_repr),
             },
             PassMode::Cast { ref cast, pad_i32 } => {
                 assert!(!pad_i32, "padding support not yet implemented");

@@ -130,23 +130,23 @@ impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
     fn get_abi_return(&self, tcx: TyCtxt<'tcx>) -> (Option<AbiParam>, Vec<AbiParam>) {
         match self.mode {
             PassMode::Ignore => (None, vec![]),
-            PassMode::Direct(_) => match self.layout.abi {
-                Abi::Scalar(scalar) => {
+            PassMode::Direct(_) => match self.layout.backend_repr {
+                BackendRepr::Scalar(scalar) => {
                     (None, vec![AbiParam::new(scalar_to_clif_type(tcx, scalar))])
                 }
-                Abi::Vector { .. } => {
+                BackendRepr::Vector { .. } => {
                     let vector_ty = crate::intrinsics::clif_vector_type(tcx, self.layout);
                     (None, vec![AbiParam::new(vector_ty)])
                 }
-                _ => unreachable!("{:?}", self.layout.abi),
+                _ => unreachable!("{:?}", self.layout.backend_repr),
             },
-            PassMode::Pair(_, _) => match self.layout.abi {
-                Abi::ScalarPair(a, b) => {
+            PassMode::Pair(_, _) => match self.layout.backend_repr {
+                BackendRepr::ScalarPair(a, b) => {
                     let a = scalar_to_clif_type(tcx, a);
                     let b = scalar_to_clif_type(tcx, b);
                     (None, vec![AbiParam::new(a), AbiParam::new(b)])
                 }
-                _ => unreachable!("{:?}", self.layout.abi),
+                _ => unreachable!("{:?}", self.layout.backend_repr),
             },
             PassMode::Cast { ref cast, .. } => {
                 (None, cast_target_to_abi_params(cast).into_iter().collect())
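
The `unreachable!` arms in the hunks above rely on an invariant: the `PassMode` picked for an argument must agree with its `BackendRepr` (`Direct` pairs with `Scalar`/`Vector`, `Pair` with `ScalarPair`). A hedged sketch of that correspondence with simplified stand-in types:

    // Simplified stand-ins, not the rustc/cranelift types.
    enum Repr { Scalar, ScalarPair, Vector, Memory }
    enum PassMode { Ignore, Direct, Pair, Indirect }

    fn abi_param_count(mode: &PassMode, repr: &Repr) -> usize {
        match (mode, repr) {
            (PassMode::Ignore, _) => 0,
            (PassMode::Direct, Repr::Scalar | Repr::Vector) => 1,
            (PassMode::Pair, Repr::ScalarPair) => 2,
            (PassMode::Indirect, _) => 1, // passed behind a pointer
            _ => unreachable!("pass mode does not match repr"),
        }
    }
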
@@ -290,7 +290,7 @@ fn codegen_fn_body(fx: &mut FunctionCx<'_, '_, '_>, start_block: Block) {
     let arg_uninhabited = fx
         .mir
         .args_iter()
-        .any(|arg| fx.layout_of(fx.monomorphize(fx.mir.local_decls[arg].ty)).abi.is_uninhabited());
+        .any(|arg| fx.layout_of(fx.monomorphize(fx.mir.local_decls[arg].ty)).is_uninhabited());
     if arg_uninhabited {
         fx.bcx.append_block_params_for_function_params(fx.block_map[START_BLOCK]);
         fx.bcx.switch_to_block(fx.block_map[START_BLOCK]);

@@ -644,9 +644,9 @@ fn codegen_stmt<'tcx>(
                     _ => unreachable!("un op Neg for {:?}", layout.ty),
                 }
             }
-            UnOp::PtrMetadata => match layout.abi {
-                Abi::Scalar(_) => CValue::zst(dest_layout),
-                Abi::ScalarPair(_, _) => {
+            UnOp::PtrMetadata => match layout.backend_repr {
+                BackendRepr::Scalar(_) => CValue::zst(dest_layout),
+                BackendRepr::ScalarPair(_, _) => {
                     CValue::by_val(operand.load_scalar_pair(fx).1, dest_layout)
                 }
                 _ => bug!("Unexpected `PtrToMetadata` operand: {operand:?}"),
@@ -14,7 +14,7 @@ pub(crate) fn codegen_set_discriminant<'tcx>(
     variant_index: VariantIdx,
 ) {
     let layout = place.layout();
-    if layout.for_variant(fx, variant_index).abi.is_uninhabited() {
+    if layout.for_variant(fx, variant_index).is_uninhabited() {
         return;
     }
     match layout.variants {

@@ -80,7 +80,7 @@ pub(crate) fn codegen_get_discriminant<'tcx>(
 ) {
     let layout = value.layout();

-    if layout.abi.is_uninhabited() {
+    if layout.is_uninhabited() {
         return;
     }

@@ -51,8 +51,8 @@ fn report_atomic_type_validation_error<'tcx>(
 }

 pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Type {
-    let (element, count) = match layout.abi {
-        Abi::Vector { element, count } => (element, count),
+    let (element, count) = match layout.backend_repr {
+        BackendRepr::Vector { element, count } => (element, count),
         _ => unreachable!(),
     };

@@ -505,7 +505,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
             let layout = fx.layout_of(generic_args.type_at(0));
             // Note: Can't use is_unsized here as truly unsized types need to take the fixed size
             // branch
-            let meta = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
+            let meta = if let BackendRepr::ScalarPair(_, _) = ptr.layout().backend_repr {
                 Some(ptr.load_scalar_pair(fx).1)
             } else {
                 None

@@ -519,7 +519,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
             let layout = fx.layout_of(generic_args.type_at(0));
             // Note: Can't use is_unsized here as truly unsized types need to take the fixed size
             // branch
-            let meta = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
+            let meta = if let BackendRepr::ScalarPair(_, _) = ptr.layout().backend_repr {
                 Some(ptr.load_scalar_pair(fx).1)
             } else {
                 None

@@ -693,7 +693,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
             let layout = fx.layout_of(ty);
             let msg_str = with_no_visible_paths!({
                 with_no_trimmed_paths!({
-                    if layout.abi.is_uninhabited() {
+                    if layout.is_uninhabited() {
                         // Use this error even for the other intrinsics as it is more precise.
                         format!("attempted to instantiate uninhabited type `{}`", ty)
                     } else if intrinsic == sym::assert_zero_valid {
@@ -92,6 +92,7 @@ mod prelude {
         StackSlotData, StackSlotKind, TrapCode, Type, Value, types,
     };
     pub(crate) use cranelift_module::{self, DataDescription, FuncId, Linkage, Module};
+    pub(crate) use rustc_abi::{BackendRepr, FIRST_VARIANT, FieldIdx, Scalar, Size, VariantIdx};
     pub(crate) use rustc_data_structures::fx::{FxHashMap, FxIndexMap};
     pub(crate) use rustc_hir::def_id::{DefId, LOCAL_CRATE};
     pub(crate) use rustc_index::Idx;
@@ -101,7 +102,6 @@ mod prelude {
         self, FloatTy, Instance, InstanceKind, IntTy, ParamEnv, Ty, TyCtxt, UintTy,
     };
     pub(crate) use rustc_span::Span;
-    pub(crate) use rustc_target::abi::{Abi, FIRST_VARIANT, FieldIdx, Scalar, Size, VariantIdx};
 
     pub(crate) use crate::abi::*;
     pub(crate) use crate::base::{codegen_operand, codegen_place};
@@ -131,8 +131,8 @@ impl<'tcx> CValue<'tcx> {
 
         match self.0 {
             CValueInner::ByRef(ptr, None) => {
-                let (a_scalar, b_scalar) = match self.1.abi {
-                    Abi::ScalarPair(a, b) => (a, b),
+                let (a_scalar, b_scalar) = match self.1.backend_repr {
+                    BackendRepr::ScalarPair(a, b) => (a, b),
                     _ => unreachable!("dyn_star_force_data_on_stack({:?})", self),
                 };
                 let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
@@ -164,15 +164,15 @@ impl<'tcx> CValue<'tcx> {
         }
     }
 
-    /// Load a value with layout.abi of scalar
+    /// Load a value with layout.backend_repr of scalar
     #[track_caller]
     pub(crate) fn load_scalar(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> Value {
         let layout = self.1;
         match self.0 {
             CValueInner::ByRef(ptr, None) => {
-                let clif_ty = match layout.abi {
-                    Abi::Scalar(scalar) => scalar_to_clif_type(fx.tcx, scalar),
-                    Abi::Vector { element, count } => scalar_to_clif_type(fx.tcx, element)
+                let clif_ty = match layout.backend_repr {
+                    BackendRepr::Scalar(scalar) => scalar_to_clif_type(fx.tcx, scalar),
+                    BackendRepr::Vector { element, count } => scalar_to_clif_type(fx.tcx, element)
                         .by(u32::try_from(count).unwrap())
                         .unwrap(),
                     _ => unreachable!("{:?}", layout.ty),
@@ -187,14 +187,14 @@ impl<'tcx> CValue<'tcx> {
         }
     }
 
-    /// Load a value pair with layout.abi of scalar pair
+    /// Load a value pair with layout.backend_repr of scalar pair
     #[track_caller]
     pub(crate) fn load_scalar_pair(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> (Value, Value) {
         let layout = self.1;
         match self.0 {
             CValueInner::ByRef(ptr, None) => {
-                let (a_scalar, b_scalar) = match layout.abi {
-                    Abi::ScalarPair(a, b) => (a, b),
+                let (a_scalar, b_scalar) = match layout.backend_repr {
+                    BackendRepr::ScalarPair(a, b) => (a, b),
                     _ => unreachable!("load_scalar_pair({:?})", self),
                 };
                 let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
@@ -222,8 +222,8 @@ impl<'tcx> CValue<'tcx> {
         let layout = self.1;
         match self.0 {
             CValueInner::ByVal(_) => unreachable!(),
-            CValueInner::ByValPair(val1, val2) => match layout.abi {
-                Abi::ScalarPair(_, _) => {
+            CValueInner::ByValPair(val1, val2) => match layout.backend_repr {
+                BackendRepr::ScalarPair(_, _) => {
                     let val = match field.as_u32() {
                         0 => val1,
                         1 => val2,
@@ -232,7 +232,7 @@ impl<'tcx> CValue<'tcx> {
                     let field_layout = layout.field(&*fx, usize::from(field));
                     CValue::by_val(val, field_layout)
                 }
-                _ => unreachable!("value_field for ByValPair with abi {:?}", layout.abi),
+                _ => unreachable!("value_field for ByValPair with abi {:?}", layout.backend_repr),
             },
             CValueInner::ByRef(ptr, None) => {
                 let (field_ptr, field_layout) = codegen_field(fx, ptr, None, layout, field);
@@ -360,7 +360,7 @@ impl<'tcx> CValue<'tcx> {
     pub(crate) fn cast_pointer_to(self, layout: TyAndLayout<'tcx>) -> Self {
         assert!(matches!(self.layout().ty.kind(), ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)));
         assert!(matches!(layout.ty.kind(), ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)));
-        assert_eq!(self.layout().abi, layout.abi);
+        assert_eq!(self.layout().backend_repr, layout.backend_repr);
         CValue(self.0, layout)
     }
 }
@@ -609,8 +609,8 @@ impl<'tcx> CPlace<'tcx> {
         let dst_layout = self.layout();
         match self.inner {
             CPlaceInner::Var(_local, var) => {
-                let data = match from.1.abi {
-                    Abi::Scalar(_) => CValue(from.0, dst_layout).load_scalar(fx),
+                let data = match from.1.backend_repr {
+                    BackendRepr::Scalar(_) => CValue(from.0, dst_layout).load_scalar(fx),
                     _ => {
                         let (ptr, meta) = from.force_stack(fx);
                         assert!(meta.is_none());
@@ -621,8 +621,10 @@ impl<'tcx> CPlace<'tcx> {
                 transmute_scalar(fx, var, data, dst_ty);
             }
             CPlaceInner::VarPair(_local, var1, var2) => {
-                let (data1, data2) = match from.1.abi {
-                    Abi::ScalarPair(_, _) => CValue(from.0, dst_layout).load_scalar_pair(fx),
+                let (data1, data2) = match from.1.backend_repr {
+                    BackendRepr::ScalarPair(_, _) => {
+                        CValue(from.0, dst_layout).load_scalar_pair(fx)
+                    }
                     _ => {
                         let (ptr, meta) = from.force_stack(fx);
                         assert!(meta.is_none());
@@ -635,7 +637,9 @@ impl<'tcx> CPlace<'tcx> {
             }
             CPlaceInner::Addr(_, Some(_)) => bug!("Can't write value to unsized place {:?}", self),
             CPlaceInner::Addr(to_ptr, None) => {
-                if dst_layout.size == Size::ZERO || dst_layout.abi == Abi::Uninhabited {
+                if dst_layout.size == Size::ZERO
+                    || dst_layout.backend_repr == BackendRepr::Uninhabited
+                {
                     return;
                 }
 
@@ -646,23 +650,28 @@ impl<'tcx> CPlace<'tcx> {
                     CValueInner::ByVal(val) => {
                         to_ptr.store(fx, val, flags);
                     }
-                    CValueInner::ByValPair(val1, val2) => match from.layout().abi {
-                        Abi::ScalarPair(a_scalar, b_scalar) => {
+                    CValueInner::ByValPair(val1, val2) => match from.layout().backend_repr {
+                        BackendRepr::ScalarPair(a_scalar, b_scalar) => {
                             let b_offset =
                                 scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
                             to_ptr.store(fx, val1, flags);
                             to_ptr.offset(fx, b_offset).store(fx, val2, flags);
                         }
-                        _ => bug!("Non ScalarPair abi {:?} for ByValPair CValue", dst_layout.abi),
+                        _ => {
+                            bug!(
+                                "Non ScalarPair repr {:?} for ByValPair CValue",
+                                dst_layout.backend_repr
+                            )
+                        }
                     },
                     CValueInner::ByRef(from_ptr, None) => {
-                        match from.layout().abi {
-                            Abi::Scalar(_) => {
+                        match from.layout().backend_repr {
+                            BackendRepr::Scalar(_) => {
                                 let val = from.load_scalar(fx);
                                 to_ptr.store(fx, val, flags);
                                 return;
                             }
-                            Abi::ScalarPair(a_scalar, b_scalar) => {
+                            BackendRepr::ScalarPair(a_scalar, b_scalar) => {
                                 let b_offset =
                                     scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
                                 let (val1, val2) = from.load_scalar_pair(fx);
@@ -47,7 +47,7 @@ pub(crate) fn get_ptr_and_method_ref<'tcx>(
     idx: usize,
 ) -> (Pointer, Value) {
     let (ptr, vtable) = 'block: {
-        if let Abi::Scalar(_) = arg.layout().abi {
+        if let BackendRepr::Scalar(_) = arg.layout().backend_repr {
             while !arg.layout().ty.is_unsafe_ptr() && !arg.layout().ty.is_ref() {
                 let (idx, _) = arg
                     .layout()
@@ -68,7 +68,7 @@ pub(crate) fn get_ptr_and_method_ref<'tcx>(
             }
         }
 
-        if let Abi::ScalarPair(_, _) = arg.layout().abi {
+        if let BackendRepr::ScalarPair(_, _) = arg.layout().backend_repr {
            let (ptr, vtable) = arg.load_scalar_pair(fx);
            (Pointer::new(ptr), vtable)
        } else {
@@ -1016,11 +1016,11 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
             OperandValue::Ref(place.val)
         } else if place.layout.is_gcc_immediate() {
             let load = self.load(place.layout.gcc_type(self), place.val.llval, place.val.align);
-            if let abi::Abi::Scalar(ref scalar) = place.layout.abi {
+            if let abi::BackendRepr::Scalar(ref scalar) = place.layout.backend_repr {
                 scalar_load_metadata(self, load, scalar);
             }
             OperandValue::Immediate(self.to_immediate(load, place.layout))
-        } else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi {
+        } else if let abi::BackendRepr::ScalarPair(ref a, ref b) = place.layout.backend_repr {
             let b_offset = a.size(self).align_to(b.align(self).abi);
 
             let mut load = |i, scalar: &abi::Scalar, align| {
@@ -294,13 +294,13 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc
         }
 
         sym::raw_eq => {
-            use rustc_target::abi::Abi::*;
+            use rustc_abi::BackendRepr::*;
             let tp_ty = fn_args.type_at(0);
             let layout = self.layout_of(tp_ty).layout;
-            let _use_integer_compare = match layout.abi() {
+            let _use_integer_compare = match layout.backend_repr() {
                 Scalar(_) | ScalarPair(_, _) => true,
                 Uninhabited | Vector { .. } => false,
-                Aggregate { .. } => {
+                Memory { .. } => {
                     // For rusty ABIs, small aggregates are actually passed
                     // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),
                     // so we re-use that same threshold here.
@@ -3,7 +3,7 @@ use std::fmt::Write;
 
 use gccjit::{Struct, Type};
 use rustc_abi as abi;
 use rustc_abi::Primitive::*;
-use rustc_abi::{Abi, FieldsShape, Integer, PointeeInfo, Size, Variants};
+use rustc_abi::{BackendRepr, FieldsShape, Integer, PointeeInfo, Size, Variants};
 use rustc_codegen_ssa::traits::{
     BaseTypeCodegenMethods, DerivedTypeCodegenMethods, LayoutTypeCodegenMethods,
 };
@@ -60,9 +60,9 @@ fn uncached_gcc_type<'gcc, 'tcx>(
     layout: TyAndLayout<'tcx>,
     defer: &mut Option<(Struct<'gcc>, TyAndLayout<'tcx>)>,
 ) -> Type<'gcc> {
-    match layout.abi {
-        Abi::Scalar(_) => bug!("handled elsewhere"),
-        Abi::Vector { ref element, count } => {
+    match layout.backend_repr {
+        BackendRepr::Scalar(_) => bug!("handled elsewhere"),
+        BackendRepr::Vector { ref element, count } => {
             let element = layout.scalar_gcc_type_at(cx, element, Size::ZERO);
             let element =
                 // NOTE: gcc doesn't allow pointer types in vectors.
@@ -74,7 +74,7 @@ fn uncached_gcc_type<'gcc, 'tcx>(
             };
             return cx.context.new_vector_type(element, count);
         }
-        Abi::ScalarPair(..) => {
+        BackendRepr::ScalarPair(..) => {
             return cx.type_struct(
                 &[
                     layout.scalar_pair_element_gcc_type(cx, 0),
@@ -83,7 +83,7 @@ fn uncached_gcc_type<'gcc, 'tcx>(
                 false,
             );
         }
-        Abi::Uninhabited | Abi::Aggregate { .. } => {}
+        BackendRepr::Uninhabited | BackendRepr::Memory { .. } => {}
     }
 
     let name = match *layout.ty.kind() {
@@ -176,16 +176,21 @@ pub trait LayoutGccExt<'tcx> {
 
 impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
     fn is_gcc_immediate(&self) -> bool {
-        match self.abi {
-            Abi::Scalar(_) | Abi::Vector { .. } => true,
-            Abi::ScalarPair(..) | Abi::Uninhabited | Abi::Aggregate { .. } => false,
+        match self.backend_repr {
+            BackendRepr::Scalar(_) | BackendRepr::Vector { .. } => true,
+            BackendRepr::ScalarPair(..) | BackendRepr::Uninhabited | BackendRepr::Memory { .. } => {
+                false
+            }
         }
     }
 
     fn is_gcc_scalar_pair(&self) -> bool {
-        match self.abi {
-            Abi::ScalarPair(..) => true,
-            Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector { .. } | Abi::Aggregate { .. } => false,
+        match self.backend_repr {
+            BackendRepr::ScalarPair(..) => true,
+            BackendRepr::Uninhabited
+            | BackendRepr::Scalar(_)
+            | BackendRepr::Vector { .. }
+            | BackendRepr::Memory { .. } => false,
         }
     }
 
@@ -205,7 +210,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
         // This must produce the same result for `repr(transparent)` wrappers as for the inner type!
         // In other words, this should generally not look at the type at all, but only at the
         // layout.
-        if let Abi::Scalar(ref scalar) = self.abi {
+        if let BackendRepr::Scalar(ref scalar) = self.backend_repr {
             // Use a different cache for scalars because pointers to DSTs
             // can be either wide or thin (data pointers of wide pointers).
             if let Some(&ty) = cx.scalar_types.borrow().get(&self.ty) {
@@ -261,7 +266,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
     }
 
     fn immediate_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
-        if let Abi::Scalar(ref scalar) = self.abi {
+        if let BackendRepr::Scalar(ref scalar) = self.backend_repr {
             if scalar.is_bool() {
                 return cx.type_i1();
             }
@@ -299,8 +304,8 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
         // This must produce the same result for `repr(transparent)` wrappers as for the inner type!
         // In other words, this should generally not look at the type at all, but only at the
         // layout.
-        let (a, b) = match self.abi {
-            Abi::ScalarPair(ref a, ref b) => (a, b),
+        let (a, b) = match self.backend_repr {
+            BackendRepr::ScalarPair(ref a, ref b) => (a, b),
             _ => bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self),
         };
         let scalar = [a, b][index];
@@ -458,7 +458,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
         match &self.ret.mode {
             PassMode::Direct(attrs) => {
                 attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
-                if let abi::Abi::Scalar(scalar) = self.ret.layout.abi {
+                if let abi::BackendRepr::Scalar(scalar) = self.ret.layout.backend_repr {
                     apply_range_attr(llvm::AttributePlace::ReturnValue, scalar);
                 }
             }
@@ -495,7 +495,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
             }
             PassMode::Direct(attrs) => {
                 let i = apply(attrs);
-                if let abi::Abi::Scalar(scalar) = arg.layout.abi {
+                if let abi::BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
                     apply_range_attr(llvm::AttributePlace::Argument(i), scalar);
                 }
             }
@@ -510,7 +510,9 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
             PassMode::Pair(a, b) => {
                 let i = apply(a);
                 let ii = apply(b);
-                if let abi::Abi::ScalarPair(scalar_a, scalar_b) = arg.layout.abi {
+                if let abi::BackendRepr::ScalarPair(scalar_a, scalar_b) =
+                    arg.layout.backend_repr
+                {
                     apply_range_attr(llvm::AttributePlace::Argument(i), scalar_a);
                     apply_range_attr(llvm::AttributePlace::Argument(ii), scalar_b);
                 }
@@ -570,7 +572,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
         }
         if bx.cx.sess().opts.optimize != config::OptLevel::No
             && llvm_util::get_version() < (19, 0, 0)
-            && let abi::Abi::Scalar(scalar) = self.ret.layout.abi
+            && let abi::BackendRepr::Scalar(scalar) = self.ret.layout.backend_repr
             && matches!(scalar.primitive(), Int(..))
             // If the value is a boolean, the range is 0..2 and that ultimately
             // become 0..0 when the type becomes i1, which would be rejected
@@ -880,8 +880,8 @@ fn llvm_fixup_input<'ll, 'tcx>(
 ) -> &'ll Value {
     use InlineAsmRegClass::*;
     let dl = &bx.tcx.data_layout;
-    match (reg, layout.abi) {
-        (AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
+    match (reg, layout.backend_repr) {
+        (AArch64(AArch64InlineAsmRegClass::vreg), BackendRepr::Scalar(s)) => {
             if let Primitive::Int(Integer::I8, _) = s.primitive() {
                 let vec_ty = bx.cx.type_vector(bx.cx.type_i8(), 8);
                 bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
@@ -889,7 +889,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 value
             }
         }
-        (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s))
+        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Scalar(s))
             if s.primitive() != Primitive::Float(Float::F128) =>
         {
             let elem_ty = llvm_asm_scalar_type(bx.cx, s);
@@ -902,7 +902,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
             }
             bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
         }
-        (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Vector { element, count })
+        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count })
             if layout.size.bytes() == 8 =>
         {
             let elem_ty = llvm_asm_scalar_type(bx.cx, element);
@@ -910,14 +910,14 @@ fn llvm_fixup_input<'ll, 'tcx>(
             let indices: Vec<_> = (0..count * 2).map(|x| bx.const_i32(x as i32)).collect();
             bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
         }
-        (X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
+        (X86(X86InlineAsmRegClass::reg_abcd), BackendRepr::Scalar(s))
            if s.primitive() == Primitive::Float(Float::F64) =>
        {
            bx.bitcast(value, bx.cx.type_i64())
        }
        (
            X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
-            Abi::Vector { .. },
+            BackendRepr::Vector { .. },
        ) if layout.size.bytes() == 64 => bx.bitcast(value, bx.cx.type_vector(bx.cx.type_f64(), 8)),
        (
            X86(
@@ -925,7 +925,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
                | X86InlineAsmRegClass::ymm_reg
                | X86InlineAsmRegClass::zmm_reg,
            ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
        ) if bx.sess().asm_arch == Some(InlineAsmArch::X86)
            && s.primitive() == Primitive::Float(Float::F128) =>
        {
@@ -937,7 +937,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
                | X86InlineAsmRegClass::ymm_reg
                | X86InlineAsmRegClass::zmm_reg,
            ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
        ) if s.primitive() == Primitive::Float(Float::F16) => {
            let value = bx.insert_element(
                bx.const_undef(bx.type_vector(bx.type_f16(), 8)),
@@ -952,11 +952,14 @@ fn llvm_fixup_input<'ll, 'tcx>(
                | X86InlineAsmRegClass::ymm_reg
                | X86InlineAsmRegClass::zmm_reg,
            ),
-            Abi::Vector { element, count: count @ (8 | 16) },
+            BackendRepr::Vector { element, count: count @ (8 | 16) },
        ) if element.primitive() == Primitive::Float(Float::F16) => {
            bx.bitcast(value, bx.type_vector(bx.type_i16(), count))
        }
-        (Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16), Abi::Scalar(s)) => {
+        (
+            Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
+            BackendRepr::Scalar(s),
+        ) => {
            if let Primitive::Int(Integer::I32, _) = s.primitive() {
                bx.bitcast(value, bx.cx.type_f32())
            } else {
@@ -969,7 +972,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
                | ArmInlineAsmRegClass::dreg_low8
                | ArmInlineAsmRegClass::dreg_low16,
            ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I64, _) = s.primitive() {
                bx.bitcast(value, bx.cx.type_f64())
@@ -986,11 +989,11 @@ fn llvm_fixup_input<'ll, 'tcx>(
                | ArmInlineAsmRegClass::qreg_low4
                | ArmInlineAsmRegClass::qreg_low8,
            ),
-            Abi::Vector { element, count: count @ (4 | 8) },
+            BackendRepr::Vector { element, count: count @ (4 | 8) },
        ) if element.primitive() == Primitive::Float(Float::F16) => {
            bx.bitcast(value, bx.type_vector(bx.type_i16(), count))
        }
-        (Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
+        (Mips(MipsInlineAsmRegClass::reg), BackendRepr::Scalar(s)) => {
            match s.primitive() {
                // MIPS only supports register-length arithmetics.
                Primitive::Int(Integer::I8 | Integer::I16, _) => bx.zext(value, bx.cx.type_i32()),
@@ -999,7 +1002,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
                _ => value,
            }
        }
-        (RiscV(RiscVInlineAsmRegClass::freg), Abi::Scalar(s))
+        (RiscV(RiscVInlineAsmRegClass::freg), BackendRepr::Scalar(s))
            if s.primitive() == Primitive::Float(Float::F16)
                && !any_target_feature_enabled(bx, instance, &[sym::zfhmin, sym::zfh]) =>
        {
@@ -1022,15 +1025,15 @@ fn llvm_fixup_output<'ll, 'tcx>(
     instance: Instance<'_>,
 ) -> &'ll Value {
     use InlineAsmRegClass::*;
-    match (reg, layout.abi) {
-        (AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
+    match (reg, layout.backend_repr) {
+        (AArch64(AArch64InlineAsmRegClass::vreg), BackendRepr::Scalar(s)) => {
             if let Primitive::Int(Integer::I8, _) = s.primitive() {
                 bx.extract_element(value, bx.const_i32(0))
             } else {
                 value
             }
         }
-        (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s))
+        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Scalar(s))
            if s.primitive() != Primitive::Float(Float::F128) =>
        {
            value = bx.extract_element(value, bx.const_i32(0));
@@ -1039,7 +1042,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
            }
            value
        }
-        (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Vector { element, count })
+        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count })
            if layout.size.bytes() == 8 =>
        {
            let elem_ty = llvm_asm_scalar_type(bx.cx, element);
@@ -1047,14 +1050,14 @@ fn llvm_fixup_output<'ll, 'tcx>(
            let indices: Vec<_> = (0..count).map(|x| bx.const_i32(x as i32)).collect();
            bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
        }
-        (X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
+        (X86(X86InlineAsmRegClass::reg_abcd), BackendRepr::Scalar(s))
            if s.primitive() == Primitive::Float(Float::F64) =>
        {
            bx.bitcast(value, bx.cx.type_f64())
        }
        (
            X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
-            Abi::Vector { .. },
+            BackendRepr::Vector { .. },
        ) if layout.size.bytes() == 64 => bx.bitcast(value, layout.llvm_type(bx.cx)),
        (
            X86(
@@ -1062,7 +1065,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
                | X86InlineAsmRegClass::ymm_reg
                | X86InlineAsmRegClass::zmm_reg,
            ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
        ) if bx.sess().asm_arch == Some(InlineAsmArch::X86)
            && s.primitive() == Primitive::Float(Float::F128) =>
        {
@@ -1074,7 +1077,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
                | X86InlineAsmRegClass::ymm_reg
                | X86InlineAsmRegClass::zmm_reg,
            ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
        ) if s.primitive() == Primitive::Float(Float::F16) => {
            let value = bx.bitcast(value, bx.type_vector(bx.type_f16(), 8));
            bx.extract_element(value, bx.const_usize(0))
@@ -1085,11 +1088,14 @@ fn llvm_fixup_output<'ll, 'tcx>(
                | X86InlineAsmRegClass::ymm_reg
                | X86InlineAsmRegClass::zmm_reg,
            ),
-            Abi::Vector { element, count: count @ (8 | 16) },
+            BackendRepr::Vector { element, count: count @ (8 | 16) },
        ) if element.primitive() == Primitive::Float(Float::F16) => {
            bx.bitcast(value, bx.type_vector(bx.type_f16(), count))
        }
-        (Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16), Abi::Scalar(s)) => {
+        (
+            Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
+            BackendRepr::Scalar(s),
+        ) => {
            if let Primitive::Int(Integer::I32, _) = s.primitive() {
                bx.bitcast(value, bx.cx.type_i32())
            } else {
@@ -1102,7 +1108,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
                | ArmInlineAsmRegClass::dreg_low8
                | ArmInlineAsmRegClass::dreg_low16,
            ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I64, _) = s.primitive() {
                bx.bitcast(value, bx.cx.type_i64())
@@ -1119,11 +1125,11 @@ fn llvm_fixup_output<'ll, 'tcx>(
                | ArmInlineAsmRegClass::qreg_low4
                | ArmInlineAsmRegClass::qreg_low8,
            ),
-            Abi::Vector { element, count: count @ (4 | 8) },
+            BackendRepr::Vector { element, count: count @ (4 | 8) },
        ) if element.primitive() == Primitive::Float(Float::F16) => {
            bx.bitcast(value, bx.type_vector(bx.type_f16(), count))
        }
-        (Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
+        (Mips(MipsInlineAsmRegClass::reg), BackendRepr::Scalar(s)) => {
            match s.primitive() {
                // MIPS only supports register-length arithmetics.
                Primitive::Int(Integer::I8, _) => bx.trunc(value, bx.cx.type_i8()),
@@ -1133,7 +1139,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
                _ => value,
            }
        }
-        (RiscV(RiscVInlineAsmRegClass::freg), Abi::Scalar(s))
+        (RiscV(RiscVInlineAsmRegClass::freg), BackendRepr::Scalar(s))
            if s.primitive() == Primitive::Float(Float::F16)
                && !any_target_feature_enabled(bx, instance, &[sym::zfhmin, sym::zfh]) =>
        {
@@ -1153,35 +1159,35 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
     instance: Instance<'_>,
 ) -> &'ll Type {
     use InlineAsmRegClass::*;
-    match (reg, layout.abi) {
-        (AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
+    match (reg, layout.backend_repr) {
+        (AArch64(AArch64InlineAsmRegClass::vreg), BackendRepr::Scalar(s)) => {
             if let Primitive::Int(Integer::I8, _) = s.primitive() {
                 cx.type_vector(cx.type_i8(), 8)
             } else {
                 layout.llvm_type(cx)
             }
         }
-        (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s))
+        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Scalar(s))
            if s.primitive() != Primitive::Float(Float::F128) =>
        {
            let elem_ty = llvm_asm_scalar_type(cx, s);
            let count = 16 / layout.size.bytes();
            cx.type_vector(elem_ty, count)
        }
-        (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Vector { element, count })
+        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count })
            if layout.size.bytes() == 8 =>
        {
            let elem_ty = llvm_asm_scalar_type(cx, element);
            cx.type_vector(elem_ty, count * 2)
        }
-        (X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
+        (X86(X86InlineAsmRegClass::reg_abcd), BackendRepr::Scalar(s))
            if s.primitive() == Primitive::Float(Float::F64) =>
        {
            cx.type_i64()
        }
        (
            X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
-            Abi::Vector { .. },
+            BackendRepr::Vector { .. },
        ) if layout.size.bytes() == 64 => cx.type_vector(cx.type_f64(), 8),
        (
            X86(
@@ -1189,7 +1195,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
                | X86InlineAsmRegClass::ymm_reg
                | X86InlineAsmRegClass::zmm_reg,
            ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
        ) if cx.sess().asm_arch == Some(InlineAsmArch::X86)
            && s.primitive() == Primitive::Float(Float::F128) =>
        {
@@ -1201,7 +1207,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
                | X86InlineAsmRegClass::ymm_reg
                | X86InlineAsmRegClass::zmm_reg,
            ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
        ) if s.primitive() == Primitive::Float(Float::F16) => cx.type_vector(cx.type_i16(), 8),
        (
            X86(
@@ -1209,11 +1215,14 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
                | X86InlineAsmRegClass::ymm_reg
                | X86InlineAsmRegClass::zmm_reg,
            ),
-            Abi::Vector { element, count: count @ (8 | 16) },
+            BackendRepr::Vector { element, count: count @ (8 | 16) },
        ) if element.primitive() == Primitive::Float(Float::F16) => {
            cx.type_vector(cx.type_i16(), count)
        }
-        (Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16), Abi::Scalar(s)) => {
+        (
+            Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
+            BackendRepr::Scalar(s),
+        ) => {
            if let Primitive::Int(Integer::I32, _) = s.primitive() {
                cx.type_f32()
            } else {
@@ -1226,7 +1235,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
                | ArmInlineAsmRegClass::dreg_low8
                | ArmInlineAsmRegClass::dreg_low16,
            ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I64, _) = s.primitive() {
                cx.type_f64()
@@ -1243,11 +1252,11 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
                | ArmInlineAsmRegClass::qreg_low4
                | ArmInlineAsmRegClass::qreg_low8,
            ),
-            Abi::Vector { element, count: count @ (4 | 8) },
+            BackendRepr::Vector { element, count: count @ (4 | 8) },
        ) if element.primitive() == Primitive::Float(Float::F16) => {
            cx.type_vector(cx.type_i16(), count)
        }
-        (Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
+        (Mips(MipsInlineAsmRegClass::reg), BackendRepr::Scalar(s)) => {
            match s.primitive() {
                // MIPS only supports register-length arithmetics.
                Primitive::Int(Integer::I8 | Integer::I16, _) => cx.type_i32(),
@@ -1256,7 +1265,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
                _ => layout.llvm_type(cx),
            }
        }
-        (RiscV(RiscVInlineAsmRegClass::freg), Abi::Scalar(s))
+        (RiscV(RiscVInlineAsmRegClass::freg), BackendRepr::Scalar(s))
            if s.primitive() == Primitive::Float(Float::F16)
                && !any_target_feature_enabled(cx, instance, &[sym::zfhmin, sym::zfh]) =>
        {
@@ -543,13 +543,13 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             }
             let llval = const_llval.unwrap_or_else(|| {
                 let load = self.load(llty, place.val.llval, place.val.align);
-                if let abi::Abi::Scalar(scalar) = place.layout.abi {
+                if let abi::BackendRepr::Scalar(scalar) = place.layout.backend_repr {
                     scalar_load_metadata(self, load, scalar, place.layout, Size::ZERO);
                 }
                 load
             });
             OperandValue::Immediate(self.to_immediate(llval, place.layout))
-        } else if let abi::Abi::ScalarPair(a, b) = place.layout.abi {
+        } else if let abi::BackendRepr::ScalarPair(a, b) = place.layout.backend_repr {
             let b_offset = a.size(self).align_to(b.align(self).abi);
 
             let mut load = |i, scalar: abi::Scalar, layout, align, offset| {
@@ -258,8 +258,8 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
             self.call_intrinsic("llvm.va_copy", &[args[0].immediate(), args[1].immediate()])
         }
         sym::va_arg => {
-            match fn_abi.ret.layout.abi {
-                abi::Abi::Scalar(scalar) => {
+            match fn_abi.ret.layout.backend_repr {
+                abi::BackendRepr::Scalar(scalar) => {
                     match scalar.primitive() {
                         Primitive::Int(..) => {
                             if self.cx().size_of(ret_ty).bytes() < 4 {
@@ -436,13 +436,13 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
         }
 
         sym::raw_eq => {
-            use abi::Abi::*;
+            use abi::BackendRepr::*;
             let tp_ty = fn_args.type_at(0);
             let layout = self.layout_of(tp_ty).layout;
-            let use_integer_compare = match layout.abi() {
+            let use_integer_compare = match layout.backend_repr() {
                 Scalar(_) | ScalarPair(_, _) => true,
                 Uninhabited | Vector { .. } => false,
-                Aggregate { .. } => {
+                Memory { .. } => {
                     // For rusty ABIs, small aggregates are actually passed
                     // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),
                     // so we re-use that same threshold here.
@@ -549,7 +549,8 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
         }
 
         let llret_ty = if ret_ty.is_simd()
-            && let abi::Abi::Aggregate { .. } = self.layout_of(ret_ty).layout.abi
+            && let abi::BackendRepr::Memory { .. } =
+                self.layout_of(ret_ty).layout.backend_repr
         {
             let (size, elem_ty) = ret_ty.simd_size_and_type(self.tcx());
             let elem_ll_ty = match elem_ty.kind() {
@@ -1,7 +1,7 @@
 use std::fmt::Write;
 
 use rustc_abi::Primitive::{Float, Int, Pointer};
-use rustc_abi::{Abi, Align, FieldsShape, Scalar, Size, Variants};
+use rustc_abi::{Align, BackendRepr, FieldsShape, Scalar, Size, Variants};
 use rustc_codegen_ssa::traits::*;
 use rustc_middle::bug;
 use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
@@ -17,13 +17,13 @@ fn uncached_llvm_type<'a, 'tcx>(
     layout: TyAndLayout<'tcx>,
     defer: &mut Option<(&'a Type, TyAndLayout<'tcx>)>,
 ) -> &'a Type {
-    match layout.abi {
-        Abi::Scalar(_) => bug!("handled elsewhere"),
-        Abi::Vector { element, count } => {
+    match layout.backend_repr {
+        BackendRepr::Scalar(_) => bug!("handled elsewhere"),
+        BackendRepr::Vector { element, count } => {
             let element = layout.scalar_llvm_type_at(cx, element);
             return cx.type_vector(element, count);
         }
-        Abi::Uninhabited | Abi::Aggregate { .. } | Abi::ScalarPair(..) => {}
+        BackendRepr::Uninhabited | BackendRepr::Memory { .. } | BackendRepr::ScalarPair(..) => {}
     }
 
     let name = match layout.ty.kind() {
@@ -170,16 +170,21 @@ pub(crate) trait LayoutLlvmExt<'tcx> {
 
 impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
     fn is_llvm_immediate(&self) -> bool {
-        match self.abi {
-            Abi::Scalar(_) | Abi::Vector { .. } => true,
-            Abi::ScalarPair(..) | Abi::Uninhabited | Abi::Aggregate { .. } => false,
+        match self.backend_repr {
+            BackendRepr::Scalar(_) | BackendRepr::Vector { .. } => true,
+            BackendRepr::ScalarPair(..) | BackendRepr::Uninhabited | BackendRepr::Memory { .. } => {
+                false
+            }
         }
     }
 
     fn is_llvm_scalar_pair(&self) -> bool {
-        match self.abi {
-            Abi::ScalarPair(..) => true,
-            Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector { .. } | Abi::Aggregate { .. } => false,
+        match self.backend_repr {
+            BackendRepr::ScalarPair(..) => true,
+            BackendRepr::Uninhabited
+            | BackendRepr::Scalar(_)
+            | BackendRepr::Vector { .. }
+            | BackendRepr::Memory { .. } => false,
        }
    }
 
@@ -198,7 +203,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
         // This must produce the same result for `repr(transparent)` wrappers as for the inner type!
         // In other words, this should generally not look at the type at all, but only at the
         // layout.
-        if let Abi::Scalar(scalar) = self.abi {
+        if let BackendRepr::Scalar(scalar) = self.backend_repr {
             // Use a different cache for scalars because pointers to DSTs
             // can be either wide or thin (data pointers of wide pointers).
             if let Some(&llty) = cx.scalar_lltypes.borrow().get(&self.ty) {
@@ -248,13 +253,13 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
     }
 
     fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
-        match self.abi {
-            Abi::Scalar(scalar) => {
+        match self.backend_repr {
+            BackendRepr::Scalar(scalar) => {
                 if scalar.is_bool() {
                     return cx.type_i1();
                 }
             }
-            Abi::ScalarPair(..) => {
+            BackendRepr::ScalarPair(..) => {
                 // An immediate pair always contains just the two elements, without any padding
                 // filler, as it should never be stored to memory.
                 return cx.type_struct(
@@ -287,7 +292,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
         // This must produce the same result for `repr(transparent)` wrappers as for the inner type!
         // In other words, this should generally not look at the type at all, but only at the
         // layout.
-        let Abi::ScalarPair(a, b) = self.abi else {
+        let BackendRepr::ScalarPair(a, b) = self.backend_repr else {
             bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self);
         };
         let scalar = [a, b][index];
@@ -1532,7 +1532,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 // the load would just produce `OperandValue::Ref` instead
                 // of the `OperandValue::Immediate` we need for the call.
                 llval = bx.load(bx.backend_type(arg.layout), llval, align);
-                if let abi::Abi::Scalar(scalar) = arg.layout.abi {
+                if let abi::BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
                     if scalar.is_bool() {
                         bx.range_metadata(llval, WrappingRange { start: 0, end: 1 });
                     }
@@ -1,8 +1,8 @@
+use rustc_abi::BackendRepr;
 use rustc_middle::mir::interpret::ErrorHandled;
 use rustc_middle::ty::layout::HasTyCtxt;
 use rustc_middle::ty::{self, Ty};
 use rustc_middle::{bug, mir, span_bug};
-use rustc_target::abi::Abi;
 
 use super::FunctionCx;
 use crate::errors;
@@ -86,7 +86,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 .map(|field| {
                     if let Some(prim) = field.try_to_scalar() {
                         let layout = bx.layout_of(field_ty);
-                        let Abi::Scalar(scalar) = layout.abi else {
+                        let BackendRepr::Scalar(scalar) = layout.backend_repr else {
                             bug!("from_const: invalid ByVal layout: {:#?}", layout);
                         };
                         bx.scalar_to_backend(prim, scalar, bx.immediate_backend_type(layout))
@@ -2,6 +2,7 @@ use std::collections::hash_map::Entry;
 use std::marker::PhantomData;
 use std::ops::Range;
 
+use rustc_abi::{BackendRepr, FieldIdx, FieldsShape, Size, VariantIdx};
 use rustc_data_structures::fx::{FxHashMap};
 use rustc_index::IndexVec;
 use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
@@ -11,7 +12,6 @@ use rustc_middle::{bug, mir, ty};
 use rustc_session::config::DebugInfo;
 use rustc_span::symbol::{Symbol, kw};
 use rustc_span::{BytePos, Span, hygiene};
-use rustc_target::abi::{Abi, FieldIdx, FieldsShape, Size, VariantIdx};
 
 use super::operand::{OperandRef, OperandValue};
 use super::place::{PlaceRef, PlaceValue};
@@ -510,7 +510,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 // be marked as a `LocalVariable` for MSVC debuggers to visualize
                 // their data correctly. (See #81894 & #88625)
                 let var_ty_layout = self.cx.layout_of(var_ty);
-                if let Abi::ScalarPair(_, _) = var_ty_layout.abi {
+                if let BackendRepr::ScalarPair(_, _) = var_ty_layout.backend_repr {
                     VariableKind::LocalVariable
                 } else {
                     VariableKind::ArgumentVariable(arg_index)
@@ -4,7 +4,7 @@ use std::fmt;
 use arrayvec::ArrayVec;
 use either::Either;
 use rustc_abi as abi;
-use rustc_abi::{Abi, Align, Size};
+use rustc_abi::{Align, BackendRepr, Size};
 use rustc_middle::bug;
 use rustc_middle::mir::interpret::{Pointer, Scalar, alloc_range};
 use rustc_middle::mir::{self, ConstValue};
@@ -163,7 +163,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
 
         let val = match val {
             ConstValue::Scalar(x) => {
-                let Abi::Scalar(scalar) = layout.abi else {
+                let BackendRepr::Scalar(scalar) = layout.backend_repr else {
                     bug!("from_const: invalid ByVal layout: {:#?}", layout);
                 };
                 let llval = bx.scalar_to_backend(x, scalar, bx.immediate_backend_type(layout));
@@ -171,7 +171,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
             }
             ConstValue::ZeroSized => return OperandRef::zero_sized(layout),
             ConstValue::Slice { data, meta } => {
-                let Abi::ScalarPair(a_scalar, _) = layout.abi else {
+                let BackendRepr::ScalarPair(a_scalar, _) = layout.backend_repr else {
                     bug!("from_const: invalid ScalarPair layout: {:#?}", layout);
                 };
                 let a = Scalar::from_pointer(
@@ -221,14 +221,14 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
         // case where some of the bytes are initialized and others are not. So, we need an extra
         // check that walks over the type of `mplace` to make sure it is truly correct to treat this
         // like a `Scalar` (or `ScalarPair`).
-        match layout.abi {
-            Abi::Scalar(s @ abi::Scalar::Initialized { .. }) => {
+        match layout.backend_repr {
+            BackendRepr::Scalar(s @ abi::Scalar::Initialized { .. }) => {
                 let size = s.size(bx);
                 assert_eq!(size, layout.size, "abi::Scalar size does not match layout size");
                 let val = read_scalar(offset, size, s, bx.immediate_backend_type(layout));
                 OperandRef { val: OperandValue::Immediate(val), layout }
             }
-            Abi::ScalarPair(
+            BackendRepr::ScalarPair(
                 a @ abi::Scalar::Initialized { .. },
                 b @ abi::Scalar::Initialized { .. },
             ) => {
@@ -322,7 +322,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
         llval: V,
         layout: TyAndLayout<'tcx>,
     ) -> Self {
-        let val = if let Abi::ScalarPair(..) = layout.abi {
+        let val = if let BackendRepr::ScalarPair(..) = layout.backend_repr {
             debug!("Operand::from_immediate_or_packed_pair: unpacking {:?} @ {:?}", llval, layout);
 
             // Deconstruct the immediate aggregate.
@@ -343,7 +343,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
         let field = self.layout.field(bx.cx(), i);
         let offset = self.layout.fields.offset(i);
 
-        let mut val = match (self.val, self.layout.abi) {
+        let mut val = match (self.val, self.layout.backend_repr) {
             // If the field is ZST, it has no data.
             _ if field.is_zst() => OperandValue::ZeroSized,
 
@@ -356,7 +356,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
             }
 
             // Extract a scalar component from a pair.
-            (OperandValue::Pair(a_llval, b_llval), Abi::ScalarPair(a, b)) => {
+            (OperandValue::Pair(a_llval, b_llval), BackendRepr::ScalarPair(a, b)) => {
                 if offset.bytes() == 0 {
                     assert_eq!(field.size, a.size(bx.cx()));
                     OperandValue::Immediate(a_llval)
@@ -368,30 +368,30 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
             }
 
             // `#[repr(simd)]` types are also immediate.
-            (OperandValue::Immediate(llval), Abi::Vector { .. }) => {
+            (OperandValue::Immediate(llval), BackendRepr::Vector { .. }) => {
                 OperandValue::Immediate(bx.extract_element(llval, bx.cx().const_usize(i as u64)))
             }
 
             _ => bug!("OperandRef::extract_field({:?}): not applicable", self),
         };
 
-        match (&mut val, field.abi) {
+        match (&mut val, field.backend_repr) {
             (OperandValue::ZeroSized, _) => {}
             (
                 OperandValue::Immediate(llval),
-                Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. },
+                BackendRepr::Scalar(_) | BackendRepr::ScalarPair(..) | BackendRepr::Vector { .. },
             ) => {
                 // Bools in union fields needs to be truncated.
                 *llval = bx.to_immediate(*llval, field);
             }
-            (OperandValue::Pair(a, b), Abi::ScalarPair(a_abi, b_abi)) => {
+            (OperandValue::Pair(a, b), BackendRepr::ScalarPair(a_abi, b_abi)) => {
                 // Bools in union fields needs to be truncated.
                 *a = bx.to_immediate_scalar(*a, a_abi);
                 *b = bx.to_immediate_scalar(*b, b_abi);
             }
             // Newtype vector of array, e.g. #[repr(simd)] struct S([i32; 4]);
-            (OperandValue::Immediate(llval), Abi::Aggregate { sized: true }) => {
-                assert_matches!(self.layout.abi, Abi::Vector { .. });
+            (OperandValue::Immediate(llval), BackendRepr::Memory { sized: true }) => {
+                assert_matches!(self.layout.backend_repr, BackendRepr::Vector { .. });
 
                 let llfield_ty = bx.cx().backend_type(field);
 
@@ -400,7 +400,10 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
                 bx.store(*llval, llptr, field.align.abi);
                 *llval = bx.load(llfield_ty, llptr, field.align.abi);
             }
-            (OperandValue::Immediate(_), Abi::Uninhabited | Abi::Aggregate { sized: false }) => {
+            (
+                OperandValue::Immediate(_),
+                BackendRepr::Uninhabited | BackendRepr::Memory { sized: false },
+            ) => {
                 bug!()
             }
             (OperandValue::Pair(..), _) => bug!(),
@@ -494,7 +497,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
                 bx.store_with_flags(val, dest.val.llval, dest.val.align, flags);
             }
             OperandValue::Pair(a, b) => {
-                let Abi::ScalarPair(a_scalar, b_scalar) = dest.layout.abi else {
+                let BackendRepr::ScalarPair(a_scalar, b_scalar) = dest.layout.backend_repr else {
                     bug!("store_with_flags: invalid ScalarPair layout: {:#?}", dest.layout);
                 };
                 let b_offset = a_scalar.size(bx).align_to(b_scalar.align(bx).abi);
@@ -645,7 +648,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             // However, some SIMD types do not actually use the vector ABI
             // (in particular, packed SIMD types do not). Ensure we exclude those.
             let layout = bx.layout_of(constant_ty);
-            if let Abi::Vector { .. } = layout.abi {
+            if let BackendRepr::Vector { .. } = layout.backend_repr {
                 let (llval, ty) = self.immediate_const_vector(bx, constant);
                 return OperandRef {
                     val: OperandValue::Immediate(llval),
@@ -1136,17 +1136,17 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             OperandValueKind::ZeroSized
         } else if self.cx.is_backend_immediate(layout) {
             assert!(!self.cx.is_backend_scalar_pair(layout));
-            OperandValueKind::Immediate(match layout.abi {
-                abi::Abi::Scalar(s) => s,
-                abi::Abi::Vector { element, .. } => element,
+            OperandValueKind::Immediate(match layout.backend_repr {
+                abi::BackendRepr::Scalar(s) => s,
+                abi::BackendRepr::Vector { element, .. } => element,
                 x => span_bug!(self.mir.span, "Couldn't translate {x:?} as backend immediate"),
             })
         } else if self.cx.is_backend_scalar_pair(layout) {
-            let abi::Abi::ScalarPair(s1, s2) = layout.abi else {
+            let abi::BackendRepr::ScalarPair(s1, s2) = layout.backend_repr else {
                 span_bug!(
                     self.mir.span,
                     "Couldn't translate {:?} as backend scalar pair",
-                    layout.abi,
+                    layout.backend_repr,
                 );
             };
             OperandValueKind::Pair(s1, s2)
@@ -1,13 +1,13 @@
 use std::assert_matches::assert_matches;
 use std::ops::Deref;
 
+use rustc_abi::{Align, BackendRepr, Scalar, Size, WrappingRange};
 use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
 use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout};
 use rustc_middle::ty::{Instance, Ty};
 use rustc_session::config::OptLevel;
 use rustc_span::Span;
 use rustc_target::abi::call::FnAbi;
-use rustc_target::abi::{Abi, Align, Scalar, Size, WrappingRange};
 
 use super::abi::AbiBuilderMethods;
 use super::asm::AsmBuilderMethods;
@@ -162,7 +162,7 @@ pub trait BuilderMethods<'a, 'tcx>:
 
     fn from_immediate(&mut self, val: Self::Value) -> Self::Value;
     fn to_immediate(&mut self, val: Self::Value, layout: TyAndLayout<'_>) -> Self::Value {
-        if let Abi::Scalar(scalar) = layout.abi {
+        if let BackendRepr::Scalar(scalar) = layout.backend_repr {
             self.to_immediate_scalar(val, scalar)
         } else {
             val
@@ -131,7 +131,7 @@ impl<'tcx> interpret::Machine<'tcx> for DummyMachine {
         interp_ok(match bin_op {
             Eq | Ne | Lt | Le | Gt | Ge => {
                 // Types can differ, e.g. fn ptrs with different `for`.
-                assert_eq!(left.layout.abi, right.layout.abi);
+                assert_eq!(left.layout.backend_repr, right.layout.backend_repr);
                 let size = ecx.pointer_size();
                 // Just compare the bits. ScalarPairs are compared lexicographically.
                 // We thus always compare pairs and simply fill scalars up with 0.
@@ -1,6 +1,7 @@
 use std::sync::atomic::Ordering::Relaxed;
 
 use either::{Left, Right};
+use rustc_abi::{self as abi, BackendRepr};
 use rustc_hir::def::DefKind;
 use rustc_middle::bug;
 use rustc_middle::mir::interpret::{AllocId, ErrorHandled, InterpErrorInfo};
@@ -12,7 +13,6 @@ use rustc_middle::ty::print::with_no_trimmed_paths;
 use rustc_middle::ty::{self, Ty, TyCtxt};
 use rustc_span::def_id::LocalDefId;
 use rustc_span::{DUMMY_SP, Span};
-use rustc_target::abi::{self, Abi};
 use tracing::{debug, instrument, trace};
 
 use super::{CanAccessMutGlobal, CompileTimeInterpCx, CompileTimeMachine};
@@ -174,8 +174,8 @@ pub(super) fn op_to_const<'tcx>(
     // type (it's used throughout the compiler and having it work just on literals is not enough)
     // and we want it to be fast (i.e., don't go to an `Allocation` and reconstruct the `Scalar`
     // from its byte-serialized form).
-    let force_as_immediate = match op.layout.abi {
-        Abi::Scalar(abi::Scalar::Initialized { .. }) => true,
+    let force_as_immediate = match op.layout.backend_repr {
+        BackendRepr::Scalar(abi::Scalar::Initialized { .. }) => true,
         // We don't *force* `ConstValue::Slice` for `ScalarPair`. This has the advantage that if the
         // input `op` is a place, then turning it into a `ConstValue` and back into a `OpTy` will
         // not have to generate any duplicate allocations (we preserve the original `AllocId` in
@@ -1,10 +1,10 @@
+use rustc_abi::{BackendRepr, VariantIdx};
 use rustc_data_structures::stack::ensure_sufficient_stack;
 use rustc_middle::mir::interpret::{EvalToValTreeResult, GlobalId};
 use rustc_middle::ty::layout::{LayoutCx, LayoutOf, TyAndLayout};
 use rustc_middle::ty::{self, ScalarInt, Ty, TyCtxt};
 use rustc_middle::{bug, mir};
 use rustc_span::DUMMY_SP;
-use rustc_target::abi::{Abi, VariantIdx};
 use tracing::{debug, instrument, trace};
 
 use super::eval_queries::{mk_eval_cx_to_read_const_val, op_to_const};
@@ -117,7 +117,7 @@ fn const_to_valtree_inner<'tcx>(
             let val = ecx.read_immediate(place).unwrap();
             // We could allow wide raw pointers where both sides are integers in the future,
             // but for now we reject them.
-            if matches!(val.layout.abi, Abi::ScalarPair(..)) {
+            if matches!(val.layout.backend_repr, BackendRepr::ScalarPair(..)) {
                 return Err(ValTreeCreationError::NonSupportedType(ty));
             }
             let val = val.to_scalar();
@@ -311,7 +311,7 @@ pub fn valtree_to_const_value<'tcx>(
         // Fast path to avoid some allocations.
         return mir::ConstValue::ZeroSized;
     }
-    if layout.abi.is_scalar()
+    if layout.backend_repr.is_scalar()
         && (matches!(ty.kind(), ty::Tuple(_))
             || matches!(ty.kind(), ty::Adt(def, _) if def.is_struct()))
     {
@@ -172,8 +172,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         // must be compatible. So we just accept everything with Pointer ABI as compatible,
         // even if this will accept some code that is not stably guaranteed to work.
         // This also handles function pointers.
-        let thin_pointer = |layout: TyAndLayout<'tcx>| match layout.abi {
-            abi::Abi::Scalar(s) => match s.primitive() {
+        let thin_pointer = |layout: TyAndLayout<'tcx>| match layout.backend_repr {
+            abi::BackendRepr::Scalar(s) => match s.primitive() {
                 abi::Primitive::Pointer(addr_space) => Some(addr_space),
                 _ => None,
             },
@@ -274,7 +274,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         cast_ty: Ty<'tcx>,
     ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
         // Let's make sure v is sign-extended *if* it has a signed type.
-        let signed = src_layout.abi.is_signed(); // Also asserts that abi is `Scalar`.
+        let signed = src_layout.backend_repr.is_signed(); // Also asserts that abi is `Scalar`.
 
         let v = match src_layout.ty.kind() {
             Uint(_) | RawPtr(..) | FnPtr(..) => scalar.to_uint(src_layout.size)?,
@@ -112,7 +112,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         // Read tag and sanity-check `tag_layout`.
         let tag_val = self.read_immediate(&self.project_field(op, tag_field)?)?;
         assert_eq!(tag_layout.size, tag_val.layout.size);
-        assert_eq!(tag_layout.abi.is_signed(), tag_val.layout.abi.is_signed());
+        assert_eq!(tag_layout.backend_repr.is_signed(), tag_val.layout.backend_repr.is_signed());
         trace!("tag value: {}", tag_val);
 
         // Figure out which discriminant and variant this corresponds to.
@@ -563,7 +563,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
             self.binary_op(mir_op.wrapping_to_overflowing().unwrap(), l, r)?.to_scalar_pair();
         interp_ok(if overflowed.to_bool()? {
             let size = l.layout.size;
-            if l.layout.abi.is_signed() {
+            if l.layout.backend_repr.is_signed() {
                 // For signed ints the saturated value depends on the sign of the first
                 // term since the sign of the second term can be inferred from this and
                 // the fact that the operation has overflowed (if either is 0 no
@ -5,7 +5,7 @@ use std::assert_matches::assert_matches;
|
||||
|
||||
use either::{Either, Left, Right};
|
||||
use rustc_abi as abi;
|
||||
use rustc_abi::{Abi, HasDataLayout, Size};
|
||||
use rustc_abi::{BackendRepr, HasDataLayout, Size};
|
||||
use rustc_hir::def::Namespace;
|
||||
use rustc_middle::mir::interpret::ScalarSizeMismatch;
|
||||
use rustc_middle::ty::layout::{HasParamEnv, HasTyCtxt, LayoutOf, TyAndLayout};
|
||||
@ -114,9 +114,9 @@ impl<Prov: Provenance> Immediate<Prov> {
|
||||
}
|
||||
|
||||
/// Assert that this immediate is a valid value for the given ABI.
|
||||
pub fn assert_matches_abi(self, abi: Abi, msg: &str, cx: &impl HasDataLayout) {
|
||||
pub fn assert_matches_abi(self, abi: BackendRepr, msg: &str, cx: &impl HasDataLayout) {
|
||||
match (self, abi) {
|
||||
(Immediate::Scalar(scalar), Abi::Scalar(s)) => {
|
||||
(Immediate::Scalar(scalar), BackendRepr::Scalar(s)) => {
|
||||
assert_eq!(scalar.size(), s.size(cx), "{msg}: scalar value has wrong size");
|
||||
if !matches!(s.primitive(), abi::Primitive::Pointer(..)) {
|
||||
// This is not a pointer, it should not carry provenance.
|
||||
@ -126,7 +126,7 @@ impl<Prov: Provenance> Immediate<Prov> {
|
||||
);
|
||||
}
|
||||
}
|
||||
(Immediate::ScalarPair(a_val, b_val), Abi::ScalarPair(a, b)) => {
|
||||
(Immediate::ScalarPair(a_val, b_val), BackendRepr::ScalarPair(a, b)) => {
|
||||
assert_eq!(
|
||||
a_val.size(),
|
||||
a.size(cx),
|
||||
@ -244,7 +244,7 @@ impl<'tcx, Prov: Provenance> std::ops::Deref for ImmTy<'tcx, Prov> {
impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
#[inline]
pub fn from_scalar(val: Scalar<Prov>, layout: TyAndLayout<'tcx>) -> Self {
debug_assert!(layout.abi.is_scalar(), "`ImmTy::from_scalar` on non-scalar layout");
debug_assert!(layout.backend_repr.is_scalar(), "`ImmTy::from_scalar` on non-scalar layout");
debug_assert_eq!(val.size(), layout.size);
ImmTy { imm: val.into(), layout }
}
@ -252,7 +252,7 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
#[inline]
pub fn from_scalar_pair(a: Scalar<Prov>, b: Scalar<Prov>, layout: TyAndLayout<'tcx>) -> Self {
debug_assert!(
matches!(layout.abi, Abi::ScalarPair(..)),
matches!(layout.backend_repr, BackendRepr::ScalarPair(..)),
"`ImmTy::from_scalar_pair` on non-scalar-pair layout"
);
let imm = Immediate::ScalarPair(a, b);
@ -263,9 +263,9 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
pub fn from_immediate(imm: Immediate<Prov>, layout: TyAndLayout<'tcx>) -> Self {
// Without a `cx` we cannot call `assert_matches_abi`.
debug_assert!(
match (imm, layout.abi) {
(Immediate::Scalar(..), Abi::Scalar(..)) => true,
(Immediate::ScalarPair(..), Abi::ScalarPair(..)) => true,
match (imm, layout.backend_repr) {
(Immediate::Scalar(..), BackendRepr::Scalar(..)) => true,
(Immediate::ScalarPair(..), BackendRepr::ScalarPair(..)) => true,
(Immediate::Uninit, _) if layout.is_sized() => true,
_ => false,
},
@ -356,7 +356,11 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
fn offset_(&self, offset: Size, layout: TyAndLayout<'tcx>, cx: &impl HasDataLayout) -> Self {
// Verify that the input matches its type.
if cfg!(debug_assertions) {
self.assert_matches_abi(self.layout.abi, "invalid input to Immediate::offset", cx);
self.assert_matches_abi(
self.layout.backend_repr,
"invalid input to Immediate::offset",
cx,
);
}
// `ImmTy` has already been checked to be in-bounds, so we can just check directly if this
// remains in-bounds. This cannot actually be violated since projections are type-checked
@ -370,19 +374,19 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
);
// This makes several assumptions about what layouts we will encounter; we match what
// codegen does as well as we can (see `extract_field` in `rustc_codegen_ssa/src/mir/operand.rs`).
let inner_val: Immediate<_> = match (**self, self.layout.abi) {
let inner_val: Immediate<_> = match (**self, self.layout.backend_repr) {
// If the entire value is uninit, then so is the field (can happen in ConstProp).
(Immediate::Uninit, _) => Immediate::Uninit,
// If the field is uninhabited, we can forget the data (can happen in ConstProp).
// `enum S { A(!), B, C }` is an example of an enum with Scalar layout that
// has an `Uninhabited` variant, which means this case is possible.
_ if layout.abi.is_uninhabited() => Immediate::Uninit,
_ if layout.is_uninhabited() => Immediate::Uninit,
// the field contains no information, can be left uninit
// (Scalar/ScalarPair can contain even aligned ZST, not just 1-ZST)
_ if layout.is_zst() => Immediate::Uninit,
// some fieldless enum variants can have non-zero size but still `Aggregate` ABI... try
// to detect those here and also give them no data
_ if matches!(layout.abi, Abi::Aggregate { .. })
_ if matches!(layout.backend_repr, BackendRepr::Memory { .. })
&& matches!(layout.variants, abi::Variants::Single { .. })
&& matches!(&layout.fields, abi::FieldsShape::Arbitrary { offsets, .. } if offsets.len() == 0) =>
{
@ -394,7 +398,7 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
**self
}
// extract fields from types with `ScalarPair` ABI
(Immediate::ScalarPair(a_val, b_val), Abi::ScalarPair(a, b)) => {
(Immediate::ScalarPair(a_val, b_val), BackendRepr::ScalarPair(a, b)) => {
Immediate::from(if offset.bytes() == 0 {
a_val
} else {
@ -411,7 +415,11 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
),
};
// Ensure the new layout matches the new value.
inner_val.assert_matches_abi(layout.abi, "invalid field type in Immediate::offset", cx);
inner_val.assert_matches_abi(
layout.backend_repr,
"invalid field type in Immediate::offset",
cx,
);

ImmTy::from_immediate(inner_val, layout)
}
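The `enum S { A(!), B, C }` case mentioned in the comments above can be reproduced on stable Rust with `Infallible` standing in for the never type (a sketch; the exact layout is an implementation detail):

    use std::convert::Infallible;

    enum S {
        A(Infallible), // uninhabited variant: impossible to construct
        B,
        C,
    }

    fn main() {
        // The uninhabited variant contributes no data; only the B/C tag remains,
        // which is why such an enum can still get a simple scalar layout.
        assert_eq!(std::mem::size_of::<S>(), 1);
    }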
@ -567,8 +575,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// case where some of the bytes are initialized and others are not. So, we need an extra
// check that walks over the type of `mplace` to make sure it is truly correct to treat this
// like a `Scalar` (or `ScalarPair`).
interp_ok(match mplace.layout.abi {
Abi::Scalar(abi::Scalar::Initialized { value: s, .. }) => {
interp_ok(match mplace.layout.backend_repr {
BackendRepr::Scalar(abi::Scalar::Initialized { value: s, .. }) => {
let size = s.size(self);
assert_eq!(size, mplace.layout.size, "abi::Scalar size does not match layout size");
let scalar = alloc.read_scalar(
@ -577,7 +585,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
)?;
Some(ImmTy::from_scalar(scalar, mplace.layout))
}
Abi::ScalarPair(
BackendRepr::ScalarPair(
abi::Scalar::Initialized { value: a, .. },
abi::Scalar::Initialized { value: b, .. },
) => {
@ -637,9 +645,12 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
op: &impl Projectable<'tcx, M::Provenance>,
) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
if !matches!(
op.layout().abi,
Abi::Scalar(abi::Scalar::Initialized { .. })
| Abi::ScalarPair(abi::Scalar::Initialized { .. }, abi::Scalar::Initialized { .. })
op.layout().backend_repr,
BackendRepr::Scalar(abi::Scalar::Initialized { .. })
| BackendRepr::ScalarPair(
abi::Scalar::Initialized { .. },
abi::Scalar::Initialized { .. }
)
) {
span_bug!(self.cur_span(), "primitive read not possible for type: {}", op.layout().ty);
}
@ -114,7 +114,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
let l_bits = left.layout.size.bits();
// Compute the equivalent shift modulo `size` that is in the range `0..size`. (This is
// the one MIR operator that does *not* directly map to a single LLVM operation.)
let (shift_amount, overflow) = if right.layout.abi.is_signed() {
let (shift_amount, overflow) = if right.layout.backend_repr.is_signed() {
let shift_amount = r_signed();
let rem = shift_amount.rem_euclid(l_bits.into());
// `rem` is guaranteed positive, so the `unwrap` cannot fail
@ -126,7 +126,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
};
let shift_amount = u32::try_from(shift_amount).unwrap(); // we brought this in the range `0..size` so this will always fit
// Compute the shifted result.
let result = if left.layout.abi.is_signed() {
let result = if left.layout.backend_repr.is_signed() {
let l = l_signed();
let result = match bin_op {
Shl | ShlUnchecked => l.checked_shl(shift_amount).unwrap(),
@ -147,7 +147,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
if overflow && let Some(intrinsic) = throw_ub_on_overflow {
throw_ub!(ShiftOverflow {
intrinsic,
shift_amount: if right.layout.abi.is_signed() {
shift_amount: if right.layout.backend_repr.is_signed() {
Either::Right(r_signed())
} else {
Either::Left(r_unsigned())
@ -171,7 +171,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
let size = left.layout.size;

// Operations that need special treatment for signed integers
if left.layout.abi.is_signed() {
if left.layout.backend_repr.is_signed() {
let op: Option<fn(&i128, &i128) -> bool> = match bin_op {
Lt => Some(i128::lt),
Le => Some(i128::le),
@ -250,7 +250,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
BitXor => ImmTy::from_uint(l ^ r, left.layout),

_ => {
assert!(!left.layout.abi.is_signed());
assert!(!left.layout.backend_repr.is_signed());
let op: fn(u128, u128) -> (u128, bool) = match bin_op {
Add | AddUnchecked | AddWithOverflow => u128::overflowing_add,
Sub | SubUnchecked | SubWithOverflow => u128::overflowing_sub,
@ -332,7 +332,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
}

let offset_bytes = val.to_target_isize(self)?;
if !right.layout.abi.is_signed() && offset_bytes < 0 {
if !right.layout.backend_repr.is_signed() && offset_bytes < 0 {
// We were supposed to do an unsigned offset but the result is negative -- this
// can only mean that the cast wrapped around.
throw_ub!(PointerArithOverflow)
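A small standalone illustration of the `rem_euclid` trick used above: a possibly negative shift amount is mapped into `0..bits`, and because the result is guaranteed non-negative the later conversion to `u32` cannot fail.

    fn main() {
        let bits: i128 = 32; // pretend we are shifting a 32-bit value
        for shift in [-1i128, 35, 7] {
            let rem = shift.rem_euclid(bits); // always in 0..32
            let masked = u32::try_from(rem).unwrap();
            println!("{shift} wraps to {masked}");
        }
        assert_eq!((-1i128).rem_euclid(32), 31);
    }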
@ -5,11 +5,11 @@
use std::assert_matches::assert_matches;

use either::{Either, Left, Right};
use rustc_abi::{Align, BackendRepr, HasDataLayout, Size};
use rustc_ast::Mutability;
use rustc_middle::ty::Ty;
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::{bug, mir, span_bug};
use rustc_target::abi::{Abi, Align, HasDataLayout, Size};
use tracing::{instrument, trace};

use super::{
@ -659,7 +659,7 @@ where
// Unfortunately this is too expensive to do in release builds.
if cfg!(debug_assertions) {
src.assert_matches_abi(
local_layout.abi,
local_layout.backend_repr,
"invalid immediate for given destination place",
self,
);
@ -683,7 +683,11 @@ where
) -> InterpResult<'tcx> {
// We use the sizes from `value` below.
// Ensure that it matches the type of the place it is written to.
value.assert_matches_abi(layout.abi, "invalid immediate for given destination place", self);
value.assert_matches_abi(
layout.backend_repr,
"invalid immediate for given destination place",
self,
);
// Note that it is really important that the type here is the right one, and matches the
// type things are read at. In case `value` is a `ScalarPair`, we don't do any magic here
// to handle padding properly, which is only correct if we never look at this data with the
@ -700,7 +704,7 @@ where
alloc.write_scalar(alloc_range(Size::ZERO, scalar.size()), scalar)
}
Immediate::ScalarPair(a_val, b_val) => {
let Abi::ScalarPair(a, b) = layout.abi else {
let BackendRepr::ScalarPair(a, b) = layout.backend_repr else {
span_bug!(
self.cur_span(),
"write_immediate_to_mplace: invalid ScalarPair layout: {:#?}",
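A hedged aside on the padding remark above: a pair such as `(u8, u16)` contains a padding byte, and an immediate-style (`ScalarPair`) copy transfers only the two fields, not whatever sat in that byte.

    fn main() {
        use std::mem::{align_of, size_of};
        // Two fields occupy 3 bytes, but alignment forces a total of 4:
        // one byte of the layout is padding with no defined contents.
        assert_eq!(size_of::<(u8, u16)>(), 4);
        assert_eq!(align_of::<(u8, u16)>(), 2);
    }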
@ -11,6 +11,10 @@ use std::num::NonZero;

use either::{Left, Right};
use hir::def::DefKind;
use rustc_abi::{
BackendRepr, FieldIdx, FieldsShape, Scalar as ScalarAbi, Size, VariantIdx, Variants,
WrappingRange,
};
use rustc_ast::Mutability;
use rustc_data_structures::fx::FxHashSet;
use rustc_hir as hir;
@ -23,9 +27,6 @@ use rustc_middle::mir::interpret::{
use rustc_middle::ty::layout::{LayoutCx, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, Ty};
use rustc_span::symbol::{Symbol, sym};
use rustc_target::abi::{
Abi, FieldIdx, FieldsShape, Scalar as ScalarAbi, Size, VariantIdx, Variants, WrappingRange,
};
use tracing::trace;

use super::machine::AllocMap;
@ -422,7 +423,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
// Reset provenance: ensure slice tail metadata does not preserve provenance,
// and ensure all pointers do not preserve partial provenance.
if self.reset_provenance_and_padding {
if matches!(imm.layout.abi, Abi::Scalar(..)) {
if matches!(imm.layout.backend_repr, BackendRepr::Scalar(..)) {
// A thin pointer. If it has provenance, we don't have to do anything.
// If it does not, ensure we clear the provenance in memory.
if matches!(imm.to_scalar(), Scalar::Int(..)) {
@ -981,7 +982,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
let elem = layout.field(cx, 0);

// Fast-path for large arrays of simple types that do not contain any padding.
if elem.abi.is_scalar() {
if elem.backend_repr.is_scalar() {
out.add_range(base_offset, elem.size * count);
} else {
for idx in 0..count {
@ -1299,19 +1300,19 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
// FIXME: We could avoid some redundant checks here. For newtypes wrapping
// scalars, we do the same check on every "level" (e.g., first we check
// MyNewtype and then the scalar in there).
match val.layout.abi {
Abi::Uninhabited => {
match val.layout.backend_repr {
BackendRepr::Uninhabited => {
let ty = val.layout.ty;
throw_validation_failure!(self.path, UninhabitedVal { ty });
}
Abi::Scalar(scalar_layout) => {
BackendRepr::Scalar(scalar_layout) => {
if !scalar_layout.is_uninit_valid() {
// There is something to check here.
let scalar = self.read_scalar(val, ExpectedKind::InitScalar)?;
self.visit_scalar(scalar, scalar_layout)?;
}
}
Abi::ScalarPair(a_layout, b_layout) => {
BackendRepr::ScalarPair(a_layout, b_layout) => {
// We can only proceed if *both* scalars need to be initialized.
// FIXME: find a way to also check ScalarPair when one side can be uninit but
// the other must be init.
@ -1322,12 +1323,12 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
self.visit_scalar(b, b_layout)?;
}
}
Abi::Vector { .. } => {
BackendRepr::Vector { .. } => {
// No checks here, we assume layout computation gets this right.
// (This is harder to check since Miri does not represent these as `Immediate`. We
// also cannot use field projections since this might be a newtype around a vector.)
}
Abi::Aggregate { .. } => {
BackendRepr::Memory { .. } => {
// Nothing to do.
}
}
@ -1,9 +1,9 @@
use rustc_abi::{BackendRepr, FieldsShape, Scalar, Variants};
use rustc_middle::bug;
use rustc_middle::ty::layout::{
HasTyCtxt, LayoutCx, LayoutError, LayoutOf, TyAndLayout, ValidityRequirement,
};
use rustc_middle::ty::{ParamEnvAnd, Ty, TyCtxt};
use rustc_target::abi::{Abi, FieldsShape, Scalar, Variants};

use crate::const_eval::{CanAccessMutGlobal, CheckAlignment, CompileTimeMachine};
use crate::interpret::{InterpCx, MemoryKind};
@ -111,12 +111,12 @@ fn check_validity_requirement_lax<'tcx>(
};

// Check the ABI.
let valid = match this.abi {
Abi::Uninhabited => false, // definitely UB
Abi::Scalar(s) => scalar_allows_raw_init(s),
Abi::ScalarPair(s1, s2) => scalar_allows_raw_init(s1) && scalar_allows_raw_init(s2),
Abi::Vector { element: s, count } => count == 0 || scalar_allows_raw_init(s),
Abi::Aggregate { .. } => true, // Fields are checked below.
let valid = match this.backend_repr {
BackendRepr::Uninhabited => false, // definitely UB
BackendRepr::Scalar(s) => scalar_allows_raw_init(s),
BackendRepr::ScalarPair(s1, s2) => scalar_allows_raw_init(s1) && scalar_allows_raw_init(s2),
BackendRepr::Vector { element: s, count } => count == 0 || scalar_allows_raw_init(s),
BackendRepr::Memory { .. } => true, // Fields are checked below.
};
if !valid {
// This is definitely not okay.
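This query backs checks for constructs like `mem::zeroed`. A stable illustration of the `Scalar` arm's "does this scalar allow raw init" question: an all-zero bit pattern is valid for `Option<&u8>` (it is the `None` niche) but not for `&u8` itself.

    fn main() {
        // Sound: all-zero bytes are the `None` representation of `Option<&u8>`.
        let x: Option<&u8> = unsafe { std::mem::zeroed() };
        assert!(x.is_none());
        // By contrast, `std::mem::zeroed::<&u8>()` would be undefined behavior.
    }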
@ -2627,7 +2627,6 @@ impl<'hir> Ty<'hir> {
}
TyKind::Tup(tys) => tys.iter().any(Self::is_suggestable_infer_ty),
TyKind::Ptr(mut_ty) | TyKind::Ref(_, mut_ty) => mut_ty.ty.is_suggestable_infer_ty(),
TyKind::OpaqueDef(_, generic_args) => are_suggestable_generic_args(generic_args),
TyKind::Path(QPath::TypeRelative(ty, segment)) => {
ty.is_suggestable_infer_ty() || are_suggestable_generic_args(segment.args().args)
}
@ -2746,19 +2745,8 @@ pub struct BareFnTy<'hir> {
pub struct OpaqueTy<'hir> {
pub hir_id: HirId,
pub def_id: LocalDefId,
pub generics: &'hir Generics<'hir>,
pub bounds: GenericBounds<'hir>,
pub origin: OpaqueTyOrigin,
/// Return-position impl traits (and async futures) must "reify" any late-bound
/// lifetimes that are captured from the function signature they originate from.
///
/// This is done by generating a new early-bound lifetime parameter local to the
/// opaque which is instantiated in the function signature with the late-bound
/// lifetime.
///
/// This mapping associates a captured lifetime (first parameter) with the new
/// early-bound lifetime that was generated for the opaque.
pub lifetime_mapping: &'hir [(&'hir Lifetime, LocalDefId)],
pub span: Span,
}

@ -2861,12 +2849,7 @@ pub enum TyKind<'hir> {
/// Type parameters may be stored in each `PathSegment`.
Path(QPath<'hir>),
/// An opaque type definition itself. This is only used for `impl Trait`.
///
/// The generic argument list contains the lifetimes (and in the future
/// possibly parameters) that are actually bound on the `impl Trait`.
///
/// The last parameter specifies whether this opaque appears in a trait definition.
OpaqueDef(&'hir OpaqueTy<'hir>, &'hir [GenericArg<'hir>]),
OpaqueDef(&'hir OpaqueTy<'hir>),
/// A trait object type `Bound1 + Bound2 + Bound3`
/// where `Bound` is a trait or a lifetime.
TraitObject(&'hir [PolyTraitRef<'hir>], &'hir Lifetime, TraitObjectSyntax),
@ -3991,7 +3974,6 @@ impl<'hir> Node<'hir> {
| Node::TraitItem(TraitItem { generics, .. })
| Node::ImplItem(ImplItem { generics, .. }) => Some(generics),
Node::Item(item) => item.kind.generics(),
Node::OpaqueTy(opaque) => Some(opaque.generics),
_ => None,
}
}
@ -896,9 +896,8 @@ pub fn walk_ty<'v, V: Visitor<'v>>(visitor: &mut V, typ: &'v Ty<'v>) -> V::Resul
TyKind::Path(ref qpath) => {
try_visit!(visitor.visit_qpath(qpath, typ.hir_id, typ.span));
}
TyKind::OpaqueDef(opaque, lifetimes) => {
TyKind::OpaqueDef(opaque) => {
try_visit!(visitor.visit_opaque_ty(opaque));
walk_list!(visitor, visit_generic_arg, lifetimes);
}
TyKind::Array(ref ty, ref length) => {
try_visit!(visitor.visit_ty(ty));
@ -1188,10 +1187,8 @@ pub fn walk_poly_trait_ref<'v, V: Visitor<'v>>(
}

pub fn walk_opaque_ty<'v, V: Visitor<'v>>(visitor: &mut V, opaque: &'v OpaqueTy<'v>) -> V::Result {
let &OpaqueTy { hir_id, def_id: _, generics, bounds, origin: _, lifetime_mapping: _, span: _ } =
opaque;
let &OpaqueTy { hir_id, def_id: _, bounds, origin: _, span: _ } = opaque;
try_visit!(visitor.visit_id(hir_id));
try_visit!(walk_generics(visitor, generics));
walk_list!(visitor, visit_param_bound, bounds);
V::Result::output()
}
@ -84,11 +84,11 @@ impl<'tcx> Bounds<'tcx> {
&mut self,
tcx: TyCtxt<'tcx>,
bound_trait_ref: ty::PolyTraitRef<'tcx>,
host: ty::HostPolarity,
constness: ty::BoundConstness,
span: Span,
) {
if tcx.is_const_trait(bound_trait_ref.def_id()) {
self.clauses.push((bound_trait_ref.to_host_effect_clause(tcx, host), span));
self.clauses.push((bound_trait_ref.to_host_effect_clause(tcx, constness), span));
} else {
tcx.dcx().span_delayed_bug(span, "tried to lower {host:?} bound for non-const trait");
}
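A simplified, hypothetical mirror of the merged type (the real `ty::BoundConstness` lives in `rustc_middle` and carries more machinery): `Const` models a bound that must hold const-ly in every context, while `Maybe` models a `~const` bound that only has to hold const-ly when the caller itself is const.

    #[derive(Clone, Copy, Debug, PartialEq)]
    enum BoundConstness {
        Const, // the impl must be const-compatible unconditionally
        Maybe, // const-compatibility is only demanded in const contexts
    }

    fn constness_required_at_runtime(c: BoundConstness) -> bool {
        match c {
            BoundConstness::Const => true,
            BoundConstness::Maybe => false,
        }
    }

    fn main() {
        assert!(constness_required_at_runtime(BoundConstness::Const));
        assert!(!constness_required_at_runtime(BoundConstness::Maybe));
    }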
@ -218,7 +218,7 @@ fn compare_method_predicate_entailment<'tcx>(
tcx.const_conditions(trait_m.def_id).instantiate_own(tcx, trait_to_impl_args),
)
.map(|(trait_ref, _)| {
trait_ref.to_host_effect_clause(tcx, ty::HostPolarity::Maybe)
trait_ref.to_host_effect_clause(tcx, ty::BoundConstness::Maybe)
}),
);
}
@ -272,7 +272,7 @@ fn compare_method_predicate_entailment<'tcx>(
tcx,
cause,
param_env,
const_condition.to_host_effect_clause(tcx, ty::HostPolarity::Maybe),
const_condition.to_host_effect_clause(tcx, ty::BoundConstness::Maybe),
));
}
}
@ -1942,7 +1942,7 @@ fn compare_type_predicate_entailment<'tcx>(
tcx.const_conditions(trait_ty.def_id).instantiate_own(tcx, trait_to_impl_args),
)
.map(|(trait_ref, _)| {
trait_ref.to_host_effect_clause(tcx, ty::HostPolarity::Maybe)
trait_ref.to_host_effect_clause(tcx, ty::BoundConstness::Maybe)
}),
);
}
@ -1985,7 +1985,7 @@ fn compare_type_predicate_entailment<'tcx>(
tcx,
cause,
param_env,
const_condition.to_host_effect_clause(tcx, ty::HostPolarity::Maybe),
const_condition.to_host_effect_clause(tcx, ty::BoundConstness::Maybe),
));
}
}
@ -2091,7 +2091,7 @@ pub(super) fn check_type_bounds<'tcx>(
tcx,
mk_cause(span),
param_env,
c.to_host_effect_clause(tcx, ty::HostPolarity::Maybe),
c.to_host_effect_clause(tcx, ty::BoundConstness::Maybe),
)
}),
);
@ -1388,7 +1388,7 @@ fn check_impl<'tcx>(
ObligationCauseCode::WellFormed(None),
),
wfcx.param_env,
bound.to_host_effect_clause(tcx, ty::HostPolarity::Maybe),
bound.to_host_effect_clause(tcx, ty::BoundConstness::Maybe),
))
}
}
@ -1302,7 +1302,7 @@ fn trait_def(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::TraitDef {
}
}

#[instrument(level = "debug", skip(tcx))]
#[instrument(level = "debug", skip(tcx), ret)]
fn fn_sig(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::EarlyBinder<'_, ty::PolyFnSig<'_>> {
use rustc_hir::Node::*;
use rustc_hir::*;
@ -426,6 +426,21 @@ pub(super) fn generics_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::Generics {
});
}

if let Node::OpaqueTy(&hir::OpaqueTy { .. }) = node {
assert!(own_params.is_empty());

let lifetimes = tcx.opaque_captured_lifetimes(def_id);
debug!(?lifetimes);

own_params.extend(lifetimes.iter().map(|&(_, param)| ty::GenericParamDef {
name: tcx.item_name(param.to_def_id()),
index: next_index(),
def_id: param.to_def_id(),
pure_wrt_drop: false,
kind: ty::GenericParamDefKind::Lifetime,
}))
}

let param_def_id_to_index =
own_params.iter().map(|param| (param.def_id, param.index)).collect();
@ -329,13 +329,6 @@ fn gather_explicit_predicates_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::Gen
// We create bi-directional Outlives predicates between the original
// and the duplicated parameter, to ensure that they do not get out of sync.
if let Node::OpaqueTy(..) = node {
let opaque_ty_node = tcx.parent_hir_node(hir_id);
let Node::Ty(&hir::Ty { kind: TyKind::OpaqueDef(_, lifetimes), .. }) = opaque_ty_node
else {
bug!("unexpected {opaque_ty_node:?}")
};
debug!(?lifetimes);

compute_bidirectional_outlives_predicates(tcx, &generics.own_params, &mut predicates);
debug!(?predicates);
}
@ -716,7 +709,7 @@ pub(super) fn assert_only_contains_predicates_from<'tcx>(
match clause.kind().skip_binder() {
ty::ClauseKind::HostEffect(ty::HostEffectPredicate {
trait_ref: _,
host: ty::HostPolarity::Maybe,
constness: ty::BoundConstness::Maybe,
}) => {}
_ => {
bug!(
@ -732,8 +725,8 @@ pub(super) fn assert_only_contains_predicates_from<'tcx>(
match clause.kind().skip_binder() {
ty::ClauseKind::HostEffect(pred) => {
assert_eq!(
pred.host,
ty::HostPolarity::Maybe,
pred.constness,
ty::BoundConstness::Maybe,
"expected `~const` predicate when computing `{filter:?}` \
implied bounds: {clause:?}",
);
@ -943,7 +936,7 @@ pub(super) fn const_conditions<'tcx>(
bounds.push_const_bound(
tcx,
ty::Binder::dummy(ty::TraitRef::identity(tcx, def_id.to_def_id())),
ty::HostPolarity::Maybe,
ty::BoundConstness::Maybe,
DUMMY_SP,
);

@ -963,7 +956,7 @@ pub(super) fn const_conditions<'tcx>(
clause.kind().map_bound(|clause| match clause {
ty::ClauseKind::HostEffect(ty::HostEffectPredicate {
trait_ref,
host: ty::HostPolarity::Maybe,
constness: ty::BoundConstness::Maybe,
}) => trait_ref,
_ => bug!("converted {clause:?}"),
}),
@ -1001,7 +994,7 @@ pub(super) fn implied_const_bounds<'tcx>(
clause.kind().map_bound(|clause| match clause {
ty::ClauseKind::HostEffect(ty::HostEffectPredicate {
trait_ref,
host: ty::HostPolarity::Maybe,
constness: ty::BoundConstness::Maybe,
}) => trait_ref,
_ => bug!("converted {clause:?}"),
}),
@ -6,12 +6,14 @@
//! the types in HIR to identify late-bound lifetimes and assign their Debruijn indices. This file
//! is also responsible for assigning their semantics to implicit lifetimes in trait objects.

use core::ops::ControlFlow;
use std::cell::RefCell;
use std::fmt;
use std::ops::ControlFlow;

use rustc_ast::visit::walk_list;
use rustc_data_structures::fx::{FxHashSet, FxIndexMap, FxIndexSet};
use rustc_data_structures::sorted_map::SortedMap;
use rustc_errors::ErrorGuaranteed;
use rustc_hir as hir;
use rustc_hir::def::{DefKind, Res};
use rustc_hir::intravisit::{self, Visitor};
@ -25,7 +27,7 @@ use rustc_middle::query::Providers;
use rustc_middle::ty::{self, TyCtxt, TypeSuperVisitable, TypeVisitor};
use rustc_middle::{bug, span_bug};
use rustc_span::Span;
use rustc_span::def_id::{DefId, LocalDefId};
use rustc_span::def_id::{DefId, LocalDefId, LocalDefIdMap};
use rustc_span::symbol::{Ident, sym};
use tracing::{debug, debug_span, instrument};

@ -80,6 +82,9 @@ struct NamedVarMap {
// - trait refs
// - bound types (like `T` in `for<'a> T<'a>: Foo`)
late_bound_vars: ItemLocalMap<Vec<ty::BoundVariableKind>>,

// Lists the captured lifetimes for each opaque type.
opaque_captured_lifetimes: LocalDefIdMap<Vec<(ResolvedArg, LocalDefId)>>,
}

struct BoundVarContext<'a, 'tcx> {
@ -147,6 +152,23 @@ enum Scope<'a> {
s: ScopeRef<'a>,
},

/// Remap lifetimes that appear in opaque types to fresh lifetime parameters. Given:
/// `fn foo<'a>() -> impl MyTrait<'a> { ... }`
///
/// HIR tells us that `'a` refers to the lifetime bound on `foo`.
/// However, typeck and borrowck for opaques work based on using a new generic type.
/// `type MyAnonTy<'b> = impl MyTrait<'b>;`
///
/// This scope collects the mapping `'a -> 'b`.
Opaque {
/// The opaque type we are traversing.
def_id: LocalDefId,
/// Mapping from each captured lifetime `'a` to the duplicate generic parameter `'b`.
captures: &'a RefCell<FxIndexMap<ResolvedArg, LocalDefId>>,

s: ScopeRef<'a>,
},

/// Disallows capturing late-bound vars from parent scopes.
///
/// This is necessary for something like `for<T> [(); { /* references T */ }]:`,
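In surface terms (ordinary stable Rust; the duplicated `'b` parameter stays invisible to the user), the remapping the `Opaque` scope performs is what lets the opaque return type below carry the caller's `'a` as its own early-bound parameter:

    use std::fmt::Display;

    fn foo<'a>(s: &'a String) -> impl Display + 'a {
        s // the opaque type captures `'a`
    }

    fn main() {
        let s = String::from("hi");
        let d = foo(&s);
        println!("{d}");
    }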
@ -192,6 +214,12 @@ impl<'a> fmt::Debug for TruncatedScopeDebug<'a> {
.field("where_bound_origin", where_bound_origin)
.field("s", &"..")
.finish(),
Scope::Opaque { captures, def_id, s: _ } => f
.debug_struct("Opaque")
.field("def_id", def_id)
.field("captures", &captures.borrow())
.field("s", &"..")
.finish(),
Scope::Body { id, s: _ } => {
f.debug_struct("Body").field("id", id).field("s", &"..").finish()
}
@ -226,6 +254,12 @@ pub(crate) fn provide(providers: &mut Providers) {
is_late_bound_map,
object_lifetime_default,
late_bound_vars_map: |tcx, id| &tcx.resolve_bound_vars(id).late_bound_vars,
opaque_captured_lifetimes: |tcx, id| {
&tcx.resolve_bound_vars(tcx.local_def_id_to_hir_id(id).owner)
.opaque_captured_lifetimes
.get(&id)
.map_or(&[][..], |x| &x[..])
},

..*providers
};
@ -236,8 +270,11 @@ pub(crate) fn provide(providers: &mut Providers) {
/// `named_variable_map`, `is_late_bound_map`, etc.
#[instrument(level = "debug", skip(tcx))]
fn resolve_bound_vars(tcx: TyCtxt<'_>, local_def_id: hir::OwnerId) -> ResolveBoundVars {
let mut named_variable_map =
NamedVarMap { defs: Default::default(), late_bound_vars: Default::default() };
let mut named_variable_map = NamedVarMap {
defs: Default::default(),
late_bound_vars: Default::default(),
opaque_captured_lifetimes: Default::default(),
};
let mut visitor = BoundVarContext {
tcx,
map: &mut named_variable_map,
@ -264,13 +301,16 @@ fn resolve_bound_vars(tcx: TyCtxt<'_>, local_def_id: hir::OwnerId) -> ResolveBou

let defs = named_variable_map.defs.into_sorted_stable_ord();
let late_bound_vars = named_variable_map.late_bound_vars.into_sorted_stable_ord();
let opaque_captured_lifetimes = named_variable_map.opaque_captured_lifetimes;
let rl = ResolveBoundVars {
defs: SortedMap::from_presorted_elements(defs),
late_bound_vars: SortedMap::from_presorted_elements(late_bound_vars),
opaque_captured_lifetimes,
};

debug!(?rl.defs);
debug!(?rl.late_bound_vars);
debug!(?rl.opaque_captured_lifetimes);
rl
}
@ -306,6 +346,26 @@ fn generic_param_def_as_bound_arg(param: &ty::GenericParamDef) -> ty::BoundVaria
}
}

/// Whether this opaque always captures lifetimes in scope.
/// Right now, this is all RPITIT and TAITs, and when `lifetime_capture_rules_2024`
/// is enabled. We don't check the span of the edition, since this is done
/// on a per-opaque basis to account for nested opaques.
fn opaque_captures_all_in_scope_lifetimes<'tcx>(
tcx: TyCtxt<'tcx>,
opaque: &'tcx hir::OpaqueTy<'tcx>,
) -> bool {
match opaque.origin {
// If the opaque has the `use<...>` syntax, the user is telling us that they only want
// to account for those lifetimes, so do not try to be clever.
_ if opaque.bounds.iter().any(|bound| matches!(bound, hir::GenericBound::Use(..))) => false,
hir::OpaqueTyOrigin::AsyncFn { .. } | hir::OpaqueTyOrigin::TyAlias { .. } => true,
_ if tcx.features().lifetime_capture_rules_2024() || opaque.span.at_least_rust_2024() => {
true
}
hir::OpaqueTyOrigin::FnReturn { in_trait_or_impl, .. } => in_trait_or_impl.is_some(),
}
}

impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
/// Returns the binders in scope and the type of `Binder` that should be created for a poly trait ref.
fn poly_trait_ref_binder_info(&mut self) -> (Vec<ty::BoundVariableKind>, BinderScopeType) {
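The `use<...>` opt-out checked first above looks like this in surface syntax (stable since Rust 1.82; treat the exact version as an assumption). Because the opaque below names only `'a` in its capture list, it does not capture `'b`, so the returned value may outlive the second argument:

    fn first<'a, 'b>(x: &'a u8, _y: &'b u8) -> impl Sized + use<'a> {
        *x
    }

    fn main() {
        let a = 1u8;
        let r;
        {
            let b = 2u8;
            r = first(&a, &b); // fine: `'b` is not captured
        }
        let _ = r;
    }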
@ -317,7 +377,9 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
break (vec![], BinderScopeType::Normal);
}

Scope::ObjectLifetimeDefault { s, .. } | Scope::LateBoundary { s, .. } => {
Scope::Opaque { s, .. }
| Scope::ObjectLifetimeDefault { s, .. }
| Scope::LateBoundary { s, .. } => {
scope = s;
}
@ -488,29 +550,85 @@ impl<'a, 'tcx> Visitor<'tcx> for BoundVarContext<'a, 'tcx> {
}
}

/// Resolve the lifetimes inside the opaque type, and save them into
/// `opaque_captured_lifetimes`.
///
/// This method has special handling for opaques that capture all lifetimes,
/// like async desugaring.
#[instrument(level = "debug", skip(self))]
fn visit_opaque_ty(&mut self, opaque: &'tcx rustc_hir::OpaqueTy<'tcx>) {
// We want to start our early-bound indices at the end of the parent scope,
// not including any parent `impl Trait`s.
let mut bound_vars = FxIndexMap::default();
debug!(?opaque.generics.params);
for param in opaque.generics.params {
let arg = ResolvedArg::early(param);
bound_vars.insert(param.def_id, arg);
let captures = RefCell::new(FxIndexMap::default());

let capture_all_in_scope_lifetimes =
opaque_captures_all_in_scope_lifetimes(self.tcx, opaque);
if capture_all_in_scope_lifetimes {
let lifetime_ident = |def_id: LocalDefId| {
let name = self.tcx.item_name(def_id.to_def_id());
let span = self.tcx.def_span(def_id);
Ident::new(name, span)
};

// We list scopes outwards, which causes us to see lifetime parameters in reverse
// declaration order. In order to make it consistent with what `generics_of` might
// give, we will reverse the IndexMap after early captures.
let mut scope = self.scope;
let mut opaque_capture_scopes = vec![(opaque.def_id, &captures)];
loop {
match *scope {
Scope::Binder { ref bound_vars, s, .. } => {
for (&original_lifetime, &def) in bound_vars.iter().rev() {
if let DefKind::LifetimeParam = self.tcx.def_kind(original_lifetime) {
let ident = lifetime_ident(original_lifetime);
self.remap_opaque_captures(&opaque_capture_scopes, def, ident);
}
}
scope = s;
}

Scope::Root { mut opt_parent_item } => {
while let Some(parent_item) = opt_parent_item {
let parent_generics = self.tcx.generics_of(parent_item);
for param in parent_generics.own_params.iter().rev() {
if let ty::GenericParamDefKind::Lifetime = param.kind {
let def = ResolvedArg::EarlyBound(param.def_id.expect_local());
let ident = lifetime_ident(param.def_id.expect_local());
self.remap_opaque_captures(&opaque_capture_scopes, def, ident);
}
}
opt_parent_item = parent_generics.parent.and_then(DefId::as_local);
}
break;
}

Scope::Opaque { captures, def_id, s } => {
opaque_capture_scopes.push((def_id, captures));
scope = s;
}

Scope::Body { .. } => {
bug!("{:?}", scope)
}

Scope::ObjectLifetimeDefault { s, .. }
| Scope::Supertrait { s, .. }
| Scope::TraitRefBoundary { s, .. }
| Scope::LateBoundary { s, .. } => {
scope = s;
}
}
}
captures.borrow_mut().reverse();
}

let hir_id = self.tcx.local_def_id_to_hir_id(opaque.def_id);
let scope = Scope::Binder {
hir_id,
bound_vars,
s: self.scope,
scope_type: BinderScopeType::Normal,
where_bound_origin: None,
};
let scope = Scope::Opaque { captures: &captures, def_id: opaque.def_id, s: self.scope };
self.with(scope, |this| {
let scope = Scope::TraitRefBoundary { s: this.scope };
this.with(scope, |this| intravisit::walk_opaque_ty(this, opaque))
})
});

let captures = captures.into_inner().into_iter().collect();
debug!(?captures);
self.map.opaque_captured_lifetimes.insert(opaque.def_id, captures);
}

#[instrument(level = "debug", skip(self))]
@ -685,67 +803,6 @@ impl<'a, 'tcx> Visitor<'tcx> for BoundVarContext<'a, 'tcx> {
};
self.with(scope, |this| this.visit_ty(mt.ty));
}
hir::TyKind::OpaqueDef(opaque_ty, lifetimes) => {
self.visit_opaque_ty(opaque_ty);

// Resolve the lifetimes in the bounds to the lifetime defs in the generics.
// `fn foo<'a>() -> impl MyTrait<'a> { ... }` desugars to
// `type MyAnonTy<'b> = impl MyTrait<'b>;`
// ^ ^ this gets resolved in the scope of
// the opaque_ty generics

// Resolve the lifetimes that are applied to the opaque type.
// These are resolved in the current scope.
// `fn foo<'a>() -> impl MyTrait<'a> { ... }` desugars to
// `fn foo<'a>() -> MyAnonTy<'a> { ... }`
// ^ ^this gets resolved in the current scope
for lifetime in lifetimes {
let hir::GenericArg::Lifetime(lifetime) = lifetime else { continue };
self.visit_lifetime(lifetime);

// Check for predicates like `impl for<'a> Trait<impl OtherTrait<'a>>`
// and ban them. Type variables instantiated inside binders aren't
// well-supported at the moment, so this doesn't work.
// In the future, this should be fixed and this error should be removed.
let def = self.map.defs.get(&lifetime.hir_id.local_id).copied();
let Some(ResolvedArg::LateBound(_, _, lifetime_def_id)) = def else { continue };
let lifetime_hir_id = self.tcx.local_def_id_to_hir_id(lifetime_def_id);

let bad_place = match self.tcx.hir_node(self.tcx.parent_hir_id(lifetime_hir_id))
{
// Opaques do not declare their own lifetimes, so if a lifetime comes from an opaque
// it must be a reified late-bound lifetime from a trait goal.
hir::Node::OpaqueTy(_) => "higher-ranked lifetime from outer `impl Trait`",
// Other items are fine.
hir::Node::Item(_) | hir::Node::TraitItem(_) | hir::Node::ImplItem(_) => {
continue;
}
hir::Node::Ty(hir::Ty { kind: hir::TyKind::BareFn(_), .. }) => {
"higher-ranked lifetime from function pointer"
}
hir::Node::Ty(hir::Ty { kind: hir::TyKind::TraitObject(..), .. }) => {
"higher-ranked lifetime from `dyn` type"
}
_ => "higher-ranked lifetime",
};

let (span, label) = if lifetime.ident.span == self.tcx.def_span(lifetime_def_id)
{
(opaque_ty.span, Some(opaque_ty.span))
} else {
(lifetime.ident.span, None)
};

// Ensure that the parent of the def is an item, not HRTB
self.tcx.dcx().emit_err(errors::OpaqueCapturesHigherRankedLifetime {
span,
label,
decl_span: self.tcx.def_span(lifetime_def_id),
bad_place,
});
self.uninsert_lifetime_on_error(lifetime, def.unwrap());
}
}
_ => intravisit::walk_ty(self, ty),
}
}
@ -1129,6 +1186,7 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
let mut scope = self.scope;
let mut outermost_body = None;
let mut crossed_late_boundary = None;
let mut opaque_capture_scopes = vec![];
let result = loop {
match *scope {
Scope::Body { id, s } => {
@ -1204,6 +1262,12 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
scope = s;
}

Scope::Opaque { captures, def_id, s } => {
opaque_capture_scopes.push((def_id, captures));
late_depth = 0;
scope = s;
}

Scope::ObjectLifetimeDefault { s, .. }
| Scope::Supertrait { s, .. }
| Scope::TraitRefBoundary { s, .. } => {
@ -1218,6 +1282,8 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
};

if let Some(mut def) = result {
def = self.remap_opaque_captures(&opaque_capture_scopes, def, lifetime_ref.ident);

if let ResolvedArg::EarlyBound(..) = def {
// Do not free early-bound regions, only late-bound ones.
} else if let ResolvedArg::LateBound(_, _, param_def_id) = def
@ -1291,6 +1357,7 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
Scope::Root { .. } => break,
Scope::Binder { s, .. }
| Scope::Body { s, .. }
| Scope::Opaque { s, .. }
| Scope::ObjectLifetimeDefault { s, .. }
| Scope::Supertrait { s, .. }
| Scope::TraitRefBoundary { s, .. }
@ -1306,6 +1373,79 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
);
}

/// Check for predicates like `impl for<'a> Trait<impl OtherTrait<'a>>`
/// and ban them. Type variables instantiated inside binders aren't
/// well-supported at the moment, so this doesn't work.
/// In the future, this should be fixed and this error should be removed.
fn check_lifetime_is_capturable(
&self,
opaque_def_id: LocalDefId,
lifetime: ResolvedArg,
capture_span: Span,
) -> Result<(), ErrorGuaranteed> {
let ResolvedArg::LateBound(_, _, lifetime_def_id) = lifetime else { return Ok(()) };
let lifetime_hir_id = self.tcx.local_def_id_to_hir_id(lifetime_def_id);
let bad_place = match self.tcx.hir_node(self.tcx.parent_hir_id(lifetime_hir_id)) {
// Opaques do not declare their own lifetimes, so if a lifetime comes from an opaque
// it must be a reified late-bound lifetime from a trait goal.
hir::Node::OpaqueTy(_) => "higher-ranked lifetime from outer `impl Trait`",
// Other items are fine.
hir::Node::Item(_) | hir::Node::TraitItem(_) | hir::Node::ImplItem(_) => return Ok(()),
hir::Node::Ty(hir::Ty { kind: hir::TyKind::BareFn(_), .. }) => {
"higher-ranked lifetime from function pointer"
}
hir::Node::Ty(hir::Ty { kind: hir::TyKind::TraitObject(..), .. }) => {
"higher-ranked lifetime from `dyn` type"
}
_ => "higher-ranked lifetime",
};

let decl_span = self.tcx.def_span(lifetime_def_id);
let (span, label) = if capture_span != decl_span {
(capture_span, None)
} else {
let opaque_span = self.tcx.def_span(opaque_def_id);
(opaque_span, Some(opaque_span))
};

// Ensure that the parent of the def is an item, not HRTB
let guar = self.tcx.dcx().emit_err(errors::OpaqueCapturesHigherRankedLifetime {
span,
label,
decl_span,
bad_place,
});
Err(guar)
}

#[instrument(level = "trace", skip(self, opaque_capture_scopes), ret)]
fn remap_opaque_captures(
&self,
opaque_capture_scopes: &Vec<(LocalDefId, &RefCell<FxIndexMap<ResolvedArg, LocalDefId>>)>,
mut lifetime: ResolvedArg,
ident: Ident,
) -> ResolvedArg {
if let Some(&(opaque_def_id, _)) = opaque_capture_scopes.last() {
if let Err(guar) =
self.check_lifetime_is_capturable(opaque_def_id, lifetime, ident.span)
{
lifetime = ResolvedArg::Error(guar);
}
}

for &(opaque_def_id, captures) in opaque_capture_scopes.iter().rev() {
let mut captures = captures.borrow_mut();
let remapped = *captures.entry(lifetime).or_insert_with(|| {
let feed = self.tcx.create_def(opaque_def_id, ident.name, DefKind::LifetimeParam);
feed.def_span(ident.span);
feed.def_ident_span(Some(ident.span));
feed.def_id()
});
lifetime = ResolvedArg::EarlyBound(remapped);
}
lifetime
}

fn resolve_type_ref(&mut self, param_def_id: LocalDefId, hir_id: HirId) {
// Walk up the scope chain, tracking the number of fn scopes
// that we pass through, until we find a lifetime with the
@ -1345,6 +1485,7 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
}

Scope::ObjectLifetimeDefault { s, .. }
| Scope::Opaque { s, .. }
| Scope::Supertrait { s, .. }
| Scope::TraitRefBoundary { s, .. } => {
scope = s;
@ -1425,6 +1566,7 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
Scope::Root { .. } => break,
Scope::Binder { s, .. }
| Scope::Body { s, .. }
| Scope::Opaque { s, .. }
| Scope::ObjectLifetimeDefault { s, .. }
| Scope::Supertrait { s, .. }
| Scope::TraitRefBoundary { s, .. }
@ -1501,6 +1643,7 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {

Scope::Binder { s, .. }
| Scope::ObjectLifetimeDefault { s, .. }
| Scope::Opaque { s, .. }
| Scope::Supertrait { s, .. }
| Scope::TraitRefBoundary { s, .. }
| Scope::LateBoundary { s, .. } => {
@ -1786,7 +1929,8 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
fn resolve_object_lifetime_default(&mut self, lifetime_ref: &'tcx hir::Lifetime) {
let mut late_depth = 0;
let mut scope = self.scope;
let lifetime = loop {
let mut opaque_capture_scopes = vec![];
let mut lifetime = loop {
match *scope {
Scope::Binder { s, scope_type, .. } => {
match scope_type {
@ -1800,7 +1944,15 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {

Scope::Body { .. } | Scope::ObjectLifetimeDefault { lifetime: None, .. } => return,

Scope::ObjectLifetimeDefault { lifetime: Some(l), .. } => break l,
Scope::ObjectLifetimeDefault { lifetime: Some(l), .. } => {
break l.shifted(late_depth);
}

Scope::Opaque { captures, def_id, s } => {
opaque_capture_scopes.push((def_id, captures));
late_depth = 0;
scope = s;
}

Scope::Supertrait { s, .. }
| Scope::TraitRefBoundary { s, .. }
@ -1809,7 +1961,10 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
}
}
};
self.insert_lifetime(lifetime_ref, lifetime.shifted(late_depth));

lifetime = self.remap_opaque_captures(&opaque_capture_scopes, lifetime, lifetime_ref.ident);

self.insert_lifetime(lifetime_ref, lifetime);
}

#[instrument(level = "debug", skip(self))]
@ -1818,18 +1973,6 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
self.map.defs.insert(lifetime_ref.hir_id.local_id, def);
}

/// Sometimes we resolve a lifetime, but later find that it is an
/// error (esp. around impl trait). In that case, we remove the
/// entry into `map.defs` so as not to confuse later code.
fn uninsert_lifetime_on_error(
&mut self,
lifetime_ref: &'tcx hir::Lifetime,
bad_def: ResolvedArg,
) {
let old_value = self.map.defs.remove(&lifetime_ref.hir_id.local_id);
assert_eq!(old_value, Some(bad_def));
}

// When we have a return type notation type in a where clause, like
// `where <T as Trait>::method(..): Send`, we need to introduce new bound
// vars to the existing where clause's binder, to represent the lifetimes
@ -2013,18 +2156,22 @@ fn is_late_bound_map(
tcx: TyCtxt<'_>,
owner_id: hir::OwnerId,
) -> Option<&FxIndexSet<hir::ItemLocalId>> {
let decl = tcx.hir().fn_decl_by_hir_id(owner_id.into())?;
let sig = tcx.hir().fn_sig_by_hir_id(owner_id.into())?;
let generics = tcx.hir().get_generics(owner_id.def_id)?;

let mut late_bound = FxIndexSet::default();

let mut constrained_by_input = ConstrainedCollector { regions: Default::default(), tcx };
for arg_ty in decl.inputs {
for arg_ty in sig.decl.inputs {
constrained_by_input.visit_ty(arg_ty);
}

let mut appears_in_output = AllCollector::default();
intravisit::walk_fn_ret_ty(&mut appears_in_output, &decl.output);
let mut appears_in_output =
AllCollector { tcx, has_fully_capturing_opaque: false, regions: Default::default() };
intravisit::walk_fn_ret_ty(&mut appears_in_output, &sig.decl.output);
if appears_in_output.has_fully_capturing_opaque {
appears_in_output.regions.extend(generics.params.iter().map(|param| param.def_id));
}

debug!(?constrained_by_input.regions);

@ -2032,7 +2179,8 @@ fn is_late_bound_map(
//
// Subtle point: because we disallow nested bindings, we can just
// ignore binders here and scrape up all names we see.
let mut appears_in_where_clause = AllCollector::default();
let mut appears_in_where_clause =
AllCollector { tcx, has_fully_capturing_opaque: true, regions: Default::default() };
appears_in_where_clause.visit_generics(generics);
debug!(?appears_in_where_clause.regions);

@ -2198,17 +2346,26 @@ fn is_late_bound_map(
}
}

#[derive(Default)]
struct AllCollector {
struct AllCollector<'tcx> {
tcx: TyCtxt<'tcx>,
has_fully_capturing_opaque: bool,
regions: FxHashSet<LocalDefId>,
}

impl<'v> Visitor<'v> for AllCollector {
impl<'v> Visitor<'v> for AllCollector<'v> {
fn visit_lifetime(&mut self, lifetime_ref: &'v hir::Lifetime) {
if let hir::LifetimeName::Param(def_id) = lifetime_ref.res {
self.regions.insert(def_id);
}
}

fn visit_opaque_ty(&mut self, opaque: &'v hir::OpaqueTy<'v>) {
if !self.has_fully_capturing_opaque {
self.has_fully_capturing_opaque =
opaque_captures_all_in_scope_lifetimes(self.tcx, opaque);
}
intravisit::walk_opaque_ty(self, opaque);
}
}
}
}
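For intuition about what "late-bound" means in this computation (an aside, not part of the diff): a late-bound lifetime is abstracted over by the function itself, which is what allows coercion to a higher-ranked `for<'a> fn` pointer type.

    fn id_ref<'a>(x: &'a u8) -> &'a u8 {
        x // `'a` is late-bound: it is constrained by the inputs
    }

    fn main() {
        // This coercion is only possible because `'a` is late-bound.
        let f: for<'a> fn(&'a u8) -> &'a u8 = id_ref;
        let v = 5u8;
        assert_eq!(*f(&v), 5);
    }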
@ -294,13 +294,23 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ {
lifetime: &hir::Lifetime,
reason: RegionInferReason<'_>,
) -> ty::Region<'tcx> {
if let Some(resolved) = self.tcx().named_bound_var(lifetime.hir_id) {
self.lower_resolved_lifetime(resolved)
} else {
self.re_infer(lifetime.ident.span, reason)
}
}

/// Lower a lifetime from the HIR to our internal notion of a lifetime called a *region*.
#[instrument(level = "debug", skip(self), ret)]
pub fn lower_resolved_lifetime(&self, resolved: rbv::ResolvedArg) -> ty::Region<'tcx> {
let tcx = self.tcx();
let lifetime_name = |def_id| tcx.hir().name(tcx.local_def_id_to_hir_id(def_id));

match tcx.named_bound_var(lifetime.hir_id) {
Some(rbv::ResolvedArg::StaticLifetime) => tcx.lifetimes.re_static,
match resolved {
rbv::ResolvedArg::StaticLifetime => tcx.lifetimes.re_static,

Some(rbv::ResolvedArg::LateBound(debruijn, index, def_id)) => {
rbv::ResolvedArg::LateBound(debruijn, index, def_id) => {
let name = lifetime_name(def_id);
let br = ty::BoundRegion {
var: ty::BoundVar::from_u32(index),
@ -309,7 +319,7 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ {
ty::Region::new_bound(tcx, debruijn, br)
}

Some(rbv::ResolvedArg::EarlyBound(def_id)) => {
rbv::ResolvedArg::EarlyBound(def_id) => {
let name = tcx.hir().ty_param_name(def_id);
let item_def_id = tcx.hir().ty_param_owner(def_id);
let generics = tcx.generics_of(item_def_id);
@ -317,7 +327,7 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ {
ty::Region::new_early_param(tcx, ty::EarlyParamRegion { index, name })
}

Some(rbv::ResolvedArg::Free(scope, id)) => {
rbv::ResolvedArg::Free(scope, id) => {
let name = lifetime_name(id);
ty::Region::new_late_param(
tcx,
@ -328,9 +338,7 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ {
// (*) -- not late-bound, won't change
}

Some(rbv::ResolvedArg::Error(guar)) => ty::Region::new_error(tcx, guar),

None => self.re_infer(lifetime.ident.span, reason),
rbv::ResolvedArg::Error(guar) => ty::Region::new_error(tcx, guar),
}
}

@ -713,7 +721,7 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ {
bounds.push_const_bound(
tcx,
poly_trait_ref,
ty::HostPolarity::Const,
ty::BoundConstness::Const,
span,
);
}
@ -736,7 +744,12 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ {
PredicateFilter::ConstIfConst | PredicateFilter::SelfConstIfConst => match constness {
hir::BoundConstness::Maybe(span) => {
if polarity == ty::PredicatePolarity::Positive {
bounds.push_const_bound(tcx, poly_trait_ref, ty::HostPolarity::Maybe, span);
bounds.push_const_bound(
tcx,
poly_trait_ref,
ty::BoundConstness::Maybe,
span,
);
}
}
hir::BoundConstness::Always(_) | hir::BoundConstness::Never => {}
@ -2094,13 +2107,11 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ {
let opt_self_ty = maybe_qself.as_ref().map(|qself| self.lower_ty(qself));
self.lower_path(opt_self_ty, path, hir_ty.hir_id, false)
}
&hir::TyKind::OpaqueDef(opaque_ty, lifetimes) => {
let local_def_id = opaque_ty.def_id;

&hir::TyKind::OpaqueDef(opaque_ty) => {
// If this is an RPITIT and we are using the new RPITIT lowering scheme, we
// generate the def_id of an associated type for the trait and return as
// type a projection.
match opaque_ty.origin {
let in_trait = match opaque_ty.origin {
hir::OpaqueTyOrigin::FnReturn {
in_trait_or_impl: Some(hir::RpitContext::Trait),
..
@ -2108,11 +2119,7 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ {
| hir::OpaqueTyOrigin::AsyncFn {
in_trait_or_impl: Some(hir::RpitContext::Trait),
..
} => self.lower_opaque_ty(
tcx.associated_type_for_impl_trait_in_trait(local_def_id).to_def_id(),
lifetimes,
true,
),
} => true,
hir::OpaqueTyOrigin::FnReturn {
in_trait_or_impl: None | Some(hir::RpitContext::TraitImpl),
..
@ -2121,10 +2128,10 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ {
in_trait_or_impl: None | Some(hir::RpitContext::TraitImpl),
..
}
| hir::OpaqueTyOrigin::TyAlias { .. } => {
self.lower_opaque_ty(local_def_id.to_def_id(), lifetimes, false)
}
}
| hir::OpaqueTyOrigin::TyAlias { .. } => false,
};

self.lower_opaque_ty(opaque_ty.def_id, in_trait)
}
// If we encounter a type relative path with RTN generics, then it must have
// *not* gone through `lower_ty_maybe_return_type_notation`, and therefore
@ -2264,40 +2271,34 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ {
}

/// Lower an opaque type (i.e., an existential impl-Trait type) from the HIR.
#[instrument(level = "debug", skip_all, ret)]
fn lower_opaque_ty(
&self,
def_id: DefId,
lifetimes: &[hir::GenericArg<'_>],
in_trait: bool,
) -> Ty<'tcx> {
debug!(?def_id, ?lifetimes);
#[instrument(level = "debug", skip(self), ret)]
fn lower_opaque_ty(&self, def_id: LocalDefId, in_trait: bool) -> Ty<'tcx> {
let tcx = self.tcx();

let lifetimes = tcx.opaque_captured_lifetimes(def_id);
debug!(?lifetimes);

// If this is an RPITIT and we are using the new RPITIT lowering scheme, we
// generate the def_id of an associated type for the trait and return as
// type a projection.
let def_id = if in_trait {
tcx.associated_type_for_impl_trait_in_trait(def_id).to_def_id()
} else {
def_id.to_def_id()
};

let generics = tcx.generics_of(def_id);
debug!(?generics);

// We use `generics.count() - lifetimes.len()` here instead of `generics.parent_count`
// since return-position impl trait in trait squashes all of the generics from its source fn
// into its own generics, so the opaque's "own" params isn't always just lifetimes.
let offset = generics.count() - lifetimes.len();

let args = ty::GenericArgs::for_item(tcx, def_id, |param, _| {
// We use `generics.count() - lifetimes.len()` here instead of `generics.parent_count`
// since return-position impl trait in trait squashes all of the generics from its source fn
// into its own generics, so the opaque's "own" params isn't always just lifetimes.
if let Some(i) = (param.index as usize).checked_sub(generics.count() - lifetimes.len())
{
// Resolve our own lifetime parameters.
let GenericParamDefKind::Lifetime { .. } = param.kind else {
span_bug!(
tcx.def_span(param.def_id),
"only expected lifetime for opaque's own generics, got {:?}",
param
);
};
let hir::GenericArg::Lifetime(lifetime) = &lifetimes[i] else {
bug!(
"expected lifetime argument for param {param:?}, found {:?}",
&lifetimes[i]
)
};
self.lower_lifetime(lifetime, RegionInferReason::Param(&param)).into()
if let Some(i) = (param.index as usize).checked_sub(offset) {
let (lifetime, _) = lifetimes[i];
self.lower_resolved_lifetime(lifetime).into()
} else {
tcx.mk_param_from_def(param)
}
@ -659,8 +659,6 @@ impl<'a> State<'a> {

fn print_opaque_ty(&mut self, o: &hir::OpaqueTy<'_>) {
self.head("opaque");
self.print_generic_params(o.generics.params);
self.print_where_clause(o.generics);
self.word("{");
self.print_bounds("impl", o.bounds);
self.word("}");

@ -853,9 +853,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {

let host = match self.tcx.hir().body_const_context(self.body_id) {
Some(hir::ConstContext::Const { .. } | hir::ConstContext::Static(_)) => {
ty::HostPolarity::Const
ty::BoundConstness::Const
}
Some(hir::ConstContext::ConstFn) => ty::HostPolarity::Maybe,
Some(hir::ConstContext::ConstFn) => ty::BoundConstness::Maybe,
None => return,
};
@ -16,6 +16,7 @@
|
||||
use std::fmt::Write;
|
||||
|
||||
use ast::token::TokenKind;
|
||||
use rustc_abi::BackendRepr;
|
||||
use rustc_ast::tokenstream::{TokenStream, TokenTree};
|
||||
use rustc_ast::visit::{FnCtxt, FnKind};
|
||||
use rustc_ast::{self as ast, *};
|
||||
@ -40,7 +41,6 @@ use rustc_span::edition::Edition;
|
||||
use rustc_span::source_map::Spanned;
|
||||
use rustc_span::symbol::{Ident, Symbol, kw, sym};
|
||||
use rustc_span::{BytePos, InnerSpan, Span};
|
||||
use rustc_target::abi::Abi;
|
||||
use rustc_target::asm::InlineAsmArch;
|
||||
use rustc_trait_selection::infer::{InferCtxtExt, TyCtxtInferExt};
|
||||
use rustc_trait_selection::traits::misc::type_allowed_to_implement_copy;
|
||||
@ -2468,7 +2468,9 @@ impl<'tcx> LateLintPass<'tcx> for InvalidValue {
|
||||
|
||||
// Check if this ADT has a constrained layout (like `NonNull` and friends).
|
||||
if let Ok(layout) = cx.tcx.layout_of(cx.param_env.and(ty)) {
|
||||
if let Abi::Scalar(scalar) | Abi::ScalarPair(scalar, _) = &layout.abi {
|
||||
if let BackendRepr::Scalar(scalar) | BackendRepr::ScalarPair(scalar, _) =
|
||||
&layout.backend_repr
|
||||
{
|
||||
let range = scalar.valid_range(cx);
|
||||
let msg = if !range.contains(0) {
|
||||
"must be non-null"
|
||||
|
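For context on the constrained-layout check above, a quick standalone demonstration with plain std types (ours, not part of this commit): `NonNull`'s valid range excludes zero, which is exactly the kind of niche this lint inspects.

```rust
use std::mem::size_of;
use std::ptr::NonNull;

fn main() {
    // NonNull<u8> may never be zero, so Option<NonNull<u8>> can reuse that
    // forbidden value as its None representation and stays pointer-sized.
    assert_eq!(size_of::<Option<NonNull<u8>>>(), size_of::<*mut u8>());
}
```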
@ -217,7 +217,7 @@ fn structurally_same_type<'tcx>(
// `extern` blocks cannot be generic, so we'll always get a layout here.
let a_layout = tcx.layout_of(param_env.and(a)).unwrap();
let b_layout = tcx.layout_of(param_env.and(b)).unwrap();
assert_eq!(a_layout.abi, b_layout.abi);
assert_eq!(a_layout.backend_repr, b_layout.backend_repr);
assert_eq!(a_layout.size, b_layout.size);
assert_eq!(a_layout.align, b_layout.align);
}

@ -69,7 +69,7 @@ declare_lint_pass!(OpaqueHiddenInferredBound => [OPAQUE_HIDDEN_INFERRED_BOUND]);

impl<'tcx> LateLintPass<'tcx> for OpaqueHiddenInferredBound {
fn check_ty(&mut self, cx: &LateContext<'tcx>, ty: &'tcx hir::Ty<'tcx>) {
let hir::TyKind::OpaqueDef(opaque, _) = &ty.kind else {
let hir::TyKind::OpaqueDef(opaque) = &ty.kind else {
return;
};

@ -1,6 +1,7 @@
use std::iter;
use std::ops::ControlFlow;

use rustc_abi::{BackendRepr, TagEncoding, Variants, WrappingRange};
use rustc_data_structures::fx::FxHashSet;
use rustc_errors::DiagMessage;
use rustc_hir::{Expr, ExprKind};
@ -13,7 +14,6 @@ use rustc_session::{declare_lint, declare_lint_pass, impl_lint_pass};
use rustc_span::def_id::LocalDefId;
use rustc_span::symbol::sym;
use rustc_span::{Span, Symbol, source_map};
use rustc_target::abi::{Abi, TagEncoding, Variants, WrappingRange};
use rustc_target::spec::abi::Abi as SpecAbi;
use tracing::debug;
use {rustc_ast as ast, rustc_hir as hir};
@ -776,8 +776,8 @@ pub(crate) fn repr_nullable_ptr<'tcx>(
bug!("should be able to compute the layout of non-polymorphic type");
}

let field_ty_abi = &field_ty_layout.ok()?.abi;
if let Abi::Scalar(field_ty_scalar) = field_ty_abi {
let field_ty_abi = &field_ty_layout.ok()?.backend_repr;
if let BackendRepr::Scalar(field_ty_scalar) = field_ty_abi {
match field_ty_scalar.valid_range(&tcx) {
WrappingRange { start: 0, end }
if end == field_ty_scalar.size(&tcx).unsigned_int_max() - 1 =>
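The match arm above looks for a scalar whose valid range covers all values but one, which leaves a niche for `Option`'s `None`. A quick stable-Rust illustration of the same effect (our example; `NonZeroU32` excludes zero rather than the maximum, but the niche logic is analogous):

```rust
use std::mem::size_of;
use std::num::NonZeroU32;

fn main() {
    // One forbidden value is enough of a niche that the optimized repr
    // needs no separate tag byte for Option.
    assert_eq!(size_of::<Option<NonZeroU32>>(), size_of::<u32>());
}
```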
@ -3,7 +3,7 @@
use rustc_data_structures::sorted_map::SortedMap;
use rustc_errors::ErrorGuaranteed;
use rustc_hir::ItemLocalId;
use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_hir::def_id::{DefId, LocalDefId, LocalDefIdMap};
use rustc_macros::{Decodable, Encodable, HashStable, TyDecodable, TyEncodable};

use crate::ty;
@ -54,4 +54,6 @@ pub struct ResolveBoundVars {
pub defs: SortedMap<ItemLocalId, ResolvedArg>,

pub late_bound_vars: SortedMap<ItemLocalId, Vec<ty::BoundVariableKind>>,

pub opaque_captured_lifetimes: LocalDefIdMap<Vec<(ResolvedArg, LocalDefId)>>,
}

@ -1781,6 +1781,23 @@ rustc_queries! {
-> &'tcx SortedMap<ItemLocalId, Vec<ty::BoundVariableKind>> {
desc { |tcx| "looking up late bound vars inside `{}`", tcx.def_path_str(owner_id) }
}
/// For an opaque type, return the list of (captured lifetime, inner generic param).
/// ```ignore (illustrative)
/// fn foo<'a: 'a, 'b, T>(&'b u8) -> impl Into<Self> + 'b { ... }
/// ```
///
/// We would return `[('a, '_a), ('b, '_b)]`, with `'a` early-bound and `'b` late-bound.
///
/// After hir_ty_lowering, we get:
/// ```ignore (pseudo-code)
/// opaque foo::<'a>::opaque<'_a, '_b>: Into<Foo<'_a>> + '_b;
///     ^^^^^^^^ inner generic params
/// fn foo<'a>: for<'b> fn(&'b u8) -> foo::<'a>::opaque::<'a, 'b>
///     ^^^^^^ captured lifetimes
/// ```
query opaque_captured_lifetimes(def_id: LocalDefId) -> &'tcx [(ResolvedArg, LocalDefId)] {
desc { |tcx| "listing captured lifetimes for opaque `{}`", tcx.def_path_str(def_id) }
}

/// Computes the visibility of the provided `def_id`.
///
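A concrete stable-Rust instance of the capture relation this query documents (our own example, not from the commit):

```rust
// The opaque return type captures 'a because 'a appears in its bounds; the
// query pairs that resolved lifetime with the opaque's inner lifetime param.
fn f<'a>(x: &'a u8) -> impl Sized + 'a {
    x
}

fn main() {
    let v = 7u8;
    let _captured = f(&v);
}
```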
@ -76,8 +76,8 @@ use crate::traits::solve::{
};
use crate::ty::predicate::ExistentialPredicateStableCmpExt as _;
use crate::ty::{
self, AdtDef, AdtDefData, AdtKind, Binder, Clause, Clauses, Const, GenericArg, GenericArgs,
GenericArgsRef, GenericParamDefKind, HostPolarity, ImplPolarity, List, ListWithCachedTypeInfo,
self, AdtDef, AdtDefData, AdtKind, Binder, BoundConstness, Clause, Clauses, Const, GenericArg,
GenericArgs, GenericArgsRef, GenericParamDefKind, ImplPolarity, List, ListWithCachedTypeInfo,
ParamConst, ParamTy, Pattern, PatternKind, PolyExistentialPredicate, PolyFnSig, Predicate,
PredicateKind, PredicatePolarity, Region, RegionKind, ReprOptions, TraitObjectVisitor, Ty,
TyKind, TyVid, Visibility,
@ -2205,7 +2205,7 @@ macro_rules! nop_slice_lift {
nop_slice_lift! {ty::ValTree<'a> => ty::ValTree<'tcx>}

TrivialLiftImpls! {
ImplPolarity, PredicatePolarity, Promoted, HostPolarity,
ImplPolarity, PredicatePolarity, Promoted, BoundConstness,
}

macro_rules! sty_debug_print {
@ -3060,7 +3060,7 @@ impl<'tcx> TyCtxt<'tcx> {

loop {
let parent = self.local_parent(opaque_lifetime_param_def_id);
let hir::OpaqueTy { lifetime_mapping, .. } = self.hir().expect_opaque_ty(parent);
let lifetime_mapping = self.opaque_captured_lifetimes(parent);

let Some((lifetime, _)) = lifetime_mapping
.iter()
@ -3069,8 +3069,8 @@ impl<'tcx> TyCtxt<'tcx> {
bug!("duplicated lifetime param should be present");
};

match self.named_bound_var(lifetime.hir_id) {
Some(resolve_bound_vars::ResolvedArg::EarlyBound(ebv)) => {
match *lifetime {
resolve_bound_vars::ResolvedArg::EarlyBound(ebv) => {
let new_parent = self.local_parent(ebv);

// If we map to another opaque, then it should be a parent
@ -3089,7 +3089,7 @@ impl<'tcx> TyCtxt<'tcx> {
name: self.item_name(ebv.to_def_id()),
});
}
Some(resolve_bound_vars::ResolvedArg::LateBound(_, _, lbv)) => {
resolve_bound_vars::ResolvedArg::LateBound(_, _, lbv) => {
let new_parent = self.local_parent(lbv);
return ty::Region::new_late_param(
self,
@ -3100,13 +3100,13 @@ impl<'tcx> TyCtxt<'tcx> {
),
);
}
Some(resolve_bound_vars::ResolvedArg::Error(guar)) => {
resolve_bound_vars::ResolvedArg::Error(guar) => {
return ty::Region::new_error(self, guar);
}
_ => {
return ty::Region::new_error_with_message(
self,
lifetime.ident.span,
self.def_span(opaque_lifetime_param_def_id),
"cannot resolve lifetime",
);
}

@ -267,7 +267,7 @@ impl FlagComputation {
}
ty::PredicateKind::Clause(ty::ClauseKind::HostEffect(ty::HostEffectPredicate {
trait_ref,
host: _,
constness: _,
})) => {
self.add_args(trait_ref.args);
}

@ -4,8 +4,9 @@ use std::{cmp, fmt};

use rustc_abi::Primitive::{self, Float, Int, Pointer};
use rustc_abi::{
Abi, AddressSpace, Align, FieldsShape, HasDataLayout, Integer, LayoutCalculator, LayoutData,
PointeeInfo, PointerKind, ReprOptions, Scalar, Size, TagEncoding, TargetDataLayout, Variants,
AddressSpace, Align, BackendRepr, FieldsShape, HasDataLayout, Integer, LayoutCalculator,
LayoutData, PointeeInfo, PointerKind, ReprOptions, Scalar, Size, TagEncoding, TargetDataLayout,
Variants,
};
use rustc_error_messages::DiagMessage;
use rustc_errors::{
@ -757,7 +758,7 @@ where
Some(fields) => FieldsShape::Union(fields),
None => FieldsShape::Arbitrary { offsets: IndexVec::new(), memory_index: IndexVec::new() },
},
abi: Abi::Uninhabited,
backend_repr: BackendRepr::Uninhabited,
largest_niche: None,
align: tcx.data_layout.i8_align,
size: Size::ZERO,

@ -1959,7 +1959,7 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write {
ty::BoundConstness::Const => {
p!("const ");
}
ty::BoundConstness::ConstIfConst => {
ty::BoundConstness::Maybe => {
p!("~const ");
}
}
@ -3076,9 +3076,9 @@ define_print! {
}

ty::HostEffectPredicate<'tcx> {
let constness = match self.host {
ty::HostPolarity::Const => { "const" }
ty::HostPolarity::Maybe => { "~const" }
let constness = match self.constness {
ty::BoundConstness::Const => { "const" }
ty::BoundConstness::Maybe => { "~const" }
};
p!(print(self.trait_ref.self_ty()), ": {constness} ");
p!(print(self.trait_ref.print_trait_sugared()))

@ -1354,6 +1354,7 @@ impl<'tcx> Ty<'tcx> {
}
}

#[tracing::instrument(level = "trace", skip(tcx))]
pub fn fn_sig(self, tcx: TyCtxt<'tcx>) -> PolyFnSig<'tcx> {
match self.kind() {
FnDef(def_id, args) => tcx.fn_sig(*def_id).instantiate(tcx, args),

@ -13,7 +13,7 @@ use rustc_middle::ty::util::IntTypeExt;
use rustc_middle::ty::{self, Ty, UpvarArgs};
use rustc_span::source_map::Spanned;
use rustc_span::{DUMMY_SP, Span};
use rustc_target::abi::{Abi, FieldIdx, Primitive};
use rustc_target::abi::{BackendRepr, FieldIdx, Primitive};
use tracing::debug;

use crate::build::expr::as_place::PlaceBase;
@ -207,7 +207,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
);
let (op, ty) = (Operand::Move(discr), discr_ty);

if let Abi::Scalar(scalar) = layout.unwrap().abi
if let BackendRepr::Scalar(scalar) = layout.unwrap().backend_repr
&& !scalar.is_always_valid(&this.tcx)
&& let Primitive::Int(int_width, _signed) = scalar.primitive()
{

@ -858,7 +858,7 @@ impl<'tcx> Map<'tcx> {
// Allocate a value slot if it doesn't have one, and the user requested one.
assert!(place_info.value_index.is_none());
if let Ok(layout) = tcx.layout_of(param_env.and(place_info.ty))
&& layout.abi.is_scalar()
&& layout.backend_repr.is_scalar()
{
place_info.value_index = Some(self.value_count.into());
self.value_count += 1;

@ -2,6 +2,7 @@
//!
//! Currently, this pass only propagates scalar values.

use rustc_abi::{BackendRepr, FIRST_VARIANT, FieldIdx, Size, VariantIdx};
use rustc_const_eval::const_eval::{DummyMachine, throw_machine_stop_str};
use rustc_const_eval::interpret::{
ImmTy, Immediate, InterpCx, OpTy, PlaceTy, Projectable, interp_ok,
@ -20,7 +21,6 @@ use rustc_mir_dataflow::value_analysis::{
};
use rustc_mir_dataflow::{Analysis, Results, ResultsVisitor};
use rustc_span::DUMMY_SP;
use rustc_target::abi::{Abi, FIRST_VARIANT, FieldIdx, Size, VariantIdx};
use tracing::{debug, debug_span, instrument};

// These constants are somewhat random guesses and have not been optimized.
@ -457,7 +457,7 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> {
// a pair and sometimes not. But as a hack we always return a pair
// and just make the 2nd component `Bottom` when it does not exist.
Some(val) => {
if matches!(val.layout.abi, Abi::ScalarPair(..)) {
if matches!(val.layout.backend_repr, BackendRepr::ScalarPair(..)) {
let (val, overflow) = val.to_scalar_pair();
(FlatSet::Elem(val), FlatSet::Elem(overflow))
} else {
@ -470,7 +470,7 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> {
// Exactly one side is known, attempt some algebraic simplifications.
(FlatSet::Elem(const_arg), _) | (_, FlatSet::Elem(const_arg)) => {
let layout = const_arg.layout;
if !matches!(layout.abi, rustc_target::abi::Abi::Scalar(..)) {
if !matches!(layout.backend_repr, rustc_target::abi::BackendRepr::Scalar(..)) {
return (FlatSet::Top, FlatSet::Top);
}

@ -589,13 +589,13 @@ impl<'a, 'tcx> Collector<'a, 'tcx> {
}

let place = map.find(place.as_ref())?;
if layout.abi.is_scalar()
if layout.backend_repr.is_scalar()
&& let Some(value) = propagatable_scalar(place, state, map)
{
return Some(Const::Val(ConstValue::Scalar(value), ty));
}

if matches!(layout.abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
if matches!(layout.backend_repr, BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..)) {
let alloc_id = ecx
.intern_with_temp_alloc(layout, |ecx, dest| {
try_write_constant(ecx, dest, place, ty, state, map)
@ -641,7 +641,7 @@ fn try_write_constant<'tcx>(
}

// Fast path for scalars.
if layout.abi.is_scalar()
if layout.backend_repr.is_scalar()
&& let Some(value) = propagatable_scalar(place, state, map)
{
return ecx.write_immediate(Immediate::Scalar(value), dest);
@ -85,6 +85,7 @@
|
||||
use std::borrow::Cow;
|
||||
|
||||
use either::Either;
|
||||
use rustc_abi::{self as abi, BackendRepr, FIRST_VARIANT, FieldIdx, Primitive, Size, VariantIdx};
|
||||
use rustc_const_eval::const_eval::DummyMachine;
|
||||
use rustc_const_eval::interpret::{
|
||||
ImmTy, Immediate, InterpCx, MemPlaceMeta, MemoryKind, OpTy, Projectable, Scalar,
|
||||
@ -103,7 +104,6 @@ use rustc_middle::ty::layout::{HasParamEnv, LayoutOf};
|
||||
use rustc_middle::ty::{self, Ty, TyCtxt};
|
||||
use rustc_span::DUMMY_SP;
|
||||
use rustc_span::def_id::DefId;
|
||||
use rustc_target::abi::{self, Abi, FIRST_VARIANT, FieldIdx, Primitive, Size, VariantIdx};
|
||||
use smallvec::SmallVec;
|
||||
use tracing::{debug, instrument, trace};
|
||||
|
||||
@ -427,7 +427,10 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
|
||||
};
|
||||
let ptr_imm = Immediate::new_pointer_with_meta(data, meta, &self.ecx);
|
||||
ImmTy::from_immediate(ptr_imm, ty).into()
|
||||
} else if matches!(ty.abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
|
||||
} else if matches!(
|
||||
ty.backend_repr,
|
||||
BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..)
|
||||
) {
|
||||
let dest = self.ecx.allocate(ty, MemoryKind::Stack).discard_err()?;
|
||||
let variant_dest = if let Some(variant) = variant {
|
||||
self.ecx.project_downcast(&dest, variant).discard_err()?
|
||||
@ -573,12 +576,12 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
|
||||
// limited transmutes: it only works between types with the same layout, and
|
||||
// cannot transmute pointers to integers.
|
||||
if value.as_mplace_or_imm().is_right() {
|
||||
let can_transmute = match (value.layout.abi, to.abi) {
|
||||
(Abi::Scalar(s1), Abi::Scalar(s2)) => {
|
||||
let can_transmute = match (value.layout.backend_repr, to.backend_repr) {
|
||||
(BackendRepr::Scalar(s1), BackendRepr::Scalar(s2)) => {
|
||||
s1.size(&self.ecx) == s2.size(&self.ecx)
|
||||
&& !matches!(s1.primitive(), Primitive::Pointer(..))
|
||||
}
|
||||
(Abi::ScalarPair(a1, b1), Abi::ScalarPair(a2, b2)) => {
|
||||
(BackendRepr::ScalarPair(a1, b1), BackendRepr::ScalarPair(a2, b2)) => {
|
||||
a1.size(&self.ecx) == a2.size(&self.ecx) &&
|
||||
b1.size(&self.ecx) == b2.size(&self.ecx) &&
|
||||
// The alignment of the second component determines its offset, so that also needs to match.
|
||||
@ -1241,7 +1244,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
|
||||
|
||||
let as_bits = |value| {
|
||||
let constant = self.evaluated[value].as_ref()?;
|
||||
if layout.abi.is_scalar() {
|
||||
if layout.backend_repr.is_scalar() {
|
||||
let scalar = self.ecx.read_scalar(constant).discard_err()?;
|
||||
scalar.to_bits(constant.layout.size).discard_err()
|
||||
} else {
|
||||
@ -1497,12 +1500,12 @@ fn op_to_prop_const<'tcx>(
|
||||
|
||||
// Do not synthetize too large constants. Codegen will just memcpy them, which we'd like to
|
||||
// avoid.
|
||||
if !matches!(op.layout.abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
|
||||
if !matches!(op.layout.backend_repr, BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..)) {
|
||||
return None;
|
||||
}
|
||||
|
||||
// If this constant has scalar ABI, return it as a `ConstValue::Scalar`.
|
||||
if let Abi::Scalar(abi::Scalar::Initialized { .. }) = op.layout.abi
|
||||
if let BackendRepr::Scalar(abi::Scalar::Initialized { .. }) = op.layout.backend_repr
|
||||
&& let Some(scalar) = ecx.read_scalar(op).discard_err()
|
||||
{
|
||||
if !scalar.try_to_scalar_int().is_ok() {
|
||||
|
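A minimal standalone restatement of the register-transmute rule in the GVN hunk above (our simplification, not the compiler's types):

```rust
// GVN only folds an immediate transmute when both sides are plain scalars of
// equal size and the source is not a pointer (provenance cannot survive a
// trip through an integer). Sizes in bytes; the names here are ours.
fn can_transmute_scalars(src_bytes: u64, dst_bytes: u64, src_is_pointer: bool) -> bool {
    src_bytes == dst_bytes && !src_is_pointer
}

fn main() {
    assert!(can_transmute_scalars(8, 8, false)); // u64 -> f64: fine
    assert!(!can_transmute_scalars(8, 8, true)); // *const T -> u64: rejected
}
```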
@ -4,6 +4,7 @@

use std::fmt::Debug;

use rustc_abi::{BackendRepr, FieldIdx, HasDataLayout, Size, TargetDataLayout, VariantIdx};
use rustc_const_eval::const_eval::DummyMachine;
use rustc_const_eval::interpret::{
ImmTy, InterpCx, InterpResult, Projectable, Scalar, format_interp_error, interp_ok,
@ -19,7 +20,6 @@ use rustc_middle::mir::*;
use rustc_middle::ty::layout::{LayoutError, LayoutOf, LayoutOfHelpers, TyAndLayout};
use rustc_middle::ty::{self, ConstInt, ParamEnv, ScalarInt, Ty, TyCtxt, TypeVisitableExt};
use rustc_span::Span;
use rustc_target::abi::{Abi, FieldIdx, HasDataLayout, Size, TargetDataLayout, VariantIdx};
use tracing::{debug, instrument, trace};

use crate::errors::{AssertLint, AssertLintKind};
@ -557,7 +557,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
let right = self.use_ecx(|this| this.ecx.read_immediate(&right))?;

let val = self.use_ecx(|this| this.ecx.binary_op(bin_op, &left, &right))?;
if matches!(val.layout.abi, Abi::ScalarPair(..)) {
if matches!(val.layout.backend_repr, BackendRepr::ScalarPair(..)) {
// FIXME `Value` should properly support pairs in `Immediate`... but currently
// it does not.
let (val, overflow) = val.to_pair(&self.ecx);
@ -651,9 +651,9 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
let to = self.ecx.layout_of(to).ok()?;
// `offset` for immediates only supports scalar/scalar-pair ABIs,
// so bail out if the target is not one.
match (value.layout.abi, to.abi) {
(Abi::Scalar(..), Abi::Scalar(..)) => {}
(Abi::ScalarPair(..), Abi::ScalarPair(..)) => {}
match (value.layout.backend_repr, to.backend_repr) {
(BackendRepr::Scalar(..), BackendRepr::Scalar(..)) => {}
(BackendRepr::ScalarPair(..), BackendRepr::ScalarPair(..)) => {}
_ => return None,
}

@ -44,7 +44,7 @@ where
) -> Result<Candidate<I>, NoSolution> {
if let Some(host_clause) = assumption.as_host_effect_clause() {
if host_clause.def_id() == goal.predicate.def_id()
&& host_clause.host().satisfies(goal.predicate.host)
&& host_clause.constness().satisfies(goal.predicate.constness)
{
if !DeepRejectCtxt::relate_rigid_rigid(ecx.cx()).args_may_unify(
goal.predicate.trait_ref.args,
@ -91,7 +91,7 @@ where
cx,
cx.implied_const_bounds(alias_ty.def_id)
.iter_instantiated(cx, alias_ty.args)
.map(|trait_ref| trait_ref.to_host_effect_clause(cx, goal.predicate.host)),
.map(|trait_ref| trait_ref.to_host_effect_clause(cx, goal.predicate.constness)),
) {
candidates.extend(Self::probe_and_match_goal_against_assumption(
ecx,
@ -107,7 +107,7 @@ where
.map(|trait_ref| {
goal.with(
cx,
trait_ref.to_host_effect_clause(cx, goal.predicate.host),
trait_ref.to_host_effect_clause(cx, goal.predicate.constness),
)
}),
);
@ -163,7 +163,10 @@ where
.const_conditions(impl_def_id)
.iter_instantiated(cx, impl_args)
.map(|bound_trait_ref| {
goal.with(cx, bound_trait_ref.to_host_effect_clause(cx, goal.predicate.host))
goal.with(
cx,
bound_trait_ref.to_host_effect_clause(cx, goal.predicate.constness),
)
});
ecx.add_goals(GoalSource::ImplWhereBound, const_conditions);
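A sketch of the `satisfies` relation these hunks rely on, reconstructed from how the two variants are used here; treat the exact shape as an assumption rather than the compiler's definition:

```rust
#[derive(Clone, Copy, PartialEq, Debug)]
enum BoundConstness {
    Const, // always-const bound, printed as `const`
    Maybe, // conditionally-const bound, printed as `~const`
}

impl BoundConstness {
    // A `const` clause can discharge both `const` and `~const` goals;
    // a `~const` clause can only discharge `~const` goals.
    fn satisfies(self, goal: BoundConstness) -> bool {
        match (self, goal) {
            (BoundConstness::Const, _) => true,
            (BoundConstness::Maybe, BoundConstness::Maybe) => true,
            (BoundConstness::Maybe, BoundConstness::Const) => false,
        }
    }
}

fn main() {
    assert!(BoundConstness::Const.satisfies(BoundConstness::Maybe));
    assert!(!BoundConstness::Maybe.satisfies(BoundConstness::Const));
}
```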
@ -82,8 +82,12 @@ fn dump_layout_of(tcx: TyCtxt<'_>, item_def_id: LocalDefId, attr: &Attribute) {
let meta_items = attr.meta_item_list().unwrap_or_default();
for meta_item in meta_items {
match meta_item.name_or_empty() {
// FIXME: this never was about ABI and now this dump arg is confusing
sym::abi => {
tcx.dcx().emit_err(LayoutAbi { span, abi: format!("{:?}", ty_layout.abi) });
tcx.dcx().emit_err(LayoutAbi {
span,
abi: format!("{:?}", ty_layout.backend_repr),
});
}

sym::align => {

@ -139,7 +139,7 @@ where
}
ty::ClauseKind::HostEffect(pred) => {
try_visit!(self.visit_trait(pred.trait_ref));
pred.host.visit_with(self)
pred.constness.visit_with(self)
}
ty::ClauseKind::Projection(ty::ProjectionPredicate {
projection_term: projection_ty,

@ -841,10 +841,9 @@ impl<'ra: 'ast, 'ast, 'tcx> Visitor<'ast> for LateResolutionVisitor<'_, 'ast, 'r
self.r.record_partial_res(ty.id, PartialRes::new(res));
visit::walk_ty(self, ty)
}
TyKind::ImplTrait(node_id, _) => {
TyKind::ImplTrait(..) => {
let candidates = self.lifetime_elision_candidates.take();
visit::walk_ty(self, ty);
self.record_lifetime_params_for_impl_trait(*node_id);
self.lifetime_elision_candidates = candidates;
}
TyKind::TraitObject(bounds, ..) => {
@ -977,14 +976,6 @@ impl<'ra: 'ast, 'ast, 'tcx> Visitor<'ast> for LateResolutionVisitor<'_, 'ast, 'r
sig.decl.inputs.iter().map(|Param { ty, .. }| (None, &**ty)),
&sig.decl.output,
);

if let Some((coro_node_id, _)) = sig
.header
.coroutine_kind
.map(|coroutine_kind| coroutine_kind.return_id())
{
this.record_lifetime_params_for_impl_trait(coro_node_id);
}
},
);
return;
@ -1026,10 +1017,6 @@ impl<'ra: 'ast, 'ast, 'tcx> Visitor<'ast> for LateResolutionVisitor<'_, 'ast, 'r
.map(|Param { pat, ty, .. }| (Some(&**pat), &**ty)),
&declaration.output,
);

if let Some((async_node_id, _)) = coro_node_id {
this.record_lifetime_params_for_impl_trait(async_node_id);
}
},
);

@ -1220,7 +1207,6 @@ impl<'ra: 'ast, 'ast, 'tcx> Visitor<'ast> for LateResolutionVisitor<'_, 'ast, 'r
}
},
AssocItemConstraintKind::Bound { ref bounds } => {
self.record_lifetime_params_for_impl_trait(constraint.id);
walk_list!(self, visit_param_bound, bounds, BoundKind::Bound);
}
}
@ -4795,30 +4781,6 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> {
)
}

/// Construct the list of in-scope lifetime parameters for impl trait lowering.
/// We include all lifetime parameters, either named or "Fresh".
/// The order of those parameters does not matter, as long as it is
/// deterministic.
fn record_lifetime_params_for_impl_trait(&mut self, impl_trait_node_id: NodeId) {
let mut extra_lifetime_params = vec![];

for rib in self.lifetime_ribs.iter().rev() {
extra_lifetime_params
.extend(rib.bindings.iter().map(|(&ident, &(node_id, res))| (ident, node_id, res)));
match rib.kind {
LifetimeRibKind::Item => break,
LifetimeRibKind::AnonymousCreateParameter { binder, .. } => {
if let Some(earlier_fresh) = self.r.extra_lifetime_params_map.get(&binder) {
extra_lifetime_params.extend(earlier_fresh);
}
}
_ => {}
}
}

self.r.extra_lifetime_params_map.insert(impl_trait_node_id, extra_lifetime_params);
}

fn resolve_and_cache_rustdoc_path(&mut self, path_str: &str, ns: Namespace) -> Option<Res> {
// FIXME: This caching may be incorrect in case of multiple `macro_rules`
// items with the same name in the same module.

@ -56,7 +56,7 @@ impl<'tcx> Stable<'tcx> for rustc_abi::LayoutData<rustc_abi::FieldIdx, rustc_abi
LayoutShape {
fields: self.fields.stable(tables),
variants: self.variants.stable(tables),
abi: self.abi.stable(tables),
abi: self.backend_repr.stable(tables),
abi_align: self.align.abi.stable(tables),
size: self.size.stable(tables),
}
@ -196,20 +196,20 @@ impl<'tcx> Stable<'tcx> for rustc_abi::TagEncoding<rustc_abi::VariantIdx> {
}
}

impl<'tcx> Stable<'tcx> for rustc_abi::Abi {
impl<'tcx> Stable<'tcx> for rustc_abi::BackendRepr {
type T = ValueAbi;

fn stable(&self, tables: &mut Tables<'_>) -> Self::T {
match *self {
rustc_abi::Abi::Uninhabited => ValueAbi::Uninhabited,
rustc_abi::Abi::Scalar(scalar) => ValueAbi::Scalar(scalar.stable(tables)),
rustc_abi::Abi::ScalarPair(first, second) => {
rustc_abi::BackendRepr::Uninhabited => ValueAbi::Uninhabited,
rustc_abi::BackendRepr::Scalar(scalar) => ValueAbi::Scalar(scalar.stable(tables)),
rustc_abi::BackendRepr::ScalarPair(first, second) => {
ValueAbi::ScalarPair(first.stable(tables), second.stable(tables))
}
rustc_abi::Abi::Vector { element, count } => {
rustc_abi::BackendRepr::Vector { element, count } => {
ValueAbi::Vector { element: element.stable(tables), count }
}
rustc_abi::Abi::Aggregate { sized } => ValueAbi::Aggregate { sized },
rustc_abi::BackendRepr::Memory { sized } => ValueAbi::Aggregate { sized },
}
}
}

@ -1,5 +1,7 @@
use crate::abi::call::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Reg, RegKind, Uniform};
use crate::abi::{self, Abi, FieldsShape, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
use crate::abi::{
self, BackendRepr, FieldsShape, HasDataLayout, Size, TyAbiInterface, TyAndLayout,
};
use crate::spec::HasTargetSpec;
use crate::spec::abi::Abi as SpecAbi;

@ -21,8 +23,8 @@ enum FloatConv {
struct CannotUseFpConv;

fn is_loongarch_aggregate<Ty>(arg: &ArgAbi<'_, Ty>) -> bool {
match arg.layout.abi {
Abi::Vector { .. } => true,
match arg.layout.backend_repr {
BackendRepr::Vector { .. } => true,
_ => arg.layout.is_aggregate(),
}
}
@ -38,8 +40,8 @@ fn should_use_fp_conv_helper<'a, Ty, C>(
where
Ty: TyAbiInterface<'a, C> + Copy,
{
match arg_layout.abi {
Abi::Scalar(scalar) => match scalar.primitive() {
match arg_layout.backend_repr {
BackendRepr::Scalar(scalar) => match scalar.primitive() {
abi::Int(..) | abi::Pointer(_) => {
if arg_layout.size.bits() > xlen {
return Err(CannotUseFpConv);
@ -77,8 +79,8 @@ where
}
}
},
Abi::Vector { .. } | Abi::Uninhabited => return Err(CannotUseFpConv),
Abi::ScalarPair(..) | Abi::Aggregate { .. } => match arg_layout.fields {
BackendRepr::Vector { .. } | BackendRepr::Uninhabited => return Err(CannotUseFpConv),
BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => match arg_layout.fields {
FieldsShape::Primitive => {
unreachable!("aggregates can't have `FieldsShape::Primitive`")
}
@ -311,7 +313,7 @@ fn classify_arg<'a, Ty, C>(
}

fn extend_integer_width<Ty>(arg: &mut ArgAbi<'_, Ty>, xlen: u64) {
if let Abi::Scalar(scalar) = arg.layout.abi {
if let BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
if let abi::Int(i, _) = scalar.primitive() {
// 32-bit integers are always sign-extended
if i.size().bits() == 32 && xlen > 32 {

@ -5,7 +5,7 @@ use crate::abi::{self, HasDataLayout, Size, TyAbiInterface};

fn extend_integer_width_mips<Ty>(arg: &mut ArgAbi<'_, Ty>, bits: u64) {
// Always sign extend u32 values on 64-bit mips
if let abi::Abi::Scalar(scalar) = arg.layout.abi {
if let abi::BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
if let abi::Int(i, signed) = scalar.primitive() {
if !signed && i.size().bits() == 32 {
if let PassMode::Direct(ref mut attrs) = arg.mode {
@ -24,8 +24,8 @@ where
Ty: TyAbiInterface<'a, C> + Copy,
C: HasDataLayout,
{
match ret.layout.field(cx, i).abi {
abi::Abi::Scalar(scalar) => match scalar.primitive() {
match ret.layout.field(cx, i).backend_repr {
abi::BackendRepr::Scalar(scalar) => match scalar.primitive() {
abi::Float(abi::F32) => Some(Reg::f32()),
abi::Float(abi::F64) => Some(Reg::f64()),
_ => None,
@ -109,7 +109,7 @@ where
let offset = arg.layout.fields.offset(i);

// We only care about aligned doubles
if let abi::Abi::Scalar(scalar) = field.abi {
if let abi::BackendRepr::Scalar(scalar) = field.backend_repr {
if scalar.primitive() == abi::Float(abi::F64) {
if offset.is_aligned(dl.f64_align.abi) {
// Insert enough integers to cover [last_offset, offset)

@ -6,7 +6,8 @@ use rustc_macros::HashStable_Generic;
use rustc_span::Symbol;

use crate::abi::{
self, Abi, AddressSpace, Align, HasDataLayout, Pointer, Size, TyAbiInterface, TyAndLayout,
self, AddressSpace, Align, BackendRepr, HasDataLayout, Pointer, Size, TyAbiInterface,
TyAndLayout,
};
use crate::spec::abi::Abi as SpecAbi;
use crate::spec::{self, HasTargetSpec, HasWasmCAbiOpt, HasX86AbiOpt, WasmCAbi};
@ -350,15 +351,17 @@ impl<'a, Ty> ArgAbi<'a, Ty> {
layout: TyAndLayout<'a, Ty>,
scalar_attrs: impl Fn(&TyAndLayout<'a, Ty>, abi::Scalar, Size) -> ArgAttributes,
) -> Self {
let mode = match layout.abi {
Abi::Uninhabited => PassMode::Ignore,
Abi::Scalar(scalar) => PassMode::Direct(scalar_attrs(&layout, scalar, Size::ZERO)),
Abi::ScalarPair(a, b) => PassMode::Pair(
let mode = match layout.backend_repr {
BackendRepr::Uninhabited => PassMode::Ignore,
BackendRepr::Scalar(scalar) => {
PassMode::Direct(scalar_attrs(&layout, scalar, Size::ZERO))
}
BackendRepr::ScalarPair(a, b) => PassMode::Pair(
scalar_attrs(&layout, a, Size::ZERO),
scalar_attrs(&layout, b, a.size(cx).align_to(b.align(cx).abi)),
),
Abi::Vector { .. } => PassMode::Direct(ArgAttributes::new()),
Abi::Aggregate { .. } => Self::indirect_pass_mode(&layout),
BackendRepr::Vector { .. } => PassMode::Direct(ArgAttributes::new()),
BackendRepr::Memory { .. } => Self::indirect_pass_mode(&layout),
};
ArgAbi { layout, mode }
}
@ -460,7 +463,7 @@ impl<'a, Ty> ArgAbi<'a, Ty> {

pub fn extend_integer_width_to(&mut self, bits: u64) {
// Only integers have signedness
if let Abi::Scalar(scalar) = self.layout.abi {
if let BackendRepr::Scalar(scalar) = self.layout.backend_repr {
if let abi::Int(i, signed) = scalar.primitive() {
if i.size().bits() < bits {
if let PassMode::Direct(ref mut attrs) = self.mode {
@ -512,7 +515,7 @@ impl<'a, Ty> ArgAbi<'a, Ty> {
// That elevates any type difference to an ABI difference since we just use the
// full Rust type as the LLVM argument/return type.
if matches!(self.mode, PassMode::Direct(..))
&& matches!(self.layout.abi, Abi::Aggregate { .. })
&& matches!(self.layout.backend_repr, BackendRepr::Memory { .. })
{
// For aggregates in `Direct` mode to be compatible, the types need to be equal.
self.layout.ty == other.layout.ty
@ -791,8 +794,8 @@ impl<'a, Ty> FnAbi<'a, Ty> {
continue;
}

match arg.layout.abi {
Abi::Aggregate { .. } => {}
match arg.layout.backend_repr {
BackendRepr::Memory { .. } => {}

// This is a fun case! The gist of what this is doing is
// that we want callers and callees to always agree on the
@ -813,7 +816,9 @@ impl<'a, Ty> FnAbi<'a, Ty> {
// Note that the intrinsic ABI is exempt here as
// that's how we connect up to LLVM and it's unstable
// anyway, we control all calls to it in libstd.
Abi::Vector { .. } if abi != SpecAbi::RustIntrinsic && spec.simd_types_indirect => {
BackendRepr::Vector { .. }
if abi != SpecAbi::RustIntrinsic && spec.simd_types_indirect =>
{
arg.make_indirect();
continue;
}
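For readers outside the compiler, a toy model of the `PassMode` choice in `ArgAbi::new` above (our own simplified enums, not rustc's):

```rust
// How a value's backend representation picks its calling-convention pass mode:
// nothing for uninhabited types, registers for scalars/pairs/vectors, and an
// indirect pointer for anything that only lives in memory.
#[derive(Debug)]
enum Repr { Uninhabited, Scalar, ScalarPair, Vector, Memory }

#[derive(Debug, PartialEq)]
enum Mode { Ignore, Direct, Pair, Indirect }

fn pass_mode(repr: Repr) -> Mode {
    match repr {
        Repr::Uninhabited => Mode::Ignore,
        Repr::Scalar | Repr::Vector => Mode::Direct,
        Repr::ScalarPair => Mode::Pair,
        Repr::Memory => Mode::Indirect,
    }
}

fn main() {
    assert_eq!(pass_mode(Repr::ScalarPair), Mode::Pair);
}
```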
@ -4,8 +4,10 @@
// Reference: Clang RISC-V ELF psABI lowering code
// https://github.com/llvm/llvm-project/blob/8e780252a7284be45cf1ba224cabd884847e8e92/clang/lib/CodeGen/TargetInfo.cpp#L9311-L9773

use rustc_abi::{BackendRepr, FieldsShape, HasDataLayout, Size, TyAbiInterface, TyAndLayout};

use crate::abi;
use crate::abi::call::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Reg, RegKind, Uniform};
use crate::abi::{self, Abi, FieldsShape, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
use crate::spec::HasTargetSpec;
use crate::spec::abi::Abi as SpecAbi;

@ -27,8 +29,8 @@ enum FloatConv {
struct CannotUseFpConv;

fn is_riscv_aggregate<Ty>(arg: &ArgAbi<'_, Ty>) -> bool {
match arg.layout.abi {
Abi::Vector { .. } => true,
match arg.layout.backend_repr {
BackendRepr::Vector { .. } => true,
_ => arg.layout.is_aggregate(),
}
}
@ -44,8 +46,8 @@ fn should_use_fp_conv_helper<'a, Ty, C>(
where
Ty: TyAbiInterface<'a, C> + Copy,
{
match arg_layout.abi {
Abi::Scalar(scalar) => match scalar.primitive() {
match arg_layout.backend_repr {
BackendRepr::Scalar(scalar) => match scalar.primitive() {
abi::Int(..) | abi::Pointer(_) => {
if arg_layout.size.bits() > xlen {
return Err(CannotUseFpConv);
@ -83,8 +85,8 @@ where
}
}
},
Abi::Vector { .. } | Abi::Uninhabited => return Err(CannotUseFpConv),
Abi::ScalarPair(..) | Abi::Aggregate { .. } => match arg_layout.fields {
BackendRepr::Vector { .. } | BackendRepr::Uninhabited => return Err(CannotUseFpConv),
BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => match arg_layout.fields {
FieldsShape::Primitive => {
unreachable!("aggregates can't have `FieldsShape::Primitive`")
}
@ -317,7 +319,7 @@ fn classify_arg<'a, Ty, C>(
}

fn extend_integer_width<Ty>(arg: &mut ArgAbi<'_, Ty>, xlen: u64) {
if let Abi::Scalar(scalar) = arg.layout.abi {
if let BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
if let abi::Int(i, _) = scalar.primitive() {
// 32-bit integers are always sign-extended
if i.size().bits() == 32 && xlen > 32 {

@ -109,11 +109,11 @@ where
return data;
}

match layout.abi {
abi::Abi::Scalar(scalar) => {
match layout.backend_repr {
abi::BackendRepr::Scalar(scalar) => {
data = arg_scalar(cx, &scalar, offset, data);
}
abi::Abi::Aggregate { .. } => {
abi::BackendRepr::Memory { .. } => {
for i in 0..layout.fields.count() {
if offset < layout.fields.offset(i) {
offset = layout.fields.offset(i);
@ -122,7 +122,7 @@ where
}
}
_ => {
if let abi::Abi::ScalarPair(scalar1, scalar2) = &layout.abi {
if let abi::BackendRepr::ScalarPair(scalar1, scalar2) = &layout.backend_repr {
data = arg_scalar_pair(cx, scalar1, scalar2, offset, data);
}
}

@ -1,6 +1,6 @@
use crate::abi::call::{ArgAttribute, FnAbi, PassMode, Reg, RegKind};
use crate::abi::{
Abi, AddressSpace, Align, Float, HasDataLayout, Pointer, TyAbiInterface, TyAndLayout,
AddressSpace, Align, BackendRepr, Float, HasDataLayout, Pointer, TyAbiInterface, TyAndLayout,
};
use crate::spec::HasTargetSpec;
use crate::spec::abi::Abi as SpecAbi;
@ -105,10 +105,12 @@ where
where
Ty: TyAbiInterface<'a, C> + Copy,
{
match layout.abi {
Abi::Uninhabited | Abi::Scalar(_) | Abi::ScalarPair(..) => false,
Abi::Vector { .. } => true,
Abi::Aggregate { .. } => {
match layout.backend_repr {
BackendRepr::Uninhabited
| BackendRepr::Scalar(_)
| BackendRepr::ScalarPair(..) => false,
BackendRepr::Vector { .. } => true,
BackendRepr::Memory { .. } => {
for i in 0..layout.fields.count() {
if contains_vector(cx, layout.field(cx, i)) {
return true;
@ -223,9 +225,9 @@ where
// Intrinsics themselves are not actual "real" functions, so there's no need to change their ABIs.
&& abi != SpecAbi::RustIntrinsic
{
let has_float = match fn_abi.ret.layout.abi {
Abi::Scalar(s) => matches!(s.primitive(), Float(_)),
Abi::ScalarPair(s1, s2) => {
let has_float = match fn_abi.ret.layout.backend_repr {
BackendRepr::Scalar(s) => matches!(s.primitive(), Float(_)),
BackendRepr::ScalarPair(s1, s2) => {
matches!(s1.primitive(), Float(_)) || matches!(s2.primitive(), Float(_))
}
_ => false, // anyway not passed via registers on x86
@ -1,8 +1,10 @@
// The classification code for the x86_64 ABI is taken from the clay language
// https://github.com/jckarter/clay/blob/db0bd2702ab0b6e48965cd85f8859bbd5f60e48e/compiler/externals.cpp

use rustc_abi::{BackendRepr, HasDataLayout, Size, TyAbiInterface, TyAndLayout};

use crate::abi;
use crate::abi::call::{ArgAbi, CastTarget, FnAbi, Reg, RegKind};
use crate::abi::{self, Abi, HasDataLayout, Size, TyAbiInterface, TyAndLayout};

/// Classification of "eightbyte" components.
// N.B., the order of the variants is from general to specific,
@ -46,17 +48,17 @@ where
return Ok(());
}

let mut c = match layout.abi {
Abi::Uninhabited => return Ok(()),
let mut c = match layout.backend_repr {
BackendRepr::Uninhabited => return Ok(()),

Abi::Scalar(scalar) => match scalar.primitive() {
BackendRepr::Scalar(scalar) => match scalar.primitive() {
abi::Int(..) | abi::Pointer(_) => Class::Int,
abi::Float(_) => Class::Sse,
},

Abi::Vector { .. } => Class::Sse,
BackendRepr::Vector { .. } => Class::Sse,

Abi::ScalarPair(..) | Abi::Aggregate { .. } => {
BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => {
for i in 0..layout.fields.count() {
let field_off = off + layout.fields.offset(i);
classify(cx, layout.field(cx, i), cls, field_off)?;
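The classification above assigns each eightbyte an `Int` or `Sse` class; when two classes land on the same eightbyte, integer wins. A minimal sketch of that merge rule (our simplification of the SysV algorithm, which also has further classes; not the code above):

```rust
#[derive(Clone, Copy, PartialEq, Debug)]
enum Class { Int, Sse }

// Merging the class of a new field into an eightbyte's current class:
// anything containing an integer is passed in a general-purpose register.
fn merge(current: Option<Class>, new: Class) -> Class {
    match (current, new) {
        (None, c) => c,
        (Some(Class::Int), _) | (_, Class::Int) => Class::Int,
        (Some(Class::Sse), Class::Sse) => Class::Sse,
    }
}

fn main() {
    // struct { f32, i32 } shares one eightbyte -> classified Int.
    assert_eq!(merge(Some(Class::Sse), Class::Int), Class::Int);
}
```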
@ -1,25 +1,28 @@
use rustc_abi::{BackendRepr, Float, Primitive};

use crate::abi::call::{ArgAbi, FnAbi, Reg};
use crate::abi::{Abi, Float, Primitive};
use crate::spec::HasTargetSpec;

// Win64 ABI: https://docs.microsoft.com/en-us/cpp/build/parameter-passing

pub(crate) fn compute_abi_info<Ty>(cx: &impl HasTargetSpec, fn_abi: &mut FnAbi<'_, Ty>) {
let fixup = |a: &mut ArgAbi<'_, Ty>| {
match a.layout.abi {
Abi::Uninhabited | Abi::Aggregate { sized: false } => {}
Abi::ScalarPair(..) | Abi::Aggregate { sized: true } => match a.layout.size.bits() {
8 => a.cast_to(Reg::i8()),
16 => a.cast_to(Reg::i16()),
32 => a.cast_to(Reg::i32()),
64 => a.cast_to(Reg::i64()),
_ => a.make_indirect(),
},
Abi::Vector { .. } => {
match a.layout.backend_repr {
BackendRepr::Uninhabited | BackendRepr::Memory { sized: false } => {}
BackendRepr::ScalarPair(..) | BackendRepr::Memory { sized: true } => {
match a.layout.size.bits() {
8 => a.cast_to(Reg::i8()),
16 => a.cast_to(Reg::i16()),
32 => a.cast_to(Reg::i32()),
64 => a.cast_to(Reg::i64()),
_ => a.make_indirect(),
}
}
BackendRepr::Vector { .. } => {
// FIXME(eddyb) there should be a size cap here
// (probably what clang calls "illegal vectors").
}
Abi::Scalar(scalar) => {
BackendRepr::Scalar(scalar) => {
// Match what LLVM does for `f128` so that `compiler-builtins` builtins match up
// with what LLVM expects.
if a.layout.size.bytes() > 8
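The Win64 fixup above follows a simple size rule for aggregates; restated as a standalone helper (our reading of the hunk, names ours):

```rust
// Win64 passes an aggregate in a single integer register only when it is
// exactly 8, 16, 32, or 64 bits wide; any other size goes indirectly by pointer.
fn win64_aggregate_register(bits: u64) -> Option<&'static str> {
    match bits {
        8 => Some("i8"),
        16 => Some("i16"),
        32 => Some("i32"),
        64 => Some("i64"),
        _ => None, // passed indirectly
    }
}

fn main() {
    assert_eq!(win64_aggregate_register(32), Some("i32"));
    assert_eq!(win64_aggregate_register(24), None); // e.g. a 3-byte struct
}
```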
@ -6,7 +6,7 @@
//! Section 2.3 from the Xtensa programmers guide.

use crate::abi::call::{ArgAbi, FnAbi, Reg, Uniform};
use crate::abi::{Abi, HasDataLayout, Size, TyAbiInterface};
use crate::abi::{BackendRepr, HasDataLayout, Size, TyAbiInterface};
use crate::spec::HasTargetSpec;

const NUM_ARG_GPRS: u64 = 6;
@ -114,8 +114,8 @@ where
}

fn is_xtensa_aggregate<'a, Ty>(arg: &ArgAbi<'a, Ty>) -> bool {
match arg.layout.abi {
Abi::Vector { .. } => true,
match arg.layout.backend_repr {
BackendRepr::Vector { .. } => true,
_ => arg.layout.is_aggregate(),
}
}

@ -284,7 +284,7 @@ pub fn suggest_new_region_bound(
}
match fn_return.kind {
// FIXME(precise_captures): Suggest adding to `use<...>` list instead.
TyKind::OpaqueDef(opaque, _) => {
TyKind::OpaqueDef(opaque) => {
// Get the identity type for this RPIT
let did = opaque.def_id.to_def_id();
let ty = Ty::new_opaque(tcx, did, ty::GenericArgs::identity_for_item(tcx, did));

@ -862,22 +862,6 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> {
self.add_lt_suggs.push(lt.suggestion(self.new_lt));
}
}

fn visit_ty(&mut self, ty: &'hir hir::Ty<'hir>) {
let hir::TyKind::OpaqueDef(opaque_ty, _) = ty.kind else {
return hir::intravisit::walk_ty(self, ty);
};
if let Some(&(_, b)) =
opaque_ty.lifetime_mapping.iter().find(|&(a, _)| a.res == self.needle)
{
let prev_needle =
std::mem::replace(&mut self.needle, hir::LifetimeName::Param(b));
for bound in opaque_ty.bounds {
self.visit_param_bound(bound);
}
self.needle = prev_needle;
}
}
}

let (lifetime_def_id, lifetime_scope) =

@ -545,10 +545,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> {
polarity: ty::PredicatePolarity::Positive,
}),
None,
Some(match predicate.host {
ty::HostPolarity::Maybe => ty::BoundConstness::ConstIfConst,
ty::HostPolarity::Const => ty::BoundConstness::Const,
}),
Some(predicate.constness),
None,
String::new(),
);
@ -2238,18 +2235,16 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> {
(None, _) => Some(cannot_do_this),
// suggested using default post message
(
Some(ty::BoundConstness::Const | ty::BoundConstness::ConstIfConst),
Some(ty::BoundConstness::Const | ty::BoundConstness::Maybe),
Some(AppendConstMessage::Default),
) => Some(format!("{cannot_do_this} in const contexts")),
// overridden post message
(
Some(ty::BoundConstness::Const | ty::BoundConstness::ConstIfConst),
Some(ty::BoundConstness::Const | ty::BoundConstness::Maybe),
Some(AppendConstMessage::Custom(custom_msg, _)),
) => Some(format!("{cannot_do_this}{custom_msg}")),
// fallback to generic message
(Some(ty::BoundConstness::Const | ty::BoundConstness::ConstIfConst), None) => {
None
}
(Some(ty::BoundConstness::Const | ty::BoundConstness::Maybe), None) => None,
}
})
.unwrap_or_else(|| {

@ -361,7 +361,6 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> {
})
| hir::Node::TraitItem(hir::TraitItem { generics, .. })
| hir::Node::ImplItem(hir::ImplItem { generics, .. })
| hir::Node::OpaqueTy(hir::OpaqueTy { generics, .. })
if param_ty =>
{
// We skip the 0'th arg (self) because we do not want
@ -424,10 +423,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> {
| hir::ItemKind::Const(_, generics, _)
| hir::ItemKind::TraitAlias(generics, _),
..
})
| hir::Node::OpaqueTy(hir::OpaqueTy { generics, .. })
if !param_ty =>
{
}) if !param_ty => {
// Missing generic type parameter bound.
if suggest_arbitrary_trait_bound(
self.tcx,

@ -18,7 +18,7 @@ use rustc_middle::ty::{
};
use rustc_span::Span;
use rustc_span::symbol::Symbol;
use rustc_target::abi::Abi;
use rustc_target::abi::BackendRepr;
use smallvec::SmallVec;
use tracing::{debug, instrument};

@ -523,8 +523,8 @@ fn check_receiver_correct<'tcx>(tcx: TyCtxt<'tcx>, trait_def_id: DefId, method:

// e.g., `Rc<()>`
let unit_receiver_ty = receiver_for_self_ty(tcx, receiver_ty, tcx.types.unit, method_def_id);
match tcx.layout_of(param_env.and(unit_receiver_ty)).map(|l| l.abi) {
Ok(Abi::Scalar(..)) => (),
match tcx.layout_of(param_env.and(unit_receiver_ty)).map(|l| l.backend_repr) {
Ok(BackendRepr::Scalar(..)) => (),
abi => {
tcx.dcx().span_delayed_bug(
tcx.def_span(method_def_id),
@ -538,8 +538,8 @@ fn check_receiver_correct<'tcx>(tcx: TyCtxt<'tcx>, trait_def_id: DefId, method:
// e.g., `Rc<dyn Trait>`
let trait_object_receiver =
receiver_for_self_ty(tcx, receiver_ty, trait_object_ty, method_def_id);
match tcx.layout_of(param_env.and(trait_object_receiver)).map(|l| l.abi) {
Ok(Abi::ScalarPair(..)) => (),
match tcx.layout_of(param_env.and(trait_object_receiver)).map(|l| l.backend_repr) {
Ok(BackendRepr::ScalarPair(..)) => (),
abi => {
tcx.dcx().span_delayed_bug(
tcx.def_span(method_def_id),
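The Scalar/ScalarPair expectation in `check_receiver_correct` is observable with plain std types (our demonstration, not part of the commit):

```rust
use std::mem::size_of;
use std::rc::Rc;

fn main() {
    // Thin receiver: one pointer (Scalar). Fat receiver: data pointer plus
    // vtable pointer (ScalarPair), i.e. two words.
    assert_eq!(size_of::<Rc<()>>(), size_of::<usize>());
    assert_eq!(size_of::<Rc<dyn std::fmt::Debug>>(), 2 * size_of::<usize>());
}
```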
@ -47,7 +47,7 @@ fn match_candidate<'tcx>(
obligation: &HostEffectObligation<'tcx>,
candidate: ty::Binder<'tcx, ty::HostEffectPredicate<'tcx>>,
) -> Result<ThinVec<PredicateObligation<'tcx>>, NoSolution> {
if !candidate.skip_binder().host.satisfies(obligation.predicate.host) {
if !candidate.skip_binder().constness.satisfies(obligation.predicate.constness) {
return Err(NoSolution);
}

@ -135,7 +135,8 @@ fn evaluate_host_effect_from_selection_candiate<'tcx>(
.map(|(trait_ref, _)| {
obligation.with(
tcx,
trait_ref.to_host_effect_clause(tcx, obligation.predicate.host),
trait_ref
.to_host_effect_clause(tcx, obligation.predicate.constness),
)
}),
);

@ -1,7 +1,7 @@
use std::iter;

use rustc_abi::Primitive::Pointer;
use rustc_abi::{Abi, PointerKind, Scalar, Size};
use rustc_abi::{BackendRepr, PointerKind, Scalar, Size};
use rustc_hir as hir;
use rustc_hir::lang_items::LangItem;
use rustc_middle::bug;
@ -469,7 +469,7 @@ fn fn_abi_sanity_check<'tcx>(
// careful. Scalar/ScalarPair is fine, since backends will generally use
// `layout.abi` and ignore everything else. We should just reject `Aggregate`
// entirely here, but some targets need to be fixed first.
if matches!(arg.layout.abi, Abi::Aggregate { .. }) {
if matches!(arg.layout.backend_repr, BackendRepr::Memory { .. }) {
// For an unsized type we'd only pass the sized prefix, so there is no universe
// in which we ever want to allow this.
assert!(
@ -500,7 +500,7 @@ fn fn_abi_sanity_check<'tcx>(
// Similar to `Direct`, we need to make sure that backends use `layout.abi` and
// ignore the rest of the layout.
assert!(
matches!(arg.layout.abi, Abi::ScalarPair(..)),
matches!(arg.layout.backend_repr, BackendRepr::ScalarPair(..)),
"PassMode::Pair for type {}",
arg.layout.ty
);
@ -658,9 +658,9 @@ fn fn_abi_adjust_for_abi<'tcx>(
fn unadjust<'tcx>(arg: &mut ArgAbi<'tcx, Ty<'tcx>>) {
// This still uses `PassMode::Pair` for ScalarPair types. That's unlikely to be intended,
// but who knows what breaks if we change this now.
if matches!(arg.layout.abi, Abi::Aggregate { .. }) {
if matches!(arg.layout.backend_repr, BackendRepr::Memory { .. }) {
assert!(
arg.layout.abi.is_sized(),
arg.layout.backend_repr.is_sized(),
"'unadjusted' ABI does not support unsized arguments"
);
}
@ -731,8 +731,8 @@ fn make_thin_self_ptr<'tcx>(
// FIXME (mikeyhew) change this to use &own if it is ever added to the language
Ty::new_mut_ptr(tcx, layout.ty)
} else {
match layout.abi {
Abi::ScalarPair(..) | Abi::Scalar(..) => (),
match layout.backend_repr {
BackendRepr::ScalarPair(..) | BackendRepr::Scalar(..) => (),
_ => bug!("receiver type has unsupported layout: {:?}", layout),
}

@ -190,7 +190,7 @@ fn associated_types_for_impl_traits_in_associated_fn(

impl<'tcx> Visitor<'tcx> for RPITVisitor {
fn visit_ty(&mut self, ty: &'tcx hir::Ty<'tcx>) {
if let hir::TyKind::OpaqueDef(opaq, _) = ty.kind
if let hir::TyKind::OpaqueDef(opaq) = ty.kind
&& self.rpits.insert(opaq.def_id)
{
for bound in opaq.bounds {
@ -5,8 +5,9 @@ use hir::def_id::DefId;
|
||||
use rustc_abi::Integer::{I8, I32};
|
||||
use rustc_abi::Primitive::{self, Float, Int, Pointer};
|
||||
use rustc_abi::{
|
||||
Abi, AbiAndPrefAlign, AddressSpace, Align, FieldsShape, HasDataLayout, LayoutCalculatorError,
|
||||
LayoutData, Niche, ReprOptions, Scalar, Size, StructKind, TagEncoding, Variants, WrappingRange,
|
||||
AbiAndPrefAlign, AddressSpace, Align, BackendRepr, FieldsShape, HasDataLayout,
|
||||
LayoutCalculatorError, LayoutData, Niche, ReprOptions, Scalar, Size, StructKind, TagEncoding,
|
||||
Variants, WrappingRange,
|
||||
};
|
||||
use rustc_index::bit_set::BitSet;
|
||||
use rustc_index::{IndexSlice, IndexVec};
|
||||
@ -173,7 +174,9 @@ fn layout_of_uncached<'tcx>(
|
||||
let mut layout = LayoutData::clone(&layout.0);
|
||||
match *pat {
|
||||
ty::PatternKind::Range { start, end, include_end } => {
|
||||
if let Abi::Scalar(scalar) | Abi::ScalarPair(scalar, _) = &mut layout.abi {
|
||||
if let BackendRepr::Scalar(scalar) | BackendRepr::ScalarPair(scalar, _) =
|
||||
&mut layout.backend_repr
|
||||
{
|
||||
if let Some(start) = start {
|
||||
scalar.valid_range_mut().start = start
|
||||
.try_to_bits(tcx, param_env)
|
||||
@ -275,7 +278,7 @@ fn layout_of_uncached<'tcx>(
|
||||
return Ok(tcx.mk_layout(LayoutData::scalar(cx, data_ptr)));
|
||||
}
|
||||
|
||||
let Abi::Scalar(metadata) = metadata_layout.abi else {
|
||||
let BackendRepr::Scalar(metadata) = metadata_layout.backend_repr else {
|
||||
return Err(error(cx, LayoutError::Unknown(pointee)));
|
||||
};
|
||||
|
||||
@ -330,9 +333,9 @@ fn layout_of_uncached<'tcx>(
|
||||
.ok_or_else(|| error(cx, LayoutError::SizeOverflow(ty)))?;
|
||||
|
||||
let abi = if count != 0 && ty.is_privately_uninhabited(tcx, param_env) {
|
||||
Abi::Uninhabited
|
||||
BackendRepr::Uninhabited
|
||||
} else {
|
||||
Abi::Aggregate { sized: true }
|
||||
BackendRepr::Memory { sized: true }
|
||||
};
|
||||
|
||||
let largest_niche = if count != 0 { element.largest_niche } else { None };
|
||||
@ -340,7 +343,7 @@ fn layout_of_uncached<'tcx>(
|
||||
tcx.mk_layout(LayoutData {
|
||||
variants: Variants::Single { index: FIRST_VARIANT },
|
||||
fields: FieldsShape::Array { stride: element.size, count },
|
||||
abi,
|
||||
backend_repr: abi,
|
||||
largest_niche,
|
||||
align: element.align,
|
||||
size,
|
||||
@ -353,7 +356,7 @@ fn layout_of_uncached<'tcx>(
|
||||
tcx.mk_layout(LayoutData {
|
||||
variants: Variants::Single { index: FIRST_VARIANT },
|
||||
fields: FieldsShape::Array { stride: element.size, count: 0 },
|
||||
abi: Abi::Aggregate { sized: false },
|
||||
backend_repr: BackendRepr::Memory { sized: false },
|
||||
largest_niche: None,
|
||||
align: element.align,
|
||||
size: Size::ZERO,
|
||||
@ -364,7 +367,7 @@ fn layout_of_uncached<'tcx>(
|
||||
ty::Str => tcx.mk_layout(LayoutData {
|
||||
variants: Variants::Single { index: FIRST_VARIANT },
|
||||
fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
|
||||
abi: Abi::Aggregate { sized: false },
|
||||
backend_repr: BackendRepr::Memory { sized: false },
|
||||
largest_niche: None,
|
||||
align: dl.i8_align,
|
||||
size: Size::ZERO,
|
||||
@ -384,8 +387,8 @@ fn layout_of_uncached<'tcx>(
|
||||
&ReprOptions::default(),
|
||||
StructKind::AlwaysSized,
|
||||
)?;
|
||||
match unit.abi {
|
||||
Abi::Aggregate { ref mut sized } => *sized = false,
|
||||
match unit.backend_repr {
|
||||
BackendRepr::Memory { ref mut sized } => *sized = false,
|
||||
_ => bug!(),
|
||||
}
|
||||
tcx.mk_layout(unit)
|
||||
@ -500,7 +503,7 @@ fn layout_of_uncached<'tcx>(
|
||||
|
||||
// Compute the ABI of the element type:
|
||||
let e_ly = cx.layout_of(e_ty)?;
|
||||
let Abi::Scalar(e_abi) = e_ly.abi else {
|
||||
let BackendRepr::Scalar(e_abi) = e_ly.backend_repr else {
|
||||
// This error isn't caught in typeck, e.g., if
|
||||
// the element type of the vector is generic.
|
||||
tcx.dcx().emit_fatal(NonPrimitiveSimdType { ty, e_ty });
|
||||
@ -516,12 +519,12 @@ fn layout_of_uncached<'tcx>(
|
||||
// Non-power-of-two vectors have padding up to the next power-of-two.
|
||||
// If we're a packed repr, remove the padding while keeping the alignment as close
|
||||
// to a vector as possible.
|
||||
(Abi::Aggregate { sized: true }, AbiAndPrefAlign {
|
||||
(BackendRepr::Memory { sized: true }, AbiAndPrefAlign {
|
||||
abi: Align::max_for_offset(size),
|
||||
pref: dl.vector_align(size).pref,
|
||||
})
|
||||
} else {
|
||||
(Abi::Vector { element: e_abi, count: e_len }, dl.vector_align(size))
|
||||
(BackendRepr::Vector { element: e_abi, count: e_len }, dl.vector_align(size))
|
||||
};
|
||||
let size = size.align_to(align.abi);
|
||||
|
||||
@ -535,7 +538,7 @@ fn layout_of_uncached<'tcx>(
tcx.mk_layout(LayoutData {
variants: Variants::Single { index: FIRST_VARIANT },
fields,
abi,
backend_repr: abi,
largest_niche: e_ly.largest_niche,
size,
align,
@ -985,10 +988,12 @@ fn coroutine_layout<'tcx>(

size = size.align_to(align.abi);

let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi.is_uninhabited()) {
Abi::Uninhabited
let abi = if prefix.backend_repr.is_uninhabited()
|| variants.iter().all(|v| v.backend_repr.is_uninhabited())
{
BackendRepr::Uninhabited
} else {
Abi::Aggregate { sized: true }
BackendRepr::Memory { sized: true }
};

let layout = tcx.mk_layout(LayoutData {
@ -999,7 +1004,7 @@ fn coroutine_layout<'tcx>(
variants,
},
fields: outer_fields,
abi,
backend_repr: abi,
// Suppress niches inside coroutines. If the niche is inside a field that is aliased (due to
// self-referentiality), getting the discriminant can cause aliasing violations.
// `UnsafeCell` blocks niches for the same reason, but we don't yet have `UnsafePinned` that
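Taken together, the layout hunks above are a mechanical rename: the `Abi` enum becomes `BackendRepr` and its `Aggregate` variant becomes `Memory`, with variant payloads unchanged. For orientation, a simplified standalone sketch of the renamed enum's shape (the real definition lives in `rustc_abi` and carries more derives and richer payload types):

#[derive(Clone, Copy)]
struct Scalar; // stand-in for rustc_abi::Scalar

enum BackendRepr {
    Uninhabited,                            // was Abi::Uninhabited
    Scalar(Scalar),                         // was Abi::Scalar
    ScalarPair(Scalar, Scalar),             // was Abi::ScalarPair
    Vector { element: Scalar, count: u64 }, // was Abi::Vector
    Memory { sized: bool },                 // was Abi::Aggregate
}
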
@ -66,12 +66,12 @@ pub(super) fn partially_check_layout<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLa

fn check_layout_abi<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLayout<'tcx>) {
// Verify the ABI mandated alignment and size.
let align = layout.abi.inherent_align(cx).map(|align| align.abi);
let size = layout.abi.inherent_size(cx);
let align = layout.backend_repr.inherent_align(cx).map(|align| align.abi);
let size = layout.backend_repr.inherent_size(cx);
let Some((align, size)) = align.zip(size) else {
assert_matches!(
layout.layout.abi(),
Abi::Uninhabited | Abi::Aggregate { .. },
layout.layout.backend_repr(),
BackendRepr::Uninhabited | BackendRepr::Memory { .. },
"ABI unexpectedly missing alignment and/or size in {layout:#?}"
);
return;
@ -88,12 +88,12 @@ pub(super) fn partially_check_layout<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLa
);

// Verify per-ABI invariants
match layout.layout.abi() {
Abi::Scalar(_) => {
match layout.layout.backend_repr() {
BackendRepr::Scalar(_) => {
// Check that this matches the underlying field.
let inner = skip_newtypes(cx, layout);
assert!(
matches!(inner.layout.abi(), Abi::Scalar(_)),
matches!(inner.layout.backend_repr(), BackendRepr::Scalar(_)),
"`Scalar` type {} is newtype around non-`Scalar` type {}",
layout.ty,
inner.ty
@ -132,7 +132,7 @@ pub(super) fn partially_check_layout<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLa
"`Scalar` field with bad align in {inner:#?}",
);
assert!(
matches!(field.abi, Abi::Scalar(_)),
matches!(field.backend_repr, BackendRepr::Scalar(_)),
"`Scalar` field with bad ABI in {inner:#?}",
);
}
@ -141,11 +141,11 @@ pub(super) fn partially_check_layout<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLa
}
}
}
Abi::ScalarPair(scalar1, scalar2) => {
BackendRepr::ScalarPair(scalar1, scalar2) => {
// Check that the underlying pair of fields matches.
let inner = skip_newtypes(cx, layout);
assert!(
matches!(inner.layout.abi(), Abi::ScalarPair(..)),
matches!(inner.layout.backend_repr(), BackendRepr::ScalarPair(..)),
"`ScalarPair` type {} is newtype around non-`ScalarPair` type {}",
layout.ty,
inner.ty
@ -208,8 +208,8 @@ pub(super) fn partially_check_layout<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLa
"`ScalarPair` first field with bad align in {inner:#?}",
);
assert_matches!(
field1.abi,
Abi::Scalar(_),
field1.backend_repr,
BackendRepr::Scalar(_),
"`ScalarPair` first field with bad ABI in {inner:#?}",
);
let field2_offset = size1.align_to(align2);
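The `field2_offset` line above places the second scalar at the first scalar's size rounded up to the second scalar's alignment. A worked sketch of that arithmetic (plain Rust, not compiler code; assumes a target where `u32` is 4-byte aligned):

// Round `size` up to the next multiple of `align`.
fn align_to(size: u64, align: u64) -> u64 {
    size.div_ceil(align) * align
}

fn main() {
    // A ScalarPair of (u8, u32): the u32 lands at offset 4, not 1.
    assert_eq!(align_to(1, 4), 4);
    // A ScalarPair of (u32, u32): no padding is needed.
    assert_eq!(align_to(4, 4), 4);
}
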
@ -226,16 +226,16 @@ pub(super) fn partially_check_layout<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLa
"`ScalarPair` second field with bad align in {inner:#?}",
);
assert_matches!(
field2.abi,
Abi::Scalar(_),
field2.backend_repr,
BackendRepr::Scalar(_),
"`ScalarPair` second field with bad ABI in {inner:#?}",
);
}
Abi::Vector { element, .. } => {
BackendRepr::Vector { element, .. } => {
assert!(align >= element.align(cx).abi); // just sanity-checking `vector_align`.
// FIXME: Do some kind of check of the inner type, like for Scalar and ScalarPair.
}
Abi::Uninhabited | Abi::Aggregate { .. } => {} // Nothing to check.
BackendRepr::Uninhabited | BackendRepr::Memory { .. } => {} // Nothing to check.
}
}

@ -274,13 +274,13 @@ pub(super) fn partially_check_layout<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLa
// The top-level ABI and the ABI of the variants should be coherent.
let scalar_coherent =
|s1: Scalar, s2: Scalar| s1.size(cx) == s2.size(cx) && s1.align(cx) == s2.align(cx);
let abi_coherent = match (layout.abi, variant.abi) {
(Abi::Scalar(s1), Abi::Scalar(s2)) => scalar_coherent(s1, s2),
(Abi::ScalarPair(a1, b1), Abi::ScalarPair(a2, b2)) => {
let abi_coherent = match (layout.backend_repr, variant.backend_repr) {
(BackendRepr::Scalar(s1), BackendRepr::Scalar(s2)) => scalar_coherent(s1, s2),
(BackendRepr::ScalarPair(a1, b1), BackendRepr::ScalarPair(a2, b2)) => {
scalar_coherent(a1, a2) && scalar_coherent(b1, b2)
}
(Abi::Uninhabited, _) => true,
(Abi::Aggregate { .. }, _) => true,
(BackendRepr::Uninhabited, _) => true,
(BackendRepr::Memory { .. }, _) => true,
_ => false,
};
if !abi_coherent {
@ -155,7 +155,7 @@ fn param_env(tcx: TyCtxt<'_>, def_id: DefId) -> ty::ParamEnv<'_> {
if tcx.is_conditionally_const(def_id) {
predicates.extend(
tcx.const_conditions(def_id).instantiate_identity(tcx).into_iter().map(
|(trait_ref, _)| trait_ref.to_host_effect_clause(tcx, ty::HostPolarity::Maybe),
|(trait_ref, _)| trait_ref.to_host_effect_clause(tcx, ty::BoundConstness::Maybe),
),
);
}

@ -160,7 +160,7 @@ impl<I: Interner, O: Elaboratable<I>> Elaborator<I, O> {
cx.implied_const_bounds(data.def_id()).iter_identity().map(|trait_ref| {
elaboratable.child(
trait_ref
.to_host_effect_clause(cx, data.host)
.to_host_effect_clause(cx, data.constness)
.instantiate_supertrait(cx, bound_clause.rebind(data.trait_ref)),
)
}),
@ -112,9 +112,9 @@ impl<I: Interner> ty::Binder<I, TraitRef<I>> {
self.skip_binder().def_id
}

pub fn to_host_effect_clause(self, cx: I, host: HostPolarity) -> I::Clause {
pub fn to_host_effect_clause(self, cx: I, constness: BoundConstness) -> I::Clause {
self.map_bound(|trait_ref| {
ty::ClauseKind::HostEffect(HostEffectPredicate { trait_ref, host })
ty::ClauseKind::HostEffect(HostEffectPredicate { trait_ref, constness })
})
.upcast(cx)
}
@ -757,7 +757,7 @@ impl<I: Interner> fmt::Debug for NormalizesTo<I> {
#[cfg_attr(feature = "nightly", derive(TyEncodable, TyDecodable, HashStable_NoContext))]
pub struct HostEffectPredicate<I: Interner> {
pub trait_ref: ty::TraitRef<I>,
pub host: HostPolarity,
pub constness: BoundConstness,
}

impl<I: Interner> HostEffectPredicate<I> {
@ -785,28 +785,8 @@ impl<I: Interner> ty::Binder<I, HostEffectPredicate<I>> {
}

#[inline]
pub fn host(self) -> HostPolarity {
self.skip_binder().host
}
}

#[derive(Clone, Copy, Hash, PartialEq, Eq, Debug)]
#[derive(TypeVisitable_Generic, TypeFoldable_Generic)]
#[cfg_attr(feature = "nightly", derive(TyEncodable, TyDecodable, HashStable_NoContext))]
pub enum HostPolarity {
/// May be called in const environments if the callee is const.
Maybe,
/// Always allowed to be called in const environments.
Const,
}

impl HostPolarity {
pub fn satisfies(self, goal: HostPolarity) -> bool {
match (self, goal) {
(HostPolarity::Const, HostPolarity::Const | HostPolarity::Maybe) => true,
(HostPolarity::Maybe, HostPolarity::Maybe) => true,
(HostPolarity::Maybe, HostPolarity::Const) => false,
}
pub fn constness(self) -> BoundConstness {
self.skip_binder().constness
}
}

@ -831,8 +811,8 @@ pub struct CoercePredicate<I: Interner> {
pub b: I::Ty,
}

#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "nightly", derive(HashStable_NoContext, TyEncodable, TyDecodable))]
#[derive(Clone, Copy, Hash, PartialEq, Eq, Debug)]
#[cfg_attr(feature = "nightly", derive(TyEncodable, TyDecodable, HashStable_NoContext))]
pub enum BoundConstness {
/// `Type: const Trait`
///
@ -841,14 +821,22 @@ pub enum BoundConstness {
/// `Type: ~const Trait`
///
/// Requires resolving to const only when we are in a const context.
ConstIfConst,
Maybe,
}

impl BoundConstness {
pub fn satisfies(self, goal: BoundConstness) -> bool {
match (self, goal) {
(BoundConstness::Const, BoundConstness::Const | BoundConstness::Maybe) => true,
(BoundConstness::Maybe, BoundConstness::Maybe) => true,
(BoundConstness::Maybe, BoundConstness::Const) => false,
}
}

pub fn as_str(self) -> &'static str {
match self {
Self::Const => "const",
Self::ConstIfConst => "~const",
Self::Maybe => "~const",
}
}
}
@ -857,14 +845,7 @@ impl fmt::Display for BoundConstness {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Const => f.write_str("const"),
Self::ConstIfConst => f.write_str("~const"),
Self::Maybe => f.write_str("~const"),
}
}
}

impl<I> Lift<I> for BoundConstness {
type Lifted = BoundConstness;
fn lift_to_interner(self, _: I) -> Option<Self::Lifted> {
Some(self)
}
}
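The merge above keeps `HostPolarity`'s `satisfies` semantics under the `BoundConstness` name: a `const` provider satisfies both `const` and `~const` goals, while a `~const` (`Maybe`) provider cannot satisfy a `const` goal. A standalone, runnable mirror of that table (not the compiler's definition, which is in the hunk above):

#[derive(Clone, Copy)]
enum BoundConstness {
    Const, // `Type: const Trait`
    Maybe, // `Type: ~const Trait`
}

impl BoundConstness {
    fn satisfies(self, goal: BoundConstness) -> bool {
        match (self, goal) {
            (BoundConstness::Const, BoundConstness::Const | BoundConstness::Maybe) => true,
            (BoundConstness::Maybe, BoundConstness::Maybe) => true,
            (BoundConstness::Maybe, BoundConstness::Const) => false,
        }
    }
}

fn main() {
    assert!(BoundConstness::Const.satisfies(BoundConstness::Maybe));
    assert!(!BoundConstness::Maybe.satisfies(BoundConstness::Const));
}
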
@ -497,11 +497,12 @@
#debug = false

# Whether to download the stage 1 and 2 compilers from CI.
# This is mostly useful for tools; if you have changes to `compiler/` or `library/` they will be ignored.
# This is useful if you are working on tools, doc-comments, or library (you will be able to build
# the standard library without needing to build the compiler).
#
# Set this to "if-unchanged" to only download if the compiler and standard library have not been modified.
# Set this to `true` to download unconditionally. This is useful if you are working on tools, doc-comments,
# or library (you will be able to build the standard library without needing to build the compiler).
# Set this to "if-unchanged" to only download if the compiler (and library if running on CI) have
# not been modified.
# Set this to `true` to download unconditionally.
#download-rustc = false

# Number of codegen units to use for each compiler invocation. A value of 0
@ -153,7 +153,6 @@ impl Step for Std {
// NOTE: the beta compiler may generate different artifacts than the downloaded compiler, so
// its artifacts can't be reused.
&& compiler.stage != 0
// This check is specific to testing std itself; see `test::Std` for more details.
&& !self.force_recompile
{
let sysroot = builder.ensure(Sysroot { compiler, force_recompile: false });
@ -2767,25 +2767,32 @@ impl Config {
}
};

let files_to_track =
&["compiler", "library", "src/version", "src/stage0", "src/ci/channel"];
let mut files_to_track = vec!["compiler", "src/version", "src/stage0", "src/ci/channel"];

// In CI, disable ci-rustc if there are changes in the library tree. But for non-CI, ignore
// these changes to speed up the build process for library developers. This provides consistent
// functionality for library developers between `download-rustc=true` and `download-rustc="if-unchanged"`
// options.
if CiEnv::is_ci() {
files_to_track.push("library");
}

// Look for a version to compare to based on the current commit.
// Only commits merged by bors will have CI artifacts.
let commit = match self.last_modified_commit(files_to_track, "download-rustc", if_unchanged)
{
Some(commit) => commit,
None => {
if if_unchanged {
return None;
let commit =
match self.last_modified_commit(&files_to_track, "download-rustc", if_unchanged) {
Some(commit) => commit,
None => {
if if_unchanged {
return None;
}
println!("ERROR: could not find commit hash for downloading rustc");
println!("HELP: maybe your repository history is too shallow?");
println!("HELP: consider disabling `download-rustc`");
println!("HELP: or fetch enough history to include one upstream commit");
crate::exit!(1);
}
println!("ERROR: could not find commit hash for downloading rustc");
println!("HELP: maybe your repository history is too shallow?");
println!("HELP: consider disabling `download-rustc`");
println!("HELP: or fetch enough history to include one upstream commit");
crate::exit!(1);
}
};
};

if CiEnv::is_ci() && {
let head_sha =
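Net effect of the config.rs hunk above: `library` only joins the tracked paths when running on CI, so local library work keeps reusing the downloaded rustc while CI force-recompiles on library changes. A standalone mirror of that selection logic (a sketch, not bootstrap code):

fn files_to_track(is_ci: bool) -> Vec<&'static str> {
    let mut files = vec!["compiler", "src/version", "src/stage0", "src/ci/channel"];
    if is_ci {
        // On CI, library changes must also invalidate the downloaded rustc.
        files.push("library");
    }
    files
}

fn main() {
    assert!(files_to_track(true).contains(&"library"));
    assert!(!files_to_track(false).contains(&"library"));
}
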
@ -1829,7 +1829,7 @@ pub(crate) fn clean_ty<'tcx>(ty: &hir::Ty<'tcx>, cx: &mut DocContext<'tcx>) -> T
Array(Box::new(clean_ty(ty, cx)), length.into())
}
TyKind::Tup(tys) => Tuple(tys.iter().map(|ty| clean_ty(ty, cx)).collect()),
TyKind::OpaqueDef(ty, _) => {
TyKind::OpaqueDef(ty) => {
ImplTrait(ty.bounds.iter().filter_map(|x| clean_generic_bound(x, cx)).collect())
}
TyKind::Path(_) => clean_qpath(ty, cx),
Some files were not shown because too many files have changed in this diff.