mirror of
https://github.com/rust-lang/rust.git
synced 2025-04-13 04:26:48 +00:00
Rollup merge of #138158 - moulins:move-layout-to-rustc_abi, r=workingjubilee
Move more layouting logic to `rustc_abi` Move all `LayoutData`-constructing code to `rustc_abi`: - Infaillible operations get a new `LayoutData` constructor method; - Faillible ones get a new method on `LayoutCalculator`.
This commit is contained in:
commit
bfa1a62fd4
@ -4,6 +4,7 @@ use std::{cmp, iter};
|
||||
|
||||
use rustc_hashes::Hash64;
|
||||
use rustc_index::Idx;
|
||||
use rustc_index::bit_set::BitMatrix;
|
||||
use tracing::debug;
|
||||
|
||||
use crate::{
|
||||
@ -12,6 +13,9 @@ use crate::{
|
||||
Variants, WrappingRange,
|
||||
};
|
||||
|
||||
mod coroutine;
|
||||
mod simple;
|
||||
|
||||
#[cfg(feature = "nightly")]
|
||||
mod ty;
|
||||
|
||||
@ -60,17 +64,28 @@ pub enum LayoutCalculatorError<F> {
|
||||
|
||||
/// The fields or variants have irreconcilable reprs
|
||||
ReprConflict,
|
||||
|
||||
/// The length of an SIMD type is zero
|
||||
ZeroLengthSimdType,
|
||||
|
||||
/// The length of an SIMD type exceeds the maximum number of lanes
|
||||
OversizedSimdType { max_lanes: u64 },
|
||||
|
||||
/// An element type of an SIMD type isn't a primitive
|
||||
NonPrimitiveSimdType(F),
|
||||
}
|
||||
|
||||
impl<F> LayoutCalculatorError<F> {
|
||||
pub fn without_payload(&self) -> LayoutCalculatorError<()> {
|
||||
match self {
|
||||
LayoutCalculatorError::UnexpectedUnsized(_) => {
|
||||
LayoutCalculatorError::UnexpectedUnsized(())
|
||||
}
|
||||
LayoutCalculatorError::SizeOverflow => LayoutCalculatorError::SizeOverflow,
|
||||
LayoutCalculatorError::EmptyUnion => LayoutCalculatorError::EmptyUnion,
|
||||
LayoutCalculatorError::ReprConflict => LayoutCalculatorError::ReprConflict,
|
||||
use LayoutCalculatorError::*;
|
||||
match *self {
|
||||
UnexpectedUnsized(_) => UnexpectedUnsized(()),
|
||||
SizeOverflow => SizeOverflow,
|
||||
EmptyUnion => EmptyUnion,
|
||||
ReprConflict => ReprConflict,
|
||||
ZeroLengthSimdType => ZeroLengthSimdType,
|
||||
OversizedSimdType { max_lanes } => OversizedSimdType { max_lanes },
|
||||
NonPrimitiveSimdType(_) => NonPrimitiveSimdType(()),
|
||||
}
|
||||
}
|
||||
|
||||
@ -78,13 +93,15 @@ impl<F> LayoutCalculatorError<F> {
|
||||
///
|
||||
/// Intended for use by rust-analyzer, as neither it nor `rustc_abi` depend on fluent infra.
|
||||
pub fn fallback_fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
use LayoutCalculatorError::*;
|
||||
f.write_str(match self {
|
||||
LayoutCalculatorError::UnexpectedUnsized(_) => {
|
||||
"an unsized type was found where a sized type was expected"
|
||||
UnexpectedUnsized(_) => "an unsized type was found where a sized type was expected",
|
||||
SizeOverflow => "size overflow",
|
||||
EmptyUnion => "type is a union with no fields",
|
||||
ReprConflict => "type has an invalid repr",
|
||||
ZeroLengthSimdType | OversizedSimdType { .. } | NonPrimitiveSimdType(_) => {
|
||||
"invalid simd type definition"
|
||||
}
|
||||
LayoutCalculatorError::SizeOverflow => "size overflow",
|
||||
LayoutCalculatorError::EmptyUnion => "type is a union with no fields",
|
||||
LayoutCalculatorError::ReprConflict => "type has an invalid repr",
|
||||
})
|
||||
}
|
||||
}
|
||||
@ -102,41 +119,115 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
|
||||
Self { cx }
|
||||
}
|
||||
|
||||
pub fn scalar_pair<FieldIdx: Idx, VariantIdx: Idx>(
|
||||
pub fn array_like<FieldIdx: Idx, VariantIdx: Idx, F>(
|
||||
&self,
|
||||
a: Scalar,
|
||||
b: Scalar,
|
||||
) -> LayoutData<FieldIdx, VariantIdx> {
|
||||
let dl = self.cx.data_layout();
|
||||
let b_align = b.align(dl);
|
||||
let align = a.align(dl).max(b_align).max(dl.aggregate_align);
|
||||
let b_offset = a.size(dl).align_to(b_align.abi);
|
||||
let size = (b_offset + b.size(dl)).align_to(align.abi);
|
||||
element: &LayoutData<FieldIdx, VariantIdx>,
|
||||
count_if_sized: Option<u64>, // None for slices
|
||||
) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
|
||||
let count = count_if_sized.unwrap_or(0);
|
||||
let size =
|
||||
element.size.checked_mul(count, &self.cx).ok_or(LayoutCalculatorError::SizeOverflow)?;
|
||||
|
||||
// HACK(nox): We iter on `b` and then `a` because `max_by_key`
|
||||
// returns the last maximum.
|
||||
let largest_niche = Niche::from_scalar(dl, b_offset, b)
|
||||
.into_iter()
|
||||
.chain(Niche::from_scalar(dl, Size::ZERO, a))
|
||||
.max_by_key(|niche| niche.available(dl));
|
||||
|
||||
let combined_seed = a.size(&self.cx).bytes().wrapping_add(b.size(&self.cx).bytes());
|
||||
|
||||
LayoutData {
|
||||
Ok(LayoutData {
|
||||
variants: Variants::Single { index: VariantIdx::new(0) },
|
||||
fields: FieldsShape::Arbitrary {
|
||||
offsets: [Size::ZERO, b_offset].into(),
|
||||
memory_index: [0, 1].into(),
|
||||
},
|
||||
backend_repr: BackendRepr::ScalarPair(a, b),
|
||||
largest_niche,
|
||||
uninhabited: false,
|
||||
align,
|
||||
fields: FieldsShape::Array { stride: element.size, count },
|
||||
backend_repr: BackendRepr::Memory { sized: count_if_sized.is_some() },
|
||||
largest_niche: element.largest_niche.filter(|_| count != 0),
|
||||
uninhabited: element.uninhabited && count != 0,
|
||||
align: element.align,
|
||||
size,
|
||||
max_repr_align: None,
|
||||
unadjusted_abi_align: align.abi,
|
||||
randomization_seed: Hash64::new(combined_seed),
|
||||
unadjusted_abi_align: element.align.abi,
|
||||
randomization_seed: element.randomization_seed.wrapping_add(Hash64::new(count)),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn simd_type<
|
||||
FieldIdx: Idx,
|
||||
VariantIdx: Idx,
|
||||
F: AsRef<LayoutData<FieldIdx, VariantIdx>> + fmt::Debug,
|
||||
>(
|
||||
&self,
|
||||
element: F,
|
||||
count: u64,
|
||||
repr_packed: bool,
|
||||
) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
|
||||
let elt = element.as_ref();
|
||||
if count == 0 {
|
||||
return Err(LayoutCalculatorError::ZeroLengthSimdType);
|
||||
} else if count > crate::MAX_SIMD_LANES {
|
||||
return Err(LayoutCalculatorError::OversizedSimdType {
|
||||
max_lanes: crate::MAX_SIMD_LANES,
|
||||
});
|
||||
}
|
||||
|
||||
let BackendRepr::Scalar(e_repr) = elt.backend_repr else {
|
||||
return Err(LayoutCalculatorError::NonPrimitiveSimdType(element));
|
||||
};
|
||||
|
||||
// Compute the size and alignment of the vector
|
||||
let dl = self.cx.data_layout();
|
||||
let size =
|
||||
elt.size.checked_mul(count, dl).ok_or_else(|| LayoutCalculatorError::SizeOverflow)?;
|
||||
let (repr, align) = if repr_packed && !count.is_power_of_two() {
|
||||
// Non-power-of-two vectors have padding up to the next power-of-two.
|
||||
// If we're a packed repr, remove the padding while keeping the alignment as close
|
||||
// to a vector as possible.
|
||||
(
|
||||
BackendRepr::Memory { sized: true },
|
||||
AbiAndPrefAlign {
|
||||
abi: Align::max_aligned_factor(size),
|
||||
pref: dl.llvmlike_vector_align(size).pref,
|
||||
},
|
||||
)
|
||||
} else {
|
||||
(BackendRepr::SimdVector { element: e_repr, count }, dl.llvmlike_vector_align(size))
|
||||
};
|
||||
let size = size.align_to(align.abi);
|
||||
|
||||
Ok(LayoutData {
|
||||
variants: Variants::Single { index: VariantIdx::new(0) },
|
||||
fields: FieldsShape::Arbitrary {
|
||||
offsets: [Size::ZERO].into(),
|
||||
memory_index: [0].into(),
|
||||
},
|
||||
backend_repr: repr,
|
||||
largest_niche: elt.largest_niche,
|
||||
uninhabited: false,
|
||||
size,
|
||||
align,
|
||||
max_repr_align: None,
|
||||
unadjusted_abi_align: elt.align.abi,
|
||||
randomization_seed: elt.randomization_seed.wrapping_add(Hash64::new(count)),
|
||||
})
|
||||
}
|
||||
|
||||
/// Compute the layout for a coroutine.
|
||||
///
|
||||
/// This uses dedicated code instead of [`Self::layout_of_struct_or_enum`], as coroutine
|
||||
/// fields may be shared between multiple variants (see the [`coroutine`] module for details).
|
||||
pub fn coroutine<
|
||||
'a,
|
||||
F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
|
||||
VariantIdx: Idx,
|
||||
FieldIdx: Idx,
|
||||
LocalIdx: Idx,
|
||||
>(
|
||||
&self,
|
||||
local_layouts: &IndexSlice<LocalIdx, F>,
|
||||
prefix_layouts: IndexVec<FieldIdx, F>,
|
||||
variant_fields: &IndexSlice<VariantIdx, IndexVec<FieldIdx, LocalIdx>>,
|
||||
storage_conflicts: &BitMatrix<LocalIdx, LocalIdx>,
|
||||
tag_to_layout: impl Fn(Scalar) -> F,
|
||||
) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
|
||||
coroutine::layout(
|
||||
self,
|
||||
local_layouts,
|
||||
prefix_layouts,
|
||||
variant_fields,
|
||||
storage_conflicts,
|
||||
tag_to_layout,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn univariant<
|
||||
@ -214,25 +305,6 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
|
||||
layout
|
||||
}
|
||||
|
||||
pub fn layout_of_never_type<FieldIdx: Idx, VariantIdx: Idx>(
|
||||
&self,
|
||||
) -> LayoutData<FieldIdx, VariantIdx> {
|
||||
let dl = self.cx.data_layout();
|
||||
// This is also used for uninhabited enums, so we use `Variants::Empty`.
|
||||
LayoutData {
|
||||
variants: Variants::Empty,
|
||||
fields: FieldsShape::Primitive,
|
||||
backend_repr: BackendRepr::Memory { sized: true },
|
||||
largest_niche: None,
|
||||
uninhabited: true,
|
||||
align: dl.i8_align,
|
||||
size: Size::ZERO,
|
||||
max_repr_align: None,
|
||||
unadjusted_abi_align: dl.i8_align.abi,
|
||||
randomization_seed: Hash64::ZERO,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn layout_of_struct_or_enum<
|
||||
'a,
|
||||
FieldIdx: Idx,
|
||||
@ -260,7 +332,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
|
||||
Some(present_first) => present_first,
|
||||
// Uninhabited because it has no variants, or only absent ones.
|
||||
None if is_enum => {
|
||||
return Ok(self.layout_of_never_type());
|
||||
return Ok(LayoutData::never_type(&self.cx));
|
||||
}
|
||||
// If it's a struct, still compute a layout so that we can still compute the
|
||||
// field offsets.
|
||||
@ -949,7 +1021,8 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
|
||||
// Common prim might be uninit.
|
||||
Scalar::Union { value: prim }
|
||||
};
|
||||
let pair = self.scalar_pair::<FieldIdx, VariantIdx>(tag, prim_scalar);
|
||||
let pair =
|
||||
LayoutData::<FieldIdx, VariantIdx>::scalar_pair(&self.cx, tag, prim_scalar);
|
||||
let pair_offsets = match pair.fields {
|
||||
FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
|
||||
assert_eq!(memory_index.raw, [0, 1]);
|
||||
@ -1341,7 +1414,8 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
|
||||
} else {
|
||||
((j, b), (i, a))
|
||||
};
|
||||
let pair = self.scalar_pair::<FieldIdx, VariantIdx>(a, b);
|
||||
let pair =
|
||||
LayoutData::<FieldIdx, VariantIdx>::scalar_pair(&self.cx, a, b);
|
||||
let pair_offsets = match pair.fields {
|
||||
FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
|
||||
assert_eq!(memory_index.raw, [0, 1]);
|
||||
|
320
compiler/rustc_abi/src/layout/coroutine.rs
Normal file
320
compiler/rustc_abi/src/layout/coroutine.rs
Normal file
@ -0,0 +1,320 @@
|
||||
//! Coroutine layout logic.
|
||||
//!
|
||||
//! When laying out coroutines, we divide our saved local fields into two
|
||||
//! categories: overlap-eligible and overlap-ineligible.
|
||||
//!
|
||||
//! Those fields which are ineligible for overlap go in a "prefix" at the
|
||||
//! beginning of the layout, and always have space reserved for them.
|
||||
//!
|
||||
//! Overlap-eligible fields are only assigned to one variant, so we lay
|
||||
//! those fields out for each variant and put them right after the
|
||||
//! prefix.
|
||||
//!
|
||||
//! Finally, in the layout details, we point to the fields from the
|
||||
//! variants they are assigned to. It is possible for some fields to be
|
||||
//! included in multiple variants. No field ever "moves around" in the
|
||||
//! layout; its offset is always the same.
|
||||
//!
|
||||
//! Also included in the layout are the upvars and the discriminant.
|
||||
//! These are included as fields on the "outer" layout; they are not part
|
||||
//! of any variant.
|
||||
|
||||
use std::iter;
|
||||
|
||||
use rustc_index::bit_set::{BitMatrix, DenseBitSet};
|
||||
use rustc_index::{Idx, IndexSlice, IndexVec};
|
||||
use tracing::{debug, trace};
|
||||
|
||||
use crate::{
|
||||
BackendRepr, FieldsShape, HasDataLayout, Integer, LayoutData, Primitive, ReprOptions, Scalar,
|
||||
StructKind, TagEncoding, Variants, WrappingRange,
|
||||
};
|
||||
|
||||
/// Overlap eligibility and variant assignment for each CoroutineSavedLocal.
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
enum SavedLocalEligibility<VariantIdx, FieldIdx> {
|
||||
Unassigned,
|
||||
Assigned(VariantIdx),
|
||||
Ineligible(Option<FieldIdx>),
|
||||
}
|
||||
|
||||
/// Compute the eligibility and assignment of each local.
|
||||
fn coroutine_saved_local_eligibility<VariantIdx: Idx, FieldIdx: Idx, LocalIdx: Idx>(
|
||||
nb_locals: usize,
|
||||
variant_fields: &IndexSlice<VariantIdx, IndexVec<FieldIdx, LocalIdx>>,
|
||||
storage_conflicts: &BitMatrix<LocalIdx, LocalIdx>,
|
||||
) -> (DenseBitSet<LocalIdx>, IndexVec<LocalIdx, SavedLocalEligibility<VariantIdx, FieldIdx>>) {
|
||||
use SavedLocalEligibility::*;
|
||||
|
||||
let mut assignments: IndexVec<LocalIdx, _> = IndexVec::from_elem_n(Unassigned, nb_locals);
|
||||
|
||||
// The saved locals not eligible for overlap. These will get
|
||||
// "promoted" to the prefix of our coroutine.
|
||||
let mut ineligible_locals = DenseBitSet::new_empty(nb_locals);
|
||||
|
||||
// Figure out which of our saved locals are fields in only
|
||||
// one variant. The rest are deemed ineligible for overlap.
|
||||
for (variant_index, fields) in variant_fields.iter_enumerated() {
|
||||
for local in fields {
|
||||
match assignments[*local] {
|
||||
Unassigned => {
|
||||
assignments[*local] = Assigned(variant_index);
|
||||
}
|
||||
Assigned(idx) => {
|
||||
// We've already seen this local at another suspension
|
||||
// point, so it is no longer a candidate.
|
||||
trace!(
|
||||
"removing local {:?} in >1 variant ({:?}, {:?})",
|
||||
local, variant_index, idx
|
||||
);
|
||||
ineligible_locals.insert(*local);
|
||||
assignments[*local] = Ineligible(None);
|
||||
}
|
||||
Ineligible(_) => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Next, check every pair of eligible locals to see if they
|
||||
// conflict.
|
||||
for local_a in storage_conflicts.rows() {
|
||||
let conflicts_a = storage_conflicts.count(local_a);
|
||||
if ineligible_locals.contains(local_a) {
|
||||
continue;
|
||||
}
|
||||
|
||||
for local_b in storage_conflicts.iter(local_a) {
|
||||
// local_a and local_b are storage live at the same time, therefore they
|
||||
// cannot overlap in the coroutine layout. The only way to guarantee
|
||||
// this is if they are in the same variant, or one is ineligible
|
||||
// (which means it is stored in every variant).
|
||||
if ineligible_locals.contains(local_b) || assignments[local_a] == assignments[local_b] {
|
||||
continue;
|
||||
}
|
||||
|
||||
// If they conflict, we will choose one to make ineligible.
|
||||
// This is not always optimal; it's just a greedy heuristic that
|
||||
// seems to produce good results most of the time.
|
||||
let conflicts_b = storage_conflicts.count(local_b);
|
||||
let (remove, other) =
|
||||
if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
|
||||
ineligible_locals.insert(remove);
|
||||
assignments[remove] = Ineligible(None);
|
||||
trace!("removing local {:?} due to conflict with {:?}", remove, other);
|
||||
}
|
||||
}
|
||||
|
||||
// Count the number of variants in use. If only one of them, then it is
|
||||
// impossible to overlap any locals in our layout. In this case it's
|
||||
// always better to make the remaining locals ineligible, so we can
|
||||
// lay them out with the other locals in the prefix and eliminate
|
||||
// unnecessary padding bytes.
|
||||
{
|
||||
let mut used_variants = DenseBitSet::new_empty(variant_fields.len());
|
||||
for assignment in &assignments {
|
||||
if let Assigned(idx) = assignment {
|
||||
used_variants.insert(*idx);
|
||||
}
|
||||
}
|
||||
if used_variants.count() < 2 {
|
||||
for assignment in assignments.iter_mut() {
|
||||
*assignment = Ineligible(None);
|
||||
}
|
||||
ineligible_locals.insert_all();
|
||||
}
|
||||
}
|
||||
|
||||
// Write down the order of our locals that will be promoted to the prefix.
|
||||
{
|
||||
for (idx, local) in ineligible_locals.iter().enumerate() {
|
||||
assignments[local] = Ineligible(Some(FieldIdx::new(idx)));
|
||||
}
|
||||
}
|
||||
debug!("coroutine saved local assignments: {:?}", assignments);
|
||||
|
||||
(ineligible_locals, assignments)
|
||||
}
|
||||
|
||||
/// Compute the full coroutine layout.
|
||||
pub(super) fn layout<
|
||||
'a,
|
||||
F: core::ops::Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + core::fmt::Debug + Copy,
|
||||
VariantIdx: Idx,
|
||||
FieldIdx: Idx,
|
||||
LocalIdx: Idx,
|
||||
>(
|
||||
calc: &super::LayoutCalculator<impl HasDataLayout>,
|
||||
local_layouts: &IndexSlice<LocalIdx, F>,
|
||||
mut prefix_layouts: IndexVec<FieldIdx, F>,
|
||||
variant_fields: &IndexSlice<VariantIdx, IndexVec<FieldIdx, LocalIdx>>,
|
||||
storage_conflicts: &BitMatrix<LocalIdx, LocalIdx>,
|
||||
tag_to_layout: impl Fn(Scalar) -> F,
|
||||
) -> super::LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
|
||||
use SavedLocalEligibility::*;
|
||||
|
||||
let (ineligible_locals, assignments) =
|
||||
coroutine_saved_local_eligibility(local_layouts.len(), variant_fields, storage_conflicts);
|
||||
|
||||
// Build a prefix layout, including "promoting" all ineligible
|
||||
// locals as part of the prefix. We compute the layout of all of
|
||||
// these fields at once to get optimal packing.
|
||||
let tag_index = prefix_layouts.len();
|
||||
|
||||
// `variant_fields` already accounts for the reserved variants, so no need to add them.
|
||||
let max_discr = (variant_fields.len() - 1) as u128;
|
||||
let discr_int = Integer::fit_unsigned(max_discr);
|
||||
let tag = Scalar::Initialized {
|
||||
value: Primitive::Int(discr_int, /* signed = */ false),
|
||||
valid_range: WrappingRange { start: 0, end: max_discr },
|
||||
};
|
||||
|
||||
let promoted_layouts = ineligible_locals.iter().map(|local| local_layouts[local]);
|
||||
prefix_layouts.push(tag_to_layout(tag));
|
||||
prefix_layouts.extend(promoted_layouts);
|
||||
let prefix =
|
||||
calc.univariant(&prefix_layouts, &ReprOptions::default(), StructKind::AlwaysSized)?;
|
||||
|
||||
let (prefix_size, prefix_align) = (prefix.size, prefix.align);
|
||||
|
||||
// Split the prefix layout into the "outer" fields (upvars and
|
||||
// discriminant) and the "promoted" fields. Promoted fields will
|
||||
// get included in each variant that requested them in
|
||||
// CoroutineLayout.
|
||||
debug!("prefix = {:#?}", prefix);
|
||||
let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
|
||||
FieldsShape::Arbitrary { mut offsets, memory_index } => {
|
||||
let mut inverse_memory_index = memory_index.invert_bijective_mapping();
|
||||
|
||||
// "a" (`0..b_start`) and "b" (`b_start..`) correspond to
|
||||
// "outer" and "promoted" fields respectively.
|
||||
let b_start = FieldIdx::new(tag_index + 1);
|
||||
let offsets_b = IndexVec::from_raw(offsets.raw.split_off(b_start.index()));
|
||||
let offsets_a = offsets;
|
||||
|
||||
// Disentangle the "a" and "b" components of `inverse_memory_index`
|
||||
// by preserving the order but keeping only one disjoint "half" each.
|
||||
// FIXME(eddyb) build a better abstraction for permutations, if possible.
|
||||
let inverse_memory_index_b: IndexVec<u32, FieldIdx> = inverse_memory_index
|
||||
.iter()
|
||||
.filter_map(|&i| i.index().checked_sub(b_start.index()).map(FieldIdx::new))
|
||||
.collect();
|
||||
inverse_memory_index.raw.retain(|&i| i.index() < b_start.index());
|
||||
let inverse_memory_index_a = inverse_memory_index;
|
||||
|
||||
// Since `inverse_memory_index_{a,b}` each only refer to their
|
||||
// respective fields, they can be safely inverted
|
||||
let memory_index_a = inverse_memory_index_a.invert_bijective_mapping();
|
||||
let memory_index_b = inverse_memory_index_b.invert_bijective_mapping();
|
||||
|
||||
let outer_fields =
|
||||
FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
|
||||
(outer_fields, offsets_b, memory_index_b)
|
||||
}
|
||||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
let mut size = prefix.size;
|
||||
let mut align = prefix.align;
|
||||
let variants = variant_fields
|
||||
.iter_enumerated()
|
||||
.map(|(index, variant_fields)| {
|
||||
// Only include overlap-eligible fields when we compute our variant layout.
|
||||
let variant_only_tys = variant_fields
|
||||
.iter()
|
||||
.filter(|local| match assignments[**local] {
|
||||
Unassigned => unreachable!(),
|
||||
Assigned(v) if v == index => true,
|
||||
Assigned(_) => unreachable!("assignment does not match variant"),
|
||||
Ineligible(_) => false,
|
||||
})
|
||||
.map(|local| local_layouts[*local]);
|
||||
|
||||
let mut variant = calc.univariant(
|
||||
&variant_only_tys.collect::<IndexVec<_, _>>(),
|
||||
&ReprOptions::default(),
|
||||
StructKind::Prefixed(prefix_size, prefix_align.abi),
|
||||
)?;
|
||||
variant.variants = Variants::Single { index };
|
||||
|
||||
let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
// Now, stitch the promoted and variant-only fields back together in
|
||||
// the order they are mentioned by our CoroutineLayout.
|
||||
// Because we only use some subset (that can differ between variants)
|
||||
// of the promoted fields, we can't just pick those elements of the
|
||||
// `promoted_memory_index` (as we'd end up with gaps).
|
||||
// So instead, we build an "inverse memory_index", as if all of the
|
||||
// promoted fields were being used, but leave the elements not in the
|
||||
// subset as `invalid_field_idx`, which we can filter out later to
|
||||
// obtain a valid (bijective) mapping.
|
||||
let invalid_field_idx = promoted_memory_index.len() + memory_index.len();
|
||||
let mut combined_inverse_memory_index =
|
||||
IndexVec::from_elem_n(FieldIdx::new(invalid_field_idx), invalid_field_idx);
|
||||
|
||||
let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
|
||||
let combined_offsets = variant_fields
|
||||
.iter_enumerated()
|
||||
.map(|(i, local)| {
|
||||
let (offset, memory_index) = match assignments[*local] {
|
||||
Unassigned => unreachable!(),
|
||||
Assigned(_) => {
|
||||
let (offset, memory_index) = offsets_and_memory_index.next().unwrap();
|
||||
(offset, promoted_memory_index.len() as u32 + memory_index)
|
||||
}
|
||||
Ineligible(field_idx) => {
|
||||
let field_idx = field_idx.unwrap();
|
||||
(promoted_offsets[field_idx], promoted_memory_index[field_idx])
|
||||
}
|
||||
};
|
||||
combined_inverse_memory_index[memory_index] = i;
|
||||
offset
|
||||
})
|
||||
.collect();
|
||||
|
||||
// Remove the unused slots and invert the mapping to obtain the
|
||||
// combined `memory_index` (also see previous comment).
|
||||
combined_inverse_memory_index.raw.retain(|&i| i.index() != invalid_field_idx);
|
||||
let combined_memory_index = combined_inverse_memory_index.invert_bijective_mapping();
|
||||
|
||||
variant.fields = FieldsShape::Arbitrary {
|
||||
offsets: combined_offsets,
|
||||
memory_index: combined_memory_index,
|
||||
};
|
||||
|
||||
size = size.max(variant.size);
|
||||
align = align.max(variant.align);
|
||||
Ok(variant)
|
||||
})
|
||||
.collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
|
||||
|
||||
size = size.align_to(align.abi);
|
||||
|
||||
let uninhabited = prefix.uninhabited || variants.iter().all(|v| v.is_uninhabited());
|
||||
let abi = BackendRepr::Memory { sized: true };
|
||||
|
||||
Ok(LayoutData {
|
||||
variants: Variants::Multiple {
|
||||
tag,
|
||||
tag_encoding: TagEncoding::Direct,
|
||||
tag_field: tag_index,
|
||||
variants,
|
||||
},
|
||||
fields: outer_fields,
|
||||
backend_repr: abi,
|
||||
// Suppress niches inside coroutines. If the niche is inside a field that is aliased (due to
|
||||
// self-referentiality), getting the discriminant can cause aliasing violations.
|
||||
// `UnsafeCell` blocks niches for the same reason, but we don't yet have `UnsafePinned` that
|
||||
// would do the same for us here.
|
||||
// See <https://github.com/rust-lang/rust/issues/63818>, <https://github.com/rust-lang/miri/issues/3780>.
|
||||
// FIXME: Remove when <https://github.com/rust-lang/rust/issues/125735> is implemented and aliased coroutine fields are wrapped in `UnsafePinned`.
|
||||
largest_niche: None,
|
||||
uninhabited,
|
||||
size,
|
||||
align,
|
||||
max_repr_align: None,
|
||||
unadjusted_abi_align: align.abi,
|
||||
randomization_seed: Default::default(),
|
||||
})
|
||||
}
|
148
compiler/rustc_abi/src/layout/simple.rs
Normal file
148
compiler/rustc_abi/src/layout/simple.rs
Normal file
@ -0,0 +1,148 @@
|
||||
use std::num::NonZero;
|
||||
|
||||
use rustc_hashes::Hash64;
|
||||
use rustc_index::{Idx, IndexVec};
|
||||
|
||||
use crate::{
|
||||
BackendRepr, FieldsShape, HasDataLayout, LayoutData, Niche, Primitive, Scalar, Size, Variants,
|
||||
};
|
||||
|
||||
/// "Simple" layout constructors that cannot fail.
|
||||
impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
|
||||
pub fn unit<C: HasDataLayout>(cx: &C, sized: bool) -> Self {
|
||||
let dl = cx.data_layout();
|
||||
LayoutData {
|
||||
variants: Variants::Single { index: VariantIdx::new(0) },
|
||||
fields: FieldsShape::Arbitrary {
|
||||
offsets: IndexVec::new(),
|
||||
memory_index: IndexVec::new(),
|
||||
},
|
||||
backend_repr: BackendRepr::Memory { sized },
|
||||
largest_niche: None,
|
||||
uninhabited: false,
|
||||
align: dl.i8_align,
|
||||
size: Size::ZERO,
|
||||
max_repr_align: None,
|
||||
unadjusted_abi_align: dl.i8_align.abi,
|
||||
randomization_seed: Hash64::new(0),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn never_type<C: HasDataLayout>(cx: &C) -> Self {
|
||||
let dl = cx.data_layout();
|
||||
// This is also used for uninhabited enums, so we use `Variants::Empty`.
|
||||
LayoutData {
|
||||
variants: Variants::Empty,
|
||||
fields: FieldsShape::Primitive,
|
||||
backend_repr: BackendRepr::Memory { sized: true },
|
||||
largest_niche: None,
|
||||
uninhabited: true,
|
||||
align: dl.i8_align,
|
||||
size: Size::ZERO,
|
||||
max_repr_align: None,
|
||||
unadjusted_abi_align: dl.i8_align.abi,
|
||||
randomization_seed: Hash64::ZERO,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self {
|
||||
let largest_niche = Niche::from_scalar(cx, Size::ZERO, scalar);
|
||||
let size = scalar.size(cx);
|
||||
let align = scalar.align(cx);
|
||||
|
||||
let range = scalar.valid_range(cx);
|
||||
|
||||
// All primitive types for which we don't have subtype coercions should get a distinct seed,
|
||||
// so that types wrapping them can use randomization to arrive at distinct layouts.
|
||||
//
|
||||
// Some type information is already lost at this point, so as an approximation we derive
|
||||
// the seed from what remains. For example on 64-bit targets usize and u64 can no longer
|
||||
// be distinguished.
|
||||
let randomization_seed = size
|
||||
.bytes()
|
||||
.wrapping_add(
|
||||
match scalar.primitive() {
|
||||
Primitive::Int(_, true) => 1,
|
||||
Primitive::Int(_, false) => 2,
|
||||
Primitive::Float(_) => 3,
|
||||
Primitive::Pointer(_) => 4,
|
||||
} << 32,
|
||||
)
|
||||
// distinguishes references from pointers
|
||||
.wrapping_add((range.start as u64).rotate_right(16))
|
||||
// distinguishes char from u32 and bool from u8
|
||||
.wrapping_add((range.end as u64).rotate_right(16));
|
||||
|
||||
LayoutData {
|
||||
variants: Variants::Single { index: VariantIdx::new(0) },
|
||||
fields: FieldsShape::Primitive,
|
||||
backend_repr: BackendRepr::Scalar(scalar),
|
||||
largest_niche,
|
||||
uninhabited: false,
|
||||
size,
|
||||
align,
|
||||
max_repr_align: None,
|
||||
unadjusted_abi_align: align.abi,
|
||||
randomization_seed: Hash64::new(randomization_seed),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn scalar_pair<C: HasDataLayout>(cx: &C, a: Scalar, b: Scalar) -> Self {
|
||||
let dl = cx.data_layout();
|
||||
let b_align = b.align(dl);
|
||||
let align = a.align(dl).max(b_align).max(dl.aggregate_align);
|
||||
let b_offset = a.size(dl).align_to(b_align.abi);
|
||||
let size = (b_offset + b.size(dl)).align_to(align.abi);
|
||||
|
||||
// HACK(nox): We iter on `b` and then `a` because `max_by_key`
|
||||
// returns the last maximum.
|
||||
let largest_niche = Niche::from_scalar(dl, b_offset, b)
|
||||
.into_iter()
|
||||
.chain(Niche::from_scalar(dl, Size::ZERO, a))
|
||||
.max_by_key(|niche| niche.available(dl));
|
||||
|
||||
let combined_seed = a.size(dl).bytes().wrapping_add(b.size(dl).bytes());
|
||||
|
||||
LayoutData {
|
||||
variants: Variants::Single { index: VariantIdx::new(0) },
|
||||
fields: FieldsShape::Arbitrary {
|
||||
offsets: [Size::ZERO, b_offset].into(),
|
||||
memory_index: [0, 1].into(),
|
||||
},
|
||||
backend_repr: BackendRepr::ScalarPair(a, b),
|
||||
largest_niche,
|
||||
uninhabited: false,
|
||||
align,
|
||||
size,
|
||||
max_repr_align: None,
|
||||
unadjusted_abi_align: align.abi,
|
||||
randomization_seed: Hash64::new(combined_seed),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a dummy layout for an uninhabited variant.
|
||||
///
|
||||
/// Uninhabited variants get pruned as part of the layout calculation,
|
||||
/// so this can be used after the fact to reconstitute a layout.
|
||||
pub fn uninhabited_variant<C: HasDataLayout>(cx: &C, index: VariantIdx, fields: usize) -> Self {
|
||||
let dl = cx.data_layout();
|
||||
LayoutData {
|
||||
variants: Variants::Single { index },
|
||||
fields: match NonZero::new(fields) {
|
||||
Some(fields) => FieldsShape::Union(fields),
|
||||
None => FieldsShape::Arbitrary {
|
||||
offsets: IndexVec::new(),
|
||||
memory_index: IndexVec::new(),
|
||||
},
|
||||
},
|
||||
backend_repr: BackendRepr::Memory { sized: true },
|
||||
largest_niche: None,
|
||||
uninhabited: true,
|
||||
align: dl.i8_align,
|
||||
size: Size::ZERO,
|
||||
max_repr_align: None,
|
||||
unadjusted_abi_align: dl.i8_align.abi,
|
||||
randomization_seed: Hash64::ZERO,
|
||||
}
|
||||
}
|
||||
}
|
@ -150,6 +150,12 @@ impl<'a, Ty> Deref for TyAndLayout<'a, Ty> {
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, Ty> AsRef<LayoutData<FieldIdx, VariantIdx>> for TyAndLayout<'a, Ty> {
|
||||
fn as_ref(&self) -> &LayoutData<FieldIdx, VariantIdx> {
|
||||
&*self.layout.0.0
|
||||
}
|
||||
}
|
||||
|
||||
/// Trait that needs to be implemented by the higher-level type representation
|
||||
/// (e.g. `rustc_middle::ty::Ty`), to provide `rustc_target::abi` functionality.
|
||||
pub trait TyAbiInterface<'a, C>: Sized + std::fmt::Debug {
|
||||
|
@ -204,6 +204,13 @@ impl ReprOptions {
|
||||
}
|
||||
}
|
||||
|
||||
/// The maximum supported number of lanes in a SIMD vector.
|
||||
///
|
||||
/// This value is selected based on backend support:
|
||||
/// * LLVM does not appear to have a vector width limit.
|
||||
/// * Cranelift stores the base-2 log of the lane count in a 4 bit integer.
|
||||
pub const MAX_SIMD_LANES: u64 = 1 << 0xF;
|
||||
|
||||
/// Parsed [Data layout](https://llvm.org/docs/LangRef.html#data-layout)
|
||||
/// for a target, which contains everything needed to compute layouts.
|
||||
#[derive(Debug, PartialEq, Eq)]
|
||||
@ -1743,48 +1750,6 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
|
||||
pub fn is_uninhabited(&self) -> bool {
|
||||
self.uninhabited
|
||||
}
|
||||
|
||||
pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self {
|
||||
let largest_niche = Niche::from_scalar(cx, Size::ZERO, scalar);
|
||||
let size = scalar.size(cx);
|
||||
let align = scalar.align(cx);
|
||||
|
||||
let range = scalar.valid_range(cx);
|
||||
|
||||
// All primitive types for which we don't have subtype coercions should get a distinct seed,
|
||||
// so that types wrapping them can use randomization to arrive at distinct layouts.
|
||||
//
|
||||
// Some type information is already lost at this point, so as an approximation we derive
|
||||
// the seed from what remains. For example on 64-bit targets usize and u64 can no longer
|
||||
// be distinguished.
|
||||
let randomization_seed = size
|
||||
.bytes()
|
||||
.wrapping_add(
|
||||
match scalar.primitive() {
|
||||
Primitive::Int(_, true) => 1,
|
||||
Primitive::Int(_, false) => 2,
|
||||
Primitive::Float(_) => 3,
|
||||
Primitive::Pointer(_) => 4,
|
||||
} << 32,
|
||||
)
|
||||
// distinguishes references from pointers
|
||||
.wrapping_add((range.start as u64).rotate_right(16))
|
||||
// distinguishes char from u32 and bool from u8
|
||||
.wrapping_add((range.end as u64).rotate_right(16));
|
||||
|
||||
LayoutData {
|
||||
variants: Variants::Single { index: VariantIdx::new(0) },
|
||||
fields: FieldsShape::Primitive,
|
||||
backend_repr: BackendRepr::Scalar(scalar),
|
||||
largest_niche,
|
||||
uninhabited: false,
|
||||
size,
|
||||
align,
|
||||
max_repr_align: None,
|
||||
unadjusted_abi_align: align.abi,
|
||||
randomization_seed: Hash64::new(randomization_seed),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<FieldIdx: Idx, VariantIdx: Idx> fmt::Debug for LayoutData<FieldIdx, VariantIdx>
|
||||
|
@ -1,20 +1,17 @@
|
||||
use std::num::NonZero;
|
||||
use std::ops::Bound;
|
||||
use std::{cmp, fmt};
|
||||
|
||||
use rustc_abi::{
|
||||
AddressSpace, Align, BackendRepr, ExternAbi, FieldIdx, FieldsShape, HasDataLayout, LayoutData,
|
||||
PointeeInfo, PointerKind, Primitive, ReprOptions, Scalar, Size, TagEncoding, TargetDataLayout,
|
||||
AddressSpace, Align, ExternAbi, FieldIdx, FieldsShape, HasDataLayout, LayoutData, PointeeInfo,
|
||||
PointerKind, Primitive, ReprOptions, Scalar, Size, TagEncoding, TargetDataLayout,
|
||||
TyAbiInterface, VariantIdx, Variants,
|
||||
};
|
||||
use rustc_error_messages::DiagMessage;
|
||||
use rustc_errors::{
|
||||
Diag, DiagArgValue, DiagCtxtHandle, Diagnostic, EmissionGuarantee, IntoDiagArg, Level,
|
||||
};
|
||||
use rustc_hashes::Hash64;
|
||||
use rustc_hir::LangItem;
|
||||
use rustc_hir::def_id::DefId;
|
||||
use rustc_index::IndexVec;
|
||||
use rustc_macros::{HashStable, TyDecodable, TyEncodable, extension};
|
||||
use rustc_session::config::OptLevel;
|
||||
use rustc_span::{DUMMY_SP, ErrorGuaranteed, Span, Symbol, sym};
|
||||
@ -185,12 +182,7 @@ pub const WIDE_PTR_ADDR: usize = 0;
|
||||
/// - For a slice, this is the length.
|
||||
pub const WIDE_PTR_EXTRA: usize = 1;
|
||||
|
||||
/// The maximum supported number of lanes in a SIMD vector.
|
||||
///
|
||||
/// This value is selected based on backend support:
|
||||
/// * LLVM does not appear to have a vector width limit.
|
||||
/// * Cranelift stores the base-2 log of the lane count in a 4 bit integer.
|
||||
pub const MAX_SIMD_LANES: u64 = 1 << 0xF;
|
||||
pub const MAX_SIMD_LANES: u64 = rustc_abi::MAX_SIMD_LANES;
|
||||
|
||||
/// Used in `check_validity_requirement` to indicate the kind of initialization
|
||||
/// that is checked to be valid
|
||||
@ -762,11 +754,9 @@ where
|
||||
variant_index: VariantIdx,
|
||||
) -> TyAndLayout<'tcx> {
|
||||
let layout = match this.variants {
|
||||
Variants::Single { index }
|
||||
// If all variants but one are uninhabited, the variant layout is the enum layout.
|
||||
if index == variant_index =>
|
||||
{
|
||||
this.layout
|
||||
// If all variants but one are uninhabited, the variant layout is the enum layout.
|
||||
Variants::Single { index } if index == variant_index => {
|
||||
return this;
|
||||
}
|
||||
|
||||
Variants::Single { .. } | Variants::Empty => {
|
||||
@ -783,29 +773,18 @@ where
|
||||
}
|
||||
|
||||
let fields = match this.ty.kind() {
|
||||
ty::Adt(def, _) if def.variants().is_empty() =>
|
||||
bug!("for_variant called on zero-variant enum {}", this.ty),
|
||||
ty::Adt(def, _) if def.variants().is_empty() => {
|
||||
bug!("for_variant called on zero-variant enum {}", this.ty)
|
||||
}
|
||||
ty::Adt(def, _) => def.variant(variant_index).fields.len(),
|
||||
_ => bug!("`ty_and_layout_for_variant` on unexpected type {}", this.ty),
|
||||
};
|
||||
tcx.mk_layout(LayoutData {
|
||||
variants: Variants::Single { index: variant_index },
|
||||
fields: match NonZero::new(fields) {
|
||||
Some(fields) => FieldsShape::Union(fields),
|
||||
None => FieldsShape::Arbitrary { offsets: IndexVec::new(), memory_index: IndexVec::new() },
|
||||
},
|
||||
backend_repr: BackendRepr::Memory { sized: true },
|
||||
largest_niche: None,
|
||||
uninhabited: true,
|
||||
align: tcx.data_layout.i8_align,
|
||||
size: Size::ZERO,
|
||||
max_repr_align: None,
|
||||
unadjusted_abi_align: tcx.data_layout.i8_align.abi,
|
||||
randomization_seed: Hash64::ZERO,
|
||||
})
|
||||
tcx.mk_layout(LayoutData::uninhabited_variant(cx, variant_index, fields))
|
||||
}
|
||||
|
||||
Variants::Multiple { ref variants, .. } => cx.tcx().mk_layout(variants[variant_index].clone()),
|
||||
Variants::Multiple { ref variants, .. } => {
|
||||
cx.tcx().mk_layout(variants[variant_index].clone())
|
||||
}
|
||||
};
|
||||
|
||||
assert_eq!(*layout.variants(), Variants::Single { index: variant_index });
|
||||
|
@ -1,31 +1,25 @@
|
||||
use std::fmt::Debug;
|
||||
use std::iter;
|
||||
|
||||
use hir::def_id::DefId;
|
||||
use rustc_abi::Integer::{I8, I32};
|
||||
use rustc_abi::Primitive::{self, Float, Int, Pointer};
|
||||
use rustc_abi::{
|
||||
AbiAndPrefAlign, AddressSpace, Align, BackendRepr, FIRST_VARIANT, FieldIdx, FieldsShape,
|
||||
HasDataLayout, Layout, LayoutCalculatorError, LayoutData, Niche, ReprOptions, Scalar, Size,
|
||||
StructKind, TagEncoding, VariantIdx, Variants, WrappingRange,
|
||||
AddressSpace, BackendRepr, FIRST_VARIANT, FieldIdx, FieldsShape, HasDataLayout, Layout,
|
||||
LayoutCalculatorError, LayoutData, Niche, ReprOptions, Scalar, Size, StructKind, TagEncoding,
|
||||
VariantIdx, Variants, WrappingRange,
|
||||
};
|
||||
use rustc_hashes::Hash64;
|
||||
use rustc_index::bit_set::DenseBitSet;
|
||||
use rustc_index::{IndexSlice, IndexVec};
|
||||
use rustc_index::IndexVec;
|
||||
use rustc_middle::bug;
|
||||
use rustc_middle::mir::{CoroutineLayout, CoroutineSavedLocal};
|
||||
use rustc_middle::query::Providers;
|
||||
use rustc_middle::ty::layout::{
|
||||
FloatExt, HasTyCtxt, IntegerExt, LayoutCx, LayoutError, LayoutOf, MAX_SIMD_LANES, TyAndLayout,
|
||||
FloatExt, HasTyCtxt, IntegerExt, LayoutCx, LayoutError, LayoutOf, TyAndLayout,
|
||||
};
|
||||
use rustc_middle::ty::print::with_no_trimmed_paths;
|
||||
use rustc_middle::ty::{
|
||||
self, AdtDef, CoroutineArgsExt, EarlyBinder, GenericArgsRef, PseudoCanonicalInput, Ty, TyCtxt,
|
||||
TypeVisitableExt,
|
||||
self, AdtDef, CoroutineArgsExt, EarlyBinder, PseudoCanonicalInput, Ty, TyCtxt, TypeVisitableExt,
|
||||
};
|
||||
use rustc_session::{DataTypeKind, FieldInfo, FieldKind, SizeKind, VariantInfo};
|
||||
use rustc_span::{Symbol, sym};
|
||||
use tracing::{debug, instrument, trace};
|
||||
use tracing::{debug, instrument};
|
||||
use {rustc_abi as abi, rustc_hir as hir};
|
||||
|
||||
use crate::errors::{NonPrimitiveSimdType, OversizedSimdType, ZeroLengthSimdType};
|
||||
@ -124,20 +118,23 @@ fn map_error<'tcx>(
|
||||
.delayed_bug(format!("computed impossible repr (packed enum?): {ty:?}"));
|
||||
LayoutError::ReferencesError(guar)
|
||||
}
|
||||
LayoutCalculatorError::ZeroLengthSimdType => {
|
||||
// Can't be caught in typeck if the array length is generic.
|
||||
cx.tcx().dcx().emit_fatal(ZeroLengthSimdType { ty })
|
||||
}
|
||||
LayoutCalculatorError::OversizedSimdType { max_lanes } => {
|
||||
// Can't be caught in typeck if the array length is generic.
|
||||
cx.tcx().dcx().emit_fatal(OversizedSimdType { ty, max_lanes })
|
||||
}
|
||||
LayoutCalculatorError::NonPrimitiveSimdType(field) => {
|
||||
// This error isn't caught in typeck, e.g., if
|
||||
// the element type of the vector is generic.
|
||||
cx.tcx().dcx().emit_fatal(NonPrimitiveSimdType { ty, e_ty: field.ty })
|
||||
}
|
||||
};
|
||||
error(cx, err)
|
||||
}
|
||||
|
||||
fn univariant_uninterned<'tcx>(
|
||||
cx: &LayoutCx<'tcx>,
|
||||
ty: Ty<'tcx>,
|
||||
fields: &IndexSlice<FieldIdx, TyAndLayout<'tcx>>,
|
||||
kind: StructKind,
|
||||
) -> Result<LayoutData<FieldIdx, VariantIdx>, &'tcx LayoutError<'tcx>> {
|
||||
let repr = ReprOptions::default();
|
||||
cx.calc.univariant(fields, &repr, kind).map_err(|err| map_error(cx, ty, err))
|
||||
}
|
||||
|
||||
fn extract_const_value<'tcx>(
|
||||
cx: &LayoutCx<'tcx>,
|
||||
ty: Ty<'tcx>,
|
||||
@ -188,6 +185,10 @@ fn layout_of_uncached<'tcx>(
|
||||
|
||||
let tcx = cx.tcx();
|
||||
let dl = cx.data_layout();
|
||||
let map_layout = |result: Result<_, _>| match result {
|
||||
Ok(layout) => Ok(tcx.mk_layout(layout)),
|
||||
Err(err) => Err(map_error(cx, ty, err)),
|
||||
};
|
||||
let scalar_unit = |value: Primitive| {
|
||||
let size = value.size(dl);
|
||||
assert!(size.bits() <= 128);
|
||||
@ -195,8 +196,10 @@ fn layout_of_uncached<'tcx>(
|
||||
};
|
||||
let scalar = |value: Primitive| tcx.mk_layout(LayoutData::scalar(cx, scalar_unit(value)));
|
||||
|
||||
let univariant = |fields: &IndexSlice<FieldIdx, TyAndLayout<'tcx>>, kind| {
|
||||
Ok(tcx.mk_layout(univariant_uninterned(cx, ty, fields, kind)?))
|
||||
let univariant = |tys: &[Ty<'tcx>], kind| {
|
||||
let fields = tys.iter().map(|ty| cx.layout_of(*ty)).try_collect::<IndexVec<_, _>>()?;
|
||||
let repr = ReprOptions::default();
|
||||
map_layout(cx.calc.univariant(&fields, &repr, kind))
|
||||
};
|
||||
debug_assert!(!ty.has_non_region_infer());
|
||||
|
||||
@ -258,7 +261,7 @@ fn layout_of_uncached<'tcx>(
|
||||
}
|
||||
|
||||
// The never type.
|
||||
ty::Never => tcx.mk_layout(cx.calc.layout_of_never_type()),
|
||||
ty::Never => tcx.mk_layout(LayoutData::never_type(cx)),
|
||||
|
||||
// Potentially-wide pointers.
|
||||
ty::Ref(_, pointee, _) | ty::RawPtr(pointee, _) => {
|
||||
@ -329,7 +332,7 @@ fn layout_of_uncached<'tcx>(
|
||||
};
|
||||
|
||||
// Effectively a (ptr, meta) tuple.
|
||||
tcx.mk_layout(cx.calc.scalar_pair(data_ptr, metadata))
|
||||
tcx.mk_layout(LayoutData::scalar_pair(cx, data_ptr, metadata))
|
||||
}
|
||||
|
||||
ty::Dynamic(_, _, ty::DynStar) => {
|
||||
@ -337,7 +340,7 @@ fn layout_of_uncached<'tcx>(
|
||||
data.valid_range_mut().start = 0;
|
||||
let mut vtable = scalar_unit(Pointer(AddressSpace::DATA));
|
||||
vtable.valid_range_mut().start = 1;
|
||||
tcx.mk_layout(cx.calc.scalar_pair(data, vtable))
|
||||
tcx.mk_layout(LayoutData::scalar_pair(cx, data, vtable))
|
||||
}
|
||||
|
||||
// Arrays and slices.
|
||||
@ -347,96 +350,87 @@ fn layout_of_uncached<'tcx>(
|
||||
.ok_or_else(|| error(cx, LayoutError::Unknown(ty)))?;
|
||||
|
||||
let element = cx.layout_of(element)?;
|
||||
let size = element
|
||||
.size
|
||||
.checked_mul(count, dl)
|
||||
.ok_or_else(|| error(cx, LayoutError::SizeOverflow(ty)))?;
|
||||
|
||||
let abi = BackendRepr::Memory { sized: true };
|
||||
|
||||
let largest_niche = if count != 0 { element.largest_niche } else { None };
|
||||
let uninhabited = if count != 0 { element.uninhabited } else { false };
|
||||
|
||||
tcx.mk_layout(LayoutData {
|
||||
variants: Variants::Single { index: FIRST_VARIANT },
|
||||
fields: FieldsShape::Array { stride: element.size, count },
|
||||
backend_repr: abi,
|
||||
largest_niche,
|
||||
uninhabited,
|
||||
align: element.align,
|
||||
size,
|
||||
max_repr_align: None,
|
||||
unadjusted_abi_align: element.align.abi,
|
||||
randomization_seed: element.randomization_seed.wrapping_add(Hash64::new(count)),
|
||||
})
|
||||
map_layout(cx.calc.array_like(&element, Some(count)))?
|
||||
}
|
||||
ty::Slice(element) => {
|
||||
let element = cx.layout_of(element)?;
|
||||
tcx.mk_layout(LayoutData {
|
||||
variants: Variants::Single { index: FIRST_VARIANT },
|
||||
fields: FieldsShape::Array { stride: element.size, count: 0 },
|
||||
backend_repr: BackendRepr::Memory { sized: false },
|
||||
largest_niche: None,
|
||||
uninhabited: false,
|
||||
align: element.align,
|
||||
size: Size::ZERO,
|
||||
max_repr_align: None,
|
||||
unadjusted_abi_align: element.align.abi,
|
||||
// adding a randomly chosen value to distinguish slices
|
||||
randomization_seed: element
|
||||
.randomization_seed
|
||||
.wrapping_add(Hash64::new(0x2dcba99c39784102)),
|
||||
})
|
||||
map_layout(cx.calc.array_like(&element, None).map(|mut layout| {
|
||||
// a randomly chosen value to distinguish slices
|
||||
layout.randomization_seed = Hash64::new(0x2dcba99c39784102);
|
||||
layout
|
||||
}))?
|
||||
}
|
||||
ty::Str => {
|
||||
let element = scalar(Int(I8, false));
|
||||
map_layout(cx.calc.array_like(&element, None).map(|mut layout| {
|
||||
// another random value
|
||||
layout.randomization_seed = Hash64::new(0xc1325f37d127be22);
|
||||
layout
|
||||
}))?
|
||||
}
|
||||
ty::Str => tcx.mk_layout(LayoutData {
|
||||
variants: Variants::Single { index: FIRST_VARIANT },
|
||||
fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
|
||||
backend_repr: BackendRepr::Memory { sized: false },
|
||||
largest_niche: None,
|
||||
uninhabited: false,
|
||||
align: dl.i8_align,
|
||||
size: Size::ZERO,
|
||||
max_repr_align: None,
|
||||
unadjusted_abi_align: dl.i8_align.abi,
|
||||
// another random value
|
||||
randomization_seed: Hash64::new(0xc1325f37d127be22),
|
||||
}),
|
||||
|
||||
// Odd unit types.
|
||||
ty::FnDef(..) => univariant(IndexSlice::empty(), StructKind::AlwaysSized)?,
|
||||
ty::Dynamic(_, _, ty::Dyn) | ty::Foreign(..) => {
|
||||
let mut unit =
|
||||
univariant_uninterned(cx, ty, IndexSlice::empty(), StructKind::AlwaysSized)?;
|
||||
match unit.backend_repr {
|
||||
BackendRepr::Memory { ref mut sized } => *sized = false,
|
||||
_ => bug!(),
|
||||
}
|
||||
tcx.mk_layout(unit)
|
||||
ty::FnDef(..) | ty::Dynamic(_, _, ty::Dyn) | ty::Foreign(..) => {
|
||||
let sized = matches!(ty.kind(), ty::FnDef(..));
|
||||
tcx.mk_layout(LayoutData::unit(cx, sized))
|
||||
}
|
||||
|
||||
ty::Coroutine(def_id, args) => coroutine_layout(cx, ty, def_id, args)?,
|
||||
ty::Coroutine(def_id, args) => {
|
||||
use rustc_middle::ty::layout::PrimitiveExt as _;
|
||||
|
||||
ty::Closure(_, args) => {
|
||||
let tys = args.as_closure().upvar_tys();
|
||||
univariant(
|
||||
&tys.iter().map(|ty| cx.layout_of(ty)).try_collect::<IndexVec<_, _>>()?,
|
||||
StructKind::AlwaysSized,
|
||||
)?
|
||||
let Some(info) = tcx.coroutine_layout(def_id, args.as_coroutine().kind_ty()) else {
|
||||
return Err(error(cx, LayoutError::Unknown(ty)));
|
||||
};
|
||||
|
||||
let local_layouts = info
|
||||
.field_tys
|
||||
.iter()
|
||||
.map(|local| {
|
||||
let field_ty = EarlyBinder::bind(local.ty);
|
||||
let uninit_ty = Ty::new_maybe_uninit(tcx, field_ty.instantiate(tcx, args));
|
||||
cx.spanned_layout_of(uninit_ty, local.source_info.span)
|
||||
})
|
||||
.try_collect::<IndexVec<_, _>>()?;
|
||||
|
||||
let prefix_layouts = args
|
||||
.as_coroutine()
|
||||
.prefix_tys()
|
||||
.iter()
|
||||
.map(|ty| cx.layout_of(ty))
|
||||
.try_collect::<IndexVec<_, _>>()?;
|
||||
|
||||
let layout = cx
|
||||
.calc
|
||||
.coroutine(
|
||||
&local_layouts,
|
||||
prefix_layouts,
|
||||
&info.variant_fields,
|
||||
&info.storage_conflicts,
|
||||
|tag| TyAndLayout {
|
||||
ty: tag.primitive().to_ty(tcx),
|
||||
layout: tcx.mk_layout(LayoutData::scalar(cx, tag)),
|
||||
},
|
||||
)
|
||||
.map(|mut layout| {
|
||||
// this is similar to how ReprOptions populates its field_shuffle_seed
|
||||
layout.randomization_seed = tcx.def_path_hash(def_id).0.to_smaller_hash();
|
||||
debug!("coroutine layout ({:?}): {:#?}", ty, layout);
|
||||
layout
|
||||
});
|
||||
map_layout(layout)?
|
||||
}
|
||||
|
||||
ty::Closure(_, args) => univariant(args.as_closure().upvar_tys(), StructKind::AlwaysSized)?,
|
||||
|
||||
ty::CoroutineClosure(_, args) => {
|
||||
let tys = args.as_coroutine_closure().upvar_tys();
|
||||
univariant(
|
||||
&tys.iter().map(|ty| cx.layout_of(ty)).try_collect::<IndexVec<_, _>>()?,
|
||||
StructKind::AlwaysSized,
|
||||
)?
|
||||
univariant(args.as_coroutine_closure().upvar_tys(), StructKind::AlwaysSized)?
|
||||
}
|
||||
|
||||
ty::Tuple(tys) => {
|
||||
let kind =
|
||||
if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };
|
||||
|
||||
univariant(&tys.iter().map(|k| cx.layout_of(k)).try_collect::<IndexVec<_, _>>()?, kind)?
|
||||
univariant(tys, kind)?
|
||||
}
|
||||
|
||||
// SIMD vector types.
|
||||
@ -461,65 +455,9 @@ fn layout_of_uncached<'tcx>(
|
||||
.try_to_target_usize(tcx)
|
||||
.ok_or_else(|| error(cx, LayoutError::Unknown(ty)))?;
|
||||
|
||||
// SIMD vectors of zero length are not supported.
|
||||
// Additionally, lengths are capped at 2^16 as a fixed maximum backends must
|
||||
// support.
|
||||
//
|
||||
// Can't be caught in typeck if the array length is generic.
|
||||
if e_len == 0 {
|
||||
tcx.dcx().emit_fatal(ZeroLengthSimdType { ty });
|
||||
} else if e_len > MAX_SIMD_LANES {
|
||||
tcx.dcx().emit_fatal(OversizedSimdType { ty, max_lanes: MAX_SIMD_LANES });
|
||||
}
|
||||
|
||||
// Compute the ABI of the element type:
|
||||
let e_ly = cx.layout_of(e_ty)?;
|
||||
let BackendRepr::Scalar(e_abi) = e_ly.backend_repr else {
|
||||
// This error isn't caught in typeck, e.g., if
|
||||
// the element type of the vector is generic.
|
||||
tcx.dcx().emit_fatal(NonPrimitiveSimdType { ty, e_ty });
|
||||
};
|
||||
|
||||
// Compute the size and alignment of the vector:
|
||||
let size = e_ly
|
||||
.size
|
||||
.checked_mul(e_len, dl)
|
||||
.ok_or_else(|| error(cx, LayoutError::SizeOverflow(ty)))?;
|
||||
|
||||
let (abi, align) = if def.repr().packed() && !e_len.is_power_of_two() {
|
||||
// Non-power-of-two vectors have padding up to the next power-of-two.
|
||||
// If we're a packed repr, remove the padding while keeping the alignment as close
|
||||
// to a vector as possible.
|
||||
(
|
||||
BackendRepr::Memory { sized: true },
|
||||
AbiAndPrefAlign {
|
||||
abi: Align::max_aligned_factor(size),
|
||||
pref: dl.llvmlike_vector_align(size).pref,
|
||||
},
|
||||
)
|
||||
} else {
|
||||
(
|
||||
BackendRepr::SimdVector { element: e_abi, count: e_len },
|
||||
dl.llvmlike_vector_align(size),
|
||||
)
|
||||
};
|
||||
let size = size.align_to(align.abi);
|
||||
|
||||
tcx.mk_layout(LayoutData {
|
||||
variants: Variants::Single { index: FIRST_VARIANT },
|
||||
fields: FieldsShape::Arbitrary {
|
||||
offsets: [Size::ZERO].into(),
|
||||
memory_index: [0].into(),
|
||||
},
|
||||
backend_repr: abi,
|
||||
largest_niche: e_ly.largest_niche,
|
||||
uninhabited: false,
|
||||
size,
|
||||
align,
|
||||
max_repr_align: None,
|
||||
unadjusted_abi_align: align.abi,
|
||||
randomization_seed: e_ly.randomization_seed.wrapping_add(Hash64::new(e_len)),
|
||||
})
|
||||
map_layout(cx.calc.simd_type(e_ly, e_len, def.repr().packed()))?
|
||||
}
|
||||
|
||||
// ADTs.
|
||||
@ -545,11 +483,7 @@ fn layout_of_uncached<'tcx>(
|
||||
return Err(error(cx, LayoutError::ReferencesError(guar)));
|
||||
}
|
||||
|
||||
return Ok(tcx.mk_layout(
|
||||
cx.calc
|
||||
.layout_of_union(&def.repr(), &variants)
|
||||
.map_err(|err| map_error(cx, ty, err))?,
|
||||
));
|
||||
return map_layout(cx.calc.layout_of_union(&def.repr(), &variants));
|
||||
}
|
||||
|
||||
let get_discriminant_type =
|
||||
@ -677,335 +611,6 @@ fn layout_of_uncached<'tcx>(
|
||||
})
|
||||
}
|
||||
|
||||
/// Overlap eligibility and variant assignment for each CoroutineSavedLocal.
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
enum SavedLocalEligibility {
|
||||
Unassigned,
|
||||
Assigned(VariantIdx),
|
||||
Ineligible(Option<FieldIdx>),
|
||||
}
|
||||
|
||||
// When laying out coroutines, we divide our saved local fields into two
|
||||
// categories: overlap-eligible and overlap-ineligible.
|
||||
//
|
||||
// Those fields which are ineligible for overlap go in a "prefix" at the
|
||||
// beginning of the layout, and always have space reserved for them.
|
||||
//
|
||||
// Overlap-eligible fields are only assigned to one variant, so we lay
|
||||
// those fields out for each variant and put them right after the
|
||||
// prefix.
|
||||
//
|
||||
// Finally, in the layout details, we point to the fields from the
|
||||
// variants they are assigned to. It is possible for some fields to be
|
||||
// included in multiple variants. No field ever "moves around" in the
|
||||
// layout; its offset is always the same.
|
||||
//
|
||||
// Also included in the layout are the upvars and the discriminant.
|
||||
// These are included as fields on the "outer" layout; they are not part
|
||||
// of any variant.
|
||||
|
||||
/// Compute the eligibility and assignment of each local.
|
||||
fn coroutine_saved_local_eligibility(
|
||||
info: &CoroutineLayout<'_>,
|
||||
) -> (DenseBitSet<CoroutineSavedLocal>, IndexVec<CoroutineSavedLocal, SavedLocalEligibility>) {
|
||||
use SavedLocalEligibility::*;
|
||||
|
||||
let mut assignments: IndexVec<CoroutineSavedLocal, SavedLocalEligibility> =
|
||||
IndexVec::from_elem(Unassigned, &info.field_tys);
|
||||
|
||||
// The saved locals not eligible for overlap. These will get
|
||||
// "promoted" to the prefix of our coroutine.
|
||||
let mut ineligible_locals = DenseBitSet::new_empty(info.field_tys.len());
|
||||
|
||||
// Figure out which of our saved locals are fields in only
|
||||
// one variant. The rest are deemed ineligible for overlap.
|
||||
for (variant_index, fields) in info.variant_fields.iter_enumerated() {
|
||||
for local in fields {
|
||||
match assignments[*local] {
|
||||
Unassigned => {
|
||||
assignments[*local] = Assigned(variant_index);
|
||||
}
|
||||
Assigned(idx) => {
|
||||
// We've already seen this local at another suspension
|
||||
// point, so it is no longer a candidate.
|
||||
trace!(
|
||||
"removing local {:?} in >1 variant ({:?}, {:?})",
|
||||
local, variant_index, idx
|
||||
);
|
||||
ineligible_locals.insert(*local);
|
||||
assignments[*local] = Ineligible(None);
|
||||
}
|
||||
Ineligible(_) => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Next, check every pair of eligible locals to see if they
|
||||
// conflict.
|
||||
for local_a in info.storage_conflicts.rows() {
|
||||
let conflicts_a = info.storage_conflicts.count(local_a);
|
||||
if ineligible_locals.contains(local_a) {
|
||||
continue;
|
||||
}
|
||||
|
||||
for local_b in info.storage_conflicts.iter(local_a) {
|
||||
// local_a and local_b are storage live at the same time, therefore they
|
||||
// cannot overlap in the coroutine layout. The only way to guarantee
|
||||
// this is if they are in the same variant, or one is ineligible
|
||||
// (which means it is stored in every variant).
|
||||
if ineligible_locals.contains(local_b) || assignments[local_a] == assignments[local_b] {
|
||||
continue;
|
||||
}
|
||||
|
||||
// If they conflict, we will choose one to make ineligible.
|
||||
// This is not always optimal; it's just a greedy heuristic that
|
||||
// seems to produce good results most of the time.
|
||||
let conflicts_b = info.storage_conflicts.count(local_b);
|
||||
let (remove, other) =
|
||||
if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
|
||||
ineligible_locals.insert(remove);
|
||||
assignments[remove] = Ineligible(None);
|
||||
trace!("removing local {:?} due to conflict with {:?}", remove, other);
|
||||
}
|
||||
}
|
||||
|
||||
// Count the number of variants in use. If only one of them, then it is
|
||||
// impossible to overlap any locals in our layout. In this case it's
|
||||
// always better to make the remaining locals ineligible, so we can
|
||||
// lay them out with the other locals in the prefix and eliminate
|
||||
// unnecessary padding bytes.
|
||||
{
|
||||
let mut used_variants = DenseBitSet::new_empty(info.variant_fields.len());
|
||||
for assignment in &assignments {
|
||||
if let Assigned(idx) = assignment {
|
||||
used_variants.insert(*idx);
|
||||
}
|
||||
}
|
||||
if used_variants.count() < 2 {
|
||||
for assignment in assignments.iter_mut() {
|
||||
*assignment = Ineligible(None);
|
||||
}
|
||||
ineligible_locals.insert_all();
|
||||
}
|
||||
}
|
||||
|
||||
// Write down the order of our locals that will be promoted to the prefix.
|
||||
{
|
||||
for (idx, local) in ineligible_locals.iter().enumerate() {
|
||||
assignments[local] = Ineligible(Some(FieldIdx::from_usize(idx)));
|
||||
}
|
||||
}
|
||||
debug!("coroutine saved local assignments: {:?}", assignments);
|
||||
|
||||
(ineligible_locals, assignments)
|
||||
}
/// Compute the full coroutine layout.
fn coroutine_layout<'tcx>(
    cx: &LayoutCx<'tcx>,
    ty: Ty<'tcx>,
    def_id: hir::def_id::DefId,
    args: GenericArgsRef<'tcx>,
) -> Result<Layout<'tcx>, &'tcx LayoutError<'tcx>> {
    use SavedLocalEligibility::*;
    let tcx = cx.tcx();
    let instantiate_field = |ty: Ty<'tcx>| EarlyBinder::bind(ty).instantiate(tcx, args);

    let Some(info) = tcx.coroutine_layout(def_id, args.as_coroutine().kind_ty()) else {
        return Err(error(cx, LayoutError::Unknown(ty)));
    };
    let (ineligible_locals, assignments) = coroutine_saved_local_eligibility(info);

    // Build a prefix layout, including "promoting" all ineligible
    // locals as part of the prefix. We compute the layout of all of
    // these fields at once to get optimal packing.
    let tag_index = args.as_coroutine().prefix_tys().len();

    // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
    let max_discr = (info.variant_fields.len() - 1) as u128;
    let discr_int = abi::Integer::fit_unsigned(max_discr);
    let tag = Scalar::Initialized {
        value: Primitive::Int(discr_int, /* signed = */ false),
        valid_range: WrappingRange { start: 0, end: max_discr },
    };
    let tag_layout = TyAndLayout {
        ty: discr_int.to_ty(tcx, /* signed = */ false),
        layout: tcx.mk_layout(LayoutData::scalar(cx, tag)),
    };
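
For a concrete feel of the tag computation: a coroutine with n variants needs discriminants 0..=n-1, and the tag is the smallest unsigned integer type that covers that range. A standalone sketch with a hand-rolled stand-in for `Integer::fit_unsigned`:

/// Smallest unsigned integer width (in bits) able to represent `max_discr`.
/// An illustrative stand-in for `rustc_abi::Integer::fit_unsigned`.
fn fit_unsigned_bits(max_discr: u128) -> u32 {
    if max_discr <= u8::MAX as u128 {
        8
    } else if max_discr <= u16::MAX as u128 {
        16
    } else if max_discr <= u32::MAX as u128 {
        32
    } else if max_discr <= u64::MAX as u128 {
        64
    } else {
        128
    }
}

fn main() {
    // A coroutine with 4 variants (2 reserved + 2 suspension points):
    // discriminants 0..=3 fit in an 8-bit tag with valid_range 0..=3.
    let max_discr = 4u128 - 1;
    assert_eq!(fit_unsigned_bits(max_discr), 8);
    // 300 variants would push the tag to 16 bits.
    assert_eq!(fit_unsigned_bits(299), 16);
    println!("tag width for max_discr {max_discr}: {} bits", fit_unsigned_bits(max_discr));
}
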
    let promoted_layouts = ineligible_locals.iter().map(|local| {
        let field_ty = instantiate_field(info.field_tys[local].ty);
        let uninit_ty = Ty::new_maybe_uninit(tcx, field_ty);
        cx.spanned_layout_of(uninit_ty, info.field_tys[local].source_info.span)
    });
    let prefix_layouts = args
        .as_coroutine()
        .prefix_tys()
        .iter()
        .map(|ty| cx.layout_of(ty))
        .chain(iter::once(Ok(tag_layout)))
        .chain(promoted_layouts)
        .try_collect::<IndexVec<_, _>>()?;
    let prefix = univariant_uninterned(cx, ty, &prefix_layouts, StructKind::AlwaysSized)?;

    let (prefix_size, prefix_align) = (prefix.size, prefix.align);

    // Split the prefix layout into the "outer" fields (upvars and
    // discriminant) and the "promoted" fields. Promoted fields will
    // get included in each variant that requested them in
    // CoroutineLayout.
    debug!("prefix = {:#?}", prefix);
    let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
        FieldsShape::Arbitrary { mut offsets, memory_index } => {
            let mut inverse_memory_index = memory_index.invert_bijective_mapping();

            // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
            // "outer" and "promoted" fields respectively.
            let b_start = FieldIdx::from_usize(tag_index + 1);
            let offsets_b = IndexVec::from_raw(offsets.raw.split_off(b_start.as_usize()));
            let offsets_a = offsets;

            // Disentangle the "a" and "b" components of `inverse_memory_index`
            // by preserving the order but keeping only one disjoint "half" each.
            // FIXME(eddyb) build a better abstraction for permutations, if possible.
            let inverse_memory_index_b: IndexVec<u32, FieldIdx> = inverse_memory_index
                .iter()
                .filter_map(|&i| i.as_u32().checked_sub(b_start.as_u32()).map(FieldIdx::from_u32))
                .collect();
            inverse_memory_index.raw.retain(|&i| i < b_start);
            let inverse_memory_index_a = inverse_memory_index;

            // Since `inverse_memory_index_{a,b}` each only refer to their
            // respective fields, they can be safely inverted
            let memory_index_a = inverse_memory_index_a.invert_bijective_mapping();
            let memory_index_b = inverse_memory_index_b.invert_bijective_mapping();

            let outer_fields =
                FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
            (outer_fields, offsets_b, memory_index_b)
        }
        _ => bug!(),
    };
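
The index juggling above is plain permutation arithmetic: `memory_index` (field → memory slot) and `inverse_memory_index` (slot → field) are mutually inverse bijections, and restricting the inverse to a contiguous block of fields yields two smaller bijections that can be inverted independently. A standalone sketch over `Vec<u32>` (the helper mirrors, but is not, the compiler's `invert_bijective_mapping`):

/// Invert a bijective mapping: if `map[i] == j` then `inverted[j] == i`.
fn invert_bijective_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverted = vec![u32::MAX; map.len()];
    for (i, &j) in map.iter().enumerate() {
        debug_assert_eq!(inverted[j as usize], u32::MAX, "mapping must be injective");
        inverted[j as usize] = i as u32;
    }
    inverted
}

fn main() {
    // Three fields laid out in memory order [field 2, field 0, field 1].
    let memory_index = vec![1, 2, 0]; // field -> memory slot
    let inverse = invert_bijective_mapping(&memory_index); // slot -> field
    assert_eq!(inverse, vec![2, 0, 1]);

    // Split at b_start = 2: keep only fields < 2 ("outer") and re-number
    // fields >= 2 ("promoted"), preserving relative memory order.
    let b_start = 2u32;
    let inverse_b: Vec<u32> =
        inverse.iter().filter_map(|&f| f.checked_sub(b_start)).collect();
    let inverse_a: Vec<u32> = inverse.iter().copied().filter(|&f| f < b_start).collect();
    assert_eq!((inverse_a.clone(), inverse_b.clone()), (vec![0, 1], vec![0]));

    // Each restricted half is again a bijection, so it can be inverted back.
    assert_eq!(invert_bijective_mapping(&inverse_a), vec![0, 1]);
    println!("outer memory_index: {:?}", invert_bijective_mapping(&inverse_a));
}
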
    let mut size = prefix.size;
    let mut align = prefix.align;
    let variants = info
        .variant_fields
        .iter_enumerated()
        .map(|(index, variant_fields)| {
            // Only include overlap-eligible fields when we compute our variant layout.
            let variant_only_tys = variant_fields
                .iter()
                .filter(|local| match assignments[**local] {
                    Unassigned => bug!(),
                    Assigned(v) if v == index => true,
                    Assigned(_) => bug!("assignment does not match variant"),
                    Ineligible(_) => false,
                })
                .map(|local| {
                    let field_ty = instantiate_field(info.field_tys[*local].ty);
                    Ty::new_maybe_uninit(tcx, field_ty)
                });

            let mut variant = univariant_uninterned(
                cx,
                ty,
                &variant_only_tys.map(|ty| cx.layout_of(ty)).try_collect::<IndexVec<_, _>>()?,
                StructKind::Prefixed(prefix_size, prefix_align.abi),
            )?;
            variant.variants = Variants::Single { index };

            let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
                bug!();
            };

            // Now, stitch the promoted and variant-only fields back together in
            // the order they are mentioned by our CoroutineLayout.
            // Because we only use some subset (that can differ between variants)
            // of the promoted fields, we can't just pick those elements of the
            // `promoted_memory_index` (as we'd end up with gaps).
            // So instead, we build an "inverse memory_index", as if all of the
            // promoted fields were being used, but leave the elements not in the
            // subset as `INVALID_FIELD_IDX`, which we can filter out later to
            // obtain a valid (bijective) mapping.
            const INVALID_FIELD_IDX: FieldIdx = FieldIdx::MAX;
            debug_assert!(variant_fields.next_index() <= INVALID_FIELD_IDX);

            let mut combined_inverse_memory_index = IndexVec::from_elem_n(
                INVALID_FIELD_IDX,
                promoted_memory_index.len() + memory_index.len(),
            );
            let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
            let combined_offsets = variant_fields
                .iter_enumerated()
                .map(|(i, local)| {
                    let (offset, memory_index) = match assignments[*local] {
                        Unassigned => bug!(),
                        Assigned(_) => {
                            let (offset, memory_index) = offsets_and_memory_index.next().unwrap();
                            (offset, promoted_memory_index.len() as u32 + memory_index)
                        }
                        Ineligible(field_idx) => {
                            let field_idx = field_idx.unwrap();
                            (promoted_offsets[field_idx], promoted_memory_index[field_idx])
                        }
                    };
                    combined_inverse_memory_index[memory_index] = i;
                    offset
                })
                .collect();

            // Remove the unused slots and invert the mapping to obtain the
            // combined `memory_index` (also see previous comment).
            combined_inverse_memory_index.raw.retain(|&i| i != INVALID_FIELD_IDX);
            let combined_memory_index = combined_inverse_memory_index.invert_bijective_mapping();

            variant.fields = FieldsShape::Arbitrary {
                offsets: combined_offsets,
                memory_index: combined_memory_index,
            };

            size = size.max(variant.size);
            align = align.max(variant.align);
            Ok(variant)
        })
        .try_collect::<IndexVec<VariantIdx, _>>()?;

    size = size.align_to(align.abi);

    let uninhabited = prefix.uninhabited || variants.iter().all(|v| v.is_uninhabited());
    let abi = BackendRepr::Memory { sized: true };

    // this is similar to how ReprOptions populates its field_shuffle_seed
    let def_hash = tcx.def_path_hash(def_id).0.to_smaller_hash();

    let layout = tcx.mk_layout(LayoutData {
        variants: Variants::Multiple {
            tag,
            tag_encoding: TagEncoding::Direct,
            tag_field: tag_index,
            variants,
        },
        fields: outer_fields,
        backend_repr: abi,
        // Suppress niches inside coroutines. If the niche is inside a field that is aliased (due to
        // self-referentiality), getting the discriminant can cause aliasing violations.
        // `UnsafeCell` blocks niches for the same reason, but we don't yet have `UnsafePinned` that
        // would do the same for us here.
        // See <https://github.com/rust-lang/rust/issues/63818>, <https://github.com/rust-lang/miri/issues/3780>.
        // FIXME: Remove when <https://github.com/rust-lang/rust/issues/125735> is implemented and aliased coroutine fields are wrapped in `UnsafePinned`.
        largest_niche: None,
        uninhabited,
        size,
        align,
        max_repr_align: None,
        unadjusted_abi_align: align.abi,
        randomization_seed: def_hash,
    });
    debug!("coroutine layout ({:?}): {:#?}", ty, layout);
    Ok(layout)
}
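
The final size arithmetic is worth spelling out: every variant layout already includes the shared prefix, so the coroutine's size is the largest variant size rounded up to the overall alignment. A standalone sketch in plain byte counts:

/// Round `size` up to the next multiple of `align` (a power of two).
fn align_to(size: u64, align: u64) -> u64 {
    debug_assert!(align.is_power_of_two());
    (size + align - 1) & !(align - 1)
}

fn main() {
    // Prefix is 9 bytes; two variants extend it to 13 and 21 bytes,
    // and the largest field alignment seen anywhere is 8.
    let variant_sizes = [13u64, 21];
    let align = 8u64;
    let size = variant_sizes.iter().copied().fold(9, u64::max);
    assert_eq!(align_to(size, align), 24); // 21 rounded up to a multiple of 8
    println!("coroutine size: {} bytes", align_to(size, align));
}
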
fn record_layout_for_printing<'tcx>(cx: &LayoutCx<'tcx>, layout: TyAndLayout<'tcx>) {
    // Ignore layouts that are done with non-empty environments or
    // non-monomorphic layouts, as the user only wants to see the stuff

@ -6,16 +6,15 @@ use base_db::ra_salsa::Cycle;
use chalk_ir::{AdtId, FloatTy, IntTy, TyKind, UintTy};
use hir_def::{
    layout::{
        BackendRepr, FieldsShape, Float, Integer, LayoutCalculator, LayoutCalculatorError,
        LayoutData, Primitive, ReprOptions, Scalar, Size, StructKind, TargetDataLayout,
        Float, Integer, LayoutCalculator, LayoutCalculatorError,
        LayoutData, Primitive, ReprOptions, Scalar, StructKind, TargetDataLayout,
        WrappingRange,
    },
    LocalFieldId, StructId,
};
use la_arena::{Idx, RawIdx};
use rustc_abi::AddressSpace;
use rustc_hashes::Hash64;
use rustc_index::{IndexSlice, IndexVec};
use rustc_index::IndexVec;

use triomphe::Arc;

@ -23,7 +22,6 @@ use crate::{
    consteval::try_const_usize,
    db::{HirDatabase, InternedClosure},
    infer::normalize,
    layout::adt::struct_variant_idx,
    utils::ClosureSubst,
    Interner, ProjectionTy, Substitution, TraitEnvironment, Ty,
};
@ -125,10 +123,10 @@ impl<'a> LayoutCx<'a> {
    }
}

// FIXME: move this to the `rustc_abi`.
fn layout_of_simd_ty(
    db: &dyn HirDatabase,
    id: StructId,
    repr_packed: bool,
    subst: &Substitution,
    env: Arc<TraitEnvironment>,
    dl: &TargetDataLayout,
@ -149,33 +147,10 @@ fn layout_of_simd_ty(
    };

    let e_len = try_const_usize(db, &e_len).ok_or(LayoutError::HasErrorConst)? as u64;

    // Compute the ABI of the element type:
    let e_ly = db.layout_of_ty(e_ty, env)?;
    let BackendRepr::Scalar(e_abi) = e_ly.backend_repr else {
        return Err(LayoutError::Unknown);
    };

    // Compute the size and alignment of the vector:
    let size = e_ly
        .size
        .checked_mul(e_len, dl)
        .ok_or(LayoutError::BadCalc(LayoutCalculatorError::SizeOverflow))?;
    let align = dl.llvmlike_vector_align(size);
    let size = size.align_to(align.abi);

    Ok(Arc::new(Layout {
        variants: Variants::Single { index: struct_variant_idx() },
        fields: FieldsShape::Arbitrary { offsets: [Size::ZERO].into(), memory_index: [0].into() },
        backend_repr: BackendRepr::SimdVector { element: e_abi, count: e_len },
        largest_niche: e_ly.largest_niche,
        uninhabited: false,
        size,
        align,
        max_repr_align: None,
        unadjusted_abi_align: align.abi,
        randomization_seed: Hash64::ZERO,
    }))
    let cx = LayoutCx::new(dl);
    Ok(Arc::new(cx.calc.simd_type(e_ly, e_len, repr_packed)?))
}
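
The removed block shows what the new `simd_type` helper has to compute: the vector's size is the element size times the lane count (checked for overflow), and its alignment comes from the target's LLVM-like vector alignment rule, with the size rounded up to match. A standalone sketch, using a simplified power-of-two rule where real targets consult a per-target table:

/// Simplified stand-in for `TargetDataLayout::llvmlike_vector_align`:
/// real targets consult a table; as a rough model, vectors get an
/// alignment equal to their size rounded up to a power of two.
fn vector_align(vec_size_bytes: u64) -> u64 {
    vec_size_bytes.max(1).next_power_of_two()
}

/// Returns `(size, align)` in bytes, or `None` on size overflow
/// (which the real code reports as `SizeOverflow`).
fn simd_layout(elem_size: u64, lanes: u64) -> Option<(u64, u64)> {
    let size = elem_size.checked_mul(lanes)?;
    let align = vector_align(size);
    let size = size.div_ceil(align) * align; // round size up to the alignment
    Some((size, align))
}

fn main() {
    // f32x4: 4-byte elements, 4 lanes -> 16 bytes, 16-byte aligned.
    assert_eq!(simd_layout(4, 4), Some((16, 16)));
    // f32x3: 12 bytes of data, padded out to a 16-byte, 16-aligned vector.
    assert_eq!(simd_layout(4, 3), Some((16, 16)));
    println!("{:?}", simd_layout(4, 3));
}
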
pub fn layout_of_ty_query(
@ -190,13 +165,14 @@ pub fn layout_of_ty_query(
    let dl = &*target;
    let cx = LayoutCx::new(dl);
    let ty = normalize(db, trait_env.clone(), ty);
    let result = match ty.kind(Interner) {
    let kind = ty.kind(Interner);
    let result = match kind {
        TyKind::Adt(AdtId(def), subst) => {
            if let hir_def::AdtId::StructId(s) = def {
                let data = db.struct_data(*s);
                let repr = data.repr.unwrap_or_default();
                if repr.simd() {
                    return layout_of_simd_ty(db, *s, subst, trait_env, &target);
                    return layout_of_simd_ty(db, *s, repr.packed(), subst, trait_env, &target);
                }
            };
            return db.layout_of_adt(*def, subst.clone(), trait_env);
@ -216,7 +192,7 @@ pub fn layout_of_ty_query(
                    valid_range: WrappingRange { start: 0, end: 0x10FFFF },
                },
            ),
            chalk_ir::Scalar::Int(i) => scalar(
            chalk_ir::Scalar::Int(i) => Layout::scalar(dl, scalar_unit(
                dl,
                Primitive::Int(
                    match i {
@ -229,8 +205,8 @@ pub fn layout_of_ty_query(
                    },
                    true,
                ),
            ),
            chalk_ir::Scalar::Uint(i) => scalar(
            )),
            chalk_ir::Scalar::Uint(i) => Layout::scalar(dl, scalar_unit(
                dl,
                Primitive::Int(
                    match i {
@ -243,8 +219,8 @@ pub fn layout_of_ty_query(
                    },
                    false,
                ),
            ),
            chalk_ir::Scalar::Float(f) => scalar(
            )),
            chalk_ir::Scalar::Float(f) => Layout::scalar(dl, scalar_unit(
                dl,
                Primitive::Float(match f {
                    FloatTy::F16 => Float::F16,
@ -252,7 +228,7 @@ pub fn layout_of_ty_query(
                    FloatTy::F64 => Float::F64,
                    FloatTy::F128 => Float::F128,
                }),
            ),
            )),
        },
        TyKind::Tuple(len, tys) => {
            let kind = if *len == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };
@ -268,56 +244,16 @@ pub fn layout_of_ty_query(
        TyKind::Array(element, count) => {
            let count = try_const_usize(db, count).ok_or(LayoutError::HasErrorConst)? as u64;
            let element = db.layout_of_ty(element.clone(), trait_env)?;
            let size = element
                .size
                .checked_mul(count, dl)
                .ok_or(LayoutError::BadCalc(LayoutCalculatorError::SizeOverflow))?;

            let backend_repr = BackendRepr::Memory { sized: true };

            let largest_niche = if count != 0 { element.largest_niche } else { None };
            let uninhabited = if count != 0 { element.uninhabited } else { false };

            Layout {
                variants: Variants::Single { index: struct_variant_idx() },
                fields: FieldsShape::Array { stride: element.size, count },
                backend_repr,
                largest_niche,
                uninhabited,
                align: element.align,
                size,
                max_repr_align: None,
                unadjusted_abi_align: element.align.abi,
                randomization_seed: Hash64::ZERO,
            }
            cx.calc.array_like::<_, _, ()>(&element, Some(count))?
        }
        TyKind::Slice(element) => {
            let element = db.layout_of_ty(element.clone(), trait_env)?;
            Layout {
                variants: Variants::Single { index: struct_variant_idx() },
                fields: FieldsShape::Array { stride: element.size, count: 0 },
                backend_repr: BackendRepr::Memory { sized: false },
                largest_niche: None,
                uninhabited: false,
                align: element.align,
                size: Size::ZERO,
                max_repr_align: None,
                unadjusted_abi_align: element.align.abi,
                randomization_seed: Hash64::ZERO,
            }
            cx.calc.array_like::<_, _, ()>(&element, None)?
        }
        TyKind::Str => {
            let element = scalar_unit(dl, Primitive::Int(Integer::I8, false));
            cx.calc.array_like::<_, _, ()>(&Layout::scalar(dl, element), None)?
        }
        TyKind::Str => Layout {
            variants: Variants::Single { index: struct_variant_idx() },
            fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
            backend_repr: BackendRepr::Memory { sized: false },
            largest_niche: None,
            uninhabited: false,
            align: dl.i8_align,
            size: Size::ZERO,
            max_repr_align: None,
            unadjusted_abi_align: dl.i8_align.abi,
            randomization_seed: Hash64::ZERO,
        },
        // Potentially-wide pointers.
        TyKind::Ref(_, _, pointee) | TyKind::Raw(_, pointee) => {
            let mut data_ptr = scalar_unit(dl, Primitive::Pointer(AddressSpace::DATA));
@ -355,17 +291,12 @@ pub fn layout_of_ty_query(
            };

            // Effectively a (ptr, meta) tuple.
            cx.calc.scalar_pair(data_ptr, metadata)
            LayoutData::scalar_pair(dl, data_ptr, metadata)
        }
        TyKind::FnDef(_, _) => layout_of_unit(&cx)?,
        TyKind::Never => cx.calc.layout_of_never_type(),
        TyKind::Dyn(_) | TyKind::Foreign(_) => {
            let mut unit = layout_of_unit(&cx)?;
            match &mut unit.backend_repr {
                BackendRepr::Memory { sized } => *sized = false,
                _ => return Err(LayoutError::Unknown),
            }
            unit
        TyKind::Never => LayoutData::never_type(dl),
        TyKind::FnDef(..) | TyKind::Dyn(_) | TyKind::Foreign(_) => {
            let sized = matches!(kind, TyKind::FnDef(..));
            LayoutData::unit(dl, sized)
        }
        TyKind::Function(_) => {
            let mut ptr = scalar_unit(dl, Primitive::Pointer(dl.instruction_address_space));
@ -434,16 +365,6 @@ pub fn layout_of_ty_recover(
    Err(LayoutError::RecursiveTypeWithoutIndirection)
}
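
The `LayoutData::scalar_pair` call used for wide pointers above follows the usual two-scalar recipe: the second component lands at the first offset that satisfies its alignment, and the pair is padded to the larger of the two alignments. A standalone sketch, assuming a hypothetical 64-bit target with 8-byte, 8-aligned pointers:

/// Round `size` up to the next multiple of `align` (a power of two).
fn align_to(size: u64, align: u64) -> u64 {
    (size + align - 1) & !(align - 1)
}

/// Size, alignment, and field offsets of a (data pointer, metadata) pair.
fn scalar_pair_layout(a_size: u64, a_align: u64, b_size: u64, b_align: u64) -> (u64, u64, [u64; 2]) {
    let align = a_align.max(b_align);
    let b_offset = align_to(a_size, b_align); // place `b` after `a`, suitably aligned
    let size = align_to(b_offset + b_size, align);
    (size, align, [0, b_offset])
}

fn main() {
    // &[u8]: an 8-byte data pointer plus an 8-byte length.
    let (size, align, offsets) = scalar_pair_layout(8, 8, 8, 8);
    assert_eq!((size, align, offsets), (16, 8, [0, 8]));
    println!("wide pointer: size={size}, align={align}, offsets={offsets:?}");
}
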
fn layout_of_unit(cx: &LayoutCx<'_>) -> Result<Layout, LayoutError> {
    cx.calc
        .univariant::<RustcFieldIdx, RustcEnumVariantIdx, &&Layout>(
            IndexSlice::empty(),
            &ReprOptions::default(),
            StructKind::AlwaysSized,
        )
        .map_err(Into::into)
}

fn struct_tail_erasing_lifetimes(db: &dyn HirDatabase, pointee: Ty) -> Ty {
    match pointee.kind(Interner) {
        TyKind::Adt(AdtId(hir_def::AdtId::StructId(i)), subst) => {
@ -474,9 +395,5 @@ fn scalar_unit(dl: &TargetDataLayout, value: Primitive) -> Scalar {
    Scalar::Initialized { value, valid_range: WrappingRange::full(value.size(dl)) }
}

fn scalar(dl: &TargetDataLayout, value: Primitive) -> Layout {
    Layout::scalar(dl, scalar_unit(dl, value))
}

#[cfg(test)]
mod tests;
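
The three removed hand-written layouts (array, slice, `str`) all reduce to one "stride repeated count times" shape, which is what the new `array_like` helper captures: arrays pass `Some(count)`, while slices and `str` pass `None` and come out unsized with size zero. A standalone sketch of that shared logic (the struct here is illustrative, not the real `LayoutData`):

#[derive(Debug, PartialEq)]
struct ArrayLikeLayout {
    size: u64,
    align: u64,
    sized: bool,
}

/// Shared shape behind arrays, slices and `str`: a stride repeated
/// `count` times. `None` models the unsized (slice/str) case.
fn array_like(elem_size: u64, elem_align: u64, count: Option<u64>) -> Option<ArrayLikeLayout> {
    match count {
        Some(n) => Some(ArrayLikeLayout {
            size: elem_size.checked_mul(n)?, // overflow is a layout error
            align: elem_align,
            sized: true,
        }),
        None => Some(ArrayLikeLayout { size: 0, align: elem_align, sized: false }),
    }
}

fn main() {
    // [u32; 5]: 4-byte stride, five elements.
    assert_eq!(
        array_like(4, 4, Some(5)),
        Some(ArrayLikeLayout { size: 20, align: 4, sized: true })
    );
    // str: u8 elements, unsized, zero "static" size.
    assert_eq!(array_like(1, 1, None), Some(ArrayLikeLayout { size: 0, align: 1, sized: false }));
}
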
@ -16,16 +16,12 @@ use triomphe::Arc;
use crate::{
    db::HirDatabase,
    lang_items::is_unsafe_cell,
    layout::{field_ty, Layout, LayoutError, RustcEnumVariantIdx},
    layout::{field_ty, Layout, LayoutError},
    Substitution, TraitEnvironment,
};

use super::LayoutCx;

pub(crate) fn struct_variant_idx() -> RustcEnumVariantIdx {
    RustcEnumVariantIdx(0)
}

pub fn layout_of_adt_query(
    db: &dyn HirDatabase,
    def: AdtId,

@ -12,9 +12,6 @@ extern crate ra_ap_rustc_index as rustc_index;
#[cfg(feature = "in-rust-tree")]
extern crate rustc_abi;

#[cfg(feature = "in-rust-tree")]
extern crate rustc_hashes;

#[cfg(not(feature = "in-rust-tree"))]
extern crate ra_ap_rustc_abi as rustc_abi;

@ -3,6 +3,9 @@ error[E0733]: recursion in an async fn requires boxing
   |
LL | async fn second(self) {
   | ^^^^^^^^^^^^^^^^^^^^^
LL |
LL | self.first().await.second().await;
   | --------------------------------- recursive call here
   |
   = note: a recursive `async fn` call must introduce indirection such as `Box::pin` to avoid an infinitely sized future

@ -1,4 +1,4 @@
//@ build-fail
//@ check-fail
//@ edition: 2021

use std::future::Future;

@ -12,12 +12,6 @@ LL | Blah::iter(self, iterator).await
   |
   = note: a recursive `async fn` call must introduce indirection such as `Box::pin` to avoid an infinitely sized future

note: the above error was encountered while instantiating `fn Wrap::<()>::ice`
  --> $DIR/post-mono-layout-cycle-2.rs:54:9
   |
LL | t.ice();
   | ^^^^^^^

error: aborting due to 1 previous error

For more information about this error, try `rustc --explain E0733`.