Move coroutine layout logic to rustc_abi

Moulins 2025-03-07 22:13:22 +01:00
parent b8a217081d
commit 08530d3e99
3 changed files with 373 additions and 334 deletions

View File

@@ -4,6 +4,7 @@ use std::{cmp, iter};
use rustc_hashes::Hash64;
use rustc_index::Idx;
use rustc_index::bit_set::BitMatrix;
use tracing::debug;
use crate::{
@@ -12,6 +13,7 @@ use crate::{
Variants, WrappingRange,
};
mod coroutine;
mod simple;
#[cfg(feature = "nightly")]
@@ -200,6 +202,34 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
})
}
/// Compute the layout for a coroutine.
///
/// This uses dedicated code instead of [`Self::layout_of_struct_or_enum`], as coroutine
/// fields may be shared between multiple variants (see the [`coroutine`] module for details).
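///
/// Inputs, as consumed by the implementation in the [`coroutine`] module:
/// - `local_layouts`: the layout of each saved local,
/// - `prefix_layouts`: layouts of the fields that always sit at the start of the
///   coroutine (e.g. the upvars); the tag and promoted locals are appended after them,
/// - `variant_fields`: for each variant, the saved locals it stores,
/// - `storage_conflicts`: which pairs of saved locals are storage-live at the same time
///   and therefore must not overlap,
/// - `tag_to_layout`: maps the computed tag scalar to a layout for the tag field.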
pub fn coroutine<
'a,
F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
VariantIdx: Idx,
FieldIdx: Idx,
LocalIdx: Idx,
>(
&self,
local_layouts: &IndexSlice<LocalIdx, F>,
prefix_layouts: IndexVec<FieldIdx, F>,
variant_fields: &IndexSlice<VariantIdx, IndexVec<FieldIdx, LocalIdx>>,
storage_conflicts: &BitMatrix<LocalIdx, LocalIdx>,
tag_to_layout: impl Fn(Scalar) -> F,
) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
coroutine::layout(
self,
local_layouts,
prefix_layouts,
variant_fields,
storage_conflicts,
tag_to_layout,
)
}
pub fn univariant<
'a,
FieldIdx: Idx,

View File

@@ -0,0 +1,320 @@
//! Coroutine layout logic.
//!
//! When laying out coroutines, we divide our saved local fields into two
//! categories: overlap-eligible and overlap-ineligible.
//!
//! Those fields which are ineligible for overlap go in a "prefix" at the
//! beginning of the layout, and always have space reserved for them.
//!
//! Overlap-eligible fields are only assigned to one variant, so we lay
//! those fields out for each variant and put them right after the
//! prefix.
//!
//! Finally, in the layout details, we point to the fields from the
//! variants they are assigned to. It is possible for some fields to be
//! included in multiple variants. No field ever "moves around" in the
//! layout; its offset is always the same.
//!
//! Also included in the layout are the upvars and the discriminant.
//! These are included as fields on the "outer" layout; they are not part
//! of any variant.
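//!
//! As a rough illustration (not the exact MIR lowering), consider a coroutine with two
//! suspension points, where local `a` is live across both and locals `b` and `c` are each
//! live across only one:
//!
//! ```text
//! Suspend0: { a, b }
//! Suspend1: { a, c }
//! ```
//!
//! `a` appears in more than one variant, so it is ineligible for overlap and gets promoted
//! into the prefix; `b` and `c` are each assigned to a single variant and may end up
//! sharing the same offset right after the prefix.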
use std::iter;
use rustc_index::bit_set::{BitMatrix, DenseBitSet};
use rustc_index::{Idx, IndexSlice, IndexVec};
use tracing::{debug, trace};
use crate::{
BackendRepr, FieldsShape, HasDataLayout, Integer, LayoutData, Primitive, ReprOptions, Scalar,
StructKind, TagEncoding, Variants, WrappingRange,
};
/// Overlap eligibility and variant assignment for each CoroutineSavedLocal.
#[derive(Clone, Debug, PartialEq)]
enum SavedLocalEligibility<VariantIdx, FieldIdx> {
Unassigned,
Assigned(VariantIdx),
Ineligible(Option<FieldIdx>),
}
/// Compute the eligibility and assignment of each local.
fn coroutine_saved_local_eligibility<VariantIdx: Idx, FieldIdx: Idx, LocalIdx: Idx>(
nb_locals: usize,
variant_fields: &IndexSlice<VariantIdx, IndexVec<FieldIdx, LocalIdx>>,
storage_conflicts: &BitMatrix<LocalIdx, LocalIdx>,
) -> (DenseBitSet<LocalIdx>, IndexVec<LocalIdx, SavedLocalEligibility<VariantIdx, FieldIdx>>) {
use SavedLocalEligibility::*;
let mut assignments: IndexVec<LocalIdx, _> = IndexVec::from_elem_n(Unassigned, nb_locals);
// The saved locals not eligible for overlap. These will get
// "promoted" to the prefix of our coroutine.
let mut ineligible_locals = DenseBitSet::new_empty(nb_locals);
// Figure out which of our saved locals are fields in only
// one variant. The rest are deemed ineligible for overlap.
for (variant_index, fields) in variant_fields.iter_enumerated() {
for local in fields {
match assignments[*local] {
Unassigned => {
assignments[*local] = Assigned(variant_index);
}
Assigned(idx) => {
// We've already seen this local at another suspension
// point, so it is no longer a candidate.
trace!(
"removing local {:?} in >1 variant ({:?}, {:?})",
local, variant_index, idx
);
ineligible_locals.insert(*local);
assignments[*local] = Ineligible(None);
}
Ineligible(_) => {}
}
}
}
// Next, check every pair of eligible locals to see if they
// conflict.
for local_a in storage_conflicts.rows() {
let conflicts_a = storage_conflicts.count(local_a);
if ineligible_locals.contains(local_a) {
continue;
}
for local_b in storage_conflicts.iter(local_a) {
// local_a and local_b are storage live at the same time, therefore they
// cannot overlap in the coroutine layout. The only way to guarantee
// this is if they are in the same variant, or one is ineligible
// (which means it is stored in every variant).
if ineligible_locals.contains(local_b) || assignments[local_a] == assignments[local_b] {
continue;
}
// If they conflict, we will choose one to make ineligible.
// This is not always optimal; it's just a greedy heuristic that
// seems to produce good results most of the time.
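// (E.g. if `local_a` conflicts with three other locals and `local_b` with only one,
// `local_a` is the one made ineligible below.)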
let conflicts_b = storage_conflicts.count(local_b);
let (remove, other) =
if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
ineligible_locals.insert(remove);
assignments[remove] = Ineligible(None);
trace!("removing local {:?} due to conflict with {:?}", remove, other);
}
}
// Count the number of variants in use. If only one of them, then it is
// impossible to overlap any locals in our layout. In this case it's
// always better to make the remaining locals ineligible, so we can
// lay them out with the other locals in the prefix and eliminate
// unnecessary padding bytes.
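// (E.g. when all eligible locals ended up assigned to the same variant, or none were
// assigned at all.)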
{
let mut used_variants = DenseBitSet::new_empty(variant_fields.len());
for assignment in &assignments {
if let Assigned(idx) = assignment {
used_variants.insert(*idx);
}
}
if used_variants.count() < 2 {
for assignment in assignments.iter_mut() {
*assignment = Ineligible(None);
}
ineligible_locals.insert_all();
}
}
// Write down the order of our locals that will be promoted to the prefix.
{
for (idx, local) in ineligible_locals.iter().enumerate() {
assignments[local] = Ineligible(Some(FieldIdx::new(idx)));
}
}
debug!("coroutine saved local assignments: {:?}", assignments);
(ineligible_locals, assignments)
}
/// Compute the full coroutine layout.
pub(super) fn layout<
'a,
F: core::ops::Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + core::fmt::Debug + Copy,
VariantIdx: Idx,
FieldIdx: Idx,
LocalIdx: Idx,
>(
calc: &super::LayoutCalculator<impl HasDataLayout>,
local_layouts: &IndexSlice<LocalIdx, F>,
mut prefix_layouts: IndexVec<FieldIdx, F>,
variant_fields: &IndexSlice<VariantIdx, IndexVec<FieldIdx, LocalIdx>>,
storage_conflicts: &BitMatrix<LocalIdx, LocalIdx>,
tag_to_layout: impl Fn(Scalar) -> F,
) -> super::LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
use SavedLocalEligibility::*;
let (ineligible_locals, assignments) =
coroutine_saved_local_eligibility(local_layouts.len(), variant_fields, storage_conflicts);
// Build a prefix layout, including "promoting" all ineligible
// locals as part of the prefix. We compute the layout of all of
// these fields at once to get optimal packing.
let tag_index = prefix_layouts.len();
// `variant_fields` already accounts for the reserved variants, so no need to add them.
let max_discr = (variant_fields.len() - 1) as u128;
let discr_int = Integer::fit_unsigned(max_discr);
let tag = Scalar::Initialized {
value: Primitive::Int(discr_int, /* signed = */ false),
valid_range: WrappingRange { start: 0, end: max_discr },
};
let promoted_layouts = ineligible_locals.iter().map(|local| local_layouts[local]);
prefix_layouts.push(tag_to_layout(tag));
prefix_layouts.extend(promoted_layouts);
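// `prefix_layouts` is now: the original prefix fields, then the tag, then the promoted
// (ineligible) locals. `univariant` below may still reorder these in memory, which is
// what the `memory_index` bookkeeping afterwards has to untangle.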
let prefix =
calc.univariant(&prefix_layouts, &ReprOptions::default(), StructKind::AlwaysSized)?;
let (prefix_size, prefix_align) = (prefix.size, prefix.align);
// Split the prefix layout into the "outer" fields (upvars and
// discriminant) and the "promoted" fields. Promoted fields will
// get included in each variant that requested them in
// CoroutineLayout.
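// Field indices `0..=tag_index` are the outer fields (original prefix plus the tag);
// indices `tag_index + 1..` are the promoted locals.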
debug!("prefix = {:#?}", prefix);
let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
FieldsShape::Arbitrary { mut offsets, memory_index } => {
let mut inverse_memory_index = memory_index.invert_bijective_mapping();
// "a" (`0..b_start`) and "b" (`b_start..`) correspond to
// "outer" and "promoted" fields respectively.
let b_start = FieldIdx::new(tag_index + 1);
let offsets_b = IndexVec::from_raw(offsets.raw.split_off(b_start.index()));
let offsets_a = offsets;
// Disentangle the "a" and "b" components of `inverse_memory_index`
// by preserving the order but keeping only one disjoint "half" each.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
let inverse_memory_index_b: IndexVec<u32, FieldIdx> = inverse_memory_index
.iter()
.filter_map(|&i| i.index().checked_sub(b_start.index()).map(FieldIdx::new))
.collect();
inverse_memory_index.raw.retain(|&i| i.index() < b_start.index());
let inverse_memory_index_a = inverse_memory_index;
// Since `inverse_memory_index_{a,b}` each only refer to their
// respective fields, they can be safely inverted
let memory_index_a = inverse_memory_index_a.invert_bijective_mapping();
let memory_index_b = inverse_memory_index_b.invert_bijective_mapping();
let outer_fields =
FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
(outer_fields, offsets_b, memory_index_b)
}
_ => unreachable!(),
};
let mut size = prefix.size;
let mut align = prefix.align;
let variants = variant_fields
.iter_enumerated()
.map(|(index, variant_fields)| {
// Only include overlap-eligible fields when we compute our variant layout.
let variant_only_tys = variant_fields
.iter()
.filter(|local| match assignments[**local] {
Unassigned => unreachable!(),
Assigned(v) if v == index => true,
Assigned(_) => unreachable!("assignment does not match variant"),
Ineligible(_) => false,
})
.map(|local| local_layouts[*local]);
let mut variant = calc.univariant(
&variant_only_tys.collect::<IndexVec<_, _>>(),
&ReprOptions::default(),
StructKind::Prefixed(prefix_size, prefix_align.abi),
)?;
variant.variants = Variants::Single { index };
let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
unreachable!();
};
// Now, stitch the promoted and variant-only fields back together in
// the order they are mentioned by our CoroutineLayout.
// Because we only use some subset (that can differ between variants)
// of the promoted fields, we can't just pick those elements of the
// `promoted_memory_index` (as we'd end up with gaps).
// So instead, we build an "inverse memory_index", as if all of the
// promoted fields were being used, but leave the elements not in the
// subset as `invalid_field_idx`, which we can filter out later to
// obtain a valid (bijective) mapping.
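// (E.g. if the prefix has two promoted fields but this variant only uses the second one,
// the slot for the first keeps the `invalid_field_idx` sentinel and is removed by the
// `retain` below.)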
let invalid_field_idx = promoted_memory_index.len() + memory_index.len();
let mut combined_inverse_memory_index =
IndexVec::from_elem_n(FieldIdx::new(invalid_field_idx), invalid_field_idx);
let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
let combined_offsets = variant_fields
.iter_enumerated()
.map(|(i, local)| {
let (offset, memory_index) = match assignments[*local] {
Unassigned => unreachable!(),
Assigned(_) => {
let (offset, memory_index) = offsets_and_memory_index.next().unwrap();
(offset, promoted_memory_index.len() as u32 + memory_index)
}
Ineligible(field_idx) => {
let field_idx = field_idx.unwrap();
(promoted_offsets[field_idx], promoted_memory_index[field_idx])
}
};
combined_inverse_memory_index[memory_index] = i;
offset
})
.collect();
// Remove the unused slots and invert the mapping to obtain the
// combined `memory_index` (also see previous comment).
combined_inverse_memory_index.raw.retain(|&i| i.index() != invalid_field_idx);
let combined_memory_index = combined_inverse_memory_index.invert_bijective_mapping();
variant.fields = FieldsShape::Arbitrary {
offsets: combined_offsets,
memory_index: combined_memory_index,
};
size = size.max(variant.size);
align = align.max(variant.align);
Ok(variant)
})
.collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
size = size.align_to(align.abi);
let uninhabited = prefix.uninhabited || variants.iter().all(|v| v.is_uninhabited());
let abi = BackendRepr::Memory { sized: true };
Ok(LayoutData {
variants: Variants::Multiple {
tag,
tag_encoding: TagEncoding::Direct,
tag_field: tag_index,
variants,
},
fields: outer_fields,
backend_repr: abi,
// Suppress niches inside coroutines. If the niche is inside a field that is aliased (due to
// self-referentiality), getting the discriminant can cause aliasing violations.
// `UnsafeCell` blocks niches for the same reason, but we don't yet have `UnsafePinned` that
// would do the same for us here.
// See <https://github.com/rust-lang/rust/issues/63818>, <https://github.com/rust-lang/miri/issues/3780>.
// FIXME: Remove when <https://github.com/rust-lang/rust/issues/125735> is implemented and aliased coroutine fields are wrapped in `UnsafePinned`.
largest_niche: None,
uninhabited,
size,
align,
max_repr_align: None,
unadjusted_abi_align: align.abi,
randomization_seed: Default::default(),
})
}

View File

@@ -1,17 +1,13 @@
use std::fmt::Debug;
use std::iter;
use hir::def_id::DefId;
use rustc_abi::Integer::{I8, I32};
use rustc_abi::Primitive::{self, Float, Int, Pointer};
use rustc_abi::{
AddressSpace, BackendRepr, FIRST_VARIANT, FieldIdx, FieldsShape, HasDataLayout, Integer,
Layout, LayoutCalculator, LayoutCalculatorError, LayoutData, Niche, ReprOptions, Scalar, Size,
StructKind, TagEncoding, VariantIdx, Variants, WrappingRange,
AddressSpace, BackendRepr, FIRST_VARIANT, FieldIdx, FieldsShape, HasDataLayout, Layout,
LayoutCalculatorError, LayoutData, Niche, ReprOptions, Scalar, Size, StructKind, TagEncoding,
VariantIdx, Variants, WrappingRange,
};
use rustc_hashes::Hash64;
use rustc_index::bit_set::{BitMatrix, DenseBitSet};
use rustc_index::{Idx, IndexSlice, IndexVec};
use rustc_index::IndexVec;
use rustc_middle::bug;
use rustc_middle::query::Providers;
use rustc_middle::ty::layout::{
@@ -23,7 +19,7 @@ use rustc_middle::ty::{
};
use rustc_session::{DataTypeKind, FieldInfo, FieldKind, SizeKind, VariantInfo};
use rustc_span::{Symbol, sym};
use tracing::{debug, instrument, trace};
use tracing::{debug, instrument};
use {rustc_abi as abi, rustc_hir as hir};
use crate::errors::{NonPrimitiveSimdType, OversizedSimdType, ZeroLengthSimdType};
@@ -403,23 +399,24 @@ fn layout_of_uncached<'tcx>(
.map(|ty| cx.layout_of(ty))
.try_collect::<IndexVec<_, _>>()?;
let layout = coroutine_layout(
&cx.calc,
&local_layouts,
prefix_layouts,
&info.variant_fields,
&info.storage_conflicts,
|tag| TyAndLayout {
ty: tag.primitive().to_ty(tcx),
layout: tcx.mk_layout(LayoutData::scalar(cx, tag)),
},
)
.map(|mut layout| {
// this is similar to how ReprOptions populates its field_shuffle_seed
layout.randomization_seed = tcx.def_path_hash(def_id).0.to_smaller_hash();
debug!("coroutine layout ({:?}): {:#?}", ty, layout);
layout
});
let layout = cx
.calc
.coroutine(
&local_layouts,
prefix_layouts,
&info.variant_fields,
&info.storage_conflicts,
|tag| TyAndLayout {
ty: tag.primitive().to_ty(tcx),
layout: tcx.mk_layout(LayoutData::scalar(cx, tag)),
},
)
.map(|mut layout| {
// this is similar to how ReprOptions populates its field_shuffle_seed
layout.randomization_seed = tcx.def_path_hash(def_id).0.to_smaller_hash();
debug!("coroutine layout ({:?}): {:#?}", ty, layout);
layout
});
map_layout(layout)?
}
@@ -614,314 +611,6 @@ fn layout_of_uncached<'tcx>(
})
}
/// Overlap eligibility and variant assignment for each CoroutineSavedLocal.
#[derive(Clone, Debug, PartialEq)]
enum SavedLocalEligibility<VariantIdx, FieldIdx> {
Unassigned,
Assigned(VariantIdx),
Ineligible(Option<FieldIdx>),
}
// When laying out coroutines, we divide our saved local fields into two
// categories: overlap-eligible and overlap-ineligible.
//
// Those fields which are ineligible for overlap go in a "prefix" at the
// beginning of the layout, and always have space reserved for them.
//
// Overlap-eligible fields are only assigned to one variant, so we lay
// those fields out for each variant and put them right after the
// prefix.
//
// Finally, in the layout details, we point to the fields from the
// variants they are assigned to. It is possible for some fields to be
// included in multiple variants. No field ever "moves around" in the
// layout; its offset is always the same.
//
// Also included in the layout are the upvars and the discriminant.
// These are included as fields on the "outer" layout; they are not part
// of any variant.
/// Compute the eligibility and assignment of each local.
fn coroutine_saved_local_eligibility<VariantIdx: Idx, FieldIdx: Idx, LocalIdx: Idx>(
nb_locals: usize,
variant_fields: &IndexSlice<VariantIdx, IndexVec<FieldIdx, LocalIdx>>,
storage_conflicts: &BitMatrix<LocalIdx, LocalIdx>,
) -> (DenseBitSet<LocalIdx>, IndexVec<LocalIdx, SavedLocalEligibility<VariantIdx, FieldIdx>>) {
use SavedLocalEligibility::*;
let mut assignments: IndexVec<LocalIdx, _> = IndexVec::from_elem_n(Unassigned, nb_locals);
// The saved locals not eligible for overlap. These will get
// "promoted" to the prefix of our coroutine.
let mut ineligible_locals = DenseBitSet::new_empty(nb_locals);
// Figure out which of our saved locals are fields in only
// one variant. The rest are deemed ineligible for overlap.
for (variant_index, fields) in variant_fields.iter_enumerated() {
for local in fields {
match assignments[*local] {
Unassigned => {
assignments[*local] = Assigned(variant_index);
}
Assigned(idx) => {
// We've already seen this local at another suspension
// point, so it is no longer a candidate.
trace!(
"removing local {:?} in >1 variant ({:?}, {:?})",
local, variant_index, idx
);
ineligible_locals.insert(*local);
assignments[*local] = Ineligible(None);
}
Ineligible(_) => {}
}
}
}
// Next, check every pair of eligible locals to see if they
// conflict.
for local_a in storage_conflicts.rows() {
let conflicts_a = storage_conflicts.count(local_a);
if ineligible_locals.contains(local_a) {
continue;
}
for local_b in storage_conflicts.iter(local_a) {
// local_a and local_b are storage live at the same time, therefore they
// cannot overlap in the coroutine layout. The only way to guarantee
// this is if they are in the same variant, or one is ineligible
// (which means it is stored in every variant).
if ineligible_locals.contains(local_b) || assignments[local_a] == assignments[local_b] {
continue;
}
// If they conflict, we will choose one to make ineligible.
// This is not always optimal; it's just a greedy heuristic that
// seems to produce good results most of the time.
let conflicts_b = storage_conflicts.count(local_b);
let (remove, other) =
if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
ineligible_locals.insert(remove);
assignments[remove] = Ineligible(None);
trace!("removing local {:?} due to conflict with {:?}", remove, other);
}
}
// Count the number of variants in use. If only one of them, then it is
// impossible to overlap any locals in our layout. In this case it's
// always better to make the remaining locals ineligible, so we can
// lay them out with the other locals in the prefix and eliminate
// unnecessary padding bytes.
{
let mut used_variants = DenseBitSet::new_empty(variant_fields.len());
for assignment in &assignments {
if let Assigned(idx) = assignment {
used_variants.insert(*idx);
}
}
if used_variants.count() < 2 {
for assignment in assignments.iter_mut() {
*assignment = Ineligible(None);
}
ineligible_locals.insert_all();
}
}
// Write down the order of our locals that will be promoted to the prefix.
{
for (idx, local) in ineligible_locals.iter().enumerate() {
assignments[local] = Ineligible(Some(FieldIdx::new(idx)));
}
}
debug!("coroutine saved local assignments: {:?}", assignments);
(ineligible_locals, assignments)
}
/// Compute the full coroutine layout.
fn coroutine_layout<
'a,
F: core::ops::Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + core::fmt::Debug + Copy,
VariantIdx: Idx,
FieldIdx: Idx,
LocalIdx: Idx,
>(
calc: &LayoutCalculator<impl HasDataLayout>,
local_layouts: &IndexSlice<LocalIdx, F>,
mut prefix_layouts: IndexVec<FieldIdx, F>,
variant_fields: &IndexSlice<VariantIdx, IndexVec<FieldIdx, LocalIdx>>,
storage_conflicts: &BitMatrix<LocalIdx, LocalIdx>,
tag_to_layout: impl Fn(Scalar) -> F,
) -> Result<LayoutData<FieldIdx, VariantIdx>, LayoutCalculatorError<F>> {
use SavedLocalEligibility::*;
let (ineligible_locals, assignments) =
coroutine_saved_local_eligibility(local_layouts.len(), variant_fields, storage_conflicts);
// Build a prefix layout, including "promoting" all ineligible
// locals as part of the prefix. We compute the layout of all of
// these fields at once to get optimal packing.
let tag_index = prefix_layouts.len();
// `variant_fields` already accounts for the reserved variants, so no need to add them.
let max_discr = (variant_fields.len() - 1) as u128;
let discr_int = Integer::fit_unsigned(max_discr);
let tag = Scalar::Initialized {
value: Primitive::Int(discr_int, /* signed = */ false),
valid_range: WrappingRange { start: 0, end: max_discr },
};
let promoted_layouts = ineligible_locals.iter().map(|local| local_layouts[local]);
prefix_layouts.push(tag_to_layout(tag));
prefix_layouts.extend(promoted_layouts);
let prefix =
calc.univariant(&prefix_layouts, &ReprOptions::default(), StructKind::AlwaysSized)?;
let (prefix_size, prefix_align) = (prefix.size, prefix.align);
// Split the prefix layout into the "outer" fields (upvars and
// discriminant) and the "promoted" fields. Promoted fields will
// get included in each variant that requested them in
// CoroutineLayout.
debug!("prefix = {:#?}", prefix);
let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
FieldsShape::Arbitrary { mut offsets, memory_index } => {
let mut inverse_memory_index = memory_index.invert_bijective_mapping();
// "a" (`0..b_start`) and "b" (`b_start..`) correspond to
// "outer" and "promoted" fields respectively.
let b_start = FieldIdx::new(tag_index + 1);
let offsets_b = IndexVec::from_raw(offsets.raw.split_off(b_start.index()));
let offsets_a = offsets;
// Disentangle the "a" and "b" components of `inverse_memory_index`
// by preserving the order but keeping only one disjoint "half" each.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
let inverse_memory_index_b: IndexVec<u32, FieldIdx> = inverse_memory_index
.iter()
.filter_map(|&i| i.index().checked_sub(b_start.index()).map(FieldIdx::new))
.collect();
inverse_memory_index.raw.retain(|&i| i.index() < b_start.index());
let inverse_memory_index_a = inverse_memory_index;
// Since `inverse_memory_index_{a,b}` each only refer to their
// respective fields, they can be safely inverted
let memory_index_a = inverse_memory_index_a.invert_bijective_mapping();
let memory_index_b = inverse_memory_index_b.invert_bijective_mapping();
let outer_fields =
FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
(outer_fields, offsets_b, memory_index_b)
}
_ => unreachable!(),
};
let mut size = prefix.size;
let mut align = prefix.align;
let variants = variant_fields
.iter_enumerated()
.map(|(index, variant_fields)| {
// Only include overlap-eligible fields when we compute our variant layout.
let variant_only_tys = variant_fields
.iter()
.filter(|local| match assignments[**local] {
Unassigned => unreachable!(),
Assigned(v) if v == index => true,
Assigned(_) => unreachable!("assignment does not match variant"),
Ineligible(_) => false,
})
.map(|local| local_layouts[*local]);
let mut variant = calc.univariant(
&variant_only_tys.collect::<IndexVec<_, _>>(),
&ReprOptions::default(),
StructKind::Prefixed(prefix_size, prefix_align.abi),
)?;
variant.variants = Variants::Single { index };
let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
unreachable!();
};
// Now, stitch the promoted and variant-only fields back together in
// the order they are mentioned by our CoroutineLayout.
// Because we only use some subset (that can differ between variants)
// of the promoted fields, we can't just pick those elements of the
// `promoted_memory_index` (as we'd end up with gaps).
// So instead, we build an "inverse memory_index", as if all of the
// promoted fields were being used, but leave the elements not in the
// subset as `invalid_field_idx`, which we can filter out later to
// obtain a valid (bijective) mapping.
let invalid_field_idx = promoted_memory_index.len() + memory_index.len();
let mut combined_inverse_memory_index =
IndexVec::from_elem_n(FieldIdx::new(invalid_field_idx), invalid_field_idx);
let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
let combined_offsets = variant_fields
.iter_enumerated()
.map(|(i, local)| {
let (offset, memory_index) = match assignments[*local] {
Unassigned => unreachable!(),
Assigned(_) => {
let (offset, memory_index) = offsets_and_memory_index.next().unwrap();
(offset, promoted_memory_index.len() as u32 + memory_index)
}
Ineligible(field_idx) => {
let field_idx = field_idx.unwrap();
(promoted_offsets[field_idx], promoted_memory_index[field_idx])
}
};
combined_inverse_memory_index[memory_index] = i;
offset
})
.collect();
// Remove the unused slots and invert the mapping to obtain the
// combined `memory_index` (also see previous comment).
combined_inverse_memory_index.raw.retain(|&i| i.index() != invalid_field_idx);
let combined_memory_index = combined_inverse_memory_index.invert_bijective_mapping();
variant.fields = FieldsShape::Arbitrary {
offsets: combined_offsets,
memory_index: combined_memory_index,
};
size = size.max(variant.size);
align = align.max(variant.align);
Ok(variant)
})
.collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
size = size.align_to(align.abi);
let uninhabited = prefix.uninhabited || variants.iter().all(|v| v.is_uninhabited());
let abi = BackendRepr::Memory { sized: true };
Ok(LayoutData {
variants: Variants::Multiple {
tag,
tag_encoding: TagEncoding::Direct,
tag_field: tag_index,
variants,
},
fields: outer_fields,
backend_repr: abi,
// Suppress niches inside coroutines. If the niche is inside a field that is aliased (due to
// self-referentiality), getting the discriminant can cause aliasing violations.
// `UnsafeCell` blocks niches for the same reason, but we don't yet have `UnsafePinned` that
// would do the same for us here.
// See <https://github.com/rust-lang/rust/issues/63818>, <https://github.com/rust-lang/miri/issues/3780>.
// FIXME: Remove when <https://github.com/rust-lang/rust/issues/125735> is implemented and aliased coroutine fields are wrapped in `UnsafePinned`.
largest_niche: None,
uninhabited,
size,
align,
max_repr_align: None,
unadjusted_abi_align: align.abi,
randomization_seed: Default::default(),
})
}
fn record_layout_for_printing<'tcx>(cx: &LayoutCx<'tcx>, layout: TyAndLayout<'tcx>) {
// Ignore layouts that are done with non-empty environments or
// non-monomorphic layouts, as the user only wants to see the stuff