interpret: refactor projection handling code

Moves our projection handling code into a common file, and avoids the use of a
general mplace-based fallback function by having more specialized implementations.

mplace_index (and the other slice-related functions) could be more efficient by
copy-pasting the body of operand_index. Or we could do some trait magic to share
the code between them. But for now this is probably fine.
Ralf Jung 2022-07-04 08:48:05 -04:00
parent f893495e3d
commit ab225ade1e
7 changed files with 531 additions and 421 deletions
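Editor's note: since the commit message floats the "trait magic" option, here is a rough, self-contained toy illustration of that idea (hypothetical code, not part of this commit; `MPlace`, `Op`, `Projectable`, and `project_index` are invented stand-ins for MPlaceTy, OpTy, and a sharing trait). A small trait exposes `len` and `offset`, and a single generic routine performs the bounds check for both types:

    // Toy model: `MPlace` stands in for MPlaceTy, `Op` for OpTy.
    #[derive(Clone, Copy, Debug)]
    struct MPlace {
        addr: usize,
        len: u64,
    }

    #[derive(Clone, Copy, Debug)]
    enum Op {
        Uninit { len: u64 },
        Indirect(MPlace),
    }

    trait Projectable: Sized {
        fn len(&self) -> u64;
        fn offset(&self, bytes: usize) -> Self;
    }

    impl Projectable for MPlace {
        fn len(&self) -> u64 {
            self.len
        }
        fn offset(&self, bytes: usize) -> Self {
            MPlace { addr: self.addr + bytes, ..*self }
        }
    }

    impl Projectable for Op {
        fn len(&self) -> u64 {
            match *self {
                Op::Uninit { len } => len,
                Op::Indirect(place) => place.len,
            }
        }
        fn offset(&self, bytes: usize) -> Self {
            match *self {
                // Every part of an uninit value is uninit (layout bookkeeping elided).
                Op::Uninit { .. } => Op::Uninit { len: 0 },
                Op::Indirect(place) => Op::Indirect(place.offset(bytes)),
            }
        }
    }

    // The shared logic: bounds-check once, then delegate the actual offsetting.
    fn project_index<P: Projectable>(base: &P, index: u64, stride: usize) -> Result<P, String> {
        let len = base.len();
        if index >= len {
            return Err(format!("bounds check failed: len {len}, index {index}"));
        }
        Ok(base.offset(stride * usize::try_from(index).unwrap()))
    }

    fn main() {
        let arr = MPlace { addr: 0x1000, len: 4 };
        println!("{:?}", project_index(&arr, 2, 8).unwrap()); // MPlace { addr: 0x1010, len: 4 }
        let uninit = Op::Uninit { len: 4 };
        println!("{:?}", project_index(&uninit, 2, 8).unwrap()); // Uninit { len: 0 }
        assert!(project_index(&arr, 7, 8).is_err()); // out of bounds
    }

The real code would also have to thread layouts, metadata, and `InterpResult` through such a trait, which is where most of the complexity (and presumably the reason the commit defers it) lives.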

compiler/rustc_const_eval/src/interpret/mod.rs

@@ -9,6 +9,7 @@ mod memory;
 mod operand;
 mod operator;
 mod place;
+mod projection;
 mod step;
 mod terminator;
 mod traits;

compiler/rustc_const_eval/src/interpret/operand.rs

@@ -1,7 +1,6 @@
 //! Functions concerning immediate values and operands, and reading from operands.
 //! All high-level functions to read from memory work on operands as sources.

-use std::convert::TryFrom;
 use std::fmt::Write;

 use rustc_hir::def::Namespace;
@@ -15,7 +14,7 @@ use rustc_target::abi::{VariantIdx, Variants};

 use super::{
     alloc_range, from_known_layout, mir_assign_valid_types, AllocId, ConstValue, Frame, GlobalId,
-    InterpCx, InterpResult, MPlaceTy, Machine, MemPlace, Place, PlaceTy, Pointer,
+    InterpCx, InterpResult, MPlaceTy, Machine, MemPlace, MemPlaceMeta, Place, PlaceTy, Pointer,
     PointerArithmetic, Provenance, Scalar, ScalarMaybeUninit,
 };
@@ -253,6 +252,11 @@ impl<'tcx, Tag: Provenance> ImmTy<'tcx, Tag> {
         ImmTy { imm, layout }
     }

+    #[inline]
+    pub fn uninit(layout: TyAndLayout<'tcx>) -> Self {
+        ImmTy { imm: Immediate::Uninit, layout }
+    }
+
     #[inline]
     pub fn try_from_uint(i: impl Into<u128>, layout: TyAndLayout<'tcx>) -> Option<Self> {
         Some(Self::from_scalar(Scalar::try_from_uint(i, layout.size)?, layout))
@@ -280,6 +284,41 @@ impl<'tcx, Tag: Provenance> ImmTy<'tcx, Tag> {
     }
 }

+impl<'tcx, Tag: Provenance> OpTy<'tcx, Tag> {
+    pub fn len(&self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> {
+        if self.layout.is_unsized() {
+            // There are no unsized immediates.
+            self.assert_mem_place().len(cx)
+        } else {
+            match self.layout.fields {
+                abi::FieldsShape::Array { count, .. } => Ok(count),
+                _ => bug!("len not supported on sized type {:?}", self.layout.ty),
+            }
+        }
+    }
+
+    pub fn offset(
+        &self,
+        offset: Size,
+        meta: MemPlaceMeta<Tag>,
+        layout: TyAndLayout<'tcx>,
+        cx: &impl HasDataLayout,
+    ) -> InterpResult<'tcx, Self> {
+        match self.try_as_mplace() {
+            Ok(mplace) => Ok(mplace.offset(offset, meta, layout, cx)?.into()),
+            Err(imm) => {
+                assert!(
+                    matches!(*imm, Immediate::Uninit),
+                    "Scalar/ScalarPair cannot be offset into"
+                );
+                assert!(!meta.has_meta()); // no place to store metadata here
+                // Every part of an uninit is uninit.
+                Ok(ImmTy::uninit(layout).into())
+            }
+        }
+    }
+}
+
 impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     /// Try reading an immediate in memory; this is interesting particularly for `ScalarPair`.
     /// Returns `None` if the layout does not permit loading this as a value.
@@ -296,11 +335,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         }

         let Some(alloc) = self.get_place_alloc(mplace)? else {
-            return Ok(Some(ImmTy {
-                // zero-sized type can be left uninit
-                imm: Immediate::Uninit,
-                layout: mplace.layout,
-            }));
+            // zero-sized type can be left uninit
+            return Ok(Some(ImmTy::uninit(mplace.layout)));
         };

         // It may seem like all types with `Scalar` or `ScalarPair` ABI are fair game at this point.
@@ -367,6 +403,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     /// This flag exists only for validity checking.
     ///
     /// This is an internal function that should not usually be used; call `read_immediate` instead.
+    /// ConstProp needs it, though.
     pub fn read_immediate_raw(
         &self,
         src: &OpTy<'tcx, M::PointerTag>,
@@ -421,123 +458,28 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         Ok(str)
     }

-    /// Projection functions
-    pub fn operand_field(
-        &self,
-        op: &OpTy<'tcx, M::PointerTag>,
-        field: usize,
-    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
-        let base = match op.try_as_mplace() {
-            Ok(ref mplace) => {
-                // We can reuse the mplace field computation logic for indirect operands.
-                let field = self.mplace_field(mplace, field)?;
-                return Ok(field.into());
-            }
-            Err(value) => value,
-        };
-
-        let field_layout = base.layout.field(self, field);
-        let offset = base.layout.fields.offset(field);
-        // This makes several assumptions about what layouts we will encounter; we match what
-        // codegen does as good as we can (see `extract_field` in `rustc_codegen_ssa/src/mir/operand.rs`).
-        let field_val: Immediate<_> = match (*base, base.layout.abi) {
-            // the field contains no information, can be left uninit
-            _ if field_layout.is_zst() => Immediate::Uninit,
-            // the field covers the entire type
-            _ if field_layout.size == base.layout.size => {
-                assert!(match (base.layout.abi, field_layout.abi) {
-                    (Abi::Scalar(..), Abi::Scalar(..)) => true,
-                    (Abi::ScalarPair(..), Abi::ScalarPair(..)) => true,
-                    _ => false,
-                });
-                assert!(offset.bytes() == 0);
-                *base
-            }
-            // extract fields from types with `ScalarPair` ABI
-            (Immediate::ScalarPair(a_val, b_val), Abi::ScalarPair(a, b)) => {
-                assert!(matches!(field_layout.abi, Abi::Scalar(..)));
-                Immediate::from(if offset.bytes() == 0 {
-                    debug_assert_eq!(field_layout.size, a.size(self));
-                    a_val
-                } else {
-                    debug_assert_eq!(offset, a.size(self).align_to(b.align(self).abi));
-                    debug_assert_eq!(field_layout.size, b.size(self));
-                    b_val
-                })
-            }
-            _ => span_bug!(
-                self.cur_span(),
-                "invalid field access on immediate {}, layout {:#?}",
-                base,
-                base.layout
-            ),
-        };
-
-        Ok(OpTy { op: Operand::Immediate(field_val), layout: field_layout, align: None })
-    }
-
-    pub fn operand_index(
-        &self,
-        op: &OpTy<'tcx, M::PointerTag>,
-        index: u64,
-    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
-        if let Ok(index) = usize::try_from(index) {
-            // We can just treat this as a field.
-            self.operand_field(op, index)
-        } else {
-            // Indexing into a big array. This must be an mplace.
-            let mplace = op.assert_mem_place();
-            Ok(self.mplace_index(&mplace, index)?.into())
-        }
-    }
-
-    pub fn operand_downcast(
-        &self,
-        op: &OpTy<'tcx, M::PointerTag>,
-        variant: VariantIdx,
-    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
-        Ok(match op.try_as_mplace() {
-            Ok(ref mplace) => self.mplace_downcast(mplace, variant)?.into(),
-            Err(..) => {
-                // Downcasts only change the layout.
-                // (In particular, no check about whether this is even the active variant -- that's by design,
-                // see https://github.com/rust-lang/rust/issues/93688#issuecomment-1032929496.)
-                let layout = op.layout.for_variant(self, variant);
-                OpTy { layout, ..*op }
-            }
-        })
-    }
-
-    #[instrument(skip(self), level = "debug")]
-    pub fn operand_projection(
-        &self,
-        base: &OpTy<'tcx, M::PointerTag>,
-        proj_elem: mir::PlaceElem<'tcx>,
-    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
-        use rustc_middle::mir::ProjectionElem::*;
-        Ok(match proj_elem {
-            Field(field, _) => self.operand_field(base, field.index())?,
-            Downcast(_, variant) => self.operand_downcast(base, variant)?,
-            Deref => self.deref_operand(base)?.into(),
-            Subslice { .. } | ConstantIndex { .. } | Index(_) => {
-                // The rest should only occur as mplace, we do not use Immediates for types
-                // allowing such operations. This matches place_projection forcing an allocation.
-                let mplace = base.assert_mem_place();
-                self.mplace_projection(&mplace, proj_elem)?.into()
-            }
-        })
-    }
-
     /// Converts a repr(simd) operand into an operand where `place_index` accesses the SIMD elements.
     /// Also returns the number of elements.
+    ///
+    /// Can (but does not always) trigger UB if `op` is uninitialized.
     pub fn operand_to_simd(
         &self,
-        base: &OpTy<'tcx, M::PointerTag>,
+        op: &OpTy<'tcx, M::PointerTag>,
     ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::PointerTag>, u64)> {
         // Basically we just transmute this place into an array following simd_size_and_type.
         // This only works in memory, but repr(simd) types should never be immediates anyway.
-        assert!(base.layout.ty.is_simd());
-        self.mplace_to_simd(&base.assert_mem_place())
+        assert!(op.layout.ty.is_simd());
+        match op.try_as_mplace() {
+            Ok(mplace) => self.mplace_to_simd(&mplace),
+            Err(imm) => match *imm {
+                Immediate::Uninit => {
+                    throw_ub!(InvalidUninitBytes(None))
+                }
+                Immediate::Scalar(..) | Immediate::ScalarPair(..) => {
+                    bug!("arrays/slices can never have Scalar/ScalarPair layout")
+                }
+            },
+        }
     }

     /// Read from a local. Will not actually access the local if reading from a ZST.
@@ -598,14 +540,19 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         trace!("eval_place_to_op: got {:?}", *op);
         // Sanity-check the type we ended up with.
-        debug_assert!(mir_assign_valid_types(
-            *self.tcx,
-            self.param_env,
-            self.layout_of(self.subst_from_current_frame_and_normalize_erasing_regions(
-                place.ty(&self.frame().body.local_decls, *self.tcx).ty
-            )?)?,
-            op.layout,
-        ));
+        debug_assert!(
+            mir_assign_valid_types(
+                *self.tcx,
+                self.param_env,
+                self.layout_of(self.subst_from_current_frame_and_normalize_erasing_regions(
+                    place.ty(&self.frame().body.local_decls, *self.tcx).ty
+                )?)?,
+                op.layout,
+            ),
+            "eval_place of a MIR place with type {:?} produced an interpreter operand with type {:?}",
+            place.ty(&self.frame().body.local_decls, *self.tcx).ty,
+            op.layout.ty,
+        );
         Ok(op)
     }
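A note on the new `OpTy::len` and `OpTy::offset` above: the only immediate that array/slice projections can step into is `Immediate::Uninit`, because every part of an uninit value is again uninit; real Scalar/ScalarPair data has no addressable parts. A minimal standalone sketch of that dispatch, using invented simplified types (`Imm`, `Operand`) rather than the real interpreter API:

    #[derive(Clone, Copy, Debug)]
    enum Imm {
        Uninit,
        Scalar(u128),
    }

    #[derive(Clone, Copy, Debug)]
    enum Operand {
        Immediate(Imm),
        // An in-memory operand, reduced to just an address here.
        Indirect { addr: usize },
    }

    impl Operand {
        // Mirrors `try_as_mplace`: in-memory operands yield their address,
        // immediates are handed back for custom handling.
        fn try_as_mplace(&self) -> Result<usize, Imm> {
            match *self {
                Operand::Indirect { addr } => Ok(addr),
                Operand::Immediate(imm) => Err(imm),
            }
        }

        fn offset(&self, bytes: usize) -> Operand {
            match self.try_as_mplace() {
                // In-memory: just adjust the address.
                Ok(addr) => Operand::Indirect { addr: addr + bytes },
                // Every part of an uninit value is uninit.
                Err(Imm::Uninit) => Operand::Immediate(Imm::Uninit),
                // Scalars have no addressable parts to project into.
                Err(imm) => panic!("cannot offset into {imm:?}"),
            }
        }
    }

    fn main() {
        let ind = Operand::Indirect { addr: 0x100 };
        println!("{:?}", ind.offset(8)); // Indirect { addr: 0x108 }
        let un = Operand::Immediate(Imm::Uninit);
        println!("{:?}", un.offset(8)); // Immediate(Uninit)
        let sc = Operand::Immediate(Imm::Scalar(42));
        assert!(sc.try_as_mplace().is_err()); // scalars are not in memory
    }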

compiler/rustc_const_eval/src/interpret/place.rs

@@ -2,17 +2,14 @@
 //! into a place.
 //! All high-level functions to write to memory work on places as destinations.

-use std::convert::TryFrom;
 use std::hash::Hash;

 use rustc_ast::Mutability;
 use rustc_macros::HashStable;
 use rustc_middle::mir;
+use rustc_middle::ty;
 use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt, TyAndLayout};
-use rustc_middle::ty::{self, Ty};
-use rustc_target::abi::{
-    Abi, Align, FieldsShape, HasDataLayout, Size, TagEncoding, VariantIdx, Variants,
-};
+use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size, TagEncoding, VariantIdx};

 use super::{
     alloc_range, mir_assign_valid_types, AllocId, AllocRef, AllocRefMut, CheckInAllocMsg,
@@ -46,7 +43,7 @@ impl<Tag: Provenance> MemPlaceMeta<Tag> {
             }
         }
     }
-    fn has_meta(self) -> bool {
+    pub fn has_meta(self) -> bool {
         match self {
             Self::Meta(_) => true,
             Self::None | Self::Poison => false,
@@ -188,6 +185,7 @@ impl<Tag: Provenance> Place<Tag> {
     /// Asserts that this points to some local variable.
     /// Returns the frame idx and the variable idx.
     #[inline]
+    #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
     pub fn assert_local(&self) -> (usize, mir::Local) {
         match self {
             Place::Local { frame, local } => (*frame, *local),
@@ -250,7 +248,7 @@ impl<'tcx, Tag: Provenance> MPlaceTy<'tcx, Tag> {
         // Go through the layout. There are lots of types that support a length,
         // e.g., SIMD types. (But not all repr(simd) types even have FieldsShape::Array!)
         match self.layout.fields {
-            FieldsShape::Array { count, .. } => Ok(count),
+            abi::FieldsShape::Array { count, .. } => Ok(count),
             _ => bug!("len not supported on sized type {:?}", self.layout.ty),
         }
     }
@@ -281,6 +279,7 @@ impl<'tcx, Tag: Provenance> OpTy<'tcx, Tag> {
     }

     #[inline(always)]
+    #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
     /// Note: do not call `as_ref` on the resulting place. This function should only be used to
     /// read from the resulting mplace, not to get its address back.
     pub fn assert_mem_place(&self) -> MPlaceTy<'tcx, Tag> {
@@ -298,16 +297,16 @@
         }
     }

-    #[inline]
-    pub fn assert_mem_place(&self) -> MPlaceTy<'tcx, Tag> {
+    #[inline(always)]
+    #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
+    pub fn assert_mem_place(self) -> MPlaceTy<'tcx, Tag> {
         self.try_as_mplace().unwrap()
     }
 }

-// separating the pointer tag for `impl Trait`, see https://github.com/rust-lang/rust/issues/54385
+// FIXME: Working around https://github.com/rust-lang/rust/issues/54385
 impl<'mir, 'tcx: 'mir, Tag, M> InterpCx<'mir, 'tcx, M>
 where
-    // FIXME: Working around https://github.com/rust-lang/rust/issues/54385
     Tag: Provenance + Eq + Hash + 'static,
     M: Machine<'mir, 'tcx, PointerTag = Tag>,
 {
@@ -392,276 +391,29 @@ where
         Ok(())
     }

-    /// Offset a pointer to project to a field of a struct/union. Unlike `place_field`, this is
-    /// always possible without allocating, so it can take `&self`. Also return the field's layout.
-    /// This supports both struct and array fields.
-    ///
-    /// This also works for arrays, but then the `usize` index type is restricting.
-    /// For indexing into arrays, use `mplace_index`.
-    #[inline(always)]
-    pub fn mplace_field(
-        &self,
-        base: &MPlaceTy<'tcx, M::PointerTag>,
-        field: usize,
-    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
-        let offset = base.layout.fields.offset(field);
-        let field_layout = base.layout.field(self, field);
-
-        // Offset may need adjustment for unsized fields.
-        let (meta, offset) = if field_layout.is_unsized() {
-            // Re-use parent metadata to determine dynamic field layout.
-            // With custom DSTS, this *will* execute user-defined code, but the same
-            // happens at run-time so that's okay.
-            match self.size_and_align_of(&base.meta, &field_layout)? {
-                Some((_, align)) => (base.meta, offset.align_to(align)),
-                None => {
-                    // For unsized types with an extern type tail we perform no adjustments.
-                    // NOTE: keep this in sync with `PlaceRef::project_field` in the codegen backend.
-                    assert!(matches!(base.meta, MemPlaceMeta::None));
-                    (base.meta, offset)
-                }
-            }
-        } else {
-            // base.meta could be present; we might be accessing a sized field of an unsized
-            // struct.
-            (MemPlaceMeta::None, offset)
-        };
-
-        // We do not look at `base.layout.align` nor `field_layout.align`, unlike
-        // codegen -- mostly to see if we can get away with that
-        base.offset(offset, meta, field_layout, self)
-    }
-
-    /// Index into an array.
-    #[inline(always)]
-    pub fn mplace_index(
-        &self,
-        base: &MPlaceTy<'tcx, M::PointerTag>,
-        index: u64,
-    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
-        // Not using the layout method because we want to compute on u64
-        match base.layout.fields {
-            FieldsShape::Array { stride, .. } => {
-                let len = base.len(self)?;
-                if index >= len {
-                    // This can only be reached in ConstProp and non-rustc-MIR.
-                    throw_ub!(BoundsCheckFailed { len, index });
-                }
-                let offset = stride * index; // `Size` multiplication
-                // All fields have the same layout.
-                let field_layout = base.layout.field(self, 0);
-                assert!(!field_layout.is_unsized());
-                base.offset(offset, MemPlaceMeta::None, field_layout, self)
-            }
-            _ => span_bug!(
-                self.cur_span(),
-                "`mplace_index` called on non-array type {:?}",
-                base.layout.ty
-            ),
-        }
-    }
-
-    // Iterates over all fields of an array. Much more efficient than doing the
-    // same by repeatedly calling `mplace_array`.
-    pub(super) fn mplace_array_fields<'a>(
-        &self,
-        base: &'a MPlaceTy<'tcx, Tag>,
-    ) -> InterpResult<'tcx, impl Iterator<Item = InterpResult<'tcx, MPlaceTy<'tcx, Tag>>> + 'a>
-    {
-        let len = base.len(self)?; // also asserts that we have a type where this makes sense
-        let FieldsShape::Array { stride, .. } = base.layout.fields else {
-            span_bug!(self.cur_span(), "mplace_array_fields: expected an array layout");
-        };
-        let layout = base.layout.field(self, 0);
-        let dl = &self.tcx.data_layout;
-        // `Size` multiplication
-        Ok((0..len).map(move |i| base.offset(stride * i, MemPlaceMeta::None, layout, dl)))
-    }
-
-    fn mplace_subslice(
-        &self,
-        base: &MPlaceTy<'tcx, M::PointerTag>,
-        from: u64,
-        to: u64,
-        from_end: bool,
-    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
-        let len = base.len(self)?; // also asserts that we have a type where this makes sense
-        let actual_to = if from_end {
-            if from.checked_add(to).map_or(true, |to| to > len) {
-                // This can only be reached in ConstProp and non-rustc-MIR.
-                throw_ub!(BoundsCheckFailed { len: len, index: from.saturating_add(to) });
-            }
-            len.checked_sub(to).unwrap()
-        } else {
-            to
-        };
-
-        // Not using layout method because that works with usize, and does not work with slices
-        // (that have count 0 in their layout).
-        let from_offset = match base.layout.fields {
-            FieldsShape::Array { stride, .. } => stride * from, // `Size` multiplication is checked
-            _ => {
-                span_bug!(self.cur_span(), "unexpected layout of index access: {:#?}", base.layout)
-            }
-        };
-
-        // Compute meta and new layout
-        let inner_len = actual_to.checked_sub(from).unwrap();
-        let (meta, ty) = match base.layout.ty.kind() {
-            // It is not nice to match on the type, but that seems to be the only way to
-            // implement this.
-            ty::Array(inner, _) => (MemPlaceMeta::None, self.tcx.mk_array(*inner, inner_len)),
-            ty::Slice(..) => {
-                let len = Scalar::from_machine_usize(inner_len, self);
-                (MemPlaceMeta::Meta(len), base.layout.ty)
-            }
-            _ => {
-                span_bug!(self.cur_span(), "cannot subslice non-array type: `{:?}`", base.layout.ty)
-            }
-        };
-        let layout = self.layout_of(ty)?;
-        base.offset(from_offset, meta, layout, self)
-    }
-
-    pub(crate) fn mplace_downcast(
-        &self,
-        base: &MPlaceTy<'tcx, M::PointerTag>,
-        variant: VariantIdx,
-    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
-        // Downcasts only change the layout.
-        // (In particular, no check about whether this is even the active variant -- that's by design,
-        // see https://github.com/rust-lang/rust/issues/93688#issuecomment-1032929496.)
-        assert!(!base.meta.has_meta());
-        Ok(MPlaceTy { layout: base.layout.for_variant(self, variant), ..*base })
-    }
-
-    /// Project into an mplace
-    #[instrument(skip(self), level = "debug")]
-    pub(super) fn mplace_projection(
-        &self,
-        base: &MPlaceTy<'tcx, M::PointerTag>,
-        proj_elem: mir::PlaceElem<'tcx>,
-    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
-        use rustc_middle::mir::ProjectionElem::*;
-        Ok(match proj_elem {
-            Field(field, _) => self.mplace_field(base, field.index())?,
-            Downcast(_, variant) => self.mplace_downcast(base, variant)?,
-            Deref => self.deref_operand(&base.into())?,
-            Index(local) => {
-                let layout = self.layout_of(self.tcx.types.usize)?;
-                let n = self.local_to_op(self.frame(), local, Some(layout))?;
-                let n = self.read_scalar(&n)?;
-                let n = n.to_machine_usize(self)?;
-                self.mplace_index(base, n)?
-            }
-            ConstantIndex { offset, min_length, from_end } => {
-                let n = base.len(self)?;
-                if n < min_length {
-                    // This can only be reached in ConstProp and non-rustc-MIR.
-                    throw_ub!(BoundsCheckFailed { len: min_length, index: n });
-                }
-                let index = if from_end {
-                    assert!(0 < offset && offset <= min_length);
-                    n.checked_sub(offset).unwrap()
-                } else {
-                    assert!(offset < min_length);
-                    offset
-                };
-                self.mplace_index(base, index)?
-            }
-            Subslice { from, to, from_end } => self.mplace_subslice(base, from, to, from_end)?,
-        })
-    }
-
     /// Converts a repr(simd) place into a place where `place_index` accesses the SIMD elements.
     /// Also returns the number of elements.
     pub fn mplace_to_simd(
         &self,
-        base: &MPlaceTy<'tcx, M::PointerTag>,
+        mplace: &MPlaceTy<'tcx, M::PointerTag>,
     ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::PointerTag>, u64)> {
         // Basically we just transmute this place into an array following simd_size_and_type.
         // (Transmuting is okay since this is an in-memory place. We also double-check the size
         // stays the same.)
-        let (len, e_ty) = base.layout.ty.simd_size_and_type(*self.tcx);
+        let (len, e_ty) = mplace.layout.ty.simd_size_and_type(*self.tcx);
         let array = self.tcx.mk_array(e_ty, len);
         let layout = self.layout_of(array)?;
-        assert_eq!(layout.size, base.layout.size);
-        Ok((MPlaceTy { layout, ..*base }, len))
+        assert_eq!(layout.size, mplace.layout.size);
+        Ok((MPlaceTy { layout, ..*mplace }, len))
     }

-    /// Gets the place of a field inside the place, and also the field's type.
-    /// Just a convenience function, but used quite a bit.
-    /// This is the only projection that might have a side-effect: We cannot project
-    /// into the field of a local `ScalarPair`, we have to first allocate it.
-    #[instrument(skip(self), level = "debug")]
-    pub fn place_field(
-        &mut self,
-        base: &PlaceTy<'tcx, M::PointerTag>,
-        field: usize,
-    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
-        // FIXME: We could try to be smarter and avoid allocation for fields that span the
-        // entire place.
-        let mplace = self.force_allocation(base)?;
-        Ok(self.mplace_field(&mplace, field)?.into())
-    }
-
-    pub fn place_index(
-        &mut self,
-        base: &PlaceTy<'tcx, M::PointerTag>,
-        index: u64,
-    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
-        let mplace = self.force_allocation(base)?;
-        Ok(self.mplace_index(&mplace, index)?.into())
-    }
-
-    pub fn place_downcast(
-        &self,
-        base: &PlaceTy<'tcx, M::PointerTag>,
-        variant: VariantIdx,
-    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
-        // Downcast just changes the layout
-        Ok(match base.try_as_mplace() {
-            Ok(mplace) => self.mplace_downcast(&mplace, variant)?.into(),
-            Err(..) => {
-                let layout = base.layout.for_variant(self, variant);
-                PlaceTy { layout, ..*base }
-            }
-        })
-    }
-
-    /// Projects into a place.
-    pub fn place_projection(
-        &mut self,
-        base: &PlaceTy<'tcx, M::PointerTag>,
-        &proj_elem: &mir::ProjectionElem<mir::Local, Ty<'tcx>>,
-    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
-        use rustc_middle::mir::ProjectionElem::*;
-        Ok(match proj_elem {
-            Field(field, _) => self.place_field(base, field.index())?,
-            Downcast(_, variant) => self.place_downcast(base, variant)?,
-            Deref => self.deref_operand(&self.place_to_op(base)?)?.into(),
-            // For the other variants, we have to force an allocation.
-            // This matches `operand_projection`.
-            Subslice { .. } | ConstantIndex { .. } | Index(_) => {
-                let mplace = self.force_allocation(base)?;
-                self.mplace_projection(&mplace, proj_elem)?.into()
-            }
-        })
-    }
-
     /// Converts a repr(simd) place into a place where `place_index` accesses the SIMD elements.
     /// Also returns the number of elements.
     pub fn place_to_simd(
         &mut self,
-        base: &PlaceTy<'tcx, M::PointerTag>,
+        place: &PlaceTy<'tcx, M::PointerTag>,
     ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::PointerTag>, u64)> {
-        let mplace = self.force_allocation(base)?;
+        let mplace = self.force_allocation(place)?;
         self.mplace_to_simd(&mplace)
     }
@@ -682,13 +434,14 @@
         &mut self,
         place: mir::Place<'tcx>,
     ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
-        let mut place_ty = self.local_to_place(self.frame_idx(), place.local)?;
-        for elem in place.projection.iter() {
-            place_ty = self.place_projection(&place_ty, &elem)?
-        }
+        let base_place = self.local_to_place(self.frame_idx(), place.local)?;
+        let final_place = place
+            .projection
+            .iter()
+            .try_fold(base_place, |op, elem| self.place_projection(&op, elem))?;

-        trace!("{:?}", self.dump_place(place_ty.place));
+        trace!("{:?}", self.dump_place(final_place.place));
         // Sanity-check the type we ended up with.
         debug_assert!(
             mir_assign_valid_types(
@@ -697,13 +450,13 @@
                 self.layout_of(self.subst_from_current_frame_and_normalize_erasing_regions(
                     place.ty(&self.frame().body.local_decls, *self.tcx).ty
                 )?)?,
-                place_ty.layout,
+                final_place.layout,
             ),
-            "eval_place of a MIR place with type {:?} produced an interpret place with type {:?}",
+            "eval_place of a MIR place with type {:?} produced an interpreter place with type {:?}",
             place.ty(&self.frame().body.local_decls, *self.tcx).ty,
-            place_ty.layout.ty,
+            final_place.layout.ty,
         );
-        Ok(place_ty)
+        Ok(final_place)
     }
/// Write an immediate to a place /// Write an immediate to a place
@@ -1058,10 +811,10 @@ where
         }

         match dest.layout.variants {
-            Variants::Single { index } => {
+            abi::Variants::Single { index } => {
                 assert_eq!(index, variant_index);
             }
-            Variants::Multiple {
+            abi::Variants::Multiple {
                 tag_encoding: TagEncoding::Direct,
                 tag: tag_layout,
                 tag_field,
@@ -1082,7 +835,7 @@ where
                 let tag_dest = self.place_field(dest, tag_field)?;
                 self.write_scalar(Scalar::from_uint(tag_val, size), &tag_dest)?;
             }
-            Variants::Multiple {
+            abi::Variants::Multiple {
                 tag_encoding:
                     TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start },
                 tag: tag_layout,
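A note on the `eval_place` rewrite above: the mutable accumulation loop becomes `Iterator::try_fold`, which threads the place through each fallible projection step and stops at the first error. A standalone toy illustration (the `Place` and `project` names here are invented):

    #[derive(Debug, Clone, Copy, PartialEq)]
    struct Place(u64);

    // One fallible projection step (toy stand-in for `place_projection`).
    fn project(p: Place, elem: u64) -> Result<Place, String> {
        if elem > 9 {
            return Err(format!("bad projection element {elem}"));
        }
        Ok(Place(p.0 * 10 + elem))
    }

    fn main() -> Result<(), String> {
        let projection = [1u64, 2, 3];
        // Old shape: let mut p = base; for e in projection { p = project(p, e)? }
        // New shape: thread the accumulator through try_fold instead.
        let final_place = projection.iter().try_fold(Place(0), |p, &e| project(p, e))?;
        assert_eq!(final_place, Place(123));
        Ok(())
    }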

compiler/rustc_const_eval/src/interpret/projection.rs (new file)

@@ -0,0 +1,393 @@
+//! This file implements "place projections"; basically a symmetric API for 3 types: MPlaceTy, OpTy, PlaceTy.
+//!
+//! OpTy and PlaceTy generally work by "let's see if we are actually an MPlaceTy, and do something custom if not".
+//! For PlaceTy, the custom thing is basically always to call `force_allocation` and then use the MPlaceTy logic anyway.
+//! For OpTy, the custom thing on field projections has to be pretty clever (since `Operand::Immediate` can have fields),
+//! but for array/slice operations it only has to worry about `Operand::Uninit`. That makes the value part trivial,
+//! but we still need to do bounds checking and adjust the layout. To not duplicate that with MPlaceTy, we actually
+//! implement the logic on OpTy, and MPlaceTy calls that.
+
+use std::hash::Hash;
+
+use rustc_middle::mir;
+use rustc_middle::ty;
+use rustc_middle::ty::layout::LayoutOf;
+use rustc_target::abi::{self, Abi, VariantIdx};
+
+use super::{
+    ImmTy, Immediate, InterpCx, InterpResult, MPlaceTy, Machine, MemPlaceMeta, OpTy, PlaceTy,
+    Provenance, Scalar,
+};
+
+// FIXME: Working around https://github.com/rust-lang/rust/issues/54385
+impl<'mir, 'tcx: 'mir, Tag, M> InterpCx<'mir, 'tcx, M>
+where
+    Tag: Provenance + Eq + Hash + 'static,
+    M: Machine<'mir, 'tcx, PointerTag = Tag>,
+{
+    //# Field access
+
+    /// Offset a pointer to project to a field of a struct/union. Unlike `place_field`, this is
+    /// always possible without allocating, so it can take `&self`. Also return the field's layout.
+    /// This supports both struct and array fields.
+    ///
+    /// This also works for arrays, but then the `usize` index type is restricting.
+    /// For indexing into arrays, use `mplace_index`.
+    pub fn mplace_field(
+        &self,
+        base: &MPlaceTy<'tcx, M::PointerTag>,
+        field: usize,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+        let offset = base.layout.fields.offset(field);
+        let field_layout = base.layout.field(self, field);
+
+        // Offset may need adjustment for unsized fields.
+        let (meta, offset) = if field_layout.is_unsized() {
+            // Re-use parent metadata to determine dynamic field layout.
+            // With custom DSTS, this *will* execute user-defined code, but the same
+            // happens at run-time so that's okay.
+            match self.size_and_align_of(&base.meta, &field_layout)? {
+                Some((_, align)) => (base.meta, offset.align_to(align)),
+                None => {
+                    // For unsized types with an extern type tail we perform no adjustments.
+                    // NOTE: keep this in sync with `PlaceRef::project_field` in the codegen backend.
+                    assert!(matches!(base.meta, MemPlaceMeta::None));
+                    (base.meta, offset)
+                }
+            }
+        } else {
+            // base.meta could be present; we might be accessing a sized field of an unsized
+            // struct.
+            (MemPlaceMeta::None, offset)
+        };
+
+        // We do not look at `base.layout.align` nor `field_layout.align`, unlike
+        // codegen -- mostly to see if we can get away with that
+        base.offset(offset, meta, field_layout, self)
+    }
+
+    /// Gets the place of a field inside the place, and also the field's type.
+    /// Just a convenience function, but used quite a bit.
+    /// This is the only projection that might have a side-effect: We cannot project
+    /// into the field of a local `ScalarPair`, we have to first allocate it.
+    pub fn place_field(
+        &mut self,
+        base: &PlaceTy<'tcx, M::PointerTag>,
+        field: usize,
+    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
+        // FIXME: We could try to be smarter and avoid allocation for fields that span the
+        // entire place.
+        let base = self.force_allocation(base)?;
+        Ok(self.mplace_field(&base, field)?.into())
+    }
+
+    pub fn operand_field(
+        &self,
+        base: &OpTy<'tcx, M::PointerTag>,
+        field: usize,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+        let base = match base.try_as_mplace() {
+            Ok(ref mplace) => {
+                // We can reuse the mplace field computation logic for indirect operands.
+                let field = self.mplace_field(mplace, field)?;
+                return Ok(field.into());
+            }
+            Err(value) => value,
+        };
+
+        let field_layout = base.layout.field(self, field);
+        let offset = base.layout.fields.offset(field);
+        // This makes several assumptions about what layouts we will encounter; we match what
+        // codegen does as good as we can (see `extract_field` in `rustc_codegen_ssa/src/mir/operand.rs`).
+        let field_val: Immediate<_> = match (*base, base.layout.abi) {
+            // the field contains no information, can be left uninit
+            _ if field_layout.is_zst() => Immediate::Uninit,
+            // the field covers the entire type
+            _ if field_layout.size == base.layout.size => {
+                assert!(match (base.layout.abi, field_layout.abi) {
+                    (Abi::Scalar(..), Abi::Scalar(..)) => true,
+                    (Abi::ScalarPair(..), Abi::ScalarPair(..)) => true,
+                    _ => false,
+                });
+                assert!(offset.bytes() == 0);
+                *base
+            }
+            // extract fields from types with `ScalarPair` ABI
+            (Immediate::ScalarPair(a_val, b_val), Abi::ScalarPair(a, b)) => {
+                assert!(matches!(field_layout.abi, Abi::Scalar(..)));
+                Immediate::from(if offset.bytes() == 0 {
+                    debug_assert_eq!(field_layout.size, a.size(self));
+                    a_val
+                } else {
+                    debug_assert_eq!(offset, a.size(self).align_to(b.align(self).abi));
+                    debug_assert_eq!(field_layout.size, b.size(self));
+                    b_val
+                })
+            }
+            _ => span_bug!(
+                self.cur_span(),
+                "invalid field access on immediate {}, layout {:#?}",
+                base,
+                base.layout
+            ),
+        };
+
+        Ok(ImmTy::from_immediate(field_val, field_layout).into())
+    }
+
+    //# Downcasting
+
+    pub fn mplace_downcast(
+        &self,
+        base: &MPlaceTy<'tcx, M::PointerTag>,
+        variant: VariantIdx,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+        // Downcasts only change the layout.
+        // (In particular, no check about whether this is even the active variant -- that's by design,
+        // see https://github.com/rust-lang/rust/issues/93688#issuecomment-1032929496.)
+        assert!(!base.meta.has_meta());
+        let mut base = *base;
+        base.layout = base.layout.for_variant(self, variant);
+        Ok(base)
+    }
+
+    pub fn place_downcast(
+        &self,
+        base: &PlaceTy<'tcx, M::PointerTag>,
+        variant: VariantIdx,
+    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
+        // Downcast just changes the layout
+        let mut base = *base;
+        base.layout = base.layout.for_variant(self, variant);
+        Ok(base)
+    }
+
+    pub fn operand_downcast(
+        &self,
+        base: &OpTy<'tcx, M::PointerTag>,
+        variant: VariantIdx,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+        // Downcast just changes the layout
+        let mut base = *base;
+        base.layout = base.layout.for_variant(self, variant);
+        Ok(base)
+    }
+
+    //# Slice indexing
+
+    #[inline(always)]
+    pub fn operand_index(
+        &self,
+        base: &OpTy<'tcx, M::PointerTag>,
+        index: u64,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+        // Not using the layout method because we want to compute on u64
+        match base.layout.fields {
+            abi::FieldsShape::Array { stride, count: _ } => {
+                // `count` is nonsense for slices, use the dynamic length instead.
+                let len = base.len(self)?;
+                if index >= len {
+                    // This can only be reached in ConstProp and non-rustc-MIR.
+                    throw_ub!(BoundsCheckFailed { len, index });
+                }
+                let offset = stride * index; // `Size` multiplication
+                // All fields have the same layout.
+                let field_layout = base.layout.field(self, 0);
+                assert!(!field_layout.is_unsized());
+
+                base.offset(offset, MemPlaceMeta::None, field_layout, self)
+            }
+            _ => span_bug!(
+                self.cur_span(),
+                "`mplace_index` called on non-array type {:?}",
+                base.layout.ty
+            ),
+        }
+    }
+
+    // Iterates over all fields of an array. Much more efficient than doing the
+    // same by repeatedly calling `operand_index`.
+    pub fn operand_array_fields<'a>(
+        &self,
+        base: &'a OpTy<'tcx, Tag>,
+    ) -> InterpResult<'tcx, impl Iterator<Item = InterpResult<'tcx, OpTy<'tcx, Tag>>> + 'a> {
+        let len = base.len(self)?; // also asserts that we have a type where this makes sense
+        let abi::FieldsShape::Array { stride, .. } = base.layout.fields else {
+            span_bug!(self.cur_span(), "operand_array_fields: expected an array layout");
+        };
+        let layout = base.layout.field(self, 0);
+        let dl = &self.tcx.data_layout;
+        // `Size` multiplication
+        Ok((0..len).map(move |i| base.offset(stride * i, MemPlaceMeta::None, layout, dl)))
+    }
+
+    /// Index into an array.
+    pub fn mplace_index(
+        &self,
+        base: &MPlaceTy<'tcx, M::PointerTag>,
+        index: u64,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+        Ok(self.operand_index(&base.into(), index)?.assert_mem_place())
+    }
+
+    pub fn place_index(
+        &mut self,
+        base: &PlaceTy<'tcx, M::PointerTag>,
+        index: u64,
+    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
+        // There's not a lot we can do here, since we cannot have a place to a part of a local. If
+        // we are accessing the only element of a 1-element array, it's still the entire local...
+        // that doesn't seem worth it.
+        let base = self.force_allocation(base)?;
+        Ok(self.mplace_index(&base, index)?.into())
+    }
+
+    //# ConstantIndex support
+
+    fn operand_constant_index(
+        &self,
+        base: &OpTy<'tcx, M::PointerTag>,
+        offset: u64,
+        min_length: u64,
+        from_end: bool,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+        let n = base.len(self)?;
+        if n < min_length {
+            // This can only be reached in ConstProp and non-rustc-MIR.
+            throw_ub!(BoundsCheckFailed { len: min_length, index: n });
+        }
+
+        let index = if from_end {
+            assert!(0 < offset && offset <= min_length);
+            n.checked_sub(offset).unwrap()
+        } else {
+            assert!(offset < min_length);
+            offset
+        };
+
+        self.operand_index(base, index)
+    }
+
+    fn place_constant_index(
+        &mut self,
+        base: &PlaceTy<'tcx, M::PointerTag>,
+        offset: u64,
+        min_length: u64,
+        from_end: bool,
+    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
+        let base = self.force_allocation(base)?;
+        Ok(self
+            .operand_constant_index(&base.into(), offset, min_length, from_end)?
+            .assert_mem_place()
+            .into())
+    }
+
+    //# Subslicing
+
+    fn operand_subslice(
+        &self,
+        base: &OpTy<'tcx, M::PointerTag>,
+        from: u64,
+        to: u64,
+        from_end: bool,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+        let len = base.len(self)?; // also asserts that we have a type where this makes sense
+        let actual_to = if from_end {
+            if from.checked_add(to).map_or(true, |to| to > len) {
+                // This can only be reached in ConstProp and non-rustc-MIR.
+                throw_ub!(BoundsCheckFailed { len: len, index: from.saturating_add(to) });
+            }
+            len.checked_sub(to).unwrap()
+        } else {
+            to
+        };
+
+        // Not using layout method because that works with usize, and does not work with slices
+        // (that have count 0 in their layout).
+        let from_offset = match base.layout.fields {
+            abi::FieldsShape::Array { stride, .. } => stride * from, // `Size` multiplication is checked
+            _ => {
+                span_bug!(self.cur_span(), "unexpected layout of index access: {:#?}", base.layout)
+            }
+        };
+
+        // Compute meta and new layout
+        let inner_len = actual_to.checked_sub(from).unwrap();
+        let (meta, ty) = match base.layout.ty.kind() {
+            // It is not nice to match on the type, but that seems to be the only way to
+            // implement this.
+            ty::Array(inner, _) => (MemPlaceMeta::None, self.tcx.mk_array(*inner, inner_len)),
+            ty::Slice(..) => {
+                let len = Scalar::from_machine_usize(inner_len, self);
+                (MemPlaceMeta::Meta(len), base.layout.ty)
+            }
+            _ => {
+                span_bug!(self.cur_span(), "cannot subslice non-array type: `{:?}`", base.layout.ty)
+            }
+        };
+        let layout = self.layout_of(ty)?;
+
+        base.offset(from_offset, meta, layout, self)
+    }
+
+    pub fn place_subslice(
+        &mut self,
+        base: &PlaceTy<'tcx, M::PointerTag>,
+        from: u64,
+        to: u64,
+        from_end: bool,
+    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
+        let base = self.force_allocation(base)?;
+        Ok(self.operand_subslice(&base.into(), from, to, from_end)?.assert_mem_place().into())
+    }
+
+    //# Applying a general projection
+
+    /// Projects into a place.
+    #[instrument(skip(self), level = "trace")]
+    pub fn place_projection(
+        &mut self,
+        base: &PlaceTy<'tcx, M::PointerTag>,
+        proj_elem: mir::PlaceElem<'tcx>,
+    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
+        use rustc_middle::mir::ProjectionElem::*;
+        Ok(match proj_elem {
+            Field(field, _) => self.place_field(base, field.index())?,
+            Downcast(_, variant) => self.place_downcast(base, variant)?,
+            Deref => self.deref_operand(&self.place_to_op(base)?)?.into(),
+            Index(local) => {
+                let layout = self.layout_of(self.tcx.types.usize)?;
+                let n = self.local_to_op(self.frame(), local, Some(layout))?;
+                let n = self.read_scalar(&n)?.to_machine_usize(self)?;
+                self.place_index(base, n)?
+            }
+            ConstantIndex { offset, min_length, from_end } => {
+                self.place_constant_index(base, offset, min_length, from_end)?
+            }
+            Subslice { from, to, from_end } => self.place_subslice(base, from, to, from_end)?,
+        })
+    }
+
+    #[instrument(skip(self), level = "trace")]
+    pub fn operand_projection(
+        &self,
+        base: &OpTy<'tcx, M::PointerTag>,
+        proj_elem: mir::PlaceElem<'tcx>,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+        use rustc_middle::mir::ProjectionElem::*;
+        Ok(match proj_elem {
+            Field(field, _) => self.operand_field(base, field.index())?,
+            Downcast(_, variant) => self.operand_downcast(base, variant)?,
+            Deref => self.deref_operand(base)?.into(),
+            Index(local) => {
+                let layout = self.layout_of(self.tcx.types.usize)?;
+                let n = self.local_to_op(self.frame(), local, Some(layout))?;
+                let n = self.read_scalar(&n)?.to_machine_usize(self)?;
+                self.operand_index(base, n)?
+            }
+            ConstantIndex { offset, min_length, from_end } => {
+                self.operand_constant_index(base, offset, min_length, from_end)?
+            }
+            Subslice { from, to, from_end } => self.operand_subslice(base, from, to, from_end)?,
+        })
+    }
+}
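To make the `ConstantIndex` arithmetic in this new file concrete, here is a standalone rendering of the bounds check and index computation from `operand_constant_index`, with a simplified signature (plain `u64` values and `String` errors instead of the interpreter's types):

    fn constant_index(len: u64, offset: u64, min_length: u64, from_end: bool) -> Result<u64, String> {
        if len < min_length {
            // Corresponds to the interpreter's BoundsCheckFailed error.
            return Err(format!("bounds check failed: len {len} < min_length {min_length}"));
        }
        Ok(if from_end {
            // `offset` counts from the back; cannot underflow since offset <= min_length <= len.
            assert!(0 < offset && offset <= min_length);
            len.checked_sub(offset).unwrap()
        } else {
            assert!(offset < min_length);
            offset
        })
    }

    fn main() {
        // A pattern like `[_, .., x]` on a slice of length 5 should resolve `x` to
        // ConstantIndex { offset: 1, min_length: 2, from_end: true }: index 5 - 1 = 4.
        assert_eq!(constant_index(5, 1, 2, true).unwrap(), 4);
        // Counting from the front just yields `offset` itself.
        assert_eq!(constant_index(5, 1, 2, false).unwrap(), 1);
        // Too-short inputs fail the bounds check.
        assert!(constant_index(1, 1, 2, true).is_err());
    }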

compiler/rustc_const_eval/src/interpret/terminator.rs

@@ -529,7 +529,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         let receiver_place = loop {
             match receiver.layout.ty.kind() {
                 ty::Ref(..) | ty::RawPtr(..) => break self.deref_operand(&receiver)?,
-                ty::Dynamic(..) => break receiver.assert_mem_place(),
+                ty::Dynamic(..) => break receiver.assert_mem_place(), // no immediate unsized values
                 _ => {
                     // Not there yet, search for the only non-ZST field.
                     let mut non_zst_field = None;

compiler/rustc_const_eval/src/interpret/validity.rs

@@ -847,6 +847,8 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
                 );
             }
             Abi::Scalar(scalar_layout) => {
+                // We use a 'forced' read because we always need an `Immediate` here
+                // and treating "partially uninit" as "fully uninit" is fine for us.
                 let scalar = self.read_immediate_forced(op)?.to_scalar_or_uninit();
                 self.visit_scalar(scalar, scalar_layout)?;
             }
@@ -856,6 +858,8 @@
                 // is subtle due to enums having ScalarPair layout, where one field
                 // is the discriminant.
                 if cfg!(debug_assertions) {
+                    // We use a 'forced' read because we always need an `Immediate` here
+                    // and treating "partially uninit" as "fully uninit" is fine for us.
                     let (a, b) = self.read_immediate_forced(op)?.to_scalar_or_uninit_pair();
                     self.visit_scalar(a, a_layout)?;
                     self.visit_scalar(b, b_layout)?;
@@ -880,7 +884,7 @@
     ) -> InterpResult<'tcx> {
         match op.layout.ty.kind() {
             ty::Str => {
-                let mplace = op.assert_mem_place(); // strings are never immediate
+                let mplace = op.assert_mem_place(); // strings are unsized and hence never immediate
                 let len = mplace.len(self.ecx)?;
                 try_validation!(
                     self.ecx.read_bytes_ptr(mplace.ptr, Size::from_bytes(len)),
@@ -900,14 +904,27 @@
                 {
                     // Optimized handling for arrays of integer/float type.

-                    // Arrays cannot be immediate, slices are never immediate.
-                    let mplace = op.assert_mem_place();
                     // This is the length of the array/slice.
-                    let len = mplace.len(self.ecx)?;
+                    let len = op.len(self.ecx)?;
                     // This is the element type size.
                     let layout = self.ecx.layout_of(*tys)?;
                     // This is the size in bytes of the whole array. (This checks for overflow.)
                     let size = layout.size * len;
+                    // If the size is 0, there is nothing to check.
+                    // (`size` can only be 0 if `len` is 0, and empty arrays are always valid.)
+                    if size == Size::ZERO {
+                        return Ok(());
+                    }
+                    // Now that we definitely have a non-ZST array, we know it lives in memory.
+                    let mplace = match op.try_as_mplace() {
+                        Ok(mplace) => mplace,
+                        Err(imm) => match *imm {
+                            Immediate::Uninit =>
+                                throw_validation_failure!(self.path, { "uninitialized bytes" }),
+                            Immediate::Scalar(..) | Immediate::ScalarPair(..) =>
+                                bug!("arrays/slices can never have Scalar/ScalarPair layout"),
+                        }
+                    };

                     // Optimization: we just check the entire range at once.
                     // NOTE: Keep this in sync with the handling of integer and float
@@ -919,10 +936,7 @@
                     // to reject those pointers, we just do not have the machinery to
                     // talk about parts of a pointer.
                     // We also accept uninit, for consistency with the slow path.
-                    let Some(alloc) = self.ecx.get_ptr_alloc(mplace.ptr, size, mplace.align)? else {
-                        // Size 0, nothing more to check.
-                        return Ok(());
-                    };
+                    let alloc = self.ecx.get_ptr_alloc(mplace.ptr, size, mplace.align)?.expect("we already excluded size 0");

                     match alloc.check_bytes(
                         alloc_range(Size::ZERO, size),
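A note on the restructured fast path above: the "nothing to check" case is now an explicit early return on `size == 0`, which lets the later allocation lookup assert success with `expect`. A standalone sketch of that control flow, with an invented toy check standing in for the real byte validation:

    fn validate_int_array(alloc: Option<&[u8]>, len: u64, elem_size: u64) -> Result<(), String> {
        // Size in bytes of the whole array (checked multiplication).
        let size = elem_size.checked_mul(len).ok_or_else(|| "size overflow".to_string())?;
        // If the size is 0, there is nothing to check; empty arrays are always valid.
        if size == 0 {
            return Ok(());
        }
        // Past this point the value must live in memory, so an allocation must exist.
        let bytes = alloc.expect("we already excluded size 0");
        // One check over the entire range instead of one check per element.
        if bytes.len() as u64 != size {
            return Err(format!("expected {size} bytes, found {}", bytes.len()));
        }
        Ok(())
    }

    fn main() {
        let buf = [0u8; 16];
        assert!(validate_int_array(None, 0, 4).is_ok()); // empty array: early return
        assert!(validate_int_array(Some(&buf[..]), 4, 4).is_ok());
        assert!(validate_int_array(Some(&buf[..8]), 4, 4).is_err());
    }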

compiler/rustc_const_eval/src/interpret/visitor.rs

@@ -21,8 +21,10 @@ pub trait Value<'mir, 'tcx, M: Machine<'mir, 'tcx>>: Copy {
     fn to_op(&self, ecx: &InterpCx<'mir, 'tcx, M>)
         -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>>;

-    /// Creates this from an `MPlaceTy`.
-    fn from_mem_place(mplace: MPlaceTy<'tcx, M::PointerTag>) -> Self;
+    /// Creates this from an `OpTy`.
+    ///
+    /// If `to_op` only ever produces `Indirect` operands, then this one is definitely `Indirect`.
+    fn from_op(mplace: OpTy<'tcx, M::PointerTag>) -> Self;

     /// Projects to the given enum variant.
     fn project_downcast(
@@ -56,8 +58,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> Value<'mir, 'tcx, M> for OpTy<'tc
     }

     #[inline(always)]
-    fn from_mem_place(mplace: MPlaceTy<'tcx, M::PointerTag>) -> Self {
-        mplace.into()
+    fn from_op(op: OpTy<'tcx, M::PointerTag>) -> Self {
+        op
     }

     #[inline(always)]
@@ -96,8 +98,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> Value<'mir, 'tcx, M>
     }

     #[inline(always)]
-    fn from_mem_place(mplace: MPlaceTy<'tcx, M::PointerTag>) -> Self {
-        mplace
+    fn from_op(op: OpTy<'tcx, M::PointerTag>) -> Self {
+        // assert is justified because our `to_op` only ever produces `Indirect` operands.
+        op.assert_mem_place()
     }

     #[inline(always)]
@@ -218,13 +221,13 @@ macro_rules! make_value_visitor {
             match *v.layout().ty.kind() {
                 // If it is a trait object, switch to the real type that was used to create it.
                 ty::Dynamic(..) => {
-                    // immediate trait objects are not a thing
+                    // unsized values are never immediate, so we can assert_mem_place
                     let op = v.to_op(self.ecx())?;
                     let dest = op.assert_mem_place();
                     let inner = self.ecx().unpack_dyn_trait(&dest)?.1;
                     trace!("walk_value: dyn object layout: {:#?}", inner.layout);
                     // recurse with the inner type
-                    return self.visit_field(&v, 0, &Value::from_mem_place(inner));
+                    return self.visit_field(&v, 0, &Value::from_op(inner.into()));
                 },
                 // Slices do not need special handling here: they have `Array` field
                 // placement with length 0, so we enter the `Array` case below which
@@ -292,13 +295,12 @@ macro_rules! make_value_visitor {
                 FieldsShape::Array { .. } => {
                     // Let's get an mplace first.
                     let op = v.to_op(self.ecx())?;
-                    let mplace = op.assert_mem_place();
                     // Now we can go over all the fields.
                     // This uses the *run-time length*, i.e., if we are a slice,
                     // the dynamic info from the metadata is used.
-                    let iter = self.ecx().mplace_array_fields(&mplace)?
+                    let iter = self.ecx().operand_array_fields(&op)?
                         .map(|f| f.and_then(|f| {
-                            Ok(Value::from_mem_place(f))
+                            Ok(Value::from_op(f))
                         }));
                     self.visit_aggregate(v, iter)?;
                 }