interpret: more consistently use ImmTy in operators and casts

This commit is contained in:
Ralf Jung 2023-09-20 21:49:30 +02:00
parent 4f226925ce
commit da08a3f40c
21 changed files with 200 additions and 189 deletions

View File

@ -3,7 +3,7 @@ use rustc_hir::{LangItem, CRATE_HIR_ID};
use rustc_middle::mir; use rustc_middle::mir;
use rustc_middle::mir::interpret::PointerArithmetic; use rustc_middle::mir::interpret::PointerArithmetic;
use rustc_middle::ty::layout::{FnAbiOf, TyAndLayout}; use rustc_middle::ty::layout::{FnAbiOf, TyAndLayout};
use rustc_middle::ty::{self, Ty, TyCtxt}; use rustc_middle::ty::{self, TyCtxt};
use rustc_session::lint::builtin::INVALID_ALIGNMENT; use rustc_session::lint::builtin::INVALID_ALIGNMENT;
use std::borrow::Borrow; use std::borrow::Borrow;
use std::hash::Hash; use std::hash::Hash;
@ -596,7 +596,7 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
_bin_op: mir::BinOp, _bin_op: mir::BinOp,
_left: &ImmTy<'tcx>, _left: &ImmTy<'tcx>,
_right: &ImmTy<'tcx>, _right: &ImmTy<'tcx>,
) -> InterpResult<'tcx, (Scalar, bool, Ty<'tcx>)> { ) -> InterpResult<'tcx, (ImmTy<'tcx>, bool)> {
throw_unsup_format!("pointer arithmetic or comparison is not supported at compile-time"); throw_unsup_format!("pointer arithmetic or comparison is not supported at compile-time");
} }

View File

@ -34,31 +34,31 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
CastKind::PointerExposeAddress => { CastKind::PointerExposeAddress => {
let src = self.read_immediate(src)?; let src = self.read_immediate(src)?;
let res = self.pointer_expose_address_cast(&src, cast_ty)?; let res = self.pointer_expose_address_cast(&src, cast_ty)?;
self.write_immediate(res, dest)?; self.write_immediate(*res, dest)?;
} }
CastKind::PointerFromExposedAddress => { CastKind::PointerFromExposedAddress => {
let src = self.read_immediate(src)?; let src = self.read_immediate(src)?;
let res = self.pointer_from_exposed_address_cast(&src, cast_ty)?; let res = self.pointer_from_exposed_address_cast(&src, cast_ty)?;
self.write_immediate(res, dest)?; self.write_immediate(*res, dest)?;
} }
CastKind::IntToInt | CastKind::IntToFloat => { CastKind::IntToInt | CastKind::IntToFloat => {
let src = self.read_immediate(src)?; let src = self.read_immediate(src)?;
let res = self.int_to_int_or_float(&src, cast_ty)?; let res = self.int_to_int_or_float(&src, cast_ty)?;
self.write_immediate(res, dest)?; self.write_immediate(*res, dest)?;
} }
CastKind::FloatToFloat | CastKind::FloatToInt => { CastKind::FloatToFloat | CastKind::FloatToInt => {
let src = self.read_immediate(src)?; let src = self.read_immediate(src)?;
let res = self.float_to_float_or_int(&src, cast_ty)?; let res = self.float_to_float_or_int(&src, cast_ty)?;
self.write_immediate(res, dest)?; self.write_immediate(*res, dest)?;
} }
CastKind::FnPtrToPtr | CastKind::PtrToPtr => { CastKind::FnPtrToPtr | CastKind::PtrToPtr => {
let src = self.read_immediate(src)?; let src = self.read_immediate(src)?;
let res = self.ptr_to_ptr(&src, cast_ty)?; let res = self.ptr_to_ptr(&src, cast_ty)?;
self.write_immediate(res, dest)?; self.write_immediate(*res, dest)?;
} }
CastKind::PointerCoercion( CastKind::PointerCoercion(
@ -165,11 +165,15 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
&self, &self,
src: &ImmTy<'tcx, M::Provenance>, src: &ImmTy<'tcx, M::Provenance>,
cast_ty: Ty<'tcx>, cast_ty: Ty<'tcx>,
) -> InterpResult<'tcx, Immediate<M::Provenance>> { ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
assert!(src.layout.ty.is_integral() || src.layout.ty.is_char() || src.layout.ty.is_bool()); assert!(src.layout.ty.is_integral() || src.layout.ty.is_char() || src.layout.ty.is_bool());
assert!(cast_ty.is_floating_point() || cast_ty.is_integral() || cast_ty.is_char()); assert!(cast_ty.is_floating_point() || cast_ty.is_integral() || cast_ty.is_char());
Ok(self.cast_from_int_like(src.to_scalar(), src.layout, cast_ty)?.into()) let layout = self.layout_of(cast_ty)?;
Ok(ImmTy::from_scalar(
self.cast_from_int_like(src.to_scalar(), src.layout, cast_ty)?,
layout,
))
} }
/// Handles 'FloatToFloat' and 'FloatToInt' casts. /// Handles 'FloatToFloat' and 'FloatToInt' casts.
@ -177,21 +181,19 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
&self, &self,
src: &ImmTy<'tcx, M::Provenance>, src: &ImmTy<'tcx, M::Provenance>,
cast_ty: Ty<'tcx>, cast_ty: Ty<'tcx>,
) -> InterpResult<'tcx, Immediate<M::Provenance>> { ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
use rustc_type_ir::sty::TyKind::*; use rustc_type_ir::sty::TyKind::*;
match src.layout.ty.kind() { let layout = self.layout_of(cast_ty)?;
let val = match src.layout.ty.kind() {
// Floating point // Floating point
Float(FloatTy::F32) => { Float(FloatTy::F32) => self.cast_from_float(src.to_scalar().to_f32()?, cast_ty),
return Ok(self.cast_from_float(src.to_scalar().to_f32()?, cast_ty).into()); Float(FloatTy::F64) => self.cast_from_float(src.to_scalar().to_f64()?, cast_ty),
}
Float(FloatTy::F64) => {
return Ok(self.cast_from_float(src.to_scalar().to_f64()?, cast_ty).into());
}
_ => { _ => {
bug!("Can't cast 'Float' type into {:?}", cast_ty); bug!("Can't cast 'Float' type into {:?}", cast_ty);
} }
} };
Ok(ImmTy::from_scalar(val, layout))
} }
/// Handles 'FnPtrToPtr' and 'PtrToPtr' casts. /// Handles 'FnPtrToPtr' and 'PtrToPtr' casts.
@ -199,21 +201,21 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
&self, &self,
src: &ImmTy<'tcx, M::Provenance>, src: &ImmTy<'tcx, M::Provenance>,
cast_ty: Ty<'tcx>, cast_ty: Ty<'tcx>,
) -> InterpResult<'tcx, Immediate<M::Provenance>> { ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
assert!(src.layout.ty.is_any_ptr()); assert!(src.layout.ty.is_any_ptr());
assert!(cast_ty.is_unsafe_ptr()); assert!(cast_ty.is_unsafe_ptr());
// Handle casting any ptr to raw ptr (might be a fat ptr). // Handle casting any ptr to raw ptr (might be a fat ptr).
let dest_layout = self.layout_of(cast_ty)?; let dest_layout = self.layout_of(cast_ty)?;
if dest_layout.size == src.layout.size { if dest_layout.size == src.layout.size {
// Thin or fat pointer that just hast the ptr kind of target type changed. // Thin or fat pointer that just hast the ptr kind of target type changed.
return Ok(**src); return Ok(ImmTy::from_immediate(**src, dest_layout));
} else { } else {
// Casting the metadata away from a fat ptr. // Casting the metadata away from a fat ptr.
assert_eq!(src.layout.size, 2 * self.pointer_size()); assert_eq!(src.layout.size, 2 * self.pointer_size());
assert_eq!(dest_layout.size, self.pointer_size()); assert_eq!(dest_layout.size, self.pointer_size());
assert!(src.layout.ty.is_unsafe_ptr()); assert!(src.layout.ty.is_unsafe_ptr());
return match **src { return match **src {
Immediate::ScalarPair(data, _) => Ok(data.into()), Immediate::ScalarPair(data, _) => Ok(ImmTy::from_scalar(data, dest_layout)),
Immediate::Scalar(..) => span_bug!( Immediate::Scalar(..) => span_bug!(
self.cur_span(), self.cur_span(),
"{:?} input to a fat-to-thin cast ({:?} -> {:?})", "{:?} input to a fat-to-thin cast ({:?} -> {:?})",
@ -230,7 +232,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
&mut self, &mut self,
src: &ImmTy<'tcx, M::Provenance>, src: &ImmTy<'tcx, M::Provenance>,
cast_ty: Ty<'tcx>, cast_ty: Ty<'tcx>,
) -> InterpResult<'tcx, Immediate<M::Provenance>> { ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
assert_matches!(src.layout.ty.kind(), ty::RawPtr(_) | ty::FnPtr(_)); assert_matches!(src.layout.ty.kind(), ty::RawPtr(_) | ty::FnPtr(_));
assert!(cast_ty.is_integral()); assert!(cast_ty.is_integral());
@ -240,14 +242,15 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Ok(ptr) => M::expose_ptr(self, ptr)?, Ok(ptr) => M::expose_ptr(self, ptr)?,
Err(_) => {} // Do nothing, exposing an invalid pointer (`None` provenance) is a NOP. Err(_) => {} // Do nothing, exposing an invalid pointer (`None` provenance) is a NOP.
}; };
Ok(self.cast_from_int_like(scalar, src.layout, cast_ty)?.into()) let layout = self.layout_of(cast_ty)?;
Ok(ImmTy::from_scalar(self.cast_from_int_like(scalar, src.layout, cast_ty)?, layout))
} }
pub fn pointer_from_exposed_address_cast( pub fn pointer_from_exposed_address_cast(
&self, &self,
src: &ImmTy<'tcx, M::Provenance>, src: &ImmTy<'tcx, M::Provenance>,
cast_ty: Ty<'tcx>, cast_ty: Ty<'tcx>,
) -> InterpResult<'tcx, Immediate<M::Provenance>> { ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
assert!(src.layout.ty.is_integral()); assert!(src.layout.ty.is_integral());
assert_matches!(cast_ty.kind(), ty::RawPtr(_)); assert_matches!(cast_ty.kind(), ty::RawPtr(_));
@ -258,12 +261,13 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Then turn address into pointer. // Then turn address into pointer.
let ptr = M::ptr_from_addr_cast(&self, addr)?; let ptr = M::ptr_from_addr_cast(&self, addr)?;
Ok(Scalar::from_maybe_pointer(ptr, self).into()) let layout = self.layout_of(cast_ty)?;
Ok(ImmTy::from_scalar(Scalar::from_maybe_pointer(ptr, self), layout))
} }
/// Low-level cast helper function. This works directly on scalars and can take 'int-like' input /// Low-level cast helper function. This works directly on scalars and can take 'int-like' input
/// type (basically everything with a scalar layout) to int/float/char types. /// type (basically everything with a scalar layout) to int/float/char types.
pub fn cast_from_int_like( fn cast_from_int_like(
&self, &self,
scalar: Scalar<M::Provenance>, // input value (there is no ScalarTy so we separate data+layout) scalar: Scalar<M::Provenance>, // input value (there is no ScalarTy so we separate data+layout)
src_layout: TyAndLayout<'tcx>, src_layout: TyAndLayout<'tcx>,

View File

@ -76,7 +76,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let niche_start_val = ImmTy::from_uint(niche_start, tag_layout); let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
let variant_index_relative_val = let variant_index_relative_val =
ImmTy::from_uint(variant_index_relative, tag_layout); ImmTy::from_uint(variant_index_relative, tag_layout);
let tag_val = self.binary_op( let tag_val = self.wrapping_binary_op(
mir::BinOp::Add, mir::BinOp::Add,
&variant_index_relative_val, &variant_index_relative_val,
&niche_start_val, &niche_start_val,
@ -153,19 +153,18 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Figure out which discriminant and variant this corresponds to. // Figure out which discriminant and variant this corresponds to.
let index = match *tag_encoding { let index = match *tag_encoding {
TagEncoding::Direct => { TagEncoding::Direct => {
let scalar = tag_val.to_scalar();
// Generate a specific error if `tag_val` is not an integer. // Generate a specific error if `tag_val` is not an integer.
// (`tag_bits` itself is only used for error messages below.) // (`tag_bits` itself is only used for error messages below.)
let tag_bits = scalar let tag_bits = tag_val
.to_scalar()
.try_to_int() .try_to_int()
.map_err(|dbg_val| err_ub!(InvalidTag(dbg_val)))? .map_err(|dbg_val| err_ub!(InvalidTag(dbg_val)))?
.assert_bits(tag_layout.size); .assert_bits(tag_layout.size);
// Cast bits from tag layout to discriminant layout. // Cast bits from tag layout to discriminant layout.
// After the checks we did above, this cannot fail, as // After the checks we did above, this cannot fail, as
// discriminants are int-like. // discriminants are int-like.
let discr_val = let discr_val = self.int_to_int_or_float(&tag_val, discr_layout.ty).unwrap();
self.cast_from_int_like(scalar, tag_val.layout, discr_layout.ty).unwrap(); let discr_bits = discr_val.to_scalar().assert_bits(discr_layout.size);
let discr_bits = discr_val.assert_bits(discr_layout.size);
// Convert discriminant to variant index, and catch invalid discriminants. // Convert discriminant to variant index, and catch invalid discriminants.
let index = match *ty.kind() { let index = match *ty.kind() {
ty::Adt(adt, _) => { ty::Adt(adt, _) => {
@ -208,7 +207,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let tag_val = ImmTy::from_uint(tag_bits, tag_layout); let tag_val = ImmTy::from_uint(tag_bits, tag_layout);
let niche_start_val = ImmTy::from_uint(niche_start, tag_layout); let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
let variant_index_relative_val = let variant_index_relative_val =
self.binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?; self.wrapping_binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?;
let variant_index_relative = let variant_index_relative =
variant_index_relative_val.to_scalar().assert_bits(tag_val.layout.size); variant_index_relative_val.to_scalar().assert_bits(tag_val.layout.size);
// Check if this is in the range that indicates an actual discriminant. // Check if this is in the range that indicates an actual discriminant.

View File

@ -307,7 +307,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let dist = { let dist = {
// Addresses are unsigned, so this is a `usize` computation. We have to do the // Addresses are unsigned, so this is a `usize` computation. We have to do the
// overflow check separately anyway. // overflow check separately anyway.
let (val, overflowed, _ty) = { let (val, overflowed) = {
let a_offset = ImmTy::from_uint(a_offset, usize_layout); let a_offset = ImmTy::from_uint(a_offset, usize_layout);
let b_offset = ImmTy::from_uint(b_offset, usize_layout); let b_offset = ImmTy::from_uint(b_offset, usize_layout);
self.overflowing_binary_op(BinOp::Sub, &a_offset, &b_offset)? self.overflowing_binary_op(BinOp::Sub, &a_offset, &b_offset)?
@ -324,7 +324,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// The signed form of the intrinsic allows this. If we interpret the // The signed form of the intrinsic allows this. If we interpret the
// difference as isize, we'll get the proper signed difference. If that // difference as isize, we'll get the proper signed difference. If that
// seems *positive*, they were more than isize::MAX apart. // seems *positive*, they were more than isize::MAX apart.
let dist = val.to_target_isize(self)?; let dist = val.to_scalar().to_target_isize(self)?;
if dist >= 0 { if dist >= 0 {
throw_ub_custom!( throw_ub_custom!(
fluent::const_eval_offset_from_underflow, fluent::const_eval_offset_from_underflow,
@ -334,7 +334,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
dist dist
} else { } else {
// b >= a // b >= a
let dist = val.to_target_isize(self)?; let dist = val.to_scalar().to_target_isize(self)?;
// If converting to isize produced a *negative* result, we had an overflow // If converting to isize produced a *negative* result, we had an overflow
// because they were more than isize::MAX apart. // because they were more than isize::MAX apart.
if dist < 0 { if dist < 0 {
@ -504,9 +504,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Performs an exact division, resulting in undefined behavior where // Performs an exact division, resulting in undefined behavior where
// `x % y != 0` or `y == 0` or `x == T::MIN && y == -1`. // `x % y != 0` or `y == 0` or `x == T::MIN && y == -1`.
// First, check x % y != 0 (or if that computation overflows). // First, check x % y != 0 (or if that computation overflows).
let (res, overflow, _ty) = self.overflowing_binary_op(BinOp::Rem, &a, &b)?; let (res, overflow) = self.overflowing_binary_op(BinOp::Rem, &a, &b)?;
assert!(!overflow); // All overflow is UB, so this should never return on overflow. assert!(!overflow); // All overflow is UB, so this should never return on overflow.
if res.assert_bits(a.layout.size) != 0 { if res.to_scalar().assert_bits(a.layout.size) != 0 {
throw_ub_custom!( throw_ub_custom!(
fluent::const_eval_exact_div_has_remainder, fluent::const_eval_exact_div_has_remainder,
a = format!("{a}"), a = format!("{a}"),
@ -524,7 +524,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
r: &ImmTy<'tcx, M::Provenance>, r: &ImmTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, Scalar<M::Provenance>> { ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
assert!(matches!(mir_op, BinOp::Add | BinOp::Sub)); assert!(matches!(mir_op, BinOp::Add | BinOp::Sub));
let (val, overflowed, _ty) = self.overflowing_binary_op(mir_op, l, r)?; let (val, overflowed) = self.overflowing_binary_op(mir_op, l, r)?;
Ok(if overflowed { Ok(if overflowed {
let size = l.layout.size; let size = l.layout.size;
let num_bits = size.bits(); let num_bits = size.bits();
@ -556,7 +556,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
} }
} }
} else { } else {
val val.to_scalar()
}) })
} }

View File

@ -9,7 +9,7 @@ use std::hash::Hash;
use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece}; use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
use rustc_middle::mir; use rustc_middle::mir;
use rustc_middle::ty::layout::TyAndLayout; use rustc_middle::ty::layout::TyAndLayout;
use rustc_middle::ty::{self, Ty, TyCtxt}; use rustc_middle::ty::{self, TyCtxt};
use rustc_span::def_id::DefId; use rustc_span::def_id::DefId;
use rustc_target::abi::{Align, Size}; use rustc_target::abi::{Align, Size};
use rustc_target::spec::abi::Abi as CallAbi; use rustc_target::spec::abi::Abi as CallAbi;
@ -18,7 +18,7 @@ use crate::const_eval::CheckAlignment;
use super::{ use super::{
AllocBytes, AllocId, AllocRange, Allocation, ConstAllocation, FnArg, Frame, ImmTy, InterpCx, AllocBytes, AllocId, AllocRange, Allocation, ConstAllocation, FnArg, Frame, ImmTy, InterpCx,
InterpResult, MPlaceTy, MemoryKind, OpTy, PlaceTy, Pointer, Provenance, Scalar, InterpResult, MPlaceTy, MemoryKind, OpTy, PlaceTy, Pointer, Provenance,
}; };
/// Data returned by Machine::stack_pop, /// Data returned by Machine::stack_pop,
@ -238,7 +238,7 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
bin_op: mir::BinOp, bin_op: mir::BinOp,
left: &ImmTy<'tcx, Self::Provenance>, left: &ImmTy<'tcx, Self::Provenance>,
right: &ImmTy<'tcx, Self::Provenance>, right: &ImmTy<'tcx, Self::Provenance>,
) -> InterpResult<'tcx, (Scalar<Self::Provenance>, bool, Ty<'tcx>)>; ) -> InterpResult<'tcx, (ImmTy<'tcx, Self::Provenance>, bool)>;
/// Called before writing the specified `local` of the `frame`. /// Called before writing the specified `local` of the `frame`.
/// Since writing a ZST is not actually accessing memory or locals, this is never invoked /// Since writing a ZST is not actually accessing memory or locals, this is never invoked

View File

@ -8,7 +8,7 @@ use either::{Either, Left, Right};
use rustc_hir::def::Namespace; use rustc_hir::def::Namespace;
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout}; use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::print::{FmtPrinter, PrettyPrinter}; use rustc_middle::ty::print::{FmtPrinter, PrettyPrinter};
use rustc_middle::ty::{ConstInt, Ty}; use rustc_middle::ty::{ConstInt, Ty, TyCtxt};
use rustc_middle::{mir, ty}; use rustc_middle::{mir, ty};
use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size}; use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size};
@ -188,6 +188,12 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
Self::from_scalar(Scalar::from_int(i, layout.size), layout) Self::from_scalar(Scalar::from_int(i, layout.size), layout)
} }
#[inline]
pub fn from_bool(b: bool, tcx: TyCtxt<'tcx>) -> Self {
let layout = tcx.layout_of(ty::ParamEnv::reveal_all().and(tcx.types.bool)).unwrap();
Self::from_scalar(Scalar::from_bool(b), layout)
}
#[inline] #[inline]
pub fn to_const_int(self) -> ConstInt { pub fn to_const_int(self) -> ConstInt {
assert!(self.layout.ty.is_integral()); assert!(self.layout.ty.is_integral());

View File

@ -1,7 +1,7 @@
use rustc_apfloat::Float; use rustc_apfloat::Float;
use rustc_middle::mir; use rustc_middle::mir;
use rustc_middle::mir::interpret::{InterpResult, Scalar}; use rustc_middle::mir::interpret::{InterpResult, Scalar};
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout}; use rustc_middle::ty::layout::TyAndLayout;
use rustc_middle::ty::{self, FloatTy, Ty}; use rustc_middle::ty::{self, FloatTy, Ty};
use rustc_span::symbol::sym; use rustc_span::symbol::sym;
use rustc_target::abi::Abi; use rustc_target::abi::Abi;
@ -20,9 +20,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
right: &ImmTy<'tcx, M::Provenance>, right: &ImmTy<'tcx, M::Provenance>,
dest: &PlaceTy<'tcx, M::Provenance>, dest: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx> { ) -> InterpResult<'tcx> {
let (val, overflowed, ty) = self.overflowing_binary_op(op, &left, &right)?; let (val, overflowed) = self.overflowing_binary_op(op, &left, &right)?;
debug_assert_eq!( debug_assert_eq!(
Ty::new_tup(self.tcx.tcx, &[ty, self.tcx.types.bool]), Ty::new_tup(self.tcx.tcx, &[val.layout.ty, self.tcx.types.bool]),
dest.layout.ty, dest.layout.ty,
"type mismatch for result of {op:?}", "type mismatch for result of {op:?}",
); );
@ -30,7 +30,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
if let Abi::ScalarPair(..) = dest.layout.abi { if let Abi::ScalarPair(..) = dest.layout.abi {
// We can use the optimized path and avoid `place_field` (which might do // We can use the optimized path and avoid `place_field` (which might do
// `force_allocation`). // `force_allocation`).
let pair = Immediate::ScalarPair(val, Scalar::from_bool(overflowed)); let pair = Immediate::ScalarPair(val.to_scalar(), Scalar::from_bool(overflowed));
self.write_immediate(pair, dest)?; self.write_immediate(pair, dest)?;
} else { } else {
assert!(self.tcx.sess.opts.unstable_opts.randomize_layout); assert!(self.tcx.sess.opts.unstable_opts.randomize_layout);
@ -38,7 +38,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// do a component-wise write here. This code path is slower than the above because // do a component-wise write here. This code path is slower than the above because
// `place_field` will have to `force_allocate` locals here. // `place_field` will have to `force_allocate` locals here.
let val_field = self.project_field(dest, 0)?; let val_field = self.project_field(dest, 0)?;
self.write_scalar(val, &val_field)?; self.write_scalar(val.to_scalar(), &val_field)?;
let overflowed_field = self.project_field(dest, 1)?; let overflowed_field = self.project_field(dest, 1)?;
self.write_scalar(Scalar::from_bool(overflowed), &overflowed_field)?; self.write_scalar(Scalar::from_bool(overflowed), &overflowed_field)?;
} }
@ -54,9 +54,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
right: &ImmTy<'tcx, M::Provenance>, right: &ImmTy<'tcx, M::Provenance>,
dest: &PlaceTy<'tcx, M::Provenance>, dest: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx> { ) -> InterpResult<'tcx> {
let (val, _overflowed, ty) = self.overflowing_binary_op(op, left, right)?; let val = self.wrapping_binary_op(op, left, right)?;
assert_eq!(ty, dest.layout.ty, "type mismatch for result of {op:?}"); assert_eq!(val.layout.ty, dest.layout.ty, "type mismatch for result of {op:?}");
self.write_scalar(val, dest) self.write_immediate(*val, dest)
} }
} }
@ -66,7 +66,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
bin_op: mir::BinOp, bin_op: mir::BinOp,
l: char, l: char,
r: char, r: char,
) -> (Scalar<M::Provenance>, bool, Ty<'tcx>) { ) -> (ImmTy<'tcx, M::Provenance>, bool) {
use rustc_middle::mir::BinOp::*; use rustc_middle::mir::BinOp::*;
let res = match bin_op { let res = match bin_op {
@ -78,7 +78,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Ge => l >= r, Ge => l >= r,
_ => span_bug!(self.cur_span(), "Invalid operation on char: {:?}", bin_op), _ => span_bug!(self.cur_span(), "Invalid operation on char: {:?}", bin_op),
}; };
(Scalar::from_bool(res), false, self.tcx.types.bool) (ImmTy::from_bool(res, *self.tcx), false)
} }
fn binary_bool_op( fn binary_bool_op(
@ -86,7 +86,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
bin_op: mir::BinOp, bin_op: mir::BinOp,
l: bool, l: bool,
r: bool, r: bool,
) -> (Scalar<M::Provenance>, bool, Ty<'tcx>) { ) -> (ImmTy<'tcx, M::Provenance>, bool) {
use rustc_middle::mir::BinOp::*; use rustc_middle::mir::BinOp::*;
let res = match bin_op { let res = match bin_op {
@ -101,33 +101,33 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
BitXor => l ^ r, BitXor => l ^ r,
_ => span_bug!(self.cur_span(), "Invalid operation on bool: {:?}", bin_op), _ => span_bug!(self.cur_span(), "Invalid operation on bool: {:?}", bin_op),
}; };
(Scalar::from_bool(res), false, self.tcx.types.bool) (ImmTy::from_bool(res, *self.tcx), false)
} }
fn binary_float_op<F: Float + Into<Scalar<M::Provenance>>>( fn binary_float_op<F: Float + Into<Scalar<M::Provenance>>>(
&self, &self,
bin_op: mir::BinOp, bin_op: mir::BinOp,
ty: Ty<'tcx>, layout: TyAndLayout<'tcx>,
l: F, l: F,
r: F, r: F,
) -> (Scalar<M::Provenance>, bool, Ty<'tcx>) { ) -> (ImmTy<'tcx, M::Provenance>, bool) {
use rustc_middle::mir::BinOp::*; use rustc_middle::mir::BinOp::*;
let (val, ty) = match bin_op { let val = match bin_op {
Eq => (Scalar::from_bool(l == r), self.tcx.types.bool), Eq => ImmTy::from_bool(l == r, *self.tcx),
Ne => (Scalar::from_bool(l != r), self.tcx.types.bool), Ne => ImmTy::from_bool(l != r, *self.tcx),
Lt => (Scalar::from_bool(l < r), self.tcx.types.bool), Lt => ImmTy::from_bool(l < r, *self.tcx),
Le => (Scalar::from_bool(l <= r), self.tcx.types.bool), Le => ImmTy::from_bool(l <= r, *self.tcx),
Gt => (Scalar::from_bool(l > r), self.tcx.types.bool), Gt => ImmTy::from_bool(l > r, *self.tcx),
Ge => (Scalar::from_bool(l >= r), self.tcx.types.bool), Ge => ImmTy::from_bool(l >= r, *self.tcx),
Add => ((l + r).value.into(), ty), Add => ImmTy::from_scalar((l + r).value.into(), layout),
Sub => ((l - r).value.into(), ty), Sub => ImmTy::from_scalar((l - r).value.into(), layout),
Mul => ((l * r).value.into(), ty), Mul => ImmTy::from_scalar((l * r).value.into(), layout),
Div => ((l / r).value.into(), ty), Div => ImmTy::from_scalar((l / r).value.into(), layout),
Rem => ((l % r).value.into(), ty), Rem => ImmTy::from_scalar((l % r).value.into(), layout),
_ => span_bug!(self.cur_span(), "invalid float op: `{:?}`", bin_op), _ => span_bug!(self.cur_span(), "invalid float op: `{:?}`", bin_op),
}; };
(val, false, ty) (val, false)
} }
fn binary_int_op( fn binary_int_op(
@ -138,7 +138,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
left_layout: TyAndLayout<'tcx>, left_layout: TyAndLayout<'tcx>,
r: u128, r: u128,
right_layout: TyAndLayout<'tcx>, right_layout: TyAndLayout<'tcx>,
) -> InterpResult<'tcx, (Scalar<M::Provenance>, bool, Ty<'tcx>)> { ) -> InterpResult<'tcx, (ImmTy<'tcx, M::Provenance>, bool)> {
use rustc_middle::mir::BinOp::*; use rustc_middle::mir::BinOp::*;
let throw_ub_on_overflow = match bin_op { let throw_ub_on_overflow = match bin_op {
@ -200,7 +200,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
); );
} }
return Ok((Scalar::from_uint(truncated, left_layout.size), overflow, left_layout.ty)); return Ok((ImmTy::from_uint(truncated, left_layout), overflow));
} }
// For the remaining ops, the types must be the same on both sides // For the remaining ops, the types must be the same on both sides
@ -230,7 +230,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
if let Some(op) = op { if let Some(op) = op {
let l = self.sign_extend(l, left_layout) as i128; let l = self.sign_extend(l, left_layout) as i128;
let r = self.sign_extend(r, right_layout) as i128; let r = self.sign_extend(r, right_layout) as i128;
return Ok((Scalar::from_bool(op(&l, &r)), false, self.tcx.types.bool)); return Ok((ImmTy::from_bool(op(&l, &r), *self.tcx), false));
} }
let op: Option<fn(i128, i128) -> (i128, bool)> = match bin_op { let op: Option<fn(i128, i128) -> (i128, bool)> = match bin_op {
Div if r == 0 => throw_ub!(DivisionByZero), Div if r == 0 => throw_ub!(DivisionByZero),
@ -267,22 +267,22 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
if overflow && let Some(intrinsic_name) = throw_ub_on_overflow { if overflow && let Some(intrinsic_name) = throw_ub_on_overflow {
throw_ub_custom!(fluent::const_eval_overflow, name = intrinsic_name); throw_ub_custom!(fluent::const_eval_overflow, name = intrinsic_name);
} }
return Ok((Scalar::from_uint(truncated, size), overflow, left_layout.ty)); return Ok((ImmTy::from_uint(truncated, left_layout), overflow));
} }
} }
let (val, ty) = match bin_op { let val = match bin_op {
Eq => (Scalar::from_bool(l == r), self.tcx.types.bool), Eq => ImmTy::from_bool(l == r, *self.tcx),
Ne => (Scalar::from_bool(l != r), self.tcx.types.bool), Ne => ImmTy::from_bool(l != r, *self.tcx),
Lt => (Scalar::from_bool(l < r), self.tcx.types.bool), Lt => ImmTy::from_bool(l < r, *self.tcx),
Le => (Scalar::from_bool(l <= r), self.tcx.types.bool), Le => ImmTy::from_bool(l <= r, *self.tcx),
Gt => (Scalar::from_bool(l > r), self.tcx.types.bool), Gt => ImmTy::from_bool(l > r, *self.tcx),
Ge => (Scalar::from_bool(l >= r), self.tcx.types.bool), Ge => ImmTy::from_bool(l >= r, *self.tcx),
BitOr => (Scalar::from_uint(l | r, size), left_layout.ty), BitOr => ImmTy::from_uint(l | r, left_layout),
BitAnd => (Scalar::from_uint(l & r, size), left_layout.ty), BitAnd => ImmTy::from_uint(l & r, left_layout),
BitXor => (Scalar::from_uint(l ^ r, size), left_layout.ty), BitXor => ImmTy::from_uint(l ^ r, left_layout),
Add | AddUnchecked | Sub | SubUnchecked | Mul | MulUnchecked | Rem | Div => { Add | AddUnchecked | Sub | SubUnchecked | Mul | MulUnchecked | Rem | Div => {
assert!(!left_layout.abi.is_signed()); assert!(!left_layout.abi.is_signed());
@ -304,7 +304,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
if overflow && let Some(intrinsic_name) = throw_ub_on_overflow { if overflow && let Some(intrinsic_name) = throw_ub_on_overflow {
throw_ub_custom!(fluent::const_eval_overflow, name = intrinsic_name); throw_ub_custom!(fluent::const_eval_overflow, name = intrinsic_name);
} }
return Ok((Scalar::from_uint(truncated, size), overflow, left_layout.ty)); return Ok((ImmTy::from_uint(truncated, left_layout), overflow));
} }
_ => span_bug!( _ => span_bug!(
@ -317,7 +317,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
), ),
}; };
Ok((val, false, ty)) Ok((val, false))
} }
fn binary_ptr_op( fn binary_ptr_op(
@ -325,7 +325,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
bin_op: mir::BinOp, bin_op: mir::BinOp,
left: &ImmTy<'tcx, M::Provenance>, left: &ImmTy<'tcx, M::Provenance>,
right: &ImmTy<'tcx, M::Provenance>, right: &ImmTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, (Scalar<M::Provenance>, bool, Ty<'tcx>)> { ) -> InterpResult<'tcx, (ImmTy<'tcx, M::Provenance>, bool)> {
use rustc_middle::mir::BinOp::*; use rustc_middle::mir::BinOp::*;
match bin_op { match bin_op {
@ -336,7 +336,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let pointee_ty = left.layout.ty.builtin_deref(true).unwrap().ty; let pointee_ty = left.layout.ty.builtin_deref(true).unwrap().ty;
let offset_ptr = self.ptr_offset_inbounds(ptr, pointee_ty, offset_count)?; let offset_ptr = self.ptr_offset_inbounds(ptr, pointee_ty, offset_count)?;
Ok((Scalar::from_maybe_pointer(offset_ptr, self), false, left.layout.ty)) Ok((
ImmTy::from_scalar(Scalar::from_maybe_pointer(offset_ptr, self), left.layout),
false,
))
} }
// Fall back to machine hook so Miri can support more pointer ops. // Fall back to machine hook so Miri can support more pointer ops.
@ -344,14 +347,13 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
} }
} }
/// Returns the result of the specified operation, whether it overflowed, and /// Returns the result of the specified operation, and whether it overflowed.
/// the result type.
pub fn overflowing_binary_op( pub fn overflowing_binary_op(
&self, &self,
bin_op: mir::BinOp, bin_op: mir::BinOp,
left: &ImmTy<'tcx, M::Provenance>, left: &ImmTy<'tcx, M::Provenance>,
right: &ImmTy<'tcx, M::Provenance>, right: &ImmTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, (Scalar<M::Provenance>, bool, Ty<'tcx>)> { ) -> InterpResult<'tcx, (ImmTy<'tcx, M::Provenance>, bool)> {
trace!( trace!(
"Running binary op {:?}: {:?} ({:?}), {:?} ({:?})", "Running binary op {:?}: {:?} ({:?}), {:?} ({:?})",
bin_op, bin_op,
@ -376,15 +378,15 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
} }
ty::Float(fty) => { ty::Float(fty) => {
assert_eq!(left.layout.ty, right.layout.ty); assert_eq!(left.layout.ty, right.layout.ty);
let ty = left.layout.ty; let layout = left.layout;
let left = left.to_scalar(); let left = left.to_scalar();
let right = right.to_scalar(); let right = right.to_scalar();
Ok(match fty { Ok(match fty {
FloatTy::F32 => { FloatTy::F32 => {
self.binary_float_op(bin_op, ty, left.to_f32()?, right.to_f32()?) self.binary_float_op(bin_op, layout, left.to_f32()?, right.to_f32()?)
} }
FloatTy::F64 => { FloatTy::F64 => {
self.binary_float_op(bin_op, ty, left.to_f64()?, right.to_f64()?) self.binary_float_op(bin_op, layout, left.to_f64()?, right.to_f64()?)
} }
}) })
} }
@ -423,16 +425,15 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
} }
} }
/// Typed version of `overflowing_binary_op`, returning an `ImmTy`. Also ignores overflows.
#[inline] #[inline]
pub fn binary_op( pub fn wrapping_binary_op(
&self, &self,
bin_op: mir::BinOp, bin_op: mir::BinOp,
left: &ImmTy<'tcx, M::Provenance>, left: &ImmTy<'tcx, M::Provenance>,
right: &ImmTy<'tcx, M::Provenance>, right: &ImmTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> { ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
let (val, _overflow, ty) = self.overflowing_binary_op(bin_op, left, right)?; let (val, _overflow) = self.overflowing_binary_op(bin_op, left, right)?;
Ok(ImmTy::from_scalar(val, self.layout_of(ty)?)) Ok(val)
} }
/// Returns the result of the specified operation, whether it overflowed, and /// Returns the result of the specified operation, whether it overflowed, and
@ -441,7 +442,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
&self, &self,
un_op: mir::UnOp, un_op: mir::UnOp,
val: &ImmTy<'tcx, M::Provenance>, val: &ImmTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, (Scalar<M::Provenance>, bool, Ty<'tcx>)> { ) -> InterpResult<'tcx, (ImmTy<'tcx, M::Provenance>, bool)> {
use rustc_middle::mir::UnOp::*; use rustc_middle::mir::UnOp::*;
let layout = val.layout; let layout = val.layout;
@ -455,7 +456,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Not => !val, Not => !val,
_ => span_bug!(self.cur_span(), "Invalid bool op {:?}", un_op), _ => span_bug!(self.cur_span(), "Invalid bool op {:?}", un_op),
}; };
Ok((Scalar::from_bool(res), false, self.tcx.types.bool)) Ok((ImmTy::from_bool(res, *self.tcx), false))
} }
ty::Float(fty) => { ty::Float(fty) => {
let res = match (un_op, fty) { let res = match (un_op, fty) {
@ -463,7 +464,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
(Neg, FloatTy::F64) => Scalar::from_f64(-val.to_f64()?), (Neg, FloatTy::F64) => Scalar::from_f64(-val.to_f64()?),
_ => span_bug!(self.cur_span(), "Invalid float op {:?}", un_op), _ => span_bug!(self.cur_span(), "Invalid float op {:?}", un_op),
}; };
Ok((res, false, layout.ty)) Ok((ImmTy::from_scalar(res, layout), false))
} }
_ => { _ => {
assert!(layout.ty.is_integral()); assert!(layout.ty.is_integral());
@ -482,17 +483,18 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
(truncated, overflow || self.sign_extend(truncated, layout) != res) (truncated, overflow || self.sign_extend(truncated, layout) != res)
} }
}; };
Ok((Scalar::from_uint(res, layout.size), overflow, layout.ty)) Ok((ImmTy::from_uint(res, layout), overflow))
} }
} }
} }
pub fn unary_op( #[inline]
pub fn wrapping_unary_op(
&self, &self,
un_op: mir::UnOp, un_op: mir::UnOp,
val: &ImmTy<'tcx, M::Provenance>, val: &ImmTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> { ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
let (val, _overflow, ty) = self.overflowing_unary_op(un_op, val)?; let (val, _overflow) = self.overflowing_unary_op(un_op, val)?;
Ok(ImmTy::from_scalar(val, self.layout_of(ty)?)) Ok(val)
} }
} }

View File

@ -177,7 +177,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
UnaryOp(un_op, ref operand) => { UnaryOp(un_op, ref operand) => {
// The operand always has the same type as the result. // The operand always has the same type as the result.
let val = self.read_immediate(&self.eval_operand(operand, Some(dest.layout))?)?; let val = self.read_immediate(&self.eval_operand(operand, Some(dest.layout))?)?;
let val = self.unary_op(un_op, &val)?; let val = self.wrapping_unary_op(un_op, &val)?;
assert_eq!(val.layout, dest.layout, "layout mismatch for result of {un_op:?}"); assert_eq!(val.layout, dest.layout, "layout mismatch for result of {un_op:?}");
self.write_immediate(*val, &dest)?; self.write_immediate(*val, &dest)?;
} }

View File

@ -98,14 +98,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
for (const_int, target) in targets.iter() { for (const_int, target) in targets.iter() {
// Compare using MIR BinOp::Eq, to also support pointer values. // Compare using MIR BinOp::Eq, to also support pointer values.
// (Avoiding `self.binary_op` as that does some redundant layout computation.) // (Avoiding `self.binary_op` as that does some redundant layout computation.)
let res = self let res = self.wrapping_binary_op(
.overflowing_binary_op( mir::BinOp::Eq,
mir::BinOp::Eq, &discr,
&discr, &ImmTy::from_uint(const_int, discr.layout),
&ImmTy::from_uint(const_int, discr.layout), )?;
)? if res.to_scalar().to_bool()? {
.0;
if res.to_bool()? {
target_block = target; target_block = target;
break; break;
} }

View File

@ -210,7 +210,7 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for ConstPropMachine<'mir, 'tcx>
_bin_op: BinOp, _bin_op: BinOp,
_left: &ImmTy<'tcx>, _left: &ImmTy<'tcx>,
_right: &ImmTy<'tcx>, _right: &ImmTy<'tcx>,
) -> InterpResult<'tcx, (Scalar, bool, Ty<'tcx>)> { ) -> InterpResult<'tcx, (ImmTy<'tcx>, bool)> {
// We can't do this because aliasing of memory can differ between const eval and llvm // We can't do this because aliasing of memory can differ between const eval and llvm
throw_machine_stop_str!("pointer arithmetic or comparisons aren't supported in ConstProp") throw_machine_stop_str!("pointer arithmetic or comparisons aren't supported in ConstProp")
} }

View File

@ -322,7 +322,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
fn check_unary_op(&mut self, op: UnOp, arg: &Operand<'tcx>, location: Location) -> Option<()> { fn check_unary_op(&mut self, op: UnOp, arg: &Operand<'tcx>, location: Location) -> Option<()> {
if let (val, true) = self.use_ecx(location, |this| { if let (val, true) = self.use_ecx(location, |this| {
let val = this.ecx.read_immediate(&this.ecx.eval_operand(arg, None)?)?; let val = this.ecx.read_immediate(&this.ecx.eval_operand(arg, None)?)?;
let (_res, overflow, _ty) = this.ecx.overflowing_unary_op(op, &val)?; let (_res, overflow) = this.ecx.overflowing_unary_op(op, &val)?;
Ok((val, overflow)) Ok((val, overflow))
})? { })? {
// `AssertKind` only has an `OverflowNeg` variant, so make sure that is // `AssertKind` only has an `OverflowNeg` variant, so make sure that is
@ -390,7 +390,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
if let (Some(l), Some(r)) = (l, r) { if let (Some(l), Some(r)) = (l, r) {
// The remaining operators are handled through `overflowing_binary_op`. // The remaining operators are handled through `overflowing_binary_op`.
if self.use_ecx(location, |this| { if self.use_ecx(location, |this| {
let (_res, overflow, _ty) = this.ecx.overflowing_binary_op(op, &l, &r)?; let (_res, overflow) = this.ecx.overflowing_binary_op(op, &l, &r)?;
Ok(overflow) Ok(overflow)
})? { })? {
let source_info = self.body().source_info(location); let source_info = self.body().source_info(location);

View File

@ -238,7 +238,7 @@ impl<'tcx> ValueAnalysis<'tcx> for ConstAnalysis<'_, 'tcx> {
FlatSet::Elem(op) => self FlatSet::Elem(op) => self
.ecx .ecx
.int_to_int_or_float(&op, *ty) .int_to_int_or_float(&op, *ty)
.map_or(FlatSet::Top, |result| self.wrap_immediate(result)), .map_or(FlatSet::Top, |result| self.wrap_immediate(*result)),
FlatSet::Bottom => FlatSet::Bottom, FlatSet::Bottom => FlatSet::Bottom,
FlatSet::Top => FlatSet::Top, FlatSet::Top => FlatSet::Top,
} }
@ -248,7 +248,7 @@ impl<'tcx> ValueAnalysis<'tcx> for ConstAnalysis<'_, 'tcx> {
FlatSet::Elem(op) => self FlatSet::Elem(op) => self
.ecx .ecx
.float_to_float_or_int(&op, *ty) .float_to_float_or_int(&op, *ty)
.map_or(FlatSet::Top, |result| self.wrap_immediate(result)), .map_or(FlatSet::Top, |result| self.wrap_immediate(*result)),
FlatSet::Bottom => FlatSet::Bottom, FlatSet::Bottom => FlatSet::Bottom,
FlatSet::Top => FlatSet::Top, FlatSet::Top => FlatSet::Top,
} }
@ -268,7 +268,7 @@ impl<'tcx> ValueAnalysis<'tcx> for ConstAnalysis<'_, 'tcx> {
Rvalue::UnaryOp(op, operand) => match self.eval_operand(operand, state) { Rvalue::UnaryOp(op, operand) => match self.eval_operand(operand, state) {
FlatSet::Elem(value) => self FlatSet::Elem(value) => self
.ecx .ecx
.unary_op(*op, &value) .wrapping_unary_op(*op, &value)
.map_or(FlatSet::Top, |val| self.wrap_immediate(*val)), .map_or(FlatSet::Top, |val| self.wrap_immediate(*val)),
FlatSet::Bottom => FlatSet::Bottom, FlatSet::Bottom => FlatSet::Bottom,
FlatSet::Top => FlatSet::Top, FlatSet::Top => FlatSet::Top,
@ -439,7 +439,9 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> {
// Both sides are known, do the actual computation. // Both sides are known, do the actual computation.
(FlatSet::Elem(left), FlatSet::Elem(right)) => { (FlatSet::Elem(left), FlatSet::Elem(right)) => {
match self.ecx.overflowing_binary_op(op, &left, &right) { match self.ecx.overflowing_binary_op(op, &left, &right) {
Ok((val, overflow, _)) => (FlatSet::Elem(val), FlatSet::Elem(overflow)), Ok((val, overflow)) => {
(FlatSet::Elem(val.to_scalar()), FlatSet::Elem(overflow))
}
_ => (FlatSet::Top, FlatSet::Top), _ => (FlatSet::Top, FlatSet::Top),
} }
} }
@ -783,8 +785,8 @@ impl<'mir, 'tcx: 'mir> rustc_const_eval::interpret::Machine<'mir, 'tcx> for Dumm
_bin_op: BinOp, _bin_op: BinOp,
_left: &rustc_const_eval::interpret::ImmTy<'tcx, Self::Provenance>, _left: &rustc_const_eval::interpret::ImmTy<'tcx, Self::Provenance>,
_right: &rustc_const_eval::interpret::ImmTy<'tcx, Self::Provenance>, _right: &rustc_const_eval::interpret::ImmTy<'tcx, Self::Provenance>,
) -> interpret::InterpResult<'tcx, (Scalar<Self::Provenance>, bool, Ty<'tcx>)> { ) -> interpret::InterpResult<'tcx, (ImmTy<'tcx, Self::Provenance>, bool)> {
throw_unsup!(Unsupported("".into())) crate::const_prop::throw_machine_stop_str!("can't do pointer arithmetic");
} }
fn expose_ptr( fn expose_ptr(

View File

@ -516,8 +516,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriInterpCxExt<'mir, 'tcx> {
let old = this.allow_data_races_mut(|this| this.read_immediate(place))?; let old = this.allow_data_races_mut(|this| this.read_immediate(place))?;
// Atomics wrap around on overflow. // Atomics wrap around on overflow.
let val = this.binary_op(op, &old, rhs)?; let val = this.wrapping_binary_op(op, &old, rhs)?;
let val = if neg { this.unary_op(mir::UnOp::Not, &val)? } else { val }; let val = if neg { this.wrapping_unary_op(mir::UnOp::Not, &val)? } else { val };
this.allow_data_races_mut(|this| this.write_immediate(*val, place))?; this.allow_data_races_mut(|this| this.write_immediate(*val, place))?;
this.validate_atomic_rmw(place, atomic)?; this.validate_atomic_rmw(place, atomic)?;
@ -561,7 +561,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriInterpCxExt<'mir, 'tcx> {
this.validate_overlapping_atomic(place)?; this.validate_overlapping_atomic(place)?;
let old = this.allow_data_races_mut(|this| this.read_immediate(place))?; let old = this.allow_data_races_mut(|this| this.read_immediate(place))?;
let lt = this.binary_op(mir::BinOp::Lt, &old, &rhs)?.to_scalar().to_bool()?; let lt = this.wrapping_binary_op(mir::BinOp::Lt, &old, &rhs)?.to_scalar().to_bool()?;
let new_val = if min { let new_val = if min {
if lt { &old } else { &rhs } if lt { &old } else { &rhs }
@ -605,7 +605,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriInterpCxExt<'mir, 'tcx> {
// Read as immediate for the sake of `binary_op()` // Read as immediate for the sake of `binary_op()`
let old = this.allow_data_races_mut(|this| this.read_immediate(place))?; let old = this.allow_data_races_mut(|this| this.read_immediate(place))?;
// `binary_op` will bail if either of them is not a scalar. // `binary_op` will bail if either of them is not a scalar.
let eq = this.binary_op(mir::BinOp::Eq, &old, expect_old)?; let eq = this.wrapping_binary_op(mir::BinOp::Eq, &old, expect_old)?;
// If the operation would succeed, but is "weak", fail some portion // If the operation would succeed, but is "weak", fail some portion
// of the time, based on `success_rate`. // of the time, based on `success_rate`.
let success_rate = 1.0 - this.machine.cmpxchg_weak_failure_rate; let success_rate = 1.0 - this.machine.cmpxchg_weak_failure_rate;

View File

@ -1015,13 +1015,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
f: F, f: F,
dest_ty: Ty<'tcx>, dest_ty: Ty<'tcx>,
round: rustc_apfloat::Round, round: rustc_apfloat::Round,
) -> Option<Scalar<Provenance>> ) -> Option<ImmTy<'tcx, Provenance>>
where where
F: rustc_apfloat::Float + Into<Scalar<Provenance>>, F: rustc_apfloat::Float + Into<Scalar<Provenance>>,
{ {
let this = self.eval_context_ref(); let this = self.eval_context_ref();
match dest_ty.kind() { let val = match dest_ty.kind() {
// Unsigned // Unsigned
ty::Uint(t) => { ty::Uint(t) => {
let size = Integer::from_uint_ty(this, *t).size(); let size = Integer::from_uint_ty(this, *t).size();
@ -1033,11 +1033,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
) { ) {
// Floating point value is NaN (flagged with INVALID_OP) or outside the range // Floating point value is NaN (flagged with INVALID_OP) or outside the range
// of values of the integer type (flagged with OVERFLOW or UNDERFLOW). // of values of the integer type (flagged with OVERFLOW or UNDERFLOW).
None return None
} else { } else {
// Floating point value can be represented by the integer type after rounding. // Floating point value can be represented by the integer type after rounding.
// The INEXACT flag is ignored on purpose to allow rounding. // The INEXACT flag is ignored on purpose to allow rounding.
Some(Scalar::from_uint(res.value, size)) Scalar::from_uint(res.value, size)
} }
} }
// Signed // Signed
@ -1051,11 +1051,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
) { ) {
// Floating point value is NaN (flagged with INVALID_OP) or outside the range // Floating point value is NaN (flagged with INVALID_OP) or outside the range
// of values of the integer type (flagged with OVERFLOW or UNDERFLOW). // of values of the integer type (flagged with OVERFLOW or UNDERFLOW).
None return None
} else { } else {
// Floating point value can be represented by the integer type after rounding. // Floating point value can be represented by the integer type after rounding.
// The INEXACT flag is ignored on purpose to allow rounding. // The INEXACT flag is ignored on purpose to allow rounding.
Some(Scalar::from_int(res.value, size)) Scalar::from_int(res.value, size)
} }
} }
// Nothing else // Nothing else
@ -1064,7 +1064,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
this.cur_span(), this.cur_span(),
"attempted float-to-int conversion with non-int output type {dest_ty:?}" "attempted float-to-int conversion with non-int output type {dest_ty:?}"
), ),
} };
Some(ImmTy::from_scalar(val, this.layout_of(dest_ty).unwrap()))
} }
/// Returns an integer type that is twice wide as `ty` /// Returns an integer type that is twice wide as `ty`

View File

@ -998,7 +998,7 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> {
bin_op: mir::BinOp, bin_op: mir::BinOp,
left: &ImmTy<'tcx, Provenance>, left: &ImmTy<'tcx, Provenance>,
right: &ImmTy<'tcx, Provenance>, right: &ImmTy<'tcx, Provenance>,
) -> InterpResult<'tcx, (Scalar<Provenance>, bool, Ty<'tcx>)> { ) -> InterpResult<'tcx, (ImmTy<'tcx, Provenance>, bool)> {
ecx.binary_ptr_op(bin_op, left, right) ecx.binary_ptr_op(bin_op, left, right)
} }

View File

@ -1,6 +1,6 @@
use log::trace; use log::trace;
use rustc_middle::{mir, ty::Ty}; use rustc_middle::mir;
use rustc_target::abi::Size; use rustc_target::abi::Size;
use crate::*; use crate::*;
@ -11,7 +11,7 @@ pub trait EvalContextExt<'tcx> {
bin_op: mir::BinOp, bin_op: mir::BinOp,
left: &ImmTy<'tcx, Provenance>, left: &ImmTy<'tcx, Provenance>,
right: &ImmTy<'tcx, Provenance>, right: &ImmTy<'tcx, Provenance>,
) -> InterpResult<'tcx, (Scalar<Provenance>, bool, Ty<'tcx>)>; ) -> InterpResult<'tcx, (ImmTy<'tcx, Provenance>, bool)>;
} }
impl<'mir, 'tcx> EvalContextExt<'tcx> for super::MiriInterpCx<'mir, 'tcx> { impl<'mir, 'tcx> EvalContextExt<'tcx> for super::MiriInterpCx<'mir, 'tcx> {
@ -20,7 +20,7 @@ impl<'mir, 'tcx> EvalContextExt<'tcx> for super::MiriInterpCx<'mir, 'tcx> {
bin_op: mir::BinOp, bin_op: mir::BinOp,
left: &ImmTy<'tcx, Provenance>, left: &ImmTy<'tcx, Provenance>,
right: &ImmTy<'tcx, Provenance>, right: &ImmTy<'tcx, Provenance>,
) -> InterpResult<'tcx, (Scalar<Provenance>, bool, Ty<'tcx>)> { ) -> InterpResult<'tcx, (ImmTy<'tcx, Provenance>, bool)> {
use rustc_middle::mir::BinOp::*; use rustc_middle::mir::BinOp::*;
trace!("ptr_op: {:?} {:?} {:?}", *left, bin_op, *right); trace!("ptr_op: {:?} {:?} {:?}", *left, bin_op, *right);
@ -50,7 +50,7 @@ impl<'mir, 'tcx> EvalContextExt<'tcx> for super::MiriInterpCx<'mir, 'tcx> {
Ge => left >= right, Ge => left >= right,
_ => bug!(), _ => bug!(),
}; };
(Scalar::from_bool(res), false, self.tcx.types.bool) (ImmTy::from_bool(res, *self.tcx), false)
} }
// Some more operations are possible with atomics. // Some more operations are possible with atomics.
@ -65,12 +65,12 @@ impl<'mir, 'tcx> EvalContextExt<'tcx> for super::MiriInterpCx<'mir, 'tcx> {
right.to_scalar().to_target_usize(self)?, right.to_scalar().to_target_usize(self)?,
self.machine.layouts.usize, self.machine.layouts.usize,
); );
let (result, overflowing, _ty) = let (result, overflowing) =
self.overflowing_binary_op(bin_op, &left, &right)?; self.overflowing_binary_op(bin_op, &left, &right)?;
// Construct a new pointer with the provenance of `ptr` (the LHS). // Construct a new pointer with the provenance of `ptr` (the LHS).
let result_ptr = let result_ptr =
Pointer::new(ptr.provenance, Size::from_bytes(result.to_target_usize(self)?)); Pointer::new(ptr.provenance, Size::from_bytes(result.to_scalar().to_target_usize(self)?));
(Scalar::from_maybe_pointer(result_ptr, self), overflowing, left.layout.ty) (ImmTy::from_scalar(Scalar::from_maybe_pointer(result_ptr, self), left.layout), overflowing)
} }
_ => span_bug!(self.cur_span(), "Invalid operator on pointers: {:?}", bin_op), _ => span_bug!(self.cur_span(), "Invalid operator on pointers: {:?}", bin_op),

View File

@ -89,10 +89,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
let [left, right] = check_arg_count(args)?; let [left, right] = check_arg_count(args)?;
let left = this.read_immediate(left)?; let left = this.read_immediate(left)?;
let right = this.read_immediate(right)?; let right = this.read_immediate(right)?;
let (val, _overflowed, _ty) = let val = this.wrapping_binary_op(mir::BinOp::Eq, &left, &right)?;
this.overflowing_binary_op(mir::BinOp::Eq, &left, &right)?;
// We're type punning a bool as an u8 here. // We're type punning a bool as an u8 here.
this.write_scalar(val, dest)?; this.write_scalar(val.to_scalar(), dest)?;
} }
"const_allocate" => { "const_allocate" => {
// For now, for compatibility with the run-time implementation of this, we just return null. // For now, for compatibility with the run-time implementation of this, we just return null.
@ -396,7 +395,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
), ),
}; };
this.write_scalar(res, dest)?; this.write_immediate(*res, dest)?;
} }
// Other // Other

View File

@ -60,7 +60,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
let op = this.read_immediate(&this.project_index(&op, i)?)?; let op = this.read_immediate(&this.project_index(&op, i)?)?;
let dest = this.project_index(&dest, i)?; let dest = this.project_index(&dest, i)?;
let val = match which { let val = match which {
Op::MirOp(mir_op) => this.unary_op(mir_op, &op)?.to_scalar(), Op::MirOp(mir_op) => this.wrapping_unary_op(mir_op, &op)?.to_scalar(),
Op::Abs => { Op::Abs => {
// Works for f32 and f64. // Works for f32 and f64.
let ty::Float(float_ty) = op.layout.ty.kind() else { let ty::Float(float_ty) = op.layout.ty.kind() else {
@ -177,7 +177,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
let dest = this.project_index(&dest, i)?; let dest = this.project_index(&dest, i)?;
let val = match which { let val = match which {
Op::MirOp(mir_op) => { Op::MirOp(mir_op) => {
let (val, overflowed, ty) = this.overflowing_binary_op(mir_op, &left, &right)?; let (val, overflowed) = this.overflowing_binary_op(mir_op, &left, &right)?;
if matches!(mir_op, BinOp::Shl | BinOp::Shr) { if matches!(mir_op, BinOp::Shl | BinOp::Shr) {
// Shifts have extra UB as SIMD operations that the MIR binop does not have. // Shifts have extra UB as SIMD operations that the MIR binop does not have.
// See <https://github.com/rust-lang/rust/issues/91237>. // See <https://github.com/rust-lang/rust/issues/91237>.
@ -188,13 +188,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
} }
if matches!(mir_op, BinOp::Eq | BinOp::Ne | BinOp::Lt | BinOp::Le | BinOp::Gt | BinOp::Ge) { if matches!(mir_op, BinOp::Eq | BinOp::Ne | BinOp::Lt | BinOp::Le | BinOp::Gt | BinOp::Ge) {
// Special handling for boolean-returning operations // Special handling for boolean-returning operations
assert_eq!(ty, this.tcx.types.bool); assert_eq!(val.layout.ty, this.tcx.types.bool);
let val = val.to_bool().unwrap(); let val = val.to_scalar().to_bool().unwrap();
bool_to_simd_element(val, dest.layout.size) bool_to_simd_element(val, dest.layout.size)
} else { } else {
assert_ne!(ty, this.tcx.types.bool); assert_ne!(val.layout.ty, this.tcx.types.bool);
assert_eq!(ty, dest.layout.ty); assert_eq!(val.layout.ty, dest.layout.ty);
val val.to_scalar()
} }
} }
Op::SaturatingOp(mir_op) => { Op::SaturatingOp(mir_op) => {
@ -304,18 +304,18 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
let op = this.read_immediate(&this.project_index(&op, i)?)?; let op = this.read_immediate(&this.project_index(&op, i)?)?;
res = match which { res = match which {
Op::MirOp(mir_op) => { Op::MirOp(mir_op) => {
this.binary_op(mir_op, &res, &op)? this.wrapping_binary_op(mir_op, &res, &op)?
} }
Op::MirOpBool(mir_op) => { Op::MirOpBool(mir_op) => {
let op = imm_from_bool(simd_element_to_bool(op)?); let op = imm_from_bool(simd_element_to_bool(op)?);
this.binary_op(mir_op, &res, &op)? this.wrapping_binary_op(mir_op, &res, &op)?
} }
Op::Max => { Op::Max => {
if matches!(res.layout.ty.kind(), ty::Float(_)) { if matches!(res.layout.ty.kind(), ty::Float(_)) {
ImmTy::from_scalar(fmax_op(&res, &op)?, res.layout) ImmTy::from_scalar(fmax_op(&res, &op)?, res.layout)
} else { } else {
// Just boring integers, so no NaNs to worry about // Just boring integers, so no NaNs to worry about
if this.binary_op(BinOp::Ge, &res, &op)?.to_scalar().to_bool()? { if this.wrapping_binary_op(BinOp::Ge, &res, &op)?.to_scalar().to_bool()? {
res res
} else { } else {
op op
@ -327,7 +327,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
ImmTy::from_scalar(fmin_op(&res, &op)?, res.layout) ImmTy::from_scalar(fmin_op(&res, &op)?, res.layout)
} else { } else {
// Just boring integers, so no NaNs to worry about // Just boring integers, so no NaNs to worry about
if this.binary_op(BinOp::Le, &res, &op)?.to_scalar().to_bool()? { if this.wrapping_binary_op(BinOp::Le, &res, &op)?.to_scalar().to_bool()? {
res res
} else { } else {
op op
@ -356,7 +356,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
let mut res = init; let mut res = init;
for i in 0..op_len { for i in 0..op_len {
let op = this.read_immediate(&this.project_index(&op, i)?)?; let op = this.read_immediate(&this.project_index(&op, i)?)?;
res = this.binary_op(mir_op, &res, &op)?; res = this.wrapping_binary_op(mir_op, &res, &op)?;
} }
this.write_immediate(*res, dest)?; this.write_immediate(*res, dest)?;
} }
@ -487,7 +487,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
to_ty = dest.layout.ty, to_ty = dest.layout.ty,
), ),
}; };
this.write_immediate(val, &dest)?; this.write_immediate(*val, &dest)?;
} }
} }
"shuffle" => { "shuffle" => {

View File

@ -80,8 +80,8 @@ fn bin_op_float<'tcx, F: rustc_apfloat::Float>(
) -> InterpResult<'tcx, Scalar<Provenance>> { ) -> InterpResult<'tcx, Scalar<Provenance>> {
match which { match which {
FloatBinOp::Arith(which) => { FloatBinOp::Arith(which) => {
let (res, _overflow, _ty) = this.overflowing_binary_op(which, left, right)?; let res = this.wrapping_binary_op(which, left, right)?;
Ok(res) Ok(res.to_scalar())
} }
FloatBinOp::Cmp(which) => { FloatBinOp::Cmp(which) => {
let left = left.to_scalar().to_float::<F>()?; let left = left.to_scalar().to_float::<F>()?;

View File

@ -175,10 +175,10 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
let res = this.float_to_int_checked(op, dest.layout.ty, rnd).unwrap_or_else(|| { let res = this.float_to_int_checked(op, dest.layout.ty, rnd).unwrap_or_else(|| {
// Fallback to minimum according to SSE semantics. // Fallback to minimum according to SSE semantics.
Scalar::from_int(dest.layout.size.signed_int_min(), dest.layout.size) ImmTy::from_int(dest.layout.size.signed_int_min(), dest.layout)
}); });
this.write_scalar(res, dest)?; this.write_immediate(*res, dest)?;
} }
// Used to implement the _mm_cvtsi32_ss and _mm_cvtsi64_ss functions. // Used to implement the _mm_cvtsi32_ss and _mm_cvtsi64_ss functions.
// Converts `right` from i32/i64 to f32. Returns a SIMD vector with // Converts `right` from i32/i64 to f32. Returns a SIMD vector with
@ -197,7 +197,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
let right = this.read_immediate(right)?; let right = this.read_immediate(right)?;
let dest0 = this.project_index(&dest, 0)?; let dest0 = this.project_index(&dest, 0)?;
let res0 = this.int_to_int_or_float(&right, dest0.layout.ty)?; let res0 = this.int_to_int_or_float(&right, dest0.layout.ty)?;
this.write_immediate(res0, &dest0)?; this.write_immediate(*res0, &dest0)?;
for i in 1..dest_len { for i in 1..dest_len {
this.copy_op( this.copy_op(

View File

@ -62,30 +62,30 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
let right = this.int_to_int_or_float(&right, twice_wide_ty)?; let right = this.int_to_int_or_float(&right, twice_wide_ty)?;
// Calculate left + right + 1 // Calculate left + right + 1
let (added, _overflow, _ty) = this.overflowing_binary_op( let added = this.wrapping_binary_op(
mir::BinOp::Add, mir::BinOp::Add,
&ImmTy::from_immediate(left, twice_wide_layout), &left,
&ImmTy::from_immediate(right, twice_wide_layout), &right,
)?; )?;
let (added, _overflow, _ty) = this.overflowing_binary_op( let added = this.wrapping_binary_op(
mir::BinOp::Add, mir::BinOp::Add,
&ImmTy::from_scalar(added, twice_wide_layout), &added,
&ImmTy::from_uint(1u32, twice_wide_layout), &ImmTy::from_uint(1u32, twice_wide_layout),
)?; )?;
// Calculate (left + right + 1) / 2 // Calculate (left + right + 1) / 2
let (divided, _overflow, _ty) = this.overflowing_binary_op( let divided = this.wrapping_binary_op(
mir::BinOp::Div, mir::BinOp::Div,
&ImmTy::from_scalar(added, twice_wide_layout), &added,
&ImmTy::from_uint(2u32, twice_wide_layout), &ImmTy::from_uint(2u32, twice_wide_layout),
)?; )?;
// Narrow back to the original type // Narrow back to the original type
let res = this.int_to_int_or_float( let res = this.int_to_int_or_float(
&ImmTy::from_scalar(divided, twice_wide_layout), &divided,
dest.layout.ty, dest.layout.ty,
)?; )?;
this.write_immediate(res, &dest)?; this.write_immediate(*res, &dest)?;
} }
} }
// Used to implement the _mm_mulhi_epi16 and _mm_mulhi_epu16 functions. // Used to implement the _mm_mulhi_epi16 and _mm_mulhi_epu16 functions.
@ -112,24 +112,24 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
let right = this.int_to_int_or_float(&right, twice_wide_ty)?; let right = this.int_to_int_or_float(&right, twice_wide_ty)?;
// Multiply // Multiply
let (multiplied, _overflow, _ty) = this.overflowing_binary_op( let multiplied = this.wrapping_binary_op(
mir::BinOp::Mul, mir::BinOp::Mul,
&ImmTy::from_immediate(left, twice_wide_layout), &left,
&ImmTy::from_immediate(right, twice_wide_layout), &right,
)?; )?;
// Keep the high half // Keep the high half
let (high, _overflow, _ty) = this.overflowing_binary_op( let high = this.wrapping_binary_op(
mir::BinOp::Shr, mir::BinOp::Shr,
&ImmTy::from_scalar(multiplied, twice_wide_layout), &multiplied,
&ImmTy::from_uint(dest.layout.size.bits(), twice_wide_layout), &ImmTy::from_uint(dest.layout.size.bits(), twice_wide_layout),
)?; )?;
// Narrow back to the original type // Narrow back to the original type
let res = this.int_to_int_or_float( let res = this.int_to_int_or_float(
&ImmTy::from_scalar(high, twice_wide_layout), &high,
dest.layout.ty, dest.layout.ty,
)?; )?;
this.write_immediate(res, &dest)?; this.write_immediate(*res, &dest)?;
} }
} }
// Used to implement the _mm_mul_epu32 function. // Used to implement the _mm_mul_epu32 function.
@ -394,9 +394,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
let res = let res =
this.float_to_int_checked(op, dest.layout.ty, rnd).unwrap_or_else(|| { this.float_to_int_checked(op, dest.layout.ty, rnd).unwrap_or_else(|| {
// Fallback to minimum according to SSE2 semantics. // Fallback to minimum according to SSE2 semantics.
Scalar::from_i32(i32::MIN) ImmTy::from_int(i32::MIN, this.machine.layouts.i32)
}); });
this.write_scalar(res, &dest)?; this.write_immediate(*res, &dest)?;
} }
} }
// Used to implement the _mm_packs_epi16 function. // Used to implement the _mm_packs_epi16 function.
@ -649,7 +649,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
let dest = this.project_index(&dest, i)?; let dest = this.project_index(&dest, i)?;
let res = this.float_to_float_or_int(&op, dest.layout.ty)?; let res = this.float_to_float_or_int(&op, dest.layout.ty)?;
this.write_immediate(res, &dest)?; this.write_immediate(*res, &dest)?;
} }
// For f32 -> f64, ignore the remaining // For f32 -> f64, ignore the remaining
// For f64 -> f32, fill the remaining with zeros // For f64 -> f32, fill the remaining with zeros
@ -687,9 +687,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
let res = let res =
this.float_to_int_checked(op, dest.layout.ty, rnd).unwrap_or_else(|| { this.float_to_int_checked(op, dest.layout.ty, rnd).unwrap_or_else(|| {
// Fallback to minimum according to SSE2 semantics. // Fallback to minimum according to SSE2 semantics.
Scalar::from_i32(i32::MIN) ImmTy::from_int(i32::MIN, this.machine.layouts.i32)
}); });
this.write_scalar(res, &dest)?; this.write_immediate(*res, &dest)?;
} }
// Fill the remaining with zeros // Fill the remaining with zeros
for i in op_len..dest_len { for i in op_len..dest_len {
@ -718,10 +718,10 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
let res = this.float_to_int_checked(op, dest.layout.ty, rnd).unwrap_or_else(|| { let res = this.float_to_int_checked(op, dest.layout.ty, rnd).unwrap_or_else(|| {
// Fallback to minimum according to SSE semantics. // Fallback to minimum according to SSE semantics.
Scalar::from_int(dest.layout.size.signed_int_min(), dest.layout.size) ImmTy::from_int(dest.layout.size.signed_int_min(), dest.layout)
}); });
this.write_scalar(res, dest)?; this.write_immediate(*res, dest)?;
} }
// Used to implement the _mm_cvtsd_ss and _mm_cvtss_sd functions. // Used to implement the _mm_cvtsd_ss and _mm_cvtss_sd functions.
// Converts the first f64/f32 from `right` to f32/f64 and copies // Converts the first f64/f32 from `right` to f32/f64 and copies
@ -742,7 +742,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
// `float_to_float_or_int` here will convert from f64 to f32 (cvtsd2ss) or // `float_to_float_or_int` here will convert from f64 to f32 (cvtsd2ss) or
// from f32 to f64 (cvtss2sd). // from f32 to f64 (cvtss2sd).
let res0 = this.float_to_float_or_int(&right0, dest0.layout.ty)?; let res0 = this.float_to_float_or_int(&right0, dest0.layout.ty)?;
this.write_immediate(res0, &dest0)?; this.write_immediate(*res0, &dest0)?;
// Copy remaining from `left` // Copy remaining from `left`
for i in 1..dest_len { for i in 1..dest_len {