Commit e915cf45dc (parent 6c9d7fbeed)

Pass OpTy by reference not value
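The change below is mechanical: interpreter helpers that used to take their `OpTy`/`ImmTy` operands by value now borrow them, so call sites add `&` (or `*`/`ref` where a copy or binding is still needed). As a hedged illustration of the pattern only — the `OpTy` here is a simplified stand-in type, not the real rustc_mir definition — the before/after shape of such a signature and its call site looks like this:

// Hedged sketch: `OpTy` is a simplified stand-in used only to show the
// by-value -> by-reference signature change made throughout this commit.
#[derive(Clone, Copy, Debug)]
struct OpTy {
    bits: u128,
}

// Before this commit: the operand was consumed by value.
fn read_immediate_by_value(op: OpTy) -> u128 {
    op.bits
}

// After this commit: the operand is borrowed, so callers write `read_immediate(&op)`
// and keep using `op` afterwards.
fn read_immediate(op: &OpTy) -> u128 {
    op.bits
}

fn main() {
    let op = OpTy { bits: 42 };
    let old = read_immediate_by_value(op); // old-style call site
    let new = read_immediate(&op);         // new-style call site: just add `&`
    assert_eq!(old, new);
    println!("both reads agree: {}", new);
}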
@@ -105,7 +105,7 @@ pub(super) fn mk_eval_cx<'mir, 'tcx>(
 /// type system.
 pub(super) fn op_to_const<'tcx>(
 ecx: &CompileTimeEvalContext<'_, 'tcx>,
-op: OpTy<'tcx>,
+op: &OpTy<'tcx>,
 ) -> ConstValue<'tcx> {
 // We do not have value optimizations for everything.
 // Only scalars and slices, since they are very common.
@@ -201,7 +201,7 @@ fn turn_into_const_value<'tcx>(
 "the `eval_to_const_value_raw` query should not be used for statics, use `eval_to_allocation` instead"
 );
 // Turn this into a proper constant.
-op_to_const(&ecx, mplace.into())
+op_to_const(&ecx, &mplace.into())
 }

 pub fn eval_to_const_value_raw_provider<'tcx>(
@@ -348,7 +348,7 @@ pub fn eval_to_allocation_raw_provider<'tcx>(
 Some(_) => CtfeValidationMode::Regular, // a `static`
 None => CtfeValidationMode::Const { inner, allow_static_ptrs: false },
 };
-ecx.const_validate_operand(mplace.into(), path, &mut ref_tracking, mode)?;
+ecx.const_validate_operand(&mplace.into(), path, &mut ref_tracking, mode)?;
 inner = true;
 }
 };
@@ -39,7 +39,7 @@ impl<'mir, 'tcx> InterpCx<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>> {
 // &str
 assert!(args.len() == 1);

-let msg_place = self.deref_operand(args[0])?;
+let msg_place = self.deref_operand(&args[0])?;
 let msg = Symbol::intern(self.read_str(msg_place)?);
 let span = self.find_closest_untracked_caller_location();
 let (file, line, col) = self.location_triple_for_span(span);
@@ -284,8 +284,8 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
 };
 match intrinsic_name {
 sym::ptr_guaranteed_eq | sym::ptr_guaranteed_ne => {
-let a = ecx.read_immediate(args[0])?.to_scalar()?;
-let b = ecx.read_immediate(args[1])?.to_scalar()?;
+let a = ecx.read_immediate(&args[0])?.to_scalar()?;
+let b = ecx.read_immediate(&args[1])?.to_scalar()?;
 let cmp = if intrinsic_name == sym::ptr_guaranteed_eq {
 ecx.guaranteed_eq(a, b)
 } else {
@@ -294,8 +294,8 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
 ecx.write_scalar(Scalar::from_bool(cmp), dest)?;
 }
 sym::const_allocate => {
-let size = ecx.read_scalar(args[0])?.to_machine_usize(ecx)?;
-let align = ecx.read_scalar(args[1])?.to_machine_usize(ecx)?;
+let size = ecx.read_scalar(&args[0])?.to_machine_usize(ecx)?;
+let align = ecx.read_scalar(&args[1])?.to_machine_usize(ecx)?;

 let align = match Align::from_bytes(align) {
 Ok(a) => a,
@@ -330,7 +330,7 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
 use rustc_middle::mir::AssertKind::*;
 // Convert `AssertKind<Operand>` to `AssertKind<Scalar>`.
 let eval_to_int =
-|op| ecx.read_immediate(ecx.eval_operand(op, None)?).map(|x| x.to_const_int());
+|op| ecx.read_immediate(&ecx.eval_operand(op, None)?).map(|x| x.to_const_int());
 let err = match msg {
 BoundsCheck { ref len, ref index } => {
 let len = eval_to_int(len)?;
@@ -358,8 +358,8 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
 fn binary_ptr_op(
 _ecx: &InterpCx<'mir, 'tcx, Self>,
 _bin_op: mir::BinOp,
-_left: ImmTy<'tcx>,
-_right: ImmTy<'tcx>,
+_left: &ImmTy<'tcx>,
+_right: &ImmTy<'tcx>,
 ) -> InterpResult<'tcx, (Scalar, bool, Ty<'tcx>)> {
 Err(ConstEvalErrKind::NeedsRfc("pointer arithmetic or comparison".to_string()).into())
 }
@@ -55,8 +55,8 @@ pub(crate) fn destructure_const<'tcx>(
 return mir::DestructuredConst { variant: None, fields: &[] };
 }
 ty::Adt(def, _) => {
-let variant = ecx.read_discriminant(op).unwrap().1;
-let down = ecx.operand_downcast(op, variant).unwrap();
+let variant = ecx.read_discriminant(&op).unwrap().1;
+let down = ecx.operand_downcast(&op, variant).unwrap();
 (def.variants[variant].fields.len(), Some(variant), down)
 }
 ty::Tuple(substs) => (substs.len(), None, op),
@@ -64,8 +64,8 @@ pub(crate) fn destructure_const<'tcx>(
 };

 let fields_iter = (0..field_count).map(|i| {
-let field_op = ecx.operand_field(down, i).unwrap();
-let val = op_to_const(&ecx, field_op);
+let field_op = ecx.operand_field(&down, i).unwrap();
+let val = op_to_const(&ecx, &field_op);
 ty::Const::from_value(tcx, val, field_op.layout.ty)
 });
 let fields = tcx.arena.alloc_from_iter(fields_iter);
@@ -81,7 +81,7 @@ pub(crate) fn deref_const<'tcx>(
 trace!("deref_const: {:?}", val);
 let ecx = mk_eval_cx(tcx, DUMMY_SP, param_env, false);
 let op = ecx.const_to_op(val, None).unwrap();
-let mplace = ecx.deref_operand(op).unwrap();
+let mplace = ecx.deref_operand(&op).unwrap();
 if let Scalar::Ptr(ptr) = mplace.ptr {
 assert_eq!(
 ecx.memory.get_raw(ptr.alloc_id).unwrap().mutability,
@@ -106,5 +106,5 @@ pub(crate) fn deref_const<'tcx>(
 },
 };

-tcx.mk_const(ty::Const { val: ty::ConstKind::Value(op_to_const(&ecx, mplace.into())), ty })
+tcx.mk_const(ty::Const { val: ty::ConstKind::Value(op_to_const(&ecx, &mplace.into())), ty })
 }
@@ -17,7 +17,7 @@ use super::{
 impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 pub fn cast(
 &mut self,
-src: OpTy<'tcx, M::PointerTag>,
+src: &OpTy<'tcx, M::PointerTag>,
 cast_kind: CastKind,
 cast_ty: Ty<'tcx>,
 dest: PlaceTy<'tcx, M::PointerTag>,
@@ -259,7 +259,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {

 fn unsize_into_ptr(
 &mut self,
-src: OpTy<'tcx, M::PointerTag>,
+src: &OpTy<'tcx, M::PointerTag>,
 dest: PlaceTy<'tcx, M::PointerTag>,
 // The pointee types
 source_ty: Ty<'tcx>,
@@ -300,7 +300,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {

 fn unsize_into(
 &mut self,
-src: OpTy<'tcx, M::PointerTag>,
+src: &OpTy<'tcx, M::PointerTag>,
 cast_ty: TyAndLayout<'tcx>,
 dest: PlaceTy<'tcx, M::PointerTag>,
 ) -> InterpResult<'tcx> {
@@ -340,9 +340,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 let src_field = self.operand_field(src, i)?;
 let dst_field = self.place_field(dest, i)?;
 if src_field.layout.ty == cast_ty_field.ty {
-self.copy_op(src_field, dst_field)?;
+self.copy_op(&src_field, dst_field)?;
 } else {
-self.unsize_into(src_field, cast_ty_field, dst_field)?;
+self.unsize_into(&src_field, cast_ty_field, dst_field)?;
 }
 }
 Ok(())
@@ -779,7 +779,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 // Copy the return value to the caller's stack frame.
 if let Some(return_place) = frame.return_place {
 let op = self.access_local(&frame, mir::RETURN_PLACE, None)?;
-self.copy_op_transmute(op, return_place)?;
+self.copy_op_transmute(&op, return_place)?;
 trace!("{:?}", self.dump_place(*return_place));
 } else {
 throw_ub!(Unreachable);
@@ -167,7 +167,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory

 fn visit_aggregate(
 &mut self,
-mplace: MPlaceTy<'tcx>,
+mplace: &MPlaceTy<'tcx>,
 fields: impl Iterator<Item = InterpResult<'tcx, Self::V>>,
 ) -> InterpResult<'tcx> {
 // ZSTs cannot contain pointers, so we can skip them.
@@ -191,13 +191,13 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory
 self.walk_aggregate(mplace, fields)
 }

-fn visit_value(&mut self, mplace: MPlaceTy<'tcx>) -> InterpResult<'tcx> {
+fn visit_value(&mut self, mplace: &MPlaceTy<'tcx>) -> InterpResult<'tcx> {
 // Handle Reference types, as these are the only relocations supported by const eval.
 // Raw pointers (and boxes) are handled by the `leftover_relocations` logic.
 let tcx = self.ecx.tcx;
 let ty = mplace.layout.ty;
 if let ty::Ref(_, referenced_ty, ref_mutability) = *ty.kind() {
-let value = self.ecx.read_immediate(mplace.into())?;
+let value = self.ecx.read_immediate(&(*mplace).into())?;
 let mplace = self.ecx.ref_to_mplace(value)?;
 assert_eq!(mplace.layout.ty, referenced_ty);
 // Handle trait object vtables.
@@ -338,7 +338,7 @@ where
 leftover_allocations,
 inside_unsafe_cell: false,
 }
-.visit_value(mplace);
+.visit_value(&mplace);
 // We deliberately *ignore* interpreter errors here. When there is a problem, the remaining
 // references are "leftover"-interned, and later validation will show a proper error
 // and point at the right part of the value causing the problem.
@@ -143,7 +143,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 sym::min_align_of_val | sym::size_of_val => {
 // Avoid `deref_operand` -- this is not a deref, the ptr does not have to be
 // dereferencable!
-let place = self.ref_to_mplace(self.read_immediate(args[0])?)?;
+let place = self.ref_to_mplace(self.read_immediate(&args[0])?)?;
 let (size, align) = self
 .size_and_align_of_mplace(place)?
 .ok_or_else(|| err_unsup_format!("`extern type` does not have known layout"))?;
@@ -177,7 +177,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 self.tcx.const_eval_global_id(self.param_env, gid, Some(self.tcx.span))?;
 let const_ = ty::Const { val: ty::ConstKind::Value(val), ty };
 let val = self.const_to_op(&const_, None)?;
-self.copy_op(val, dest)?;
+self.copy_op(&val, dest)?;
 }

 sym::ctpop
@@ -189,7 +189,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 | sym::bitreverse => {
 let ty = substs.type_at(0);
 let layout_of = self.layout_of(ty)?;
-let val = self.read_scalar(args[0])?.check_init()?;
+let val = self.read_scalar(&args[0])?.check_init()?;
 let bits = self.force_bits(val, layout_of.size)?;
 let kind = match layout_of.abi {
 Abi::Scalar(ref scalar) => scalar.value,
@@ -212,22 +212,22 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 self.write_scalar(out_val, dest)?;
 }
 sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => {
-let lhs = self.read_immediate(args[0])?;
-let rhs = self.read_immediate(args[1])?;
+let lhs = self.read_immediate(&args[0])?;
+let rhs = self.read_immediate(&args[1])?;
 let bin_op = match intrinsic_name {
 sym::add_with_overflow => BinOp::Add,
 sym::sub_with_overflow => BinOp::Sub,
 sym::mul_with_overflow => BinOp::Mul,
 _ => bug!("Already checked for int ops"),
 };
-self.binop_with_overflow(bin_op, lhs, rhs, dest)?;
+self.binop_with_overflow(bin_op, &lhs, &rhs, dest)?;
 }
 sym::saturating_add | sym::saturating_sub => {
-let l = self.read_immediate(args[0])?;
-let r = self.read_immediate(args[1])?;
+let l = self.read_immediate(&args[0])?;
+let r = self.read_immediate(&args[1])?;
 let is_add = intrinsic_name == sym::saturating_add;
 let (val, overflowed, _ty) =
-self.overflowing_binary_op(if is_add { BinOp::Add } else { BinOp::Sub }, l, r)?;
+self.overflowing_binary_op(if is_add { BinOp::Add } else { BinOp::Sub }, &l, &r)?;
 let val = if overflowed {
 let num_bits = l.layout.size.bits();
 if l.layout.abi.is_signed() {
@@ -269,8 +269,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 self.write_scalar(val, dest)?;
 }
 sym::discriminant_value => {
-let place = self.deref_operand(args[0])?;
-let discr_val = self.read_discriminant(place.into())?.0;
+let place = self.deref_operand(&args[0])?;
+let discr_val = self.read_discriminant(&place.into())?.0;
 self.write_scalar(discr_val, dest)?;
 }
 sym::unchecked_shl
@@ -280,8 +280,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 | sym::unchecked_mul
 | sym::unchecked_div
 | sym::unchecked_rem => {
-let l = self.read_immediate(args[0])?;
-let r = self.read_immediate(args[1])?;
+let l = self.read_immediate(&args[0])?;
+let r = self.read_immediate(&args[1])?;
 let bin_op = match intrinsic_name {
 sym::unchecked_shl => BinOp::Shl,
 sym::unchecked_shr => BinOp::Shr,
@@ -292,7 +292,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 sym::unchecked_rem => BinOp::Rem,
 _ => bug!("Already checked for int ops"),
 };
-let (val, overflowed, _ty) = self.overflowing_binary_op(bin_op, l, r)?;
+let (val, overflowed, _ty) = self.overflowing_binary_op(bin_op, &l, &r)?;
 if overflowed {
 let layout = self.layout_of(substs.type_at(0))?;
 let r_val = self.force_bits(r.to_scalar()?, layout.size)?;
@@ -308,9 +308,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 // rotate_left: (X << (S % BW)) | (X >> ((BW - S) % BW))
 // rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW))
 let layout = self.layout_of(substs.type_at(0))?;
-let val = self.read_scalar(args[0])?.check_init()?;
+let val = self.read_scalar(&args[0])?.check_init()?;
 let val_bits = self.force_bits(val, layout.size)?;
-let raw_shift = self.read_scalar(args[1])?.check_init()?;
+let raw_shift = self.read_scalar(&args[1])?.check_init()?;
 let raw_shift_bits = self.force_bits(raw_shift, layout.size)?;
 let width_bits = u128::from(layout.size.bits());
 let shift_bits = raw_shift_bits % width_bits;
@@ -327,15 +327,15 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 sym::copy | sym::copy_nonoverlapping => {
 let elem_ty = instance.substs.type_at(0);
 let elem_layout = self.layout_of(elem_ty)?;
-let count = self.read_scalar(args[2])?.to_machine_usize(self)?;
+let count = self.read_scalar(&args[2])?.to_machine_usize(self)?;
 let elem_align = elem_layout.align.abi;

 let size = elem_layout.size.checked_mul(count, self).ok_or_else(|| {
 err_ub_format!("overflow computing total size of `{}`", intrinsic_name)
 })?;
-let src = self.read_scalar(args[0])?.check_init()?;
+let src = self.read_scalar(&args[0])?.check_init()?;
 let src = self.memory.check_ptr_access(src, size, elem_align)?;
-let dest = self.read_scalar(args[1])?.check_init()?;
+let dest = self.read_scalar(&args[1])?.check_init()?;
 let dest = self.memory.check_ptr_access(dest, size, elem_align)?;

 if let (Some(src), Some(dest)) = (src, dest) {
@@ -348,16 +348,16 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 }
 }
 sym::offset => {
-let ptr = self.read_scalar(args[0])?.check_init()?;
-let offset_count = self.read_scalar(args[1])?.to_machine_isize(self)?;
+let ptr = self.read_scalar(&args[0])?.check_init()?;
+let offset_count = self.read_scalar(&args[1])?.to_machine_isize(self)?;
 let pointee_ty = substs.type_at(0);

 let offset_ptr = self.ptr_offset_inbounds(ptr, pointee_ty, offset_count)?;
 self.write_scalar(offset_ptr, dest)?;
 }
 sym::arith_offset => {
-let ptr = self.read_scalar(args[0])?.check_init()?;
-let offset_count = self.read_scalar(args[1])?.to_machine_isize(self)?;
+let ptr = self.read_scalar(&args[0])?.check_init()?;
+let offset_count = self.read_scalar(&args[1])?.to_machine_isize(self)?;
 let pointee_ty = substs.type_at(0);

 let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
@@ -366,8 +366,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 self.write_scalar(offset_ptr, dest)?;
 }
 sym::ptr_offset_from => {
-let a = self.read_immediate(args[0])?.to_scalar()?;
-let b = self.read_immediate(args[1])?.to_scalar()?;
+let a = self.read_immediate(&args[0])?.to_scalar()?;
+let b = self.read_immediate(&args[1])?.to_scalar()?;

 // Special case: if both scalars are *equal integers*
 // and not NULL, we pretend there is an allocation of size 0 right there,
@@ -406,16 +406,16 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 let a_offset = ImmTy::from_uint(a.offset.bytes(), usize_layout);
 let b_offset = ImmTy::from_uint(b.offset.bytes(), usize_layout);
 let (val, _overflowed, _ty) =
-self.overflowing_binary_op(BinOp::Sub, a_offset, b_offset)?;
+self.overflowing_binary_op(BinOp::Sub, &a_offset, &b_offset)?;
 let pointee_layout = self.layout_of(substs.type_at(0))?;
 let val = ImmTy::from_scalar(val, isize_layout);
 let size = ImmTy::from_int(pointee_layout.size.bytes(), isize_layout);
-self.exact_div(val, size, dest)?;
+self.exact_div(&val, &size, dest)?;
 }
 }

 sym::transmute => {
-self.copy_op_transmute(args[0], dest)?;
+self.copy_op_transmute(&args[0], dest)?;
 }
 sym::assert_inhabited => {
 let ty = instance.substs.type_at(0);
@@ -434,9 +434,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 }
 }
 sym::simd_insert => {
-let index = u64::from(self.read_scalar(args[1])?.to_u32()?);
-let elem = args[2];
-let input = args[0];
+let index = u64::from(self.read_scalar(&args[1])?.to_u32()?);
+let elem = &args[2];
+let input = &args[0];
 let (len, e_ty) = input.layout.ty.simd_size_and_type(*self.tcx);
 assert!(
 index < len,
@@ -458,12 +458,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {

 for i in 0..len {
 let place = self.place_index(dest, i)?;
-let value = if i == index { elem } else { self.operand_index(input, i)? };
-self.copy_op(value, place)?;
+let value = if i == index { *elem } else { self.operand_index(input, i)? };
+self.copy_op(&value, place)?;
 }
 }
 sym::simd_extract => {
-let index = u64::from(self.read_scalar(args[1])?.to_u32()?);
+let index = u64::from(self.read_scalar(&args[1])?.to_u32()?);
 let (len, e_ty) = args[0].layout.ty.simd_size_and_type(*self.tcx);
 assert!(
 index < len,
@@ -477,14 +477,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 "Return type `{}` must match vector element type `{}`",
 dest.layout.ty, e_ty
 );
-self.copy_op(self.operand_index(args[0], index)?, dest)?;
+self.copy_op(&self.operand_index(&args[0], index)?, dest)?;
 }
 sym::likely | sym::unlikely => {
 // These just return their argument
-self.copy_op(args[0], dest)?;
+self.copy_op(&args[0], dest)?;
 }
 sym::assume => {
-let cond = self.read_scalar(args[0])?.check_init()?.to_bool()?;
+let cond = self.read_scalar(&args[0])?.check_init()?.to_bool()?;
 if !cond {
 throw_ub_format!("`assume` intrinsic called with `false`");
 }
@@ -499,14 +499,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {

 pub fn exact_div(
 &mut self,
-a: ImmTy<'tcx, M::PointerTag>,
-b: ImmTy<'tcx, M::PointerTag>,
+a: &ImmTy<'tcx, M::PointerTag>,
+b: &ImmTy<'tcx, M::PointerTag>,
 dest: PlaceTy<'tcx, M::PointerTag>,
 ) -> InterpResult<'tcx> {
 // Performs an exact division, resulting in undefined behavior where
 // `x % y != 0` or `y == 0` or `x == T::MIN && y == -1`.
 // First, check x % y != 0 (or if that computation overflows).
-let (res, overflow, _ty) = self.overflowing_binary_op(BinOp::Rem, a, b)?;
+let (res, overflow, _ty) = self.overflowing_binary_op(BinOp::Rem, &a, &b)?;
 if overflow || res.assert_bits(a.layout.size) != 0 {
 // Then, check if `b` is -1, which is the "MIN / -1" case.
 let minus1 = Scalar::from_int(-1, dest.layout.size);
@@ -518,7 +518,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 }
 }
 // `Rem` says this is all right, so we can let `Div` do its job.
-self.binop_ignore_overflow(BinOp::Div, a, b, dest)
+self.binop_ignore_overflow(BinOp::Div, &a, &b, dest)
 }

 /// Offsets a pointer by some multiple of its type, returning an error if the pointer leaves its
@@ -200,8 +200,8 @@ pub trait Machine<'mir, 'tcx>: Sized {
 fn binary_ptr_op(
 ecx: &InterpCx<'mir, 'tcx, Self>,
 bin_op: mir::BinOp,
-left: ImmTy<'tcx, Self::PointerTag>,
-right: ImmTy<'tcx, Self::PointerTag>,
+left: &ImmTy<'tcx, Self::PointerTag>,
+right: &ImmTy<'tcx, Self::PointerTag>,
 ) -> InterpResult<'tcx, (Scalar<Self::PointerTag>, bool, Ty<'tcx>)>;

 /// Heap allocations via the `box` keyword.
@@ -231,7 +231,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 #[inline]
 pub fn force_op_ptr(
 &self,
-op: OpTy<'tcx, M::PointerTag>,
+op: &OpTy<'tcx, M::PointerTag>,
 ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
 match op.try_as_mplace(self) {
 Ok(mplace) => Ok(self.force_mplace_ptr(mplace)?.into()),
@@ -304,7 +304,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 /// in a `Immediate`, not on which data is stored there currently.
 pub(crate) fn try_read_immediate(
 &self,
-src: OpTy<'tcx, M::PointerTag>,
+src: &OpTy<'tcx, M::PointerTag>,
 ) -> InterpResult<'tcx, Result<ImmTy<'tcx, M::PointerTag>, MPlaceTy<'tcx, M::PointerTag>>> {
 Ok(match src.try_as_mplace(self) {
 Ok(mplace) => {
@@ -322,7 +322,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 #[inline(always)]
 pub fn read_immediate(
 &self,
-op: OpTy<'tcx, M::PointerTag>,
+op: &OpTy<'tcx, M::PointerTag>,
 ) -> InterpResult<'tcx, ImmTy<'tcx, M::PointerTag>> {
 if let Ok(imm) = self.try_read_immediate(op)? {
 Ok(imm)
@@ -334,7 +334,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 /// Read a scalar from a place
 pub fn read_scalar(
 &self,
-op: OpTy<'tcx, M::PointerTag>,
+op: &OpTy<'tcx, M::PointerTag>,
 ) -> InterpResult<'tcx, ScalarMaybeUninit<M::PointerTag>> {
 Ok(self.read_immediate(op)?.to_scalar_or_uninit())
 }
@@ -350,7 +350,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 /// Projection functions
 pub fn operand_field(
 &self,
-op: OpTy<'tcx, M::PointerTag>,
+op: &OpTy<'tcx, M::PointerTag>,
 field: usize,
 ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
 let base = match op.try_as_mplace(self) {
@@ -388,7 +388,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {

 pub fn operand_index(
 &self,
-op: OpTy<'tcx, M::PointerTag>,
+op: &OpTy<'tcx, M::PointerTag>,
 index: u64,
 ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
 if let Ok(index) = usize::try_from(index) {
@@ -403,7 +403,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {

 pub fn operand_downcast(
 &self,
-op: OpTy<'tcx, M::PointerTag>,
+op: &OpTy<'tcx, M::PointerTag>,
 variant: VariantIdx,
 ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
 // Downcasts only change the layout
@@ -411,14 +411,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 Ok(mplace) => self.mplace_downcast(mplace, variant)?.into(),
 Err(..) => {
 let layout = op.layout.for_variant(self, variant);
-OpTy { layout, ..op }
+OpTy { layout, ..*op }
 }
 })
 }

 pub fn operand_projection(
 &self,
-base: OpTy<'tcx, M::PointerTag>,
+base: &OpTy<'tcx, M::PointerTag>,
 proj_elem: mir::PlaceElem<'tcx>,
 ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
 use rustc_middle::mir::ProjectionElem::*;
@@ -489,7 +489,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 let op = place
 .projection
 .iter()
-.try_fold(base_op, |op, elem| self.operand_projection(op, elem))?;
+.try_fold(base_op, |op, elem| self.operand_projection(&op, elem))?;

 trace!("eval_place_to_op: got {:?}", *op);
 // Sanity-check the type we ended up with.
@@ -599,7 +599,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 /// Read discriminant, return the runtime value as well as the variant index.
 pub fn read_discriminant(
 &self,
-op: OpTy<'tcx, M::PointerTag>,
+op: &OpTy<'tcx, M::PointerTag>,
 ) -> InterpResult<'tcx, (Scalar<M::PointerTag>, VariantIdx)> {
 trace!("read_discriminant_value {:#?}", op.layout);
 // Get type and layout of the discriminant.
@@ -645,7 +645,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 let tag_layout = self.layout_of(tag_scalar_layout.value.to_int_ty(*self.tcx))?;

 // Read tag and sanity-check `tag_layout`.
-let tag_val = self.read_immediate(self.operand_field(op, tag_field)?)?;
+let tag_val = self.read_immediate(&self.operand_field(op, tag_field)?)?;
 assert_eq!(tag_layout.size, tag_val.layout.size);
 assert_eq!(tag_layout.abi.is_signed(), tag_val.layout.abi.is_signed());
 let tag_val = tag_val.to_scalar()?;
@@ -699,7 +699,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 let tag_val = ImmTy::from_uint(tag_bits, tag_layout);
 let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
 let variant_index_relative_val =
-self.binary_op(mir::BinOp::Sub, tag_val, niche_start_val)?;
+self.binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?;
 let variant_index_relative = variant_index_relative_val
 .to_scalar()?
 .assert_bits(tag_val.layout.size);
@@ -14,11 +14,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 pub fn binop_with_overflow(
 &mut self,
 op: mir::BinOp,
-left: ImmTy<'tcx, M::PointerTag>,
-right: ImmTy<'tcx, M::PointerTag>,
+left: &ImmTy<'tcx, M::PointerTag>,
+right: &ImmTy<'tcx, M::PointerTag>,
 dest: PlaceTy<'tcx, M::PointerTag>,
 ) -> InterpResult<'tcx> {
-let (val, overflowed, ty) = self.overflowing_binary_op(op, left, right)?;
+let (val, overflowed, ty) = self.overflowing_binary_op(op, &left, &right)?;
 debug_assert_eq!(
 self.tcx.intern_tup(&[ty, self.tcx.types.bool]),
 dest.layout.ty,
@@ -34,8 +34,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 pub fn binop_ignore_overflow(
 &mut self,
 op: mir::BinOp,
-left: ImmTy<'tcx, M::PointerTag>,
-right: ImmTy<'tcx, M::PointerTag>,
+left: &ImmTy<'tcx, M::PointerTag>,
+right: &ImmTy<'tcx, M::PointerTag>,
 dest: PlaceTy<'tcx, M::PointerTag>,
 ) -> InterpResult<'tcx> {
 let (val, _overflowed, ty) = self.overflowing_binary_op(op, left, right)?;
@@ -269,8 +269,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 pub fn overflowing_binary_op(
 &self,
 bin_op: mir::BinOp,
-left: ImmTy<'tcx, M::PointerTag>,
-right: ImmTy<'tcx, M::PointerTag>,
+left: &ImmTy<'tcx, M::PointerTag>,
+right: &ImmTy<'tcx, M::PointerTag>,
 ) -> InterpResult<'tcx, (Scalar<M::PointerTag>, bool, Ty<'tcx>)> {
 trace!(
 "Running binary op {:?}: {:?} ({:?}), {:?} ({:?})",
@@ -347,8 +347,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 pub fn binary_op(
 &self,
 bin_op: mir::BinOp,
-left: ImmTy<'tcx, M::PointerTag>,
-right: ImmTy<'tcx, M::PointerTag>,
+left: &ImmTy<'tcx, M::PointerTag>,
+right: &ImmTy<'tcx, M::PointerTag>,
 ) -> InterpResult<'tcx, ImmTy<'tcx, M::PointerTag>> {
 let (val, _overflow, ty) = self.overflowing_binary_op(bin_op, left, right)?;
 Ok(ImmTy::from_scalar(val, self.layout_of(ty)?))
@@ -359,7 +359,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 pub fn overflowing_unary_op(
 &self,
 un_op: mir::UnOp,
-val: ImmTy<'tcx, M::PointerTag>,
+val: &ImmTy<'tcx, M::PointerTag>,
 ) -> InterpResult<'tcx, (Scalar<M::PointerTag>, bool, Ty<'tcx>)> {
 use rustc_middle::mir::UnOp::*;

@@ -409,7 +409,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 pub fn unary_op(
 &self,
 un_op: mir::UnOp,
-val: ImmTy<'tcx, M::PointerTag>,
+val: &ImmTy<'tcx, M::PointerTag>,
 ) -> InterpResult<'tcx, ImmTy<'tcx, M::PointerTag>> {
 let (val, _overflow, ty) = self.overflowing_unary_op(un_op, val)?;
 Ok(ImmTy::from_scalar(val, self.layout_of(ty)?))
@@ -248,10 +248,10 @@ impl<'tcx, Tag: Debug + Copy> OpTy<'tcx, Tag> {
 /// Note: do not call `as_ref` on the resulting place. This function should only be used to
 /// read from the resulting mplace, not to get its address back.
 pub fn try_as_mplace(
-self,
+&self,
 cx: &impl HasDataLayout,
 ) -> Result<MPlaceTy<'tcx, Tag>, ImmTy<'tcx, Tag>> {
-match *self {
+match **self {
 Operand::Indirect(mplace) => Ok(MPlaceTy { mplace, layout: self.layout }),
 Operand::Immediate(_) if self.layout.is_zst() => {
 Ok(MPlaceTy::dangling(self.layout, cx))
@@ -263,7 +263,7 @@ impl<'tcx, Tag: Debug + Copy> OpTy<'tcx, Tag> {
 #[inline(always)]
 /// Note: do not call `as_ref` on the resulting place. This function should only be used to
 /// read from the resulting mplace, not to get its address back.
-pub fn assert_mem_place(self, cx: &impl HasDataLayout) -> MPlaceTy<'tcx, Tag> {
+pub fn assert_mem_place(&self, cx: &impl HasDataLayout) -> MPlaceTy<'tcx, Tag> {
 self.try_as_mplace(cx).unwrap()
 }
 }
@@ -331,7 +331,7 @@ where
 /// will always be a MemPlace. Lives in `place.rs` because it creates a place.
 pub fn deref_operand(
 &self,
-src: OpTy<'tcx, M::PointerTag>,
+src: &OpTy<'tcx, M::PointerTag>,
 ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
 let val = self.read_immediate(src)?;
 trace!("deref to {} on {:?}", val.layout.ty, *val);
@@ -551,12 +551,12 @@ where
 Ok(match proj_elem {
 Field(field, _) => self.mplace_field(base, field.index())?,
 Downcast(_, variant) => self.mplace_downcast(base, variant)?,
-Deref => self.deref_operand(base.into())?,
+Deref => self.deref_operand(&base.into())?,

 Index(local) => {
 let layout = self.layout_of(self.tcx.types.usize)?;
 let n = self.access_local(self.frame(), local, Some(layout))?;
-let n = self.read_scalar(n)?;
+let n = self.read_scalar(&n)?;
 let n = u64::try_from(
 self.force_bits(n.check_init()?, self.tcx.data_layout.pointer_size)?,
 )
@@ -637,7 +637,7 @@ where
 Ok(match proj_elem {
 Field(field, _) => self.place_field(base, field.index())?,
 Downcast(_, variant) => self.place_downcast(base, variant)?,
-Deref => self.deref_operand(self.place_to_op(base)?)?.into(),
+Deref => self.deref_operand(&self.place_to_op(base)?)?.into(),
 // For the other variants, we have to force an allocation.
 // This matches `operand_projection`.
 Subslice { .. } | ConstantIndex { .. } | Index(_) => {
@@ -697,7 +697,7 @@ where

 if M::enforce_validity(self) {
 // Data got changed, better make sure it matches the type!
-self.validate_operand(self.place_to_op(dest)?)?;
+self.validate_operand(&self.place_to_op(dest)?)?;
 }

 Ok(())
@@ -714,7 +714,7 @@ where

 if M::enforce_validity(self) {
 // Data got changed, better make sure it matches the type!
-self.validate_operand(dest.into())?;
+self.validate_operand(&dest.into())?;
 }

 Ok(())
@@ -843,14 +843,14 @@ where
 #[inline(always)]
 pub fn copy_op(
 &mut self,
-src: OpTy<'tcx, M::PointerTag>,
+src: &OpTy<'tcx, M::PointerTag>,
 dest: PlaceTy<'tcx, M::PointerTag>,
 ) -> InterpResult<'tcx> {
 self.copy_op_no_validate(src, dest)?;

 if M::enforce_validity(self) {
 // Data got changed, better make sure it matches the type!
-self.validate_operand(self.place_to_op(dest)?)?;
+self.validate_operand(&self.place_to_op(dest)?)?;
 }

 Ok(())
@@ -862,7 +862,7 @@ where
 /// right type.
 fn copy_op_no_validate(
 &mut self,
-src: OpTy<'tcx, M::PointerTag>,
+src: &OpTy<'tcx, M::PointerTag>,
 dest: PlaceTy<'tcx, M::PointerTag>,
 ) -> InterpResult<'tcx> {
 // We do NOT compare the types for equality, because well-typed code can
@@ -921,7 +921,7 @@ where
 /// have the same size.
 pub fn copy_op_transmute(
 &mut self,
-src: OpTy<'tcx, M::PointerTag>,
+src: &OpTy<'tcx, M::PointerTag>,
 dest: PlaceTy<'tcx, M::PointerTag>,
 ) -> InterpResult<'tcx> {
 if mir_assign_valid_types(*self.tcx, self.param_env, src.layout, dest.layout) {
@@ -964,7 +964,7 @@ where

 if M::enforce_validity(self) {
 // Data got changed, better make sure it matches the type!
-self.validate_operand(dest.into())?;
+self.validate_operand(&dest.into())?;
 }

 Ok(())
@@ -1118,8 +1118,8 @@ where
 ImmTy::from_uint(variant_index_relative, tag_layout);
 let tag_val = self.binary_op(
 mir::BinOp::Add,
-variant_index_relative_val,
-niche_start_val,
+&variant_index_relative_val,
+&niche_start_val,
 )?;
 // Write result.
 let niche_dest = self.place_field(dest, tag_field)?;
@@ -162,29 +162,29 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 Use(ref operand) => {
 // Avoid recomputing the layout
 let op = self.eval_operand(operand, Some(dest.layout))?;
-self.copy_op(op, dest)?;
+self.copy_op(&op, dest)?;
 }

 BinaryOp(bin_op, ref left, ref right) => {
 let layout = binop_left_homogeneous(bin_op).then_some(dest.layout);
-let left = self.read_immediate(self.eval_operand(left, layout)?)?;
+let left = self.read_immediate(&self.eval_operand(left, layout)?)?;
 let layout = binop_right_homogeneous(bin_op).then_some(left.layout);
-let right = self.read_immediate(self.eval_operand(right, layout)?)?;
-self.binop_ignore_overflow(bin_op, left, right, dest)?;
+let right = self.read_immediate(&self.eval_operand(right, layout)?)?;
+self.binop_ignore_overflow(bin_op, &left, &right, dest)?;
 }

 CheckedBinaryOp(bin_op, ref left, ref right) => {
 // Due to the extra boolean in the result, we can never reuse the `dest.layout`.
-let left = self.read_immediate(self.eval_operand(left, None)?)?;
+let left = self.read_immediate(&self.eval_operand(left, None)?)?;
 let layout = binop_right_homogeneous(bin_op).then_some(left.layout);
-let right = self.read_immediate(self.eval_operand(right, layout)?)?;
-self.binop_with_overflow(bin_op, left, right, dest)?;
+let right = self.read_immediate(&self.eval_operand(right, layout)?)?;
+self.binop_with_overflow(bin_op, &left, &right, dest)?;
 }

 UnaryOp(un_op, ref operand) => {
 // The operand always has the same type as the result.
-let val = self.read_immediate(self.eval_operand(operand, Some(dest.layout))?)?;
-let val = self.unary_op(un_op, val)?;
+let val = self.read_immediate(&self.eval_operand(operand, Some(dest.layout))?)?;
+let val = self.unary_op(un_op, &val)?;
 assert_eq!(val.layout, dest.layout, "layout mismatch for result of {:?}", un_op);
 self.write_immediate(*val, dest)?;
 }
@@ -208,7 +208,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 if !op.layout.is_zst() {
 let field_index = active_field_index.unwrap_or(i);
 let field_dest = self.place_field(dest, field_index)?;
-self.copy_op(op, field_dest)?;
+self.copy_op(&op, field_dest)?;
 }
 }
 }
@@ -221,7 +221,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 if let Some(first_ptr) = self.check_mplace_access(dest, None)? {
 // Write the first.
 let first = self.mplace_field(dest, 0)?;
-self.copy_op(op, first.into())?;
+self.copy_op(&op, first.into())?;

 if length > 1 {
 let elem_size = first.layout.size;
@@ -278,12 +278,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 Cast(cast_kind, ref operand, cast_ty) => {
 let src = self.eval_operand(operand, None)?;
 let cast_ty = self.subst_from_current_frame_and_normalize_erasing_regions(cast_ty);
-self.cast(src, cast_kind, cast_ty, dest)?;
+self.cast(&src, cast_kind, cast_ty, dest)?;
 }

 Discriminant(place) => {
 let op = self.eval_place_to_op(place, None)?;
-let discr_val = self.read_discriminant(op)?.0;
+let discr_val = self.read_discriminant(&op)?.0;
 self.write_scalar(discr_val, dest)?;
 }
 }
@@ -25,7 +25,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 Goto { target } => self.go_to_block(target),

 SwitchInt { ref discr, ref targets, switch_ty } => {
-let discr = self.read_immediate(self.eval_operand(discr, None)?)?;
+let discr = self.read_immediate(&self.eval_operand(discr, None)?)?;
 trace!("SwitchInt({:?})", *discr);
 assert_eq!(discr.layout.ty, switch_ty);

@@ -38,8 +38,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 let res = self
 .overflowing_binary_op(
 mir::BinOp::Eq,
-discr,
-ImmTy::from_uint(const_int, discr.layout),
+&discr,
+&ImmTy::from_uint(const_int, discr.layout),
 )?
 .0;
 if res.to_bool()? {
@@ -58,7 +58,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 let (fn_val, abi) = match *func.layout.ty.kind() {
 ty::FnPtr(sig) => {
 let caller_abi = sig.abi();
-let fn_ptr = self.read_scalar(func)?.check_init()?;
+let fn_ptr = self.read_scalar(&func)?.check_init()?;
 let fn_val = self.memory.get_fn(fn_ptr)?;
 (fn_val, caller_abi)
 }
@@ -101,7 +101,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {

 Assert { ref cond, expected, ref msg, target, cleanup } => {
 let cond_val =
-self.read_immediate(self.eval_operand(cond, None)?)?.to_scalar()?.to_bool()?;
+self.read_immediate(&self.eval_operand(cond, None)?)?.to_scalar()?.to_bool()?;
 if expected == cond_val {
 self.go_to_block(target);
 } else {
@@ -202,7 +202,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 )
 }
 // We allow some transmutes here
-self.copy_op_transmute(caller_arg, callee_arg)
+self.copy_op_transmute(&caller_arg, callee_arg)
 }

 /// Call this function -- pushing the stack frame and initializing the arguments.
@@ -314,7 +314,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 let caller_args: Cow<'_, [OpTy<'tcx, M::PointerTag>]> =
 if caller_abi == Abi::RustCall && !args.is_empty() {
 // Untuple
-let (&untuple_arg, args) = args.split_last().unwrap();
+let (untuple_arg, args) = args.split_last().unwrap();
 trace!("eval_fn_call: Will pass last argument by untupling");
 Cow::from(
 args.iter()
@@ -397,7 +397,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 let receiver_place = match args[0].layout.ty.builtin_deref(true) {
 Some(_) => {
 // Built-in pointer.
-self.deref_operand(args[0])?
+self.deref_operand(&args[0])?
 }
 None => {
 // Unsized self.
@@ -375,7 +375,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
 /// Check a reference or `Box`.
 fn check_safe_pointer(
 &mut self,
-value: OpTy<'tcx, M::PointerTag>,
+value: &OpTy<'tcx, M::PointerTag>,
 kind: &str,
 ) -> InterpResult<'tcx> {
 let value = try_validation!(
@@ -491,7 +491,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '

 fn read_scalar(
 &self,
-op: OpTy<'tcx, M::PointerTag>,
+op: &OpTy<'tcx, M::PointerTag>,
 ) -> InterpResult<'tcx, ScalarMaybeUninit<M::PointerTag>> {
 Ok(try_validation!(
 self.ecx.read_scalar(op),
@@ -504,7 +504,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
 /// at that type. Return `true` if the type is indeed primitive.
 fn try_visit_primitive(
 &mut self,
-value: OpTy<'tcx, M::PointerTag>,
+value: &OpTy<'tcx, M::PointerTag>,
 ) -> InterpResult<'tcx, bool> {
 // Go over all the primitive types
 let ty = value.layout.ty;
@@ -552,7 +552,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
 // actually enforce the strict rules for raw pointers (mostly because
 // that lets us re-use `ref_to_mplace`).
 let place = try_validation!(
-self.ecx.read_immediate(value).and_then(|i| self.ecx.ref_to_mplace(i)),
+self.ecx.read_immediate(value).and_then(|ref i| self.ecx.ref_to_mplace(i)),
 self.path,
 err_ub!(InvalidUninitBytes(None)) => { "uninitialized raw pointer" },
 err_unsup!(ReadPointerAsBytes) => { "part of a pointer" } expected { "a proper pointer or integer value" },
@@ -631,7 +631,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '

 fn visit_scalar(
 &mut self,
-op: OpTy<'tcx, M::PointerTag>,
+op: &OpTy<'tcx, M::PointerTag>,
 scalar_layout: &Scalar,
 ) -> InterpResult<'tcx> {
 let value = self.read_scalar(op)?;
@@ -705,7 +705,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>

 fn read_discriminant(
 &mut self,
-op: OpTy<'tcx, M::PointerTag>,
+op: &OpTy<'tcx, M::PointerTag>,
 ) -> InterpResult<'tcx, VariantIdx> {
 self.with_elem(PathElem::EnumTag, move |this| {
 Ok(try_validation!(
@@ -725,9 +725,9 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
 #[inline]
 fn visit_field(
 &mut self,
-old_op: OpTy<'tcx, M::PointerTag>,
+old_op: &OpTy<'tcx, M::PointerTag>,
 field: usize,
-new_op: OpTy<'tcx, M::PointerTag>,
+new_op: &OpTy<'tcx, M::PointerTag>,
 ) -> InterpResult<'tcx> {
 let elem = self.aggregate_field_path_elem(old_op.layout, field);
 self.with_elem(elem, move |this| this.visit_value(new_op))
@@ -736,9 +736,9 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
 #[inline]
 fn visit_variant(
 &mut self,
-old_op: OpTy<'tcx, M::PointerTag>,
+old_op: &OpTy<'tcx, M::PointerTag>,
 variant_id: VariantIdx,
-new_op: OpTy<'tcx, M::PointerTag>,
+new_op: &OpTy<'tcx, M::PointerTag>,
 ) -> InterpResult<'tcx> {
 let name = match old_op.layout.ty.kind() {
 ty::Adt(adt, _) => PathElem::Variant(adt.variants[variant_id].ident.name),
@@ -752,14 +752,14 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
 #[inline(always)]
 fn visit_union(
 &mut self,
-_op: OpTy<'tcx, M::PointerTag>,
+_op: &OpTy<'tcx, M::PointerTag>,
 _fields: NonZeroUsize,
 ) -> InterpResult<'tcx> {
 Ok(())
 }

 #[inline]
-fn visit_value(&mut self, op: OpTy<'tcx, M::PointerTag>) -> InterpResult<'tcx> {
+fn visit_value(&mut self, op: &OpTy<'tcx, M::PointerTag>) -> InterpResult<'tcx> {
 trace!("visit_value: {:?}, {:?}", *op, op.layout);

 // Check primitive types -- the leafs of our recursive descend.
@@ -816,7 +816,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>

 fn visit_aggregate(
 &mut self,
-op: OpTy<'tcx, M::PointerTag>,
+op: &OpTy<'tcx, M::PointerTag>,
 fields: impl Iterator<Item = InterpResult<'tcx, Self::V>>,
 ) -> InterpResult<'tcx> {
 match op.layout.ty.kind() {
@@ -918,7 +918,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
 impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 fn validate_operand_internal(
 &self,
-op: OpTy<'tcx, M::PointerTag>,
+op: &OpTy<'tcx, M::PointerTag>,
 path: Vec<PathElem>,
 ref_tracking: Option<&mut RefTracking<MPlaceTy<'tcx, M::PointerTag>, Vec<PathElem>>>,
 ctfe_mode: Option<CtfeValidationMode>,
@@ -929,10 +929,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 let mut visitor = ValidityVisitor { path, ref_tracking, ctfe_mode, ecx: self };

 // Try to cast to ptr *once* instead of all the time.
-let op = self.force_op_ptr(op).unwrap_or(op);
+let op = self.force_op_ptr(&op).unwrap_or(*op);

 // Run it.
-match visitor.visit_value(op) {
+match visitor.visit_value(&op) {
 Ok(()) => Ok(()),
 // Pass through validation failures.
 Err(err) if matches!(err.kind, err_ub!(ValidationFailure { .. })) => Err(err),
@@ -960,7 +960,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 #[inline(always)]
 pub fn const_validate_operand(
 &self,
-op: OpTy<'tcx, M::PointerTag>,
+op: &OpTy<'tcx, M::PointerTag>,
 path: Vec<PathElem>,
 ref_tracking: &mut RefTracking<MPlaceTy<'tcx, M::PointerTag>, Vec<PathElem>>,
 ctfe_mode: CtfeValidationMode,
@@ -972,7 +972,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 /// `op` is assumed to cover valid memory if it is an indirect operand.
 /// It will error if the bits at the destination do not match the ones described by the layout.
 #[inline(always)]
-pub fn validate_operand(&self, op: OpTy<'tcx, M::PointerTag>) -> InterpResult<'tcx> {
+pub fn validate_operand(&self, op: &OpTy<'tcx, M::PointerTag>) -> InterpResult<'tcx> {
 self.validate_operand_internal(op, vec![], None, None)
 }
 }
@@ -18,20 +18,20 @@ pub trait Value<'mir, 'tcx, M: Machine<'mir, 'tcx>>: Copy {
 fn layout(&self) -> TyAndLayout<'tcx>;

 /// Makes this into an `OpTy`.
-fn to_op(self, ecx: &InterpCx<'mir, 'tcx, M>) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>>;
+fn to_op(&self, ecx: &InterpCx<'mir, 'tcx, M>) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>>;

 /// Creates this from an `MPlaceTy`.
 fn from_mem_place(mplace: MPlaceTy<'tcx, M::PointerTag>) -> Self;

 /// Projects to the given enum variant.
 fn project_downcast(
-self,
+&self,
 ecx: &InterpCx<'mir, 'tcx, M>,
 variant: VariantIdx,
 ) -> InterpResult<'tcx, Self>;

 /// Projects to the n-th field.
-fn project_field(self, ecx: &InterpCx<'mir, 'tcx, M>, field: usize)
+fn project_field(&self, ecx: &InterpCx<'mir, 'tcx, M>, field: usize)
 -> InterpResult<'tcx, Self>;
 }

@@ -45,10 +45,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> Value<'mir, 'tcx, M> for OpTy<'tc

 #[inline(always)]
 fn to_op(
-self,
+&self,
 _ecx: &InterpCx<'mir, 'tcx, M>,
 ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
-Ok(self)
+Ok(*self)
 }

 #[inline(always)]
@@ -58,7 +58,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> Value<'mir, 'tcx, M> for OpTy<'tc

 #[inline(always)]
 fn project_downcast(
-self,
+&self,
 ecx: &InterpCx<'mir, 'tcx, M>,
 variant: VariantIdx,
 ) -> InterpResult<'tcx, Self> {
@@ -67,7 +67,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> Value<'mir, 'tcx, M> for OpTy<'tc

 #[inline(always)]
 fn project_field(
-self,
+&self,
 ecx: &InterpCx<'mir, 'tcx, M>,
 field: usize,
 ) -> InterpResult<'tcx, Self> {
@@ -85,10 +85,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> Value<'mir, 'tcx, M>

 #[inline(always)]
 fn to_op(
-self,
+&self,
 _ecx: &InterpCx<'mir, 'tcx, M>,
 ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
-Ok(self.into())
+Ok((*self).into())
 }

 #[inline(always)]
@@ -98,20 +98,20 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> Value<'mir, 'tcx, M>

 #[inline(always)]
 fn project_downcast(
-self,
+&self,
 ecx: &InterpCx<'mir, 'tcx, M>,
 variant: VariantIdx,
 ) -> InterpResult<'tcx, Self> {
-ecx.mplace_downcast(self, variant)
+ecx.mplace_downcast(*self, variant)
 }

 #[inline(always)]
 fn project_field(
-self,
+&self,
 ecx: &InterpCx<'mir, 'tcx, M>,
 field: usize,
 ) -> InterpResult<'tcx, Self> {
-ecx.mplace_field(self, field)
+ecx.mplace_field(*self, field)
 }
 }

@@ -129,7 +129,7 @@ macro_rules! make_value_visitor {
 #[inline(always)]
 fn read_discriminant(
 &mut self,
-op: OpTy<'tcx, M::PointerTag>,
+op: &OpTy<'tcx, M::PointerTag>,
 ) -> InterpResult<'tcx, VariantIdx> {
 Ok(self.ecx().read_discriminant(op)?.1)
 }
@@ -137,13 +137,13 @@ macro_rules! make_value_visitor {
 // Recursive actions, ready to be overloaded.
 /// Visits the given value, dispatching as appropriate to more specialized visitors.
 #[inline(always)]
-fn visit_value(&mut self, v: Self::V) -> InterpResult<'tcx>
+fn visit_value(&mut self, v: &Self::V) -> InterpResult<'tcx>
 {
 self.walk_value(v)
 }
 /// Visits the given value as a union. No automatic recursion can happen here.
 #[inline(always)]
-fn visit_union(&mut self, _v: Self::V, _fields: NonZeroUsize) -> InterpResult<'tcx>
+fn visit_union(&mut self, _v: &Self::V, _fields: NonZeroUsize) -> InterpResult<'tcx>
 {
 Ok(())
 }
@@ -153,7 +153,7 @@ macro_rules! make_value_visitor {
 #[inline(always)]
 fn visit_aggregate(
 &mut self,
-v: Self::V,
+v: &Self::V,
 fields: impl Iterator<Item=InterpResult<'tcx, Self::V>>,
 ) -> InterpResult<'tcx> {
 self.walk_aggregate(v, fields)
@@ -167,9 +167,9 @@ macro_rules! make_value_visitor {
 #[inline(always)]
 fn visit_field(
 &mut self,
-_old_val: Self::V,
+_old_val: &Self::V,
 _field: usize,
-new_val: Self::V,
+new_val: &Self::V,
 ) -> InterpResult<'tcx> {
 self.visit_value(new_val)
 }
@@ -179,9 +179,9 @@ macro_rules! make_value_visitor {
 #[inline(always)]
 fn visit_variant(
 &mut self,
-_old_val: Self::V,
+_old_val: &Self::V,
 _variant: VariantIdx,
-new_val: Self::V,
+new_val: &Self::V,
 ) -> InterpResult<'tcx> {
 self.visit_value(new_val)
 }
@@ -189,16 +189,16 @@ macro_rules! make_value_visitor {
 // Default recursors. Not meant to be overloaded.
 fn walk_aggregate(
 &mut self,
-v: Self::V,
+v: &Self::V,
 fields: impl Iterator<Item=InterpResult<'tcx, Self::V>>,
 ) -> InterpResult<'tcx> {
 // Now iterate over it.
 for (idx, field_val) in fields.enumerate() {
-self.visit_field(v, idx, field_val?)?;
+self.visit_field(v, idx, &field_val?)?;
 }
 Ok(())
 }
-fn walk_value(&mut self, v: Self::V) -> InterpResult<'tcx>
+fn walk_value(&mut self, v: &Self::V) -> InterpResult<'tcx>
 {
 trace!("walk_value: type: {}", v.layout().ty);

@@ -211,7 +211,7 @@ macro_rules! make_value_visitor {
 let inner = self.ecx().unpack_dyn_trait(dest)?.1;
 trace!("walk_value: dyn object layout: {:#?}", inner.layout);
 // recurse with the inner type
-return self.visit_field(v, 0, Value::from_mem_place(inner));
+return self.visit_field(&v, 0, &Value::from_mem_place(inner));
 },
 // Slices do not need special handling here: they have `Array` field
 // placement with length 0, so we enter the `Array` case below which
@@ -254,11 +254,11 @@ macro_rules! make_value_visitor {
 // with *its* fields.
 Variants::Multiple { .. } => {
 let op = v.to_op(self.ecx())?;
-let idx = self.read_discriminant(op)?;
+let idx = self.read_discriminant(&op)?;
 let inner = v.project_downcast(self.ecx(), idx)?;
 trace!("walk_value: variant layout: {:#?}", inner.layout());
 // recurse with the inner type
-self.visit_variant(v, idx, inner)
+self.visit_variant(v, idx, &inner)
 }
 // For single-variant layouts, we already did anything there is to do.
 Variants::Single { .. } => Ok(())
@@ -228,8 +228,8 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for ConstPropMachine<'mir, 'tcx>
 fn binary_ptr_op(
 _ecx: &InterpCx<'mir, 'tcx, Self>,
 _bin_op: BinOp,
-_left: ImmTy<'tcx>,
-_right: ImmTy<'tcx>,
+_left: &ImmTy<'tcx>,
+_right: &ImmTy<'tcx>,
 ) -> InterpResult<'tcx, (Scalar, bool, Ty<'tcx>)> {
 // We can't do this because aliasing of memory can differ between const eval and llvm
 throw_machine_stop_str!("pointer arithmetic or comparisons aren't supported in ConstProp")
@@ -426,7 +426,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {

 // Try to read the local as an immediate so that if it is representable as a scalar, we can
 // handle it as such, but otherwise, just return the value as is.
-Some(match self.ecx.try_read_immediate(op) {
+Some(match self.ecx.try_read_immediate(&op) {
 Ok(Ok(imm)) => imm.into(),
 _ => op,
 })
@@ -548,8 +548,8 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
 source_info: SourceInfo,
 ) -> Option<()> {
 if let (val, true) = self.use_ecx(|this| {
-let val = this.ecx.read_immediate(this.ecx.eval_operand(arg, None)?)?;
-let (_res, overflow, _ty) = this.ecx.overflowing_unary_op(op, val)?;
+let val = this.ecx.read_immediate(&this.ecx.eval_operand(arg, None)?)?;
+let (_res, overflow, _ty) = this.ecx.overflowing_unary_op(op, &val)?;
 Ok((val, overflow))
 })? {
 // `AssertKind` only has an `OverflowNeg` variant, so make sure that is
@@ -573,8 +573,8 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
 right: &Operand<'tcx>,
 source_info: SourceInfo,
 ) -> Option<()> {
-let r = self.use_ecx(|this| this.ecx.read_immediate(this.ecx.eval_operand(right, None)?));
-let l = self.use_ecx(|this| this.ecx.read_immediate(this.ecx.eval_operand(left, None)?));
+let r = self.use_ecx(|this| this.ecx.read_immediate(&this.ecx.eval_operand(right, None)?));
+let l = self.use_ecx(|this| this.ecx.read_immediate(&this.ecx.eval_operand(left, None)?));
 // Check for exceeding shifts *even if* we cannot evaluate the LHS.
 if op == BinOp::Shr || op == BinOp::Shl {
 let r = r?;
@@ -609,7 +609,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
 }
 }

-if let (Some(l), Some(r)) = (l, r) {
+if let (Some(l), Some(r)) = (&l, &r) {
 // The remaining operators are handled through `overflowing_binary_op`.
 if self.use_ecx(|this| {
 let (_res, overflow, _ty) = this.ecx.overflowing_binary_op(op, l, r)?;
@@ -630,7 +630,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
 match *operand {
 Operand::Copy(l) | Operand::Move(l) => {
 if let Some(value) = self.get_const(l) {
-if self.should_const_prop(value) {
+if self.should_const_prop(&value) {
 // FIXME(felix91gr): this code only handles `Scalar` cases.
 // For now, we're not handling `ScalarPair` cases because
 // doing so here would require a lot of code duplication.
@@ -745,7 +745,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
 let r = this.ecx.eval_operand(right, None);

 let const_arg = match (l, r) {
-(Ok(x), Err(_)) | (Err(_), Ok(x)) => this.ecx.read_immediate(x)?,
+(Ok(ref x), Err(_)) | (Err(_), Ok(ref x)) => this.ecx.read_immediate(x)?,
 (Err(e), Err(_)) => return Err(e),
 (Ok(_), Ok(_)) => {
 this.ecx.eval_rvalue_into_place(rvalue, place)?;
@@ -809,7 +809,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
 fn replace_with_const(
 &mut self,
 rval: &mut Rvalue<'tcx>,
-value: OpTy<'tcx>,
+value: &OpTy<'tcx>,
 source_info: SourceInfo,
 ) {
 if let Rvalue::Use(Operand::Constant(c)) = rval {
@@ -902,7 +902,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
 }

 /// Returns `true` if and only if this `op` should be const-propagated into.
-fn should_const_prop(&mut self, op: OpTy<'tcx>) -> bool {
+fn should_const_prop(&mut self, op: &OpTy<'tcx>) -> bool {
 let mir_opt_level = self.tcx.sess.opts.debugging_opts.mir_opt_level;

 if mir_opt_level == 0 {
@@ -913,7 +913,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
 return false;
 }

-match *op {
+match **op {
 interpret::Operand::Immediate(Immediate::Scalar(ScalarMaybeUninit::Scalar(s))) => {
 s.is_bits()
 }
@@ -1094,7 +1094,7 @@ impl<'mir, 'tcx> MutVisitor<'tcx> for ConstPropagator<'mir, 'tcx> {
 // This will return None if the above `const_prop` invocation only "wrote" a
 // type whose creation requires no write. E.g. a generator whose initial state
 // consists solely of uninitialized memory (so it doesn't capture any locals).
-if let Some(value) = self.get_const(place) {
+if let Some(ref value) = self.get_const(place) {
 if self.should_const_prop(value) {
 trace!("replacing {:?} with {:?}", rval, value);
 self.replace_with_const(rval, value, source_info);
@@ -1177,10 +1177,10 @@ impl<'mir, 'tcx> MutVisitor<'tcx> for ConstPropagator<'mir, 'tcx> {
 self.super_terminator(terminator, location);
 match &mut terminator.kind {
 TerminatorKind::Assert { expected, ref msg, ref mut cond, .. } => {
-if let Some(value) = self.eval_operand(&cond, source_info) {
+if let Some(ref value) = self.eval_operand(&cond, source_info) {
 trace!("assertion on {:?} should be {:?}", value, expected);
 let expected = ScalarMaybeUninit::from(Scalar::from_bool(*expected));
-let value_const = self.ecx.read_scalar(value).unwrap();
+let value_const = self.ecx.read_scalar(&value).unwrap();
 if expected != value_const {
 enum DbgVal<T> {
 Val(T),
@@ -1199,7 +1199,7 @@ impl<'mir, 'tcx> MutVisitor<'tcx> for ConstPropagator<'mir, 'tcx> {
 // triggered the assert on the value of the rhs.
 match self.eval_operand(op, source_info) {
 Some(op) => {
-DbgVal::Val(self.ecx.read_immediate(op).unwrap().to_const_int())
+DbgVal::Val(self.ecx.read_immediate(&op).unwrap().to_const_int())
 }
 None => DbgVal::Underscore,
 }