panic when an interpreter error gets unintentionally discarded

Ralf Jung 2024-09-26 16:48:32 +02:00
parent e9df22f51d
commit 4b8a5bd511
12 changed files with 262 additions and 147 deletions
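
The mechanism behind this commit is a "drop bomb": the interpreter's error type now carries a zero-sized guard whose `Drop` impl panics, so an error that is silently dropped (for example via `.ok()`) aborts loudly unless the caller explicitly defuses it. Below is a minimal, self-contained sketch of that pattern; `Guard`, `ErrorInfo`, `discard`, and `may_fail` are illustrative stand-ins, not the actual rustc types changed in the diffs that follow.

```rust
use std::mem;

/// Zero-sized "drop bomb": panics if dropped while still armed.
#[derive(Debug)]
struct Guard;

impl Drop for Guard {
    fn drop(&mut self) {
        // Avoid a double panic if we are already unwinding for another reason.
        if !std::thread::panicking() {
            panic!("an error got improperly discarded; call `discard()` if this is intentional");
        }
    }
}

/// Stand-in for `InterpErrorInfo`: an error payload plus the armed guard.
#[derive(Debug)]
struct ErrorInfo {
    msg: String,
    guard: Guard,
}

impl ErrorInfo {
    fn new(msg: impl Into<String>) -> Self {
        ErrorInfo { msg: msg.into(), guard: Guard }
    }

    /// Explicitly throw the error away; `mem::forget` defuses the guard.
    fn discard(self) {
        mem::forget(self.guard);
    }
}

fn may_fail(fail: bool) -> Result<u32, ErrorInfo> {
    if fail { Err(ErrorInfo::new("something went wrong")) } else { Ok(42) }
}

fn main() {
    // Fine: the error is consciously discarded.
    let _ = may_fail(true).map_err(ErrorInfo::discard);

    // This line would panic at runtime: `.ok()` silently drops the
    // `ErrorInfo`, and with it the still-armed `Guard`.
    // let _ = may_fail(true).ok();
}
```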

View File

@ -13,9 +13,9 @@ use rustc_target::abi::{Abi, Align, HasDataLayout, Size};
use tracing::{instrument, trace};
use super::{
AllocRef, AllocRefMut, CheckAlignMsg, CtfeProvenance, ImmTy, Immediate, InterpCx, InterpResult,
Machine, MemoryKind, Misalignment, OffsetMode, OpTy, Operand, Pointer, Projectable, Provenance,
Scalar, alloc_range, mir_assign_valid_types,
AllocRef, AllocRefMut, CheckAlignMsg, CtfeProvenance, DiscardInterpError, ImmTy, Immediate,
InterpCx, InterpResult, Machine, MemoryKind, Misalignment, OffsetMode, OpTy, Operand, Pointer,
Projectable, Provenance, Scalar, alloc_range, mir_assign_valid_types,
};
#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
@ -490,10 +490,17 @@ where
// If an access is both OOB and misaligned, we want to see the bounds error.
// However we have to call `check_misalign` first to make the borrow checker happy.
let misalign_err = self.check_misalign(mplace.mplace.misaligned, CheckAlignMsg::BasedOn);
let a = self.get_ptr_alloc_mut(mplace.ptr(), size)?;
match self.get_ptr_alloc_mut(mplace.ptr(), size) {
Ok(a) => {
misalign_err?;
Ok(a)
}
Err(e) => {
misalign_err.discard_interp_err();
Err(e)
}
}
}
/// Turn a local in the current frame into a place.
pub fn local_to_place(

View File

@ -17,8 +17,8 @@ use rustc_hir as hir;
use rustc_middle::bug;
use rustc_middle::mir::interpret::ValidationErrorKind::{self, *};
use rustc_middle::mir::interpret::{
ExpectedKind, InterpError, InvalidMetaKind, Misalignment, PointerKind, Provenance,
UnsupportedOpInfo, ValidationErrorInfo, alloc_range,
ExpectedKind, InterpError, InterpErrorInfo, InvalidMetaKind, Misalignment, PointerKind,
Provenance, UnsupportedOpInfo, ValidationErrorInfo, alloc_range,
};
use rustc_middle::ty::layout::{LayoutCx, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, Ty};
@ -95,16 +95,19 @@ macro_rules! try_validation {
Ok(x) => x,
// We catch the error and turn it into a validation failure. We are okay with
// allocation here as this can only slow down builds that fail anyway.
Err(e) => match e.kind() {
Err(e) => {
let (kind, backtrace) = e.into_parts();
match kind {
$(
$($p)|+ =>
$($p)|+ => {
throw_validation_failure!(
$where,
$kind
)
}
),+,
#[allow(unreachable_patterns)]
_ => Err::<!, _>(e)?,
_ => Err::<!, _>(InterpErrorInfo::from_parts(kind, backtrace))?,
}
}
}
}};
@ -510,7 +513,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
Ub(DanglingIntPointer { addr: i, .. }) => DanglingPtrNoProvenance {
ptr_kind,
// FIXME this says "null pointer" when null but we need translate
pointer: format!("{}", Pointer::<Option<AllocId>>::from_addr_invalid(*i))
pointer: format!("{}", Pointer::<Option<AllocId>>::from_addr_invalid(i))
},
Ub(PointerOutOfBounds { .. }) => DanglingPtrOutOfBounds {
ptr_kind
@ -1231,7 +1234,8 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
Err(err) => {
// For some errors we might be able to provide extra information.
// (This custom logic does not fit the `try_validation!` macro.)
match err.kind() {
let (kind, backtrace) = err.into_parts();
match kind {
Ub(InvalidUninitBytes(Some((_alloc_id, access)))) | Unsup(ReadPointerAsInt(Some((_alloc_id, access)))) => {
// Some byte was uninitialized, determine which
// element that byte belongs to so we can
@ -1242,7 +1246,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
.unwrap();
self.path.push(PathElem::ArrayElem(i));
if matches!(err.kind(), Ub(InvalidUninitBytes(_))) {
if matches!(kind, Ub(InvalidUninitBytes(_))) {
throw_validation_failure!(self.path, Uninit { expected })
} else {
throw_validation_failure!(self.path, PointerAsInt { expected })
@ -1250,7 +1254,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
}
// Propagate upwards (that will also check for unexpected errors).
_ => return Err(err),
_ => return Err(InterpErrorInfo::from_parts(kind, backtrace)),
}
}
}
@ -1282,7 +1286,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
// It's not great to catch errors here, since we can't give a very good path,
// but it's better than ICEing.
Ub(InvalidVTableTrait { vtable_dyn_type, expected_dyn_type }) => {
InvalidMetaWrongTrait { vtable_dyn_type, expected_dyn_type: *expected_dyn_type }
InvalidMetaWrongTrait { vtable_dyn_type, expected_dyn_type }
},
);
}

View File

@ -1,4 +1,5 @@
use rustc_middle::bug;
use rustc_middle::mir::interpret::DiscardInterpError;
use rustc_middle::ty::layout::{
HasTyCtxt, LayoutCx, LayoutError, LayoutOf, TyAndLayout, ValidityRequirement,
};
@ -75,7 +76,8 @@ fn check_validity_requirement_strict<'tcx>(
/*recursive*/ false,
/*reset_provenance_and_padding*/ false,
)
.is_ok())
.discard_interp_err()
.is_some())
}
/// Implements the 'lax' (default) version of the [`check_validity_requirement`] checks; see that

View File

@ -1,7 +1,7 @@
use std::any::Any;
use std::backtrace::Backtrace;
use std::borrow::Cow;
use std::fmt;
use std::{fmt, mem};
use either::Either;
use rustc_ast_ir::Mutability;
@ -104,13 +104,57 @@ rustc_data_structures::static_assert_size!(InterpErrorInfo<'_>, 8);
/// These should always be constructed by calling `.into()` on
/// an `InterpError`. In `rustc_mir::interpret`, we have `throw_err_*`
/// macros for this.
///
/// Interpreter errors must *not* be silently discarded (that will lead to a panic). Instead,
/// explicitly call `discard_interp_err` if this is really the right thing to do. Note that if
/// this happens during const-eval or in Miri, it could lead to a UB error being lost!
#[derive(Debug)]
pub struct InterpErrorInfo<'tcx>(Box<InterpErrorInfoInner<'tcx>>);
/// Calling `.ok()` on an `InterpResult` leads to a panic because of the guard.
/// To still let people opt-in to discarding interpreter errors, we have this extension trait.
pub trait DiscardInterpError {
type Output;
fn discard_interp_err(self) -> Option<Self::Output>;
}
impl<'tcx, T> DiscardInterpError for InterpResult<'tcx, T> {
type Output = T;
fn discard_interp_err(self) -> Option<Self::Output> {
match self {
Ok(v) => Some(v),
Err(e) => {
// Disarm the guard.
mem::forget(e.0.guard);
None
}
}
}
}
#[derive(Debug)]
struct Guard;
impl Drop for Guard {
fn drop(&mut self) {
// We silence the guard if we are already panicking, to avoid double-panics.
if !std::thread::panicking() {
panic!(
"an interpreter error got improperly discarded; use `discard_interp_err()` instead of `ok()` if this is intentional"
);
}
}
}
#[derive(Debug)]
struct InterpErrorInfoInner<'tcx> {
kind: InterpError<'tcx>,
backtrace: InterpErrorBacktrace,
/// This makes us panic on drop. This is used to catch
/// accidentally discarding an interpreter error.
guard: Guard,
}
#[derive(Debug)]
@ -151,15 +195,25 @@ impl InterpErrorBacktrace {
impl<'tcx> InterpErrorInfo<'tcx> {
pub fn into_parts(self) -> (InterpError<'tcx>, InterpErrorBacktrace) {
let InterpErrorInfo(box InterpErrorInfoInner { kind, backtrace }) = self;
let InterpErrorInfo(box InterpErrorInfoInner { kind, backtrace, guard }) = self;
mem::forget(guard); // The error got explicitly discarded right here.
(kind, backtrace)
}
pub fn into_kind(self) -> InterpError<'tcx> {
let InterpErrorInfo(box InterpErrorInfoInner { kind, .. }) = self;
let InterpErrorInfo(box InterpErrorInfoInner { kind, guard, .. }) = self;
mem::forget(guard); // The error got explicitly discarded right here.
kind
}
pub fn discard_interp_err(self) {
mem::forget(self.0.guard); // The error got explicitly discarded right here.
}
pub fn from_parts(kind: InterpError<'tcx>, backtrace: InterpErrorBacktrace) -> Self {
Self(Box::new(InterpErrorInfoInner { kind, backtrace, guard: Guard }))
}
#[inline]
pub fn kind(&self) -> &InterpError<'tcx> {
&self.0.kind
@ -191,6 +245,7 @@ impl<'tcx> From<InterpError<'tcx>> for InterpErrorInfo<'tcx> {
InterpErrorInfo(Box::new(InterpErrorInfoInner {
kind,
backtrace: InterpErrorBacktrace::new(),
guard: Guard,
}))
}
}
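
With these definitions, the only sanctioned ways to get rid of an interpreter error are the explicit ones shown above: discard it via `discard_interp_err`, or take it apart with `into_parts`/`into_kind` and, if it still has to propagate, rebuild it with `from_parts` (the decompose-and-rebuild route is what the reworked `try_validation!` macro earlier in this commit uses). The following is a rough, self-contained sketch of that inspect-and-rethrow flow; `ErrorKind`, `ErrorInfo`, `into_kind`, `from_kind`, and `classify` are stand-ins for illustration only, not the real rustc API.

```rust
use std::mem;

/// Same drop-bomb guard as in the sketch near the top of this page
/// (repeated here so this example compiles on its own).
#[derive(Debug)]
struct Guard;

impl Drop for Guard {
    fn drop(&mut self) {
        if !std::thread::panicking() {
            panic!("an error got improperly discarded");
        }
    }
}

/// Stand-ins for `InterpError` / `InterpErrorInfo`.
#[derive(Debug)]
enum ErrorKind {
    InvalidBool,
    OutOfMemory,
}

#[derive(Debug)]
struct ErrorInfo {
    kind: ErrorKind,
    guard: Guard,
}

impl ErrorInfo {
    fn new(kind: ErrorKind) -> Self {
        ErrorInfo { kind, guard: Guard }
    }

    /// Like `into_kind`/`into_parts`: hands the payload to the caller and
    /// defuses the guard, because the error is now explicitly handled.
    fn into_kind(self) -> ErrorKind {
        mem::forget(self.guard);
        self.kind
    }

    /// Like `from_parts`: re-arms a fresh guard, so the rebuilt error must
    /// again be dealt with by whoever receives it.
    fn from_kind(kind: ErrorKind) -> Self {
        ErrorInfo::new(kind)
    }

    /// Like `discard_interp_err`: consciously drop the error.
    fn discard(self) {
        mem::forget(self.guard);
    }
}

/// Mirrors the reshaped `try_validation!`: turn selected error kinds into a
/// domain-specific result and rebuild-and-propagate everything else.
fn classify(res: Result<u32, ErrorInfo>) -> Result<String, ErrorInfo> {
    match res {
        Ok(v) => Ok(format!("value {v}")),
        Err(e) => match e.into_kind() {
            ErrorKind::InvalidBool => Ok("validation failure: bad bool".to_string()),
            other => Err(ErrorInfo::from_kind(other)),
        },
    }
}

fn main() {
    assert!(classify(Ok(7)).is_ok());
    assert!(classify(Err(ErrorInfo::new(ErrorKind::InvalidBool))).is_ok());
    // The rebuilt error still carries a live guard, so handle it explicitly.
    if let Err(e) = classify(Err(ErrorInfo::new(ErrorKind::OutOfMemory))) {
        e.discard();
    }
}
```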

View File

@ -34,12 +34,12 @@ pub use self::allocation::{
InitChunkIter, alloc_range,
};
pub use self::error::{
BadBytesAccess, CheckAlignMsg, CheckInAllocMsg, ErrorHandled, EvalStaticInitializerRawResult,
EvalToAllocationRawResult, EvalToConstValueResult, EvalToValTreeResult, ExpectedKind,
InterpError, InterpErrorInfo, InterpResult, InvalidMetaKind, InvalidProgramInfo,
MachineStopType, Misalignment, PointerKind, ReportedErrorInfo, ResourceExhaustionInfo,
ScalarSizeMismatch, UndefinedBehaviorInfo, UnsupportedOpInfo, ValidationErrorInfo,
ValidationErrorKind,
BadBytesAccess, CheckAlignMsg, CheckInAllocMsg, DiscardInterpError, ErrorHandled,
EvalStaticInitializerRawResult, EvalToAllocationRawResult, EvalToConstValueResult,
EvalToValTreeResult, ExpectedKind, InterpError, InterpErrorInfo, InterpResult, InvalidMetaKind,
InvalidProgramInfo, MachineStopType, Misalignment, PointerKind, ReportedErrorInfo,
ResourceExhaustionInfo, ScalarSizeMismatch, UndefinedBehaviorInfo, UnsupportedOpInfo,
ValidationErrorInfo, ValidationErrorKind,
};
pub use self::pointer::{CtfeProvenance, Pointer, PointerArithmetic, Provenance};
pub use self::value::Scalar;

View File

@ -3,7 +3,9 @@
//! Currently, this pass only propagates scalar values.
use rustc_const_eval::const_eval::{DummyMachine, throw_machine_stop_str};
use rustc_const_eval::interpret::{ImmTy, Immediate, InterpCx, OpTy, PlaceTy, Projectable};
use rustc_const_eval::interpret::{
DiscardInterpError, ImmTy, Immediate, InterpCx, OpTy, PlaceTy, Projectable,
};
use rustc_data_structures::fx::FxHashMap;
use rustc_hir::def::DefKind;
use rustc_middle::bug;
@ -364,8 +366,10 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> {
}
}
Operand::Constant(box constant) => {
if let Ok(constant) =
self.ecx.eval_mir_constant(&constant.const_, constant.span, None)
if let Some(constant) = self
.ecx
.eval_mir_constant(&constant.const_, constant.span, None)
.discard_interp_err()
{
self.assign_constant(state, place, constant, &[]);
}
@ -387,7 +391,7 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> {
for &(mut proj_elem) in projection {
if let PlaceElem::Index(index) = proj_elem {
if let FlatSet::Elem(index) = state.get(index.into(), &self.map)
&& let Ok(offset) = index.to_target_usize(&self.tcx)
&& let Some(offset) = index.to_target_usize(&self.tcx).discard_interp_err()
&& let Some(min_length) = offset.checked_add(1)
{
proj_elem = PlaceElem::ConstantIndex { offset, min_length, from_end: false };
@ -395,7 +399,8 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> {
return;
}
}
operand = if let Ok(operand) = self.ecx.project(&operand, proj_elem) {
operand =
if let Some(operand) = self.ecx.project(&operand, proj_elem).discard_interp_err() {
operand
} else {
return;
@ -406,24 +411,28 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> {
place,
operand,
&mut |elem, op| match elem {
TrackElem::Field(idx) => self.ecx.project_field(op, idx.as_usize()).ok(),
TrackElem::Variant(idx) => self.ecx.project_downcast(op, idx).ok(),
TrackElem::Field(idx) => {
self.ecx.project_field(op, idx.as_usize()).discard_interp_err()
}
TrackElem::Variant(idx) => self.ecx.project_downcast(op, idx).discard_interp_err(),
TrackElem::Discriminant => {
let variant = self.ecx.read_discriminant(op).ok()?;
let discr_value =
self.ecx.discriminant_for_variant(op.layout.ty, variant).ok()?;
let variant = self.ecx.read_discriminant(op).discard_interp_err()?;
let discr_value = self
.ecx
.discriminant_for_variant(op.layout.ty, variant)
.discard_interp_err()?;
Some(discr_value.into())
}
TrackElem::DerefLen => {
let op: OpTy<'_> = self.ecx.deref_pointer(op).ok()?.into();
let len_usize = op.len(&self.ecx).ok()?;
let op: OpTy<'_> = self.ecx.deref_pointer(op).discard_interp_err()?.into();
let len_usize = op.len(&self.ecx).discard_interp_err()?;
let layout =
self.tcx.layout_of(self.param_env.and(self.tcx.types.usize)).unwrap();
Some(ImmTy::from_uint(len_usize, layout).into())
}
},
&mut |place, op| {
if let Ok(imm) = self.ecx.read_immediate_raw(op)
if let Some(imm) = self.ecx.read_immediate_raw(op).discard_interp_err()
&& let Some(imm) = imm.right()
{
let elem = self.wrap_immediate(*imm);
@ -447,11 +456,11 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> {
(FlatSet::Bottom, _) | (_, FlatSet::Bottom) => (FlatSet::Bottom, FlatSet::Bottom),
// Both sides are known, do the actual computation.
(FlatSet::Elem(left), FlatSet::Elem(right)) => {
match self.ecx.binary_op(op, &left, &right) {
match self.ecx.binary_op(op, &left, &right).discard_interp_err() {
// Ideally this would return an Immediate, since it's sometimes
// a pair and sometimes not. But as a hack we always return a pair
// and just make the 2nd component `Bottom` when it does not exist.
Ok(val) => {
Some(val) => {
if matches!(val.layout.abi, Abi::ScalarPair(..)) {
let (val, overflow) = val.to_scalar_pair();
(FlatSet::Elem(val), FlatSet::Elem(overflow))
@ -470,7 +479,7 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> {
}
let arg_scalar = const_arg.to_scalar();
let Ok(arg_value) = arg_scalar.to_bits(layout.size) else {
let Some(arg_value) = arg_scalar.to_bits(layout.size).discard_interp_err() else {
return (FlatSet::Top, FlatSet::Top);
};
@ -518,8 +527,10 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> {
return None;
}
let enum_ty_layout = self.tcx.layout_of(self.param_env.and(enum_ty)).ok()?;
let discr_value =
self.ecx.discriminant_for_variant(enum_ty_layout.ty, variant_index).ok()?;
let discr_value = self
.ecx
.discriminant_for_variant(enum_ty_layout.ty, variant_index)
.discard_interp_err()?;
Some(discr_value.to_scalar())
}
@ -573,7 +584,7 @@ impl<'a, 'tcx> Collector<'a, 'tcx> {
map: &Map<'tcx>,
) -> Option<Const<'tcx>> {
let ty = place.ty(self.local_decls, self.patch.tcx).ty;
let layout = ecx.layout_of(ty).ok()?;
let layout = ecx.layout_of(ty).discard_interp_err()?;
if layout.is_zst() {
return Some(Const::zero_sized(ty));
@ -595,7 +606,7 @@ impl<'a, 'tcx> Collector<'a, 'tcx> {
.intern_with_temp_alloc(layout, |ecx, dest| {
try_write_constant(ecx, dest, place, ty, state, map)
})
.ok()?;
.discard_interp_err()?;
return Some(Const::Val(ConstValue::Indirect { alloc_id, offset: Size::ZERO }, ty));
}
@ -830,7 +841,7 @@ impl<'tcx> MutVisitor<'tcx> for Patch<'tcx> {
if let PlaceElem::Index(local) = elem {
let offset = self.before_effect.get(&(location, local.into()))?;
let offset = offset.try_to_scalar()?;
let offset = offset.to_target_usize(&self.tcx).ok()?;
let offset = offset.to_target_usize(&self.tcx).discard_interp_err()?;
let min_length = offset.checked_add(1)?;
Some(PlaceElem::ConstantIndex { offset, min_length, from_end: false })
} else {

View File

@ -87,8 +87,8 @@ use std::borrow::Cow;
use either::Either;
use rustc_const_eval::const_eval::DummyMachine;
use rustc_const_eval::interpret::{
ImmTy, Immediate, InterpCx, MemPlaceMeta, MemoryKind, OpTy, Projectable, Scalar,
intern_const_alloc_for_constprop,
DiscardInterpError, ImmTy, Immediate, InterpCx, MemPlaceMeta, MemoryKind, OpTy, Projectable,
Scalar, intern_const_alloc_for_constprop,
};
use rustc_data_structures::fx::FxIndexSet;
use rustc_data_structures::graph::dominators::Dominators;
@ -393,7 +393,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
Repeat(..) => return None,
Constant { ref value, disambiguator: _ } => {
self.ecx.eval_mir_constant(value, DUMMY_SP, None).ok()?
self.ecx.eval_mir_constant(value, DUMMY_SP, None).discard_interp_err()?
}
Aggregate(kind, variant, ref fields) => {
let fields = fields
@ -414,34 +414,39 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
AggregateTy::RawPtr { output_pointer_ty, .. } => output_pointer_ty,
};
let variant = if ty.is_enum() { Some(variant) } else { None };
let ty = self.ecx.layout_of(ty).ok()?;
let ty = self.ecx.layout_of(ty).discard_interp_err()?;
if ty.is_zst() {
ImmTy::uninit(ty).into()
} else if matches!(kind, AggregateTy::RawPtr { .. }) {
// Pointers don't have fields, so don't `project_field` them.
let data = self.ecx.read_pointer(fields[0]).ok()?;
let data = self.ecx.read_pointer(fields[0]).discard_interp_err()?;
let meta = if fields[1].layout.is_zst() {
MemPlaceMeta::None
} else {
MemPlaceMeta::Meta(self.ecx.read_scalar(fields[1]).ok()?)
MemPlaceMeta::Meta(self.ecx.read_scalar(fields[1]).discard_interp_err()?)
};
let ptr_imm = Immediate::new_pointer_with_meta(data, meta, &self.ecx);
ImmTy::from_immediate(ptr_imm, ty).into()
} else if matches!(ty.abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
let dest = self.ecx.allocate(ty, MemoryKind::Stack).ok()?;
let dest = self.ecx.allocate(ty, MemoryKind::Stack).discard_interp_err()?;
let variant_dest = if let Some(variant) = variant {
self.ecx.project_downcast(&dest, variant).ok()?
self.ecx.project_downcast(&dest, variant).discard_interp_err()?
} else {
dest.clone()
};
for (field_index, op) in fields.into_iter().enumerate() {
let field_dest = self.ecx.project_field(&variant_dest, field_index).ok()?;
self.ecx.copy_op(op, &field_dest).ok()?;
let field_dest = self
.ecx
.project_field(&variant_dest, field_index)
.discard_interp_err()?;
self.ecx.copy_op(op, &field_dest).discard_interp_err()?;
}
self.ecx.write_discriminant(variant.unwrap_or(FIRST_VARIANT), &dest).ok()?;
self.ecx
.write_discriminant(variant.unwrap_or(FIRST_VARIANT), &dest)
.discard_interp_err()?;
self.ecx
.alloc_mark_immutable(dest.ptr().provenance.unwrap().alloc_id())
.ok()?;
.discard_interp_err()?;
dest.into()
} else {
return None;
@ -467,7 +472,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
// This should have been replaced by a `ConstantIndex` earlier.
ProjectionElem::Index(_) => return None,
};
self.ecx.project(value, elem).ok()?
self.ecx.project(value, elem).discard_interp_err()?
}
Address { place, kind, provenance: _ } => {
if !place.is_indirect_first_projection() {
@ -475,14 +480,14 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
}
let local = self.locals[place.local]?;
let pointer = self.evaluated[local].as_ref()?;
let mut mplace = self.ecx.deref_pointer(pointer).ok()?;
let mut mplace = self.ecx.deref_pointer(pointer).discard_interp_err()?;
for proj in place.projection.iter().skip(1) {
// We have no call stack to associate a local with a value, so we cannot
// interpret indexing.
if matches!(proj, ProjectionElem::Index(_)) {
return None;
}
mplace = self.ecx.project(&mplace, proj).ok()?;
mplace = self.ecx.project(&mplace, proj).discard_interp_err()?;
}
let pointer = mplace.to_ref(&self.ecx);
let ty = match kind {
@ -494,26 +499,28 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
),
AddressKind::Address(mutbl) => Ty::new_ptr(self.tcx, mplace.layout.ty, mutbl),
};
let layout = self.ecx.layout_of(ty).ok()?;
let layout = self.ecx.layout_of(ty).discard_interp_err()?;
ImmTy::from_immediate(pointer, layout).into()
}
Discriminant(base) => {
let base = self.evaluated[base].as_ref()?;
let variant = self.ecx.read_discriminant(base).ok()?;
let discr_value =
self.ecx.discriminant_for_variant(base.layout.ty, variant).ok()?;
let variant = self.ecx.read_discriminant(base).discard_interp_err()?;
let discr_value = self
.ecx
.discriminant_for_variant(base.layout.ty, variant)
.discard_interp_err()?;
discr_value.into()
}
Len(slice) => {
let slice = self.evaluated[slice].as_ref()?;
let usize_layout = self.ecx.layout_of(self.tcx.types.usize).unwrap();
let len = slice.len(&self.ecx).ok()?;
let len = slice.len(&self.ecx).discard_interp_err()?;
let imm = ImmTy::from_uint(len, usize_layout);
imm.into()
}
NullaryOp(null_op, ty) => {
let layout = self.ecx.layout_of(ty).ok()?;
let layout = self.ecx.layout_of(ty).discard_interp_err()?;
if let NullOp::SizeOf | NullOp::AlignOf = null_op
&& layout.is_unsized()
{
@ -535,36 +542,36 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
}
UnaryOp(un_op, operand) => {
let operand = self.evaluated[operand].as_ref()?;
let operand = self.ecx.read_immediate(operand).ok()?;
let val = self.ecx.unary_op(un_op, &operand).ok()?;
let operand = self.ecx.read_immediate(operand).discard_interp_err()?;
let val = self.ecx.unary_op(un_op, &operand).discard_interp_err()?;
val.into()
}
BinaryOp(bin_op, lhs, rhs) => {
let lhs = self.evaluated[lhs].as_ref()?;
let lhs = self.ecx.read_immediate(lhs).ok()?;
let lhs = self.ecx.read_immediate(lhs).discard_interp_err()?;
let rhs = self.evaluated[rhs].as_ref()?;
let rhs = self.ecx.read_immediate(rhs).ok()?;
let val = self.ecx.binary_op(bin_op, &lhs, &rhs).ok()?;
let rhs = self.ecx.read_immediate(rhs).discard_interp_err()?;
let val = self.ecx.binary_op(bin_op, &lhs, &rhs).discard_interp_err()?;
val.into()
}
Cast { kind, value, from: _, to } => match kind {
CastKind::IntToInt | CastKind::IntToFloat => {
let value = self.evaluated[value].as_ref()?;
let value = self.ecx.read_immediate(value).ok()?;
let to = self.ecx.layout_of(to).ok()?;
let res = self.ecx.int_to_int_or_float(&value, to).ok()?;
let value = self.ecx.read_immediate(value).discard_interp_err()?;
let to = self.ecx.layout_of(to).discard_interp_err()?;
let res = self.ecx.int_to_int_or_float(&value, to).discard_interp_err()?;
res.into()
}
CastKind::FloatToFloat | CastKind::FloatToInt => {
let value = self.evaluated[value].as_ref()?;
let value = self.ecx.read_immediate(value).ok()?;
let to = self.ecx.layout_of(to).ok()?;
let res = self.ecx.float_to_float_or_int(&value, to).ok()?;
let value = self.ecx.read_immediate(value).discard_interp_err()?;
let to = self.ecx.layout_of(to).discard_interp_err()?;
let res = self.ecx.float_to_float_or_int(&value, to).discard_interp_err()?;
res.into()
}
CastKind::Transmute => {
let value = self.evaluated[value].as_ref()?;
let to = self.ecx.layout_of(to).ok()?;
let to = self.ecx.layout_of(to).discard_interp_err()?;
// `offset` for immediates only supports scalar/scalar-pair ABIs,
// so bail out if the target is not one.
if value.as_mplace_or_imm().is_right() {
@ -574,29 +581,29 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
_ => return None,
}
}
value.offset(Size::ZERO, to, &self.ecx).ok()?
value.offset(Size::ZERO, to, &self.ecx).discard_interp_err()?
}
CastKind::PointerCoercion(ty::adjustment::PointerCoercion::Unsize, _) => {
let src = self.evaluated[value].as_ref()?;
let to = self.ecx.layout_of(to).ok()?;
let dest = self.ecx.allocate(to, MemoryKind::Stack).ok()?;
self.ecx.unsize_into(src, to, &dest.clone().into()).ok()?;
let to = self.ecx.layout_of(to).discard_interp_err()?;
let dest = self.ecx.allocate(to, MemoryKind::Stack).discard_interp_err()?;
self.ecx.unsize_into(src, to, &dest.clone().into()).discard_interp_err()?;
self.ecx
.alloc_mark_immutable(dest.ptr().provenance.unwrap().alloc_id())
.ok()?;
.discard_interp_err()?;
dest.into()
}
CastKind::FnPtrToPtr | CastKind::PtrToPtr => {
let src = self.evaluated[value].as_ref()?;
let src = self.ecx.read_immediate(src).ok()?;
let to = self.ecx.layout_of(to).ok()?;
let ret = self.ecx.ptr_to_ptr(&src, to).ok()?;
let src = self.ecx.read_immediate(src).discard_interp_err()?;
let to = self.ecx.layout_of(to).discard_interp_err()?;
let ret = self.ecx.ptr_to_ptr(&src, to).discard_interp_err()?;
ret.into()
}
CastKind::PointerCoercion(ty::adjustment::PointerCoercion::UnsafeFnPointer, _) => {
let src = self.evaluated[value].as_ref()?;
let src = self.ecx.read_immediate(src).ok()?;
let to = self.ecx.layout_of(to).ok()?;
let src = self.ecx.read_immediate(src).discard_interp_err()?;
let to = self.ecx.layout_of(to).discard_interp_err()?;
ImmTy::from_immediate(*src, to).into()
}
_ => return None,
@ -708,7 +715,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
&& let Some(idx) = self.locals[idx_local]
{
if let Some(offset) = self.evaluated[idx].as_ref()
&& let Ok(offset) = self.ecx.read_target_usize(offset)
&& let Some(offset) = self.ecx.read_target_usize(offset).discard_interp_err()
&& let Some(min_length) = offset.checked_add(1)
{
projection.to_mut()[i] =
@ -868,7 +875,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
&& let DefKind::Enum = self.tcx.def_kind(enum_did)
{
let enum_ty = self.tcx.type_of(enum_did).instantiate(self.tcx, enum_args);
let discr = self.ecx.discriminant_for_variant(enum_ty, variant).ok()?;
let discr = self.ecx.discriminant_for_variant(enum_ty, variant).discard_interp_err()?;
return Some(self.insert_scalar(discr.to_scalar(), discr.layout.ty));
}
@ -1218,13 +1225,13 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
return None;
}
let layout = self.ecx.layout_of(lhs_ty).ok()?;
let layout = self.ecx.layout_of(lhs_ty).discard_interp_err()?;
let as_bits = |value| {
let constant = self.evaluated[value].as_ref()?;
if layout.abi.is_scalar() {
let scalar = self.ecx.read_scalar(constant).ok()?;
scalar.to_bits(constant.layout.size).ok()
let scalar = self.ecx.read_scalar(constant).discard_interp_err()?;
scalar.to_bits(constant.layout.size).discard_interp_err()
} else {
// `constant` is a wide pointer. Do not evaluate to bits.
None
@ -1484,7 +1491,7 @@ fn op_to_prop_const<'tcx>(
// If this constant has scalar ABI, return it as a `ConstValue::Scalar`.
if let Abi::Scalar(abi::Scalar::Initialized { .. }) = op.layout.abi
&& let Ok(scalar) = ecx.read_scalar(op)
&& let Some(scalar) = ecx.read_scalar(op).discard_interp_err()
{
if !scalar.try_to_scalar_int().is_ok() {
// Check that we do not leak a pointer.
@ -1498,12 +1505,12 @@ fn op_to_prop_const<'tcx>(
// If this constant is already represented as an `Allocation`,
// try putting it into global memory to return it.
if let Either::Left(mplace) = op.as_mplace_or_imm() {
let (size, _align) = ecx.size_and_align_of_mplace(&mplace).ok()??;
let (size, _align) = ecx.size_and_align_of_mplace(&mplace).discard_interp_err()??;
// Do not try interning a value that contains provenance.
// Due to https://github.com/rust-lang/rust/issues/79738, doing so could lead to bugs.
// FIXME: remove this hack once that issue is fixed.
let alloc_ref = ecx.get_ptr_alloc(mplace.ptr(), size).ok()??;
let alloc_ref = ecx.get_ptr_alloc(mplace.ptr(), size).discard_interp_err()??;
if alloc_ref.has_provenance() {
return None;
}
@ -1511,7 +1518,7 @@ fn op_to_prop_const<'tcx>(
let pointer = mplace.ptr().into_pointer_or_addr().ok()?;
let (prov, offset) = pointer.into_parts();
let alloc_id = prov.alloc_id();
intern_const_alloc_for_constprop(ecx, alloc_id).ok()?;
intern_const_alloc_for_constprop(ecx, alloc_id).discard_interp_err()?;
// `alloc_id` may point to a static. Codegen will choke on an `Indirect` with anything
// by `GlobalAlloc::Memory`, so do fall through to copying if needed.
@ -1526,7 +1533,9 @@ fn op_to_prop_const<'tcx>(
}
// Everything failed: create a new allocation to hold the data.
let alloc_id = ecx.intern_with_temp_alloc(op.layout, |ecx, dest| ecx.copy_op(op, dest)).ok()?;
let alloc_id = ecx
.intern_with_temp_alloc(op.layout, |ecx, dest| ecx.copy_op(op, dest))
.discard_interp_err()?;
let value = ConstValue::Indirect { alloc_id, offset: Size::ZERO };
// Check that we do not leak a pointer.

View File

@ -37,7 +37,9 @@
use rustc_arena::DroplessArena;
use rustc_const_eval::const_eval::DummyMachine;
use rustc_const_eval::interpret::{ImmTy, Immediate, InterpCx, OpTy, Projectable};
use rustc_const_eval::interpret::{
DiscardInterpError, ImmTy, Immediate, InterpCx, OpTy, Projectable,
};
use rustc_data_structures::fx::FxHashSet;
use rustc_index::IndexVec;
use rustc_index::bit_set::BitSet;
@ -200,7 +202,9 @@ impl<'a, 'tcx> TOFinder<'a, 'tcx> {
debug!(?discr, ?bb);
let discr_ty = discr.ty(self.body, self.tcx).ty;
let Ok(discr_layout) = self.ecx.layout_of(discr_ty) else { return };
let Some(discr_layout) = self.ecx.layout_of(discr_ty).discard_interp_err() else {
return;
};
let Some(discr) = self.map.find(discr.as_ref()) else { return };
debug!(?discr);
@ -388,24 +392,28 @@ impl<'a, 'tcx> TOFinder<'a, 'tcx> {
lhs,
constant,
&mut |elem, op| match elem {
TrackElem::Field(idx) => self.ecx.project_field(op, idx.as_usize()).ok(),
TrackElem::Variant(idx) => self.ecx.project_downcast(op, idx).ok(),
TrackElem::Field(idx) => {
self.ecx.project_field(op, idx.as_usize()).discard_interp_err()
}
TrackElem::Variant(idx) => self.ecx.project_downcast(op, idx).discard_interp_err(),
TrackElem::Discriminant => {
let variant = self.ecx.read_discriminant(op).ok()?;
let discr_value =
self.ecx.discriminant_for_variant(op.layout.ty, variant).ok()?;
let variant = self.ecx.read_discriminant(op).discard_interp_err()?;
let discr_value = self
.ecx
.discriminant_for_variant(op.layout.ty, variant)
.discard_interp_err()?;
Some(discr_value.into())
}
TrackElem::DerefLen => {
let op: OpTy<'_> = self.ecx.deref_pointer(op).ok()?.into();
let len_usize = op.len(&self.ecx).ok()?;
let op: OpTy<'_> = self.ecx.deref_pointer(op).discard_interp_err()?.into();
let len_usize = op.len(&self.ecx).discard_interp_err()?;
let layout = self.ecx.layout_of(self.tcx.types.usize).unwrap();
Some(ImmTy::from_uint(len_usize, layout).into())
}
},
&mut |place, op| {
if let Some(conditions) = state.try_get_idx(place, &self.map)
&& let Ok(imm) = self.ecx.read_immediate_raw(op)
&& let Some(imm) = self.ecx.read_immediate_raw(op).discard_interp_err()
&& let Some(imm) = imm.right()
&& let Immediate::Scalar(Scalar::Int(int)) = *imm
{
@ -429,8 +437,10 @@ impl<'a, 'tcx> TOFinder<'a, 'tcx> {
match rhs {
// If we expect `lhs ?= A`, we have an opportunity if we assume `constant == A`.
Operand::Constant(constant) => {
let Ok(constant) =
self.ecx.eval_mir_constant(&constant.const_, constant.span, None)
let Some(constant) = self
.ecx
.eval_mir_constant(&constant.const_, constant.span, None)
.discard_interp_err()
else {
return;
};
@ -469,8 +479,10 @@ impl<'a, 'tcx> TOFinder<'a, 'tcx> {
AggregateKind::Adt(.., Some(_)) => return,
AggregateKind::Adt(_, variant_index, ..) if agg_ty.is_enum() => {
if let Some(discr_target) = self.map.apply(lhs, TrackElem::Discriminant)
&& let Ok(discr_value) =
self.ecx.discriminant_for_variant(agg_ty, *variant_index)
&& let Some(discr_value) = self
.ecx
.discriminant_for_variant(agg_ty, *variant_index)
.discard_interp_err()
{
self.process_immediate(bb, discr_target, discr_value, state);
}
@ -555,7 +567,9 @@ impl<'a, 'tcx> TOFinder<'a, 'tcx> {
// `SetDiscriminant` may be a no-op if the assigned variant is the untagged variant
// of a niche encoding. If we cannot ensure that we write to the discriminant, do
// nothing.
let Ok(enum_layout) = self.ecx.layout_of(enum_ty) else { return };
let Some(enum_layout) = self.ecx.layout_of(enum_ty).discard_interp_err() else {
return;
};
let writes_discriminant = match enum_layout.variants {
Variants::Single { index } => {
assert_eq!(index, *variant_index);
@ -568,7 +582,10 @@ impl<'a, 'tcx> TOFinder<'a, 'tcx> {
} => *variant_index != untagged_variant,
};
if writes_discriminant {
let Ok(discr) = self.ecx.discriminant_for_variant(enum_ty, *variant_index)
let Some(discr) = self
.ecx
.discriminant_for_variant(enum_ty, *variant_index)
.discard_interp_err()
else {
return;
};
@ -645,7 +662,9 @@ impl<'a, 'tcx> TOFinder<'a, 'tcx> {
let Some(discr) = discr.place() else { return };
let discr_ty = discr.ty(self.body, self.tcx).ty;
let Ok(discr_layout) = self.ecx.layout_of(discr_ty) else { return };
let Some(discr_layout) = self.ecx.layout_of(discr_ty).discard_interp_err() else {
return;
};
let Some(conditions) = state.try_get(discr.as_ref(), &self.map) else { return };
if let Some((value, _)) = targets.iter().find(|&(_, target)| target == target_bb) {

View File

@ -6,7 +6,7 @@ use std::fmt::Debug;
use rustc_const_eval::const_eval::DummyMachine;
use rustc_const_eval::interpret::{
ImmTy, InterpCx, InterpResult, Projectable, Scalar, format_interp_error,
DiscardInterpError, ImmTy, InterpCx, InterpResult, Projectable, Scalar, format_interp_error,
};
use rustc_data_structures::fx::FxHashSet;
use rustc_hir::HirId;
@ -101,7 +101,8 @@ impl<'tcx> Value<'tcx> {
}
(PlaceElem::Index(idx), Value::Aggregate { fields, .. }) => {
let idx = prop.get_const(idx.into())?.immediate()?;
let idx = prop.ecx.read_target_usize(idx).ok()?.try_into().ok()?;
let idx =
prop.ecx.read_target_usize(idx).discard_interp_err()?.try_into().ok()?;
if idx <= FieldIdx::MAX_AS_U32 {
fields.get(FieldIdx::from_u32(idx)).unwrap_or(&Value::Uninit)
} else {
@ -243,6 +244,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
"known panics lint encountered formatting error: {}",
format_interp_error(self.ecx.tcx.dcx(), error),
);
error.discard_interp_err();
None
}
}
@ -347,9 +349,9 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
// We need the type of the LHS. We cannot use `place_layout` as that is the type
// of the result, which for checked binops is not the same!
let left_ty = left.ty(self.local_decls(), self.tcx);
let left_size = self.ecx.layout_of(left_ty).ok()?.size;
let left_size = self.ecx.layout_of(left_ty).discard_interp_err()?.size;
let right_size = r.layout.size;
let r_bits = r.to_scalar().to_bits(right_size).ok();
let r_bits = r.to_scalar().to_bits(right_size).discard_interp_err();
if r_bits.is_some_and(|b| b >= left_size.bits() as u128) {
debug!("check_binary_op: reporting assert for {:?}", location);
let panic = AssertKind::Overflow(
@ -496,7 +498,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
// This can be `None` if the lhs wasn't const propagated and we just
// triggered the assert on the value of the rhs.
self.eval_operand(op)
.and_then(|op| self.ecx.read_immediate(&op).ok())
.and_then(|op| self.ecx.read_immediate(&op).discard_interp_err())
.map_or(DbgVal::Underscore, |op| DbgVal::Val(op.to_const_int()))
};
let msg = match msg {
@ -540,7 +542,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
return None;
}
use rustc_middle::mir::Rvalue::*;
let layout = self.ecx.layout_of(dest.ty(self.body, self.tcx).ty).ok()?;
let layout = self.ecx.layout_of(dest.ty(self.body, self.tcx).ty).discard_interp_err()?;
trace!(?layout);
let val: Value<'_> = match *rvalue {
@ -602,7 +604,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
Len(place) => {
let len = match self.get_const(place)? {
Value::Immediate(src) => src.len(&self.ecx).ok()?,
Value::Immediate(src) => src.len(&self.ecx).discard_interp_err()?,
Value::Aggregate { fields, .. } => fields.len() as u64,
Value::Uninit => match place.ty(self.local_decls(), self.tcx).ty.kind() {
ty::Array(_, n) => n.try_eval_target_usize(self.tcx, self.param_env)?,
@ -633,21 +635,21 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
Cast(ref kind, ref value, to) => match kind {
CastKind::IntToInt | CastKind::IntToFloat => {
let value = self.eval_operand(value)?;
let value = self.ecx.read_immediate(&value).ok()?;
let to = self.ecx.layout_of(to).ok()?;
let res = self.ecx.int_to_int_or_float(&value, to).ok()?;
let value = self.ecx.read_immediate(&value).discard_interp_err()?;
let to = self.ecx.layout_of(to).discard_interp_err()?;
let res = self.ecx.int_to_int_or_float(&value, to).discard_interp_err()?;
res.into()
}
CastKind::FloatToFloat | CastKind::FloatToInt => {
let value = self.eval_operand(value)?;
let value = self.ecx.read_immediate(&value).ok()?;
let to = self.ecx.layout_of(to).ok()?;
let res = self.ecx.float_to_float_or_int(&value, to).ok()?;
let value = self.ecx.read_immediate(&value).discard_interp_err()?;
let to = self.ecx.layout_of(to).discard_interp_err()?;
let res = self.ecx.float_to_float_or_int(&value, to).discard_interp_err()?;
res.into()
}
CastKind::Transmute => {
let value = self.eval_operand(value)?;
let to = self.ecx.layout_of(to).ok()?;
let to = self.ecx.layout_of(to).discard_interp_err()?;
// `offset` for immediates only supports scalar/scalar-pair ABIs,
// so bail out if the target is not one.
match (value.layout.abi, to.abi) {
@ -656,7 +658,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
_ => return None,
}
value.offset(Size::ZERO, to, &self.ecx).ok()?.into()
value.offset(Size::ZERO, to, &self.ecx).discard_interp_err()?.into()
}
_ => return None,
},
@ -781,7 +783,8 @@ impl<'tcx> Visitor<'tcx> for ConstPropagator<'_, 'tcx> {
TerminatorKind::SwitchInt { ref discr, ref targets } => {
if let Some(ref value) = self.eval_operand(discr)
&& let Some(value_const) = self.use_ecx(|this| this.ecx.read_scalar(value))
&& let Ok(constant) = value_const.to_bits(value_const.size())
&& let Some(constant) =
value_const.to_bits(value_const.size()).discard_interp_err()
{
// We managed to evaluate the discriminant, so we know we only need to visit
// one target.

View File

@ -641,7 +641,7 @@ pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> {
// The program didn't actually do a read, so suppress the memory access hooks.
// This is also a very special exception where we just ignore an error -- if this read
// was UB e.g. because the memory is uninitialized, we don't want to know!
let old_val = this.run_for_validation(|this| this.read_scalar(dest)).ok();
let old_val = this.run_for_validation(|this| this.read_scalar(dest)).discard_interp_err();
this.allow_data_races_mut(move |this| this.write_scalar(val, dest))?;
this.validate_atomic_store(dest, atomic)?;
this.buffered_atomic_write(val, dest, atomic, old_val)

View File

@ -223,7 +223,10 @@ pub fn report_error<'tcx>(
let info = info.downcast_ref::<TerminationInfo>().expect("invalid MachineStop payload");
use TerminationInfo::*;
let title = match info {
Exit { code, leak_check } => return Some((*code, *leak_check)),
&Exit { code, leak_check } => {
e.discard_interp_err();
return Some((code, leak_check));
}
Abort(_) => Some("abnormal termination"),
UnsupportedInIsolation(_) | Int2PtrWithStrictProvenance | UnsupportedForeignItem(_) =>
Some("unsupported operation"),
@ -375,6 +378,7 @@ pub fn report_error<'tcx>(
InvalidProgramInfo::AlreadyReported(_)
) => {
// This got already reported. No point in reporting it again.
e.discard_interp_err();
return None;
}
_ =>

View File

@ -247,7 +247,8 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// This does NaN adjustments.
let val = this.binary_op(mir_op, &left, &right).map_err(|err| {
match err.kind() {
InterpError::UndefinedBehavior(UndefinedBehaviorInfo::ShiftOverflow { shift_amount, .. }) => {
&InterpError::UndefinedBehavior(UndefinedBehaviorInfo::ShiftOverflow { shift_amount, .. }) => {
err.discard_interp_err();
// This resets the interpreter backtrace, but it's not worth avoiding that.
let shift_amount = match shift_amount {
Either::Left(v) => v.to_string(),