Rollup merge of #130885 - RalfJung:interp-error-discard, r=oli-obk

panic when an interpreter error gets unintentionally discarded

One important invariant of Miri is that when an interpreter error is raised (*in particular* a UB error), it must not be discarded: it's not okay to just check `foo().is_err()` and then continue executing.

This seems to catch new contributors by surprise fairly regularly, so this PR tries to make it so that *if* this ever happens, we get a panic rather than a silent missed UB bug. The interpreter error type now contains a "guard" that panics on drop, and that guard is explicitly passed to `mem::forget` when an error is deliberately discarded.
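
As a rough illustration of that mechanism (hypothetical names `DropGuard`/`GuardedError`, not the actual rustc/Miri types), the pattern looks roughly like this:

```rust
use std::mem;

// Hypothetical stand-in for the guard embedded in the interpreter error type.
struct DropGuard;

impl Drop for DropGuard {
    fn drop(&mut self) {
        // Only complain if we are not already unwinding from another panic,
        // so an existing panic is not turned into an abort.
        if !std::thread::panicking() {
            panic!("an interpreter error was unintentionally discarded");
        }
    }
}

// Hypothetical error wrapper: the payload plus the guard.
struct GuardedError<E> {
    error: E,
    guard: DropGuard,
}

impl<E> GuardedError<E> {
    fn new(error: E) -> Self {
        GuardedError { error, guard: DropGuard }
    }

    // The deliberate-discard path: defuse the guard with `mem::forget` so its
    // `Drop` impl never runs, then drop the payload normally.
    fn discard(self) {
        let GuardedError { error, guard } = self;
        mem::forget(guard);
        drop(error);
    }
}

fn main() {
    // Deliberate discard: fine, the guard is forgotten.
    GuardedError::new("benign error").discard();

    // Accidental discard: the guard's `Drop` impl panics as soon as the
    // temporary is dropped at the end of this statement.
    let _ = GuardedError::new("UB error");
}
```

The point of the pattern is that discarding an error now requires an explicit, greppable step, while an accidental drop fails loudly instead of silently losing the UB report.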

Fixes https://github.com/rust-lang/miri/issues/3855
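
The diff below replaces plain `Ok(...)` returns with `interp_ok(...)` and routes deliberate error handling through `.report_err()` and `.discard_err()`. As a simplified, hypothetical sketch (not the real `rustc_middle` definitions, which also carry a `'tcx` lifetime), those helpers could fit together roughly like this:

```rust
use std::mem;

// Hypothetical guard as in the sketch above: panics if dropped un-defused.
struct Guard;
impl Drop for Guard {
    fn drop(&mut self) {
        if !std::thread::panicking() {
            panic!("interpreter error discarded without being handled");
        }
    }
}

// Simplified stand-in for the interpreter result type used throughout the diff.
pub struct InterpResult<T, E>(Result<T, (E, Guard)>);

/// Replaces the plain `Ok(...)` constructor on the success paths.
pub fn interp_ok<T, E>(x: T) -> InterpResult<T, E> {
    InterpResult(Ok(x))
}

impl<T, E> InterpResult<T, E> {
    /// Convert back into a plain `Result`. The caller explicitly takes over
    /// responsibility for reporting the error, so the guard is defused here.
    pub fn report_err(self) -> Result<T, E> {
        self.0.map_err(|(error, guard)| {
            mem::forget(guard);
            error
        })
    }

    /// Deliberately throw the error away and keep only the success value.
    pub fn discard_err(self) -> Option<T> {
        self.report_err().ok()
    }
}

fn main() {
    // Success values pass through untouched.
    assert_eq!(interp_ok::<_, ()>(42).report_err(), Ok(42));
    // Deliberately dropping an error is fine: `discard_err` defuses the guard.
    let failed: InterpResult<i32, &str> = InterpResult(Err(("oops", Guard)));
    assert_eq!(failed.discard_err(), None);
}
```

In the diff, `report_err()` typically appears right before a `.map_err(...)` that turns the error into a reported diagnostic, while `discard_err()?` shows up in best-effort helpers that return `Option` and do not care why the operation failed.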
Jubilee 2024-10-01 23:15:59 -07:00 committed by GitHub
commit ea453bb10b
102 changed files with 1574 additions and 1337 deletions


@ -6,7 +6,8 @@ use rustc_middle::{bug, span_bug, ty};
use rustc_span::def_id::DefId;
use crate::interpret::{
self, HasStaticRootDefId, ImmTy, Immediate, InterpCx, PointerArithmetic, throw_machine_stop,
self, HasStaticRootDefId, ImmTy, Immediate, InterpCx, PointerArithmetic, interp_ok,
throw_machine_stop,
};
/// Macro for machine-specific `InterpError` without allocation.
@ -79,7 +80,7 @@ impl<'tcx> interpret::Machine<'tcx> for DummyMachine {
throw_machine_stop_str!("can't access mutable globals in ConstProp");
}
Ok(())
interp_ok(())
}
fn find_mir_or_eval_fn(
@ -127,7 +128,7 @@ impl<'tcx> interpret::Machine<'tcx> for DummyMachine {
right: &interpret::ImmTy<'tcx, Self::Provenance>,
) -> interpret::InterpResult<'tcx, ImmTy<'tcx, Self::Provenance>> {
use rustc_middle::mir::BinOp::*;
Ok(match bin_op {
interp_ok(match bin_op {
Eq | Ne | Lt | Le | Gt | Ge => {
// Types can differ, e.g. fn ptrs with different `for`.
assert_eq!(left.layout.abi, right.layout.abi);


@ -20,7 +20,7 @@ use crate::const_eval::CheckAlignment;
use crate::interpret::{
CtfeValidationMode, GlobalId, Immediate, InternKind, InternResult, InterpCx, InterpError,
InterpResult, MPlaceTy, MemoryKind, OpTy, RefTracking, StackPopCleanup, create_static_alloc,
eval_nullary_intrinsic, intern_const_alloc_recursive, throw_exhaust,
eval_nullary_intrinsic, intern_const_alloc_recursive, interp_ok, throw_exhaust,
};
use crate::{CTRL_C_RECEIVED, errors};
@ -98,19 +98,19 @@ fn eval_body_using_ecx<'tcx, R: InterpretationResult<'tcx>>(
return Err(ecx
.tcx
.dcx()
.emit_err(errors::DanglingPtrInFinal { span: ecx.tcx.span, kind: intern_kind })
.into());
.emit_err(errors::DanglingPtrInFinal { span: ecx.tcx.span, kind: intern_kind }))
.into();
}
Err(InternResult::FoundBadMutablePointer) => {
return Err(ecx
.tcx
.dcx()
.emit_err(errors::MutablePtrInFinal { span: ecx.tcx.span, kind: intern_kind })
.into());
.emit_err(errors::MutablePtrInFinal { span: ecx.tcx.span, kind: intern_kind }))
.into();
}
}
Ok(R::make_result(ret, ecx))
interp_ok(R::make_result(ret, ecx))
}
/// The `InterpCx` is only meant to be used to do field and index projections into constants for
@ -147,7 +147,8 @@ pub fn mk_eval_cx_for_const_val<'tcx>(
ty: Ty<'tcx>,
) -> Option<(CompileTimeInterpCx<'tcx>, OpTy<'tcx>)> {
let ecx = mk_eval_cx_to_read_const_val(tcx.tcx, tcx.span, param_env, CanAccessMutGlobal::No);
let op = ecx.const_val_to_op(val, ty, None).ok()?;
// FIXME: is it a problem to discard the error here?
let op = ecx.const_val_to_op(val, ty, None).discard_err()?;
Some((ecx, op))
}
@ -185,12 +186,16 @@ pub(super) fn op_to_const<'tcx>(
_ => false,
};
let immediate = if force_as_immediate {
match ecx.read_immediate(op) {
match ecx.read_immediate(op).report_err() {
Ok(imm) => Right(imm),
Err(err) if !for_diagnostics => {
Err(err) => {
if for_diagnostics {
// This discards the error, but for diagnostics that's okay.
op.as_mplace_or_imm()
} else {
panic!("normalization works on validated constants: {err:?}")
}
_ => op.as_mplace_or_imm(),
}
}
} else {
op.as_mplace_or_imm()
@ -283,7 +288,8 @@ pub fn eval_to_const_value_raw_provider<'tcx>(
let ty::FnDef(_, args) = ty.kind() else {
bug!("intrinsic with type {:?}", ty);
};
return eval_nullary_intrinsic(tcx, key.param_env, def_id, args).map_err(|error| {
return eval_nullary_intrinsic(tcx, key.param_env, def_id, args).report_err().map_err(
|error| {
let span = tcx.def_span(def_id);
super::report(
@ -293,7 +299,8 @@ pub fn eval_to_const_value_raw_provider<'tcx>(
|| (span, vec![]),
|span, _| errors::NullaryIntrinsicError { span },
)
});
},
);
}
tcx.eval_to_allocation_raw(key).map(|val| turn_into_const_value(tcx, val, key))
@ -376,6 +383,7 @@ fn eval_in_interpreter<'tcx, R: InterpretationResult<'tcx>>(
);
let res = ecx.load_mir(cid.instance.def, cid.promoted);
res.and_then(|body| eval_body_using_ecx(&mut ecx, cid, body))
.report_err()
.map_err(|error| report_eval_error(&ecx, cid, error))
}
@ -400,6 +408,7 @@ fn const_validate_mplace<'tcx>(
}
};
ecx.const_validate_operand(&mplace.into(), path, &mut ref_tracking, mode)
.report_err()
// Instead of just reporting the `InterpError` via the usual machinery, we give a more targeted
// error about the validation failure.
.map_err(|error| report_validation_error(&ecx, cid, error, alloc_id))?;


@ -24,8 +24,8 @@ use crate::fluent_generated as fluent;
use crate::interpret::{
self, AllocId, AllocRange, ConstAllocation, CtfeProvenance, FnArg, Frame, GlobalAlloc, ImmTy,
InterpCx, InterpResult, MPlaceTy, OpTy, Pointer, PointerArithmetic, RangeSet, Scalar,
StackPopCleanup, compile_time_machine, err_ub, throw_exhaust, throw_inval, throw_ub_custom,
throw_unsup, throw_unsup_format,
StackPopCleanup, compile_time_machine, interp_ok, throw_exhaust, throw_inval, throw_ub,
throw_ub_custom, throw_unsup, throw_unsup_format,
};
/// When hitting this many interpreted terminators we emit a deny by default lint
@ -247,7 +247,7 @@ impl<'tcx> CompileTimeInterpCx<'tcx> {
let msg = Symbol::intern(self.read_str(&msg_place)?);
let span = self.find_closest_untracked_caller_location();
let (file, line, col) = self.location_triple_for_span(span);
return Err(ConstEvalErrKind::Panic { msg, file, line, col }.into());
return Err(ConstEvalErrKind::Panic { msg, file, line, col }).into();
} else if self.tcx.is_lang_item(def_id, LangItem::PanicFmt) {
// For panic_fmt, call const_panic_fmt instead.
let const_def_id = self.tcx.require_lang_item(LangItem::ConstPanicFmt, None);
@ -259,16 +259,16 @@ impl<'tcx> CompileTimeInterpCx<'tcx> {
self.cur_span(),
);
return Ok(Some(new_instance));
return interp_ok(Some(new_instance));
} else if self.tcx.is_lang_item(def_id, LangItem::AlignOffset) {
let args = self.copy_fn_args(args);
// For align_offset, we replace the function call if the pointer has no address.
match self.align_offset(instance, &args, dest, ret)? {
ControlFlow::Continue(()) => return Ok(Some(instance)),
ControlFlow::Break(()) => return Ok(None),
ControlFlow::Continue(()) => return interp_ok(Some(instance)),
ControlFlow::Break(()) => return interp_ok(None),
}
}
Ok(Some(instance))
interp_ok(Some(instance))
}
/// `align_offset(ptr, target_align)` needs special handling in const eval, because the pointer
@ -323,25 +323,25 @@ impl<'tcx> CompileTimeInterpCx<'tcx> {
dest,
StackPopCleanup::Goto { ret, unwind: mir::UnwindAction::Unreachable },
)?;
Ok(ControlFlow::Break(()))
interp_ok(ControlFlow::Break(()))
} else {
// Not alignable in const, return `usize::MAX`.
let usize_max = Scalar::from_target_usize(self.target_usize_max(), self);
self.write_scalar(usize_max, dest)?;
self.return_to_block(ret)?;
Ok(ControlFlow::Break(()))
interp_ok(ControlFlow::Break(()))
}
}
Err(_addr) => {
// The pointer has an address, continue with function call.
Ok(ControlFlow::Continue(()))
interp_ok(ControlFlow::Continue(()))
}
}
}
/// See documentation on the `ptr_guaranteed_cmp` intrinsic.
fn guaranteed_cmp(&mut self, a: Scalar, b: Scalar) -> InterpResult<'tcx, u8> {
Ok(match (a, b) {
interp_ok(match (a, b) {
// Comparisons between integers are always known.
(Scalar::Int { .. }, Scalar::Int { .. }) => {
if a == b {
@ -403,8 +403,8 @@ impl<'tcx> interpret::Machine<'tcx> for CompileTimeMachine<'tcx> {
instance: ty::InstanceKind<'tcx>,
) -> InterpResult<'tcx, &'tcx mir::Body<'tcx>> {
match instance {
ty::InstanceKind::Item(def) => Ok(ecx.tcx.mir_for_ctfe(def)),
_ => Ok(ecx.tcx.instance_mir(instance)),
ty::InstanceKind::Item(def) => interp_ok(ecx.tcx.mir_for_ctfe(def)),
_ => interp_ok(ecx.tcx.instance_mir(instance)),
}
}
@ -422,7 +422,7 @@ impl<'tcx> interpret::Machine<'tcx> for CompileTimeMachine<'tcx> {
// Replace some functions.
let Some(instance) = ecx.hook_special_const_fn(orig_instance, args, dest, ret)? else {
// Call has already been handled.
return Ok(None);
return interp_ok(None);
};
// Only check non-glue functions
@ -444,14 +444,14 @@ impl<'tcx> interpret::Machine<'tcx> for CompileTimeMachine<'tcx> {
// This is a const fn. Call it.
// In case of replacement, we return the *original* instance to make backtraces work out
// (and we hope this does not confuse the FnAbi checks too much).
Ok(Some((ecx.load_mir(instance.def, None)?, orig_instance)))
interp_ok(Some((ecx.load_mir(instance.def, None)?, orig_instance)))
}
fn panic_nounwind(ecx: &mut InterpCx<'tcx, Self>, msg: &str) -> InterpResult<'tcx> {
let msg = Symbol::intern(msg);
let span = ecx.find_closest_untracked_caller_location();
let (file, line, col) = ecx.location_triple_for_span(span);
Err(ConstEvalErrKind::Panic { msg, file, line, col }.into())
Err(ConstEvalErrKind::Panic { msg, file, line, col }).into()
}
fn call_intrinsic(
@ -464,7 +464,7 @@ impl<'tcx> interpret::Machine<'tcx> for CompileTimeMachine<'tcx> {
) -> InterpResult<'tcx, Option<ty::Instance<'tcx>>> {
// Shared intrinsics.
if ecx.eval_intrinsic(instance, args, dest, target)? {
return Ok(None);
return interp_ok(None);
}
let intrinsic_name = ecx.tcx.item_name(instance.def_id());
@ -541,7 +541,7 @@ impl<'tcx> interpret::Machine<'tcx> for CompileTimeMachine<'tcx> {
"intrinsic `{intrinsic_name}` is not supported at compile-time"
);
}
return Ok(Some(ty::Instance {
return interp_ok(Some(ty::Instance {
def: ty::InstanceKind::Item(instance.def_id()),
args: instance.args,
}));
@ -550,7 +550,7 @@ impl<'tcx> interpret::Machine<'tcx> for CompileTimeMachine<'tcx> {
// Intrinsic is done, jump to next block.
ecx.return_to_block(target)?;
Ok(None)
interp_ok(None)
}
fn assert_panic(
@ -581,7 +581,7 @@ impl<'tcx> interpret::Machine<'tcx> for CompileTimeMachine<'tcx> {
}
}
};
Err(ConstEvalErrKind::AssertFailure(err).into())
Err(ConstEvalErrKind::AssertFailure(err)).into()
}
fn binary_ptr_op(
@ -652,7 +652,7 @@ impl<'tcx> interpret::Machine<'tcx> for CompileTimeMachine<'tcx> {
}
}
Ok(())
interp_ok(())
}
#[inline(always)]
@ -670,7 +670,7 @@ impl<'tcx> interpret::Machine<'tcx> for CompileTimeMachine<'tcx> {
if !ecx.recursion_limit.value_within_limit(ecx.stack().len() + 1) {
throw_exhaust!(StackFrameLimitReached)
} else {
Ok(frame)
interp_ok(frame)
}
}
@ -700,22 +700,22 @@ impl<'tcx> interpret::Machine<'tcx> for CompileTimeMachine<'tcx> {
if is_write {
// Write access. These are never allowed, but we give a targeted error message.
match alloc.mutability {
Mutability::Not => Err(err_ub!(WriteToReadOnly(alloc_id)).into()),
Mutability::Mut => Err(ConstEvalErrKind::ModifiedGlobal.into()),
Mutability::Not => throw_ub!(WriteToReadOnly(alloc_id)),
Mutability::Mut => Err(ConstEvalErrKind::ModifiedGlobal).into(),
}
} else {
// Read access. These are usually allowed, with some exceptions.
if machine.can_access_mut_global == CanAccessMutGlobal::Yes {
// Machine configuration allows us to read from anything (e.g., `static` initializer).
Ok(())
interp_ok(())
} else if alloc.mutability == Mutability::Mut {
// Machine configuration does not allow us to read statics (e.g., `const`
// initializer).
Err(ConstEvalErrKind::ConstAccessesMutGlobal.into())
Err(ConstEvalErrKind::ConstAccessesMutGlobal).into()
} else {
// Immutable global, this read is fine.
assert_eq!(alloc.mutability, Mutability::Not);
Ok(())
interp_ok(())
}
}
}
@ -748,9 +748,9 @@ impl<'tcx> interpret::Machine<'tcx> for CompileTimeMachine<'tcx> {
// even when there is interior mutability.)
place.map_provenance(CtfeProvenance::as_shared_ref)
};
Ok(ImmTy::from_immediate(new_place.to_ref(ecx), val.layout))
interp_ok(ImmTy::from_immediate(new_place.to_ref(ecx), val.layout))
} else {
Ok(val.clone())
interp_ok(val.clone())
}
}
@ -763,20 +763,20 @@ impl<'tcx> interpret::Machine<'tcx> for CompileTimeMachine<'tcx> {
) -> InterpResult<'tcx> {
if range.size == Size::ZERO {
// Nothing to check.
return Ok(());
return interp_ok(());
}
// Reject writes through immutable pointers.
if immutable {
return Err(ConstEvalErrKind::WriteThroughImmutablePointer.into());
return Err(ConstEvalErrKind::WriteThroughImmutablePointer).into();
}
// Everything else is fine.
Ok(())
interp_ok(())
}
fn before_alloc_read(ecx: &InterpCx<'tcx, Self>, alloc_id: AllocId) -> InterpResult<'tcx> {
// Check if this is the currently evaluated static.
if Some(alloc_id) == ecx.machine.static_root_ids.map(|(id, _)| id) {
return Err(ConstEvalErrKind::RecursiveStatic.into());
return Err(ConstEvalErrKind::RecursiveStatic).into();
}
// If this is another static, make sure we fire off the query to detect cycles.
// But only do that when checks for static recursion are enabled.
@ -788,7 +788,7 @@ impl<'tcx> interpret::Machine<'tcx> for CompileTimeMachine<'tcx> {
ecx.ctfe_query(|tcx| tcx.eval_static_initializer(def_id))?;
}
}
Ok(())
interp_ok(())
}
fn cached_union_data_range<'e>(


@ -1,13 +1,12 @@
// Not in interpret to make sure we do not use private implementation details
use rustc_middle::mir::interpret::InterpErrorInfo;
use rustc_middle::query::{Key, TyCtxtAt};
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_middle::{bug, mir};
use rustc_target::abi::VariantIdx;
use tracing::instrument;
use crate::interpret::{InterpCx, format_interp_error};
use crate::interpret::InterpCx;
mod dummy_machine;
mod error;
@ -33,17 +32,6 @@ pub(crate) enum ValTreeCreationError<'tcx> {
}
pub(crate) type ValTreeCreationResult<'tcx> = Result<ty::ValTree<'tcx>, ValTreeCreationError<'tcx>>;
impl<'tcx> From<InterpErrorInfo<'tcx>> for ValTreeCreationError<'tcx> {
fn from(err: InterpErrorInfo<'tcx>) -> Self {
ty::tls::with(|tcx| {
bug!(
"Unexpected Undefined Behavior error during valtree construction: {}",
format_interp_error(tcx.dcx(), err),
)
})
}
}
#[instrument(skip(tcx), level = "debug")]
pub(crate) fn try_destructure_mir_constant_for_user_output<'tcx>(
tcx: TyCtxtAt<'tcx>,
@ -60,8 +48,8 @@ pub(crate) fn try_destructure_mir_constant_for_user_output<'tcx>(
return None;
}
ty::Adt(def, _) => {
let variant = ecx.read_discriminant(&op).ok()?;
let down = ecx.project_downcast(&op, variant).ok()?;
let variant = ecx.read_discriminant(&op).discard_err()?;
let down = ecx.project_downcast(&op, variant).discard_err()?;
(def.variants()[variant].fields.len(), Some(variant), down)
}
ty::Tuple(args) => (args.len(), None, op),
@ -70,7 +58,7 @@ pub(crate) fn try_destructure_mir_constant_for_user_output<'tcx>(
let fields_iter = (0..field_count)
.map(|i| {
let field_op = ecx.project_field(&down, i).ok()?;
let field_op = ecx.project_field(&down, i).discard_err()?;
let val = op_to_const(&ecx, &field_op, /* for diagnostics */ true);
Some((val, field_op.layout.ty))
})


@ -92,7 +92,7 @@ fn const_to_valtree_inner<'tcx>(
Ok(ty::ValTree::zst())
}
ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => {
let val = ecx.read_immediate(place)?;
let val = ecx.read_immediate(place).unwrap();
let val = val.to_scalar_int().unwrap();
*num_nodes += 1;
@ -114,7 +114,7 @@ fn const_to_valtree_inner<'tcx>(
// equality at compile-time (see `ptr_guaranteed_cmp`).
// However we allow those that are just integers in disguise.
// First, get the pointer. Remember it might be wide!
let val = ecx.read_immediate(place)?;
let val = ecx.read_immediate(place).unwrap();
// We could allow wide raw pointers where both sides are integers in the future,
// but for now we reject them.
if matches!(val.layout.abi, Abi::ScalarPair(..)) {
@ -135,7 +135,7 @@ fn const_to_valtree_inner<'tcx>(
ty::FnPtr(..) => Err(ValTreeCreationError::NonSupportedType(ty)),
ty::Ref(_, _, _) => {
let derefd_place = ecx.deref_pointer(place)?;
let derefd_place = ecx.deref_pointer(place).unwrap();
const_to_valtree_inner(ecx, &derefd_place, num_nodes)
}
@ -159,7 +159,7 @@ fn const_to_valtree_inner<'tcx>(
bug!("uninhabited types should have errored and never gotten converted to valtree")
}
let variant = ecx.read_discriminant(place)?;
let variant = ecx.read_discriminant(place).unwrap();
branches(ecx, place, def.variant(variant).fields.len(), def.is_enum().then_some(variant), num_nodes)
}


@ -15,8 +15,8 @@ use tracing::{info, instrument, trace};
use super::{
CtfeProvenance, FnVal, ImmTy, InterpCx, InterpResult, MPlaceTy, Machine, OpTy, PlaceTy,
Projectable, Provenance, ReturnAction, Scalar, StackPopCleanup, StackPopInfo, throw_ub,
throw_ub_custom, throw_unsup_format,
Projectable, Provenance, ReturnAction, Scalar, StackPopCleanup, StackPopInfo, interp_ok,
throw_ub, throw_ub_custom, throw_unsup_format,
};
use crate::fluent_generated as fluent;
@ -64,7 +64,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
arg: &FnArg<'tcx, M::Provenance>,
field: usize,
) -> InterpResult<'tcx, FnArg<'tcx, M::Provenance>> {
Ok(match arg {
interp_ok(match arg {
FnArg::Copy(op) => FnArg::Copy(self.project_field(op, field)?),
FnArg::InPlace(mplace) => FnArg::InPlace(self.project_field(mplace, field)?),
})
@ -97,7 +97,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// another type.
let ty::Adt(def, args) = layout.ty.kind() else {
// Not an ADT, so definitely no NPO.
return Ok(layout);
return interp_ok(layout);
};
let inner = if self.tcx.is_diagnostic_item(sym::Option, def.did()) {
// The wrapped type is the only arg.
@ -111,10 +111,10 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
} else if rhs.is_1zst() {
lhs
} else {
return Ok(layout); // no NPO
return interp_ok(layout); // no NPO
}
} else {
return Ok(layout); // no NPO
return interp_ok(layout); // no NPO
};
// Check if the inner type is one of the NPO-guaranteed ones.
@ -126,7 +126,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// Stop at NPO types so that we don't miss that attribute in the check below!
def.is_struct() && !is_npo(def)
});
Ok(match inner.ty.kind() {
interp_ok(match inner.ty.kind() {
ty::Ref(..) | ty::FnPtr(..) => {
// Option<&T> behaves like &T, and same for fn()
inner
@ -153,11 +153,11 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
) -> InterpResult<'tcx, bool> {
// Fast path: equal types are definitely compatible.
if caller.ty == callee.ty {
return Ok(true);
return interp_ok(true);
}
// 1-ZST are compatible with all 1-ZST (and with nothing else).
if caller.is_1zst() || callee.is_1zst() {
return Ok(caller.is_1zst() && callee.is_1zst());
return interp_ok(caller.is_1zst() && callee.is_1zst());
}
// Unfold newtypes and NPO optimizations.
let unfold = |layout: TyAndLayout<'tcx>| {
@ -180,17 +180,17 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
_ => None,
};
if let (Some(caller), Some(callee)) = (thin_pointer(caller), thin_pointer(callee)) {
return Ok(caller == callee);
return interp_ok(caller == callee);
}
// For wide pointers we have to get the pointee type.
let pointee_ty = |ty: Ty<'tcx>| -> InterpResult<'tcx, Option<Ty<'tcx>>> {
// We cannot use `builtin_deref` here since we need to reject `Box<T, MyAlloc>`.
Ok(Some(match ty.kind() {
interp_ok(Some(match ty.kind() {
ty::Ref(_, ty, _) => *ty,
ty::RawPtr(ty, _) => *ty,
// We only accept `Box` with the default allocator.
_ if ty.is_box_global(*self.tcx) => ty.expect_boxed_ty(),
_ => return Ok(None),
_ => return interp_ok(None),
}))
};
if let (Some(caller), Some(callee)) = (pointee_ty(caller.ty)?, pointee_ty(callee.ty)?) {
@ -202,7 +202,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
let normalize = |ty| self.tcx.normalize_erasing_regions(self.param_env, ty);
ty.ptr_metadata_ty(*self.tcx, normalize)
};
return Ok(meta_ty(caller) == meta_ty(callee));
return interp_ok(meta_ty(caller) == meta_ty(callee));
}
// Compatible integer types (in particular, usize vs ptr-sized-u32/u64).
@ -217,11 +217,11 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
};
if let (Some(caller), Some(callee)) = (int_ty(caller.ty), int_ty(callee.ty)) {
// This is okay if they are the same integer type.
return Ok(caller == callee);
return interp_ok(caller == callee);
}
// Fall back to exact equality.
Ok(caller == callee)
interp_ok(caller == callee)
}
fn check_argument_compat(
@ -235,13 +235,13 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// Ensure that our checks imply actual ABI compatibility for this concrete call.
// (This can fail e.g. if `#[rustc_nonnull_optimization_guaranteed]` is used incorrectly.)
assert!(caller_abi.eq_abi(callee_abi));
Ok(true)
interp_ok(true)
} else {
trace!(
"check_argument_compat: incompatible ABIs:\ncaller: {:?}\ncallee: {:?}",
caller_abi, callee_abi
);
Ok(false)
interp_ok(false)
}
}
@ -266,7 +266,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
if !already_live {
self.storage_live(callee_arg.as_local().unwrap())?;
}
return Ok(());
return interp_ok(());
}
// Find next caller arg.
let Some((caller_arg, caller_abi)) = caller_args.next() else {
@ -308,7 +308,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
if let FnArg::InPlace(mplace) = caller_arg {
M::protect_in_place_function_argument(self, mplace)?;
}
Ok(())
interp_ok(())
}
/// The main entry point for creating a new stack frame: performs ABI checks and initializes
@ -536,7 +536,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
unwind,
);
} else {
Ok(())
interp_ok(())
}
}
ty::InstanceKind::VTableShim(..)
@ -561,7 +561,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
unwind,
)?
else {
return Ok(());
return interp_ok(());
};
// Special handling for the closure ABI: untuple the last argument.
@ -572,7 +572,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
trace!("init_fn_call: Will pass last argument by untupling");
Cow::from(
args.iter()
.map(|a| Ok(a.clone()))
.map(|a| interp_ok(a.clone()))
.chain(
(0..untuple_arg.layout().fields.count())
.map(|i| self.fn_arg_field(untuple_arg, i)),
@ -886,27 +886,25 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// this transmute.
res
} else {
Ok(())
interp_ok(())
};
// All right, now it is time to actually pop the frame.
let stack_pop_info = self.pop_stack_frame_raw(unwinding)?;
// Report error from return value copy, if any.
copy_ret_result?;
// An error here takes precedence over the copy error.
let (stack_pop_info, ()) = self.pop_stack_frame_raw(unwinding).and(copy_ret_result)?;
match stack_pop_info.return_action {
ReturnAction::Normal => {}
ReturnAction::NoJump => {
// The hook already did everything.
return Ok(());
return interp_ok(());
}
ReturnAction::NoCleanup => {
// If we are not doing cleanup, also skip everything else.
assert!(self.stack().is_empty(), "only the topmost frame should ever be leaked");
assert!(!unwinding, "tried to skip cleanup during unwinding");
// Skip machine hook.
return Ok(());
return interp_ok(());
}
}
@ -931,7 +929,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
self.stack().is_empty(),
"only the bottommost frame can have StackPopCleanup::Root"
);
Ok(())
interp_ok(())
}
}
}


@ -14,7 +14,8 @@ use tracing::trace;
use super::util::ensure_monomorphic_enough;
use super::{
FnVal, ImmTy, Immediate, InterpCx, Machine, OpTy, PlaceTy, err_inval, throw_ub, throw_ub_custom,
FnVal, ImmTy, Immediate, InterpCx, Machine, OpTy, PlaceTy, err_inval, interp_ok, throw_ub,
throw_ub_custom,
};
use crate::fluent_generated as fluent;
@ -157,7 +158,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
self.copy_op_allow_transmute(src, dest)?;
}
}
Ok(())
interp_ok(())
}
/// Handles 'IntToInt' and 'IntToFloat' casts.
@ -169,7 +170,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
assert!(src.layout.ty.is_integral() || src.layout.ty.is_char() || src.layout.ty.is_bool());
assert!(cast_to.ty.is_floating_point() || cast_to.ty.is_integral() || cast_to.ty.is_char());
Ok(ImmTy::from_scalar(
interp_ok(ImmTy::from_scalar(
self.cast_from_int_like(src.to_scalar(), src.layout, cast_to.ty)?,
cast_to,
))
@ -192,7 +193,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
FloatTy::F64 => self.cast_from_float(src.to_scalar().to_f64()?, cast_to.ty),
FloatTy::F128 => self.cast_from_float(src.to_scalar().to_f128()?, cast_to.ty),
};
Ok(ImmTy::from_scalar(val, cast_to))
interp_ok(ImmTy::from_scalar(val, cast_to))
}
/// Handles 'FnPtrToPtr' and 'PtrToPtr' casts.
@ -206,14 +207,14 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// Handle casting any ptr to raw ptr (might be a fat ptr).
if cast_to.size == src.layout.size {
// Thin or fat pointer that just has the ptr kind of target type changed.
return Ok(ImmTy::from_immediate(**src, cast_to));
return interp_ok(ImmTy::from_immediate(**src, cast_to));
} else {
// Casting the metadata away from a fat ptr.
assert_eq!(src.layout.size, 2 * self.pointer_size());
assert_eq!(cast_to.size, self.pointer_size());
assert!(src.layout.ty.is_unsafe_ptr());
return match **src {
Immediate::ScalarPair(data, _) => Ok(ImmTy::from_scalar(data, cast_to)),
Immediate::ScalarPair(data, _) => interp_ok(ImmTy::from_scalar(data, cast_to)),
Immediate::Scalar(..) => span_bug!(
self.cur_span(),
"{:?} input to a fat-to-thin cast ({} -> {})",
@ -240,7 +241,10 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
Ok(ptr) => M::expose_ptr(self, ptr)?,
Err(_) => {} // Do nothing, exposing an invalid pointer (`None` provenance) is a NOP.
};
Ok(ImmTy::from_scalar(self.cast_from_int_like(scalar, src.layout, cast_to.ty)?, cast_to))
interp_ok(ImmTy::from_scalar(
self.cast_from_int_like(scalar, src.layout, cast_to.ty)?,
cast_to,
))
}
pub fn pointer_with_exposed_provenance_cast(
@ -258,7 +262,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// Then turn address into pointer.
let ptr = M::ptr_from_addr_cast(self, addr)?;
Ok(ImmTy::from_scalar(Scalar::from_maybe_pointer(ptr, self), cast_to))
interp_ok(ImmTy::from_scalar(Scalar::from_maybe_pointer(ptr, self), cast_to))
}
/// Low-level cast helper function. This works directly on scalars and can take 'int-like' input
@ -280,7 +284,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
_ => span_bug!(self.cur_span(), "invalid int-like cast from {}", src_layout.ty),
};
Ok(match *cast_ty.kind() {
interp_ok(match *cast_ty.kind() {
// int -> int
Int(_) | Uint(_) => {
let size = match *cast_ty.kind() {
@ -505,7 +509,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
self.unsize_into(&src_field, cast_ty_field, &dst_field)?;
}
}
Ok(())
interp_ok(())
}
_ => {
// Do not ICE if we are not monomorphic enough.


@ -7,7 +7,8 @@ use rustc_target::abi::{self, TagEncoding, VariantIdx, Variants};
use tracing::{instrument, trace};
use super::{
ImmTy, InterpCx, InterpResult, Machine, Projectable, Scalar, Writeable, err_ub, throw_ub,
ImmTy, InterpCx, InterpResult, Machine, Projectable, Scalar, Writeable, err_ub, interp_ok,
throw_ub,
};
impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
@ -48,7 +49,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
if actual_variant != variant_index {
throw_ub!(InvalidNichedEnumVariantWritten { enum_ty: dest.layout().ty });
}
Ok(())
interp_ok(())
}
}
}
@ -89,7 +90,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
throw_ub!(UninhabitedEnumVariantRead(index))
}
}
return Ok(index);
return interp_ok(index);
}
Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => {
(tag, tag_encoding, tag_field)
@ -205,7 +206,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
if op.layout().for_variant(self, index).abi.is_uninhabited() {
throw_ub!(UninhabitedEnumVariantRead(index))
}
Ok(index)
interp_ok(index)
}
pub fn discriminant_for_variant(
@ -226,7 +227,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
Scalar::from_uint(variant.as_u32(), discr_layout.size)
}
};
Ok(ImmTy::from_scalar(discr_value, discr_layout))
interp_ok(ImmTy::from_scalar(discr_value, discr_layout))
}
/// Computes how to write the tag of a given variant of enum `ty`:
@ -247,7 +248,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// discriminant is encoded implicitly, so any attempt to write
// the wrong discriminant for a `Single` enum will reliably
// result in UB.
Ok(None)
interp_ok(None)
}
abi::Variants::Multiple {
@ -265,7 +266,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
let tag_size = tag_layout.size(self);
let tag_val = tag_size.truncate(discr_val);
let tag = ScalarInt::try_from_uint(tag_val, tag_size).unwrap();
Ok(Some((tag, tag_field)))
interp_ok(Some((tag, tag_field)))
}
abi::Variants::Multiple {
@ -274,7 +275,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
} if untagged_variant == variant_index => {
// The untagged variant is implicitly encoded simply by having a
// value that is outside the niche variants.
Ok(None)
interp_ok(None)
}
abi::Variants::Multiple {
@ -299,7 +300,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
let tag = self
.binary_op(mir::BinOp::Add, &variant_index_relative_val, &niche_start_val)?
.to_scalar_int()?;
Ok(Some((tag, tag_field)))
interp_ok(Some((tag, tag_field)))
}
}
}


@ -19,9 +19,9 @@ use rustc_trait_selection::traits::ObligationCtxt;
use tracing::{debug, instrument, trace};
use super::{
Frame, FrameInfo, GlobalId, InterpErrorInfo, InterpResult, MPlaceTy, Machine, MemPlaceMeta,
Memory, OpTy, Place, PlaceTy, PointerArithmetic, Projectable, Provenance, err_inval,
throw_inval, throw_ub, throw_ub_custom,
Frame, FrameInfo, GlobalId, InterpError, InterpErrorInfo, InterpResult, MPlaceTy, Machine,
MemPlaceMeta, Memory, OpTy, Place, PlaceTy, PointerArithmetic, Projectable, Provenance,
err_inval, interp_ok, throw_inval, throw_ub, throw_ub_custom,
};
use crate::{ReportErrorExt, fluent_generated as fluent, util};
@ -73,7 +73,7 @@ where
}
impl<'tcx, M: Machine<'tcx>> LayoutOfHelpers<'tcx> for InterpCx<'tcx, M> {
type LayoutOfResult = InterpResult<'tcx, TyAndLayout<'tcx>>;
type LayoutOfResult = Result<TyAndLayout<'tcx>, InterpError<'tcx>>;
#[inline]
fn layout_tcx_at_span(&self) -> Span {
@ -82,29 +82,24 @@ impl<'tcx, M: Machine<'tcx>> LayoutOfHelpers<'tcx> for InterpCx<'tcx, M> {
}
#[inline]
fn handle_layout_err(
&self,
err: LayoutError<'tcx>,
_: Span,
_: Ty<'tcx>,
) -> InterpErrorInfo<'tcx> {
err_inval!(Layout(err)).into()
fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> InterpError<'tcx> {
err_inval!(Layout(err))
}
}
impl<'tcx, M: Machine<'tcx>> FnAbiOfHelpers<'tcx> for InterpCx<'tcx, M> {
type FnAbiOfResult = InterpResult<'tcx, &'tcx FnAbi<'tcx, Ty<'tcx>>>;
type FnAbiOfResult = Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, InterpError<'tcx>>;
fn handle_fn_abi_err(
&self,
err: FnAbiError<'tcx>,
_span: Span,
_fn_abi_request: FnAbiRequest<'tcx>,
) -> InterpErrorInfo<'tcx> {
) -> InterpError<'tcx> {
match err {
FnAbiError::Layout(err) => err_inval!(Layout(err)).into(),
FnAbiError::Layout(err) => err_inval!(Layout(err)),
FnAbiError::AdjustForForeignAbi(err) => {
err_inval!(FnAbiAdjustForForeignAbi(err)).into()
err_inval!(FnAbiAdjustForForeignAbi(err))
}
}
}
@ -160,7 +155,7 @@ pub(super) fn from_known_layout<'tcx>(
);
}
}
Ok(known_layout)
interp_ok(known_layout)
}
}
}
@ -262,7 +257,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
if let Some(err) = body.tainted_by_errors {
throw_inval!(AlreadyReported(ReportedErrorInfo::tainted_by_errors(err)));
}
Ok(body)
interp_ok(body)
}
/// Call this on things you got out of the MIR (so it is as generic as the current
@ -305,7 +300,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
trace!("param_env: {:#?}", self.param_env);
trace!("args: {:#?}", args);
match ty::Instance::try_resolve(*self.tcx, self.param_env, def, args) {
Ok(Some(instance)) => Ok(instance),
Ok(Some(instance)) => interp_ok(instance),
Ok(None) => throw_inval!(TooGeneric),
// FIXME(eddyb) this could be a bit more specific than `AlreadyReported`.
@ -401,7 +396,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
layout: &TyAndLayout<'tcx>,
) -> InterpResult<'tcx, Option<(Size, Align)>> {
if layout.is_sized() {
return Ok(Some((layout.size, layout.align.abi)));
return interp_ok(Some((layout.size, layout.align.abi)));
}
match layout.ty.kind() {
ty::Adt(..) | ty::Tuple(..) => {
@ -425,7 +420,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
else {
// A field with an extern type. We don't know the actual dynamic size
// or the alignment.
return Ok(None);
return interp_ok(None);
};
// # First compute the dynamic alignment
@ -456,12 +451,12 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
if full_size > self.max_size_of_val() {
throw_ub!(InvalidMeta(InvalidMetaKind::TooBig));
}
Ok(Some((full_size, full_align)))
interp_ok(Some((full_size, full_align)))
}
ty::Dynamic(expected_trait, _, ty::Dyn) => {
let vtable = metadata.unwrap_meta().to_pointer(self)?;
// Read size and align from vtable (already checks size).
Ok(Some(self.get_vtable_size_and_align(vtable, Some(expected_trait))?))
interp_ok(Some(self.get_vtable_size_and_align(vtable, Some(expected_trait))?))
}
ty::Slice(_) | ty::Str => {
@ -474,10 +469,10 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
if size > self.max_size_of_val() {
throw_ub!(InvalidMeta(InvalidMetaKind::SliceTooBig));
}
Ok(Some((size, elem.align.abi)))
interp_ok(Some((size, elem.align.abi)))
}
ty::Foreign(_) => Ok(None),
ty::Foreign(_) => interp_ok(None),
_ => span_bug!(self.cur_span(), "size_and_align_of::<{}> not supported", layout.ty),
}
@ -503,7 +498,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
pub fn return_to_block(&mut self, target: Option<mir::BasicBlock>) -> InterpResult<'tcx> {
if let Some(target) = target {
self.go_to_block(target);
Ok(())
interp_ok(())
} else {
throw_ub!(Unreachable)
}
@ -530,10 +525,10 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
M::unwind_terminate(self, reason)?;
// This might have pushed a new stack frame, or it terminated execution.
// Either way, `loc` will not be updated.
return Ok(());
return interp_ok(());
}
};
Ok(())
interp_ok(())
}
/// Call a query that can return `ErrorHandled`. Should be used for statics and other globals.


@ -26,7 +26,9 @@ use rustc_span::def_id::LocalDefId;
use rustc_span::sym;
use tracing::{instrument, trace};
use super::{AllocId, Allocation, InterpCx, MPlaceTy, Machine, MemoryKind, PlaceTy, err_ub};
use super::{
AllocId, Allocation, InterpCx, MPlaceTy, Machine, MemoryKind, PlaceTy, err_ub, interp_ok,
};
use crate::const_eval;
use crate::errors::NestedStaticInThreadLocal;
@ -307,7 +309,7 @@ pub fn intern_const_alloc_for_constprop<'tcx, T, M: CompileTimeMachine<'tcx, T>>
) -> InterpResult<'tcx, ()> {
if ecx.tcx.try_get_global_alloc(alloc_id).is_some() {
// The constant is already in global memory. Do nothing.
return Ok(());
return interp_ok(());
}
// Move allocation to `tcx`.
if let Some(_) =
@ -318,7 +320,7 @@ pub fn intern_const_alloc_for_constprop<'tcx, T, M: CompileTimeMachine<'tcx, T>>
// proper recursive interning loop -- or just call `intern_const_alloc_recursive`.
panic!("`intern_const_alloc_for_constprop` called on allocation with nested provenance")
}
Ok(())
interp_ok(())
}
impl<'tcx, M: super::intern::CompileTimeMachine<'tcx, !>> InterpCx<'tcx, M> {
@ -342,6 +344,6 @@ impl<'tcx, M: super::intern::CompileTimeMachine<'tcx, !>> InterpCx<'tcx, M> {
panic!("`intern_with_temp_alloc` with nested allocations");
}
}
Ok(alloc_id)
interp_ok(alloc_id)
}
}


@ -18,7 +18,7 @@ use super::util::ensure_monomorphic_enough;
use super::{
Allocation, CheckInAllocMsg, ConstAllocation, GlobalId, ImmTy, InterpCx, InterpResult,
MPlaceTy, Machine, OpTy, Pointer, PointerArithmetic, Provenance, Scalar, err_inval,
err_ub_custom, err_unsup_format, throw_inval, throw_ub_custom, throw_ub_format,
err_ub_custom, err_unsup_format, interp_ok, throw_inval, throw_ub_custom, throw_ub_format,
};
use crate::fluent_generated as fluent;
@ -39,7 +39,7 @@ pub(crate) fn eval_nullary_intrinsic<'tcx>(
) -> InterpResult<'tcx, ConstValue<'tcx>> {
let tp_ty = args.type_at(0);
let name = tcx.item_name(def_id);
Ok(match name {
interp_ok(match name {
sym::type_name => {
ensure_monomorphic_enough(tcx, tp_ty)?;
let alloc = alloc_type_name(tcx, tp_ty);
@ -329,6 +329,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
fluent::const_eval_offset_from_different_allocations,
name = intrinsic_name,
)
.into()
})?;
// Perform division by size to compute return value.
@ -378,7 +379,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
M::panic_nounwind(self, &msg)?;
// Skip the `return_to_block` at the end (we panicked, we do not return).
return Ok(true);
return interp_ok(true);
}
}
sym::simd_insert => {
@ -438,12 +439,12 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
}
// Unsupported intrinsic: skip the return_to_block below.
_ => return Ok(false),
_ => return interp_ok(false),
}
trace!("{:?}", self.dump_place(&dest.clone().into()));
self.return_to_block(ret)?;
Ok(true)
interp_ok(true)
}
pub(super) fn eval_nondiverging_intrinsic(
@ -457,7 +458,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
if !cond {
throw_ub_custom!(fluent::const_eval_assume_false);
}
Ok(())
interp_ok(())
}
NonDivergingIntrinsic::CopyNonOverlapping(mir::CopyNonOverlapping {
count,
@ -499,7 +500,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
}
_ => bug!("not a numeric intrinsic: {}", name),
};
Ok(Scalar::from_uint(bits_out, ret_layout.size))
interp_ok(Scalar::from_uint(bits_out, ret_layout.size))
}
pub fn exact_div(
@ -540,7 +541,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
let (val, overflowed) =
self.binary_op(mir_op.wrapping_to_overflowing().unwrap(), l, r)?.to_scalar_pair();
Ok(if overflowed.to_bool()? {
interp_ok(if overflowed.to_bool()? {
let size = l.layout.size;
if l.layout.abi.is_signed() {
// For signed ints the saturated value depends on the sign of the first
@ -582,7 +583,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// The offset must be in bounds starting from `ptr`.
self.check_ptr_access_signed(ptr, offset_bytes, CheckInAllocMsg::PointerArithmeticTest)?;
// This also implies that there is no overflow, so we are done.
Ok(ptr.wrapping_signed_offset(offset_bytes, self))
interp_ok(ptr.wrapping_signed_offset(offset_bytes, self))
}
/// Copy `count*size_of::<T>()` many bytes from `*src` to `*dst`.
@ -628,7 +629,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
self.copy_op(&right, &left)?;
self.copy_op(&temp, &right)?;
self.deallocate_ptr(temp.ptr(), None, kind)?;
Ok(())
interp_ok(())
}
pub fn write_bytes_intrinsic(
@ -669,7 +670,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// `Ordering`'s discriminants are -1/0/+1, so casting does the right thing.
let result = Ord::cmp(left_bytes, right_bytes) as i32;
Ok(Scalar::from_i32(result))
interp_ok(Scalar::from_i32(result))
}
pub(crate) fn raw_eq_intrinsic(
@ -687,13 +688,13 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
this.check_ptr_align(ptr, layout.align.abi)?;
let Some(alloc_ref) = self.get_ptr_alloc(ptr, layout.size)? else {
// zero-sized access
return Ok(&[]);
return interp_ok(&[]);
};
alloc_ref.get_bytes_strip_provenance()
};
let lhs_bytes = get_bytes(self, lhs)?;
let rhs_bytes = get_bytes(self, rhs)?;
Ok(Scalar::from_bool(lhs_bytes == rhs_bytes))
interp_ok(Scalar::from_bool(lhs_bytes == rhs_bytes))
}
}


@ -20,7 +20,8 @@ use rustc_target::spec::abi::Abi as CallAbi;
use super::{
AllocBytes, AllocId, AllocKind, AllocRange, Allocation, CTFE_ALLOC_SALT, ConstAllocation,
CtfeProvenance, FnArg, Frame, ImmTy, InterpCx, InterpResult, MPlaceTy, MemoryKind,
Misalignment, OpTy, PlaceTy, Pointer, Provenance, RangeSet, throw_unsup, throw_unsup_format,
Misalignment, OpTy, PlaceTy, Pointer, Provenance, RangeSet, interp_ok, throw_unsup,
throw_unsup_format,
};
/// Data returned by [`Machine::after_stack_pop`], and consumed by
@ -185,7 +186,7 @@ pub trait Machine<'tcx>: Sized {
ecx: &InterpCx<'tcx, Self>,
instance: ty::InstanceKind<'tcx>,
) -> InterpResult<'tcx, &'tcx mir::Body<'tcx>> {
Ok(ecx.tcx.instance_mir(instance))
interp_ok(ecx.tcx.instance_mir(instance))
}
/// Entry point to all function calls.
@ -280,7 +281,7 @@ pub trait Machine<'tcx>: Sized {
/// Called before a basic block terminator is executed.
#[inline]
fn before_terminator(_ecx: &mut InterpCx<'tcx, Self>) -> InterpResult<'tcx> {
Ok(())
interp_ok(())
}
/// Determines the result of a `NullaryOp::UbChecks` invocation.
@ -290,7 +291,7 @@ pub trait Machine<'tcx>: Sized {
/// You can use this to detect long or endlessly running programs.
#[inline]
fn increment_const_eval_counter(_ecx: &mut InterpCx<'tcx, Self>) -> InterpResult<'tcx> {
Ok(())
interp_ok(())
}
/// Called before a global allocation is accessed.
@ -304,7 +305,7 @@ pub trait Machine<'tcx>: Sized {
_static_def_id: Option<DefId>,
_is_write: bool,
) -> InterpResult<'tcx> {
Ok(())
interp_ok(())
}
/// Return the `AllocId` for the given thread-local static in the current thread.
@ -422,7 +423,7 @@ pub trait Machine<'tcx>: Sized {
_prov: (AllocId, Self::ProvenanceExtra),
_range: AllocRange,
) -> InterpResult<'tcx> {
Ok(())
interp_ok(())
}
/// Hook for performing extra checks on any memory read access,
@ -433,7 +434,7 @@ pub trait Machine<'tcx>: Sized {
/// Used to prevent statics from self-initializing by reading from their own memory
/// as it is being initialized.
fn before_alloc_read(_ecx: &InterpCx<'tcx, Self>, _alloc_id: AllocId) -> InterpResult<'tcx> {
Ok(())
interp_ok(())
}
/// Hook for performing extra checks on a memory write access.
@ -446,7 +447,7 @@ pub trait Machine<'tcx>: Sized {
_prov: (AllocId, Self::ProvenanceExtra),
_range: AllocRange,
) -> InterpResult<'tcx> {
Ok(())
interp_ok(())
}
/// Hook for performing extra operations on a memory deallocation.
@ -460,7 +461,7 @@ pub trait Machine<'tcx>: Sized {
_align: Align,
_kind: MemoryKind<Self::MemoryKind>,
) -> InterpResult<'tcx> {
Ok(())
interp_ok(())
}
/// Executes a retagging operation for a single pointer.
@ -471,7 +472,7 @@ pub trait Machine<'tcx>: Sized {
_kind: mir::RetagKind,
val: &ImmTy<'tcx, Self::Provenance>,
) -> InterpResult<'tcx, ImmTy<'tcx, Self::Provenance>> {
Ok(val.clone())
interp_ok(val.clone())
}
/// Executes a retagging operation on a compound value.
@ -482,7 +483,7 @@ pub trait Machine<'tcx>: Sized {
_kind: mir::RetagKind,
_place: &PlaceTy<'tcx, Self::Provenance>,
) -> InterpResult<'tcx> {
Ok(())
interp_ok(())
}
/// Called on places used for in-place function argument and return value handling.
@ -516,7 +517,7 @@ pub trait Machine<'tcx>: Sized {
/// Called immediately after a stack frame got pushed and its locals got initialized.
fn after_stack_push(_ecx: &mut InterpCx<'tcx, Self>) -> InterpResult<'tcx> {
Ok(())
interp_ok(())
}
/// Called just before the return value is copied to the caller-provided return place.
@ -524,7 +525,7 @@ pub trait Machine<'tcx>: Sized {
_ecx: &InterpCx<'tcx, Self>,
_frame: &Frame<'tcx, Self::Provenance, Self::FrameExtra>,
) -> InterpResult<'tcx> {
Ok(())
interp_ok(())
}
/// Called immediately after a stack frame got popped, but before jumping back to the caller.
@ -537,14 +538,14 @@ pub trait Machine<'tcx>: Sized {
) -> InterpResult<'tcx, ReturnAction> {
// By default, we do not support unwinding from panics
assert!(!unwinding);
Ok(ReturnAction::Normal)
interp_ok(ReturnAction::Normal)
}
/// Called immediately after an "immediate" local variable is read
/// (i.e., this is called for reads that do not end up accessing addressable memory).
#[inline(always)]
fn after_local_read(_ecx: &InterpCx<'tcx, Self>, _local: mir::Local) -> InterpResult<'tcx> {
Ok(())
interp_ok(())
}
/// Called immediately after an "immediate" local variable is assigned a new value
@ -556,7 +557,7 @@ pub trait Machine<'tcx>: Sized {
_local: mir::Local,
_storage_live: bool,
) -> InterpResult<'tcx> {
Ok(())
interp_ok(())
}
/// Called immediately after actual memory was allocated for a local
@ -567,7 +568,7 @@ pub trait Machine<'tcx>: Sized {
_local: mir::Local,
_mplace: &MPlaceTy<'tcx, Self::Provenance>,
) -> InterpResult<'tcx> {
Ok(())
interp_ok(())
}
/// Evaluate the given constant. The `eval` function will do all the required evaluation,
@ -645,7 +646,7 @@ pub macro compile_time_machine(<$tcx: lifetime>) {
) -> InterpResult<$tcx> {
// For now we don't do any checking here. We can't use `tcx.sess` because that can differ
// between crates, and we need to ensure that const-eval always behaves the same.
Ok(())
interp_ok(())
}
#[inline(always)]
@ -665,7 +666,7 @@ pub macro compile_time_machine(<$tcx: lifetime>) {
fn ub_checks(_ecx: &InterpCx<$tcx, Self>) -> InterpResult<$tcx, bool> {
// We can't look at `tcx.sess` here as that can differ across crates, which can lead to
// unsound differences in evaluating the same constant at different instantiation sites.
Ok(true)
interp_ok(true)
}
#[inline(always)]
@ -675,7 +676,7 @@ pub macro compile_time_machine(<$tcx: lifetime>) {
alloc: &'b Allocation,
) -> InterpResult<$tcx, Cow<'b, Allocation<Self::Provenance>>> {
// Overwrite default implementation: no need to adjust anything.
Ok(Cow::Borrowed(alloc))
interp_ok(Cow::Borrowed(alloc))
}
fn init_alloc_extra(
@ -685,7 +686,7 @@ pub macro compile_time_machine(<$tcx: lifetime>) {
_size: Size,
_align: Align,
) -> InterpResult<$tcx, Self::AllocExtra> {
Ok(())
interp_ok(())
}
fn extern_static_pointer(
@ -693,7 +694,7 @@ pub macro compile_time_machine(<$tcx: lifetime>) {
def_id: DefId,
) -> InterpResult<$tcx, Pointer> {
// Use the `AllocId` associated with the `DefId`. Any actual *access* will fail.
Ok(Pointer::new(ecx.tcx.reserve_and_set_static_alloc(def_id).into(), Size::ZERO))
interp_ok(Pointer::new(ecx.tcx.reserve_and_set_static_alloc(def_id).into(), Size::ZERO))
}
#[inline(always)]
@ -702,7 +703,7 @@ pub macro compile_time_machine(<$tcx: lifetime>) {
ptr: Pointer<CtfeProvenance>,
_kind: Option<MemoryKind<Self::MemoryKind>>,
) -> InterpResult<$tcx, Pointer<CtfeProvenance>> {
Ok(ptr)
interp_ok(ptr)
}
#[inline(always)]
@ -713,7 +714,7 @@ pub macro compile_time_machine(<$tcx: lifetime>) {
// Allow these casts, but make the pointer not dereferenceable.
// (I.e., they behave like transmutation.)
// This is correct because no pointers can ever be exposed in compile-time evaluation.
Ok(Pointer::from_addr_invalid(addr))
interp_ok(Pointer::from_addr_invalid(addr))
}
#[inline(always)]


@ -23,7 +23,7 @@ use tracing::{debug, instrument, trace};
use super::{
AllocBytes, AllocId, AllocMap, AllocRange, Allocation, CheckAlignMsg, CheckInAllocMsg,
CtfeProvenance, GlobalAlloc, InterpCx, InterpResult, Machine, MayLeak, Misalignment, Pointer,
PointerArithmetic, Provenance, Scalar, alloc_range, err_ub, err_ub_custom, throw_ub,
PointerArithmetic, Provenance, Scalar, alloc_range, err_ub, err_ub_custom, interp_ok, throw_ub,
throw_ub_custom, throw_unsup, throw_unsup_format,
};
use crate::fluent_generated as fluent;
@ -82,7 +82,7 @@ pub enum FnVal<'tcx, Other> {
impl<'tcx, Other> FnVal<'tcx, Other> {
pub fn as_instance(self) -> InterpResult<'tcx, Instance<'tcx>> {
match self {
FnVal::Instance(instance) => Ok(instance),
FnVal::Instance(instance) => interp_ok(instance),
FnVal::Other(_) => {
throw_unsup_format!("'foreign' function pointers are not supported in this context")
}
@ -284,7 +284,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
self.mem_copy(ptr, new_ptr.into(), old_size.min(new_size), /*nonoverlapping*/ true)?;
self.deallocate_ptr(ptr, old_size_and_align, kind)?;
Ok(new_ptr)
interp_ok(new_ptr)
}
#[instrument(skip(self), level = "debug")]
@ -330,8 +330,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
)
}
None => err_ub!(PointerUseAfterFree(alloc_id, CheckInAllocMsg::MemoryAccessTest)),
}
.into());
})
.into();
};
if alloc.mutability.is_not() {
@ -376,7 +376,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
bug!("Nothing can be deallocated twice");
}
Ok(())
interp_ok(())
}
/// Internal helper function to determine the allocation and offset of a pointer (if any).
@ -395,7 +395,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|this, alloc_id, offset, prov| {
let (size, align) = this
.get_live_alloc_size_and_align(alloc_id, CheckInAllocMsg::MemoryAccessTest)?;
Ok((size, align, (alloc_id, offset, prov)))
interp_ok((size, align, (alloc_id, offset, prov)))
},
)
}
@ -412,9 +412,9 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
let size = i64::try_from(size.bytes()).unwrap(); // it would be an error to even ask for more than isize::MAX bytes
Self::check_and_deref_ptr(self, ptr, size, msg, |this, alloc_id, _, _| {
let (size, align) = this.get_live_alloc_size_and_align(alloc_id, msg)?;
Ok((size, align, ()))
interp_ok((size, align, ()))
})?;
Ok(())
interp_ok(())
}
/// Check whether the given pointer points to live memory for a signed amount of bytes.
@ -428,9 +428,9 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
) -> InterpResult<'tcx> {
Self::check_and_deref_ptr(self, ptr, size, msg, |this, alloc_id, _, _| {
let (size, align) = this.get_live_alloc_size_and_align(alloc_id, msg)?;
Ok((size, align, ()))
interp_ok((size, align, ()))
})?;
Ok(())
interp_ok(())
}
/// Low-level helper function to check if a ptr is in-bounds and potentially return a reference
@ -455,10 +455,10 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
) -> InterpResult<'tcx, Option<T>> {
// Everything is okay with size 0.
if size == 0 {
return Ok(None);
return interp_ok(None);
}
Ok(match this.borrow().ptr_try_get_alloc_id(ptr, size) {
interp_ok(match this.borrow().ptr_try_get_alloc_id(ptr, size) {
Err(addr) => {
// We couldn't get a proper allocation.
throw_ub!(DanglingIntPointer { addr, inbounds_size: size, msg });
@ -498,7 +498,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
if let Some(misaligned) = misaligned {
throw_ub!(AlignmentCheckFailed(misaligned, msg))
}
Ok(())
interp_ok(())
}
pub(super) fn is_ptr_misaligned(
@ -634,7 +634,9 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// `get_global_alloc` that we can actually use directly without inserting anything anywhere.
// So the error type is `InterpResult<'tcx, &Allocation<M::Provenance>>`.
let a = self.memory.alloc_map.get_or(id, || {
let alloc = self.get_global_alloc(id, /*is_write*/ false).map_err(Err)?;
// We have to funnel the `InterpErrorInfo` through a `Result` to match the `get_or` API,
// so we use `report_err` for that.
let alloc = self.get_global_alloc(id, /*is_write*/ false).report_err().map_err(Err)?;
match alloc {
Cow::Borrowed(alloc) => {
// We got a ref, cheaply return that as an "error" so that the
@ -653,8 +655,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
});
// Now unpack that funny error type
match a {
Ok(a) => Ok(&a.1),
Err(a) => a,
Ok(a) => interp_ok(&a.1),
Err(a) => a.into(),
}
}
@ -662,7 +664,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
/// The caller is responsible for calling the access hooks!
pub fn get_alloc_bytes_unchecked_raw(&self, id: AllocId) -> InterpResult<'tcx, *const u8> {
let alloc = self.get_alloc_raw(id)?;
Ok(alloc.get_bytes_unchecked_raw())
interp_ok(alloc.get_bytes_unchecked_raw())
}
/// Bounds-checked *but not align-checked* allocation access.
@ -680,7 +682,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
CheckInAllocMsg::MemoryAccessTest,
|this, alloc_id, offset, prov| {
let alloc = this.get_alloc_raw(alloc_id)?;
Ok((alloc.size(), alloc.align, (alloc_id, offset, prov, alloc)))
interp_ok((alloc.size(), alloc.align, (alloc_id, offset, prov, alloc)))
},
)?;
// We want to call the hook on *all* accesses that involve an AllocId, including zero-sized
@ -703,20 +705,20 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
range,
)?;
}
Ok(Some(AllocRef { alloc, range, tcx: *self.tcx, alloc_id }))
interp_ok(Some(AllocRef { alloc, range, tcx: *self.tcx, alloc_id }))
} else {
Ok(None)
interp_ok(None)
}
}
/// Return the `extra` field of the given allocation.
pub fn get_alloc_extra<'a>(&'a self, id: AllocId) -> InterpResult<'tcx, &'a M::AllocExtra> {
Ok(&self.get_alloc_raw(id)?.extra)
interp_ok(&self.get_alloc_raw(id)?.extra)
}
/// Return the `mutability` field of the given allocation.
pub fn get_alloc_mutability<'a>(&'a self, id: AllocId) -> InterpResult<'tcx, Mutability> {
Ok(self.get_alloc_raw(id)?.mutability)
interp_ok(self.get_alloc_raw(id)?.mutability)
}
/// Gives raw mutable access to the `Allocation`, without bounds or alignment checks.
@ -750,7 +752,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
if alloc.mutability.is_not() {
throw_ub!(WriteToReadOnly(id))
}
Ok((alloc, &mut self.machine))
interp_ok((alloc, &mut self.machine))
}
/// Gives raw, mutable access to the `Allocation` address, without bounds or alignment checks.
@ -760,7 +762,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
id: AllocId,
) -> InterpResult<'tcx, *mut u8> {
let alloc = self.get_alloc_raw_mut(id)?.0;
Ok(alloc.get_bytes_unchecked_raw_mut())
interp_ok(alloc.get_bytes_unchecked_raw_mut())
}
/// Bounds-checked *but not align-checked* allocation access.
@ -781,7 +783,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
CheckInAllocMsg::MemoryAccessTest,
|this, alloc_id, offset, prov| {
let (alloc, machine) = this.get_alloc_raw_mut(alloc_id)?;
Ok((alloc.size(), alloc.align, (alloc_id, offset, prov, alloc, machine)))
interp_ok((alloc.size(), alloc.align, (alloc_id, offset, prov, alloc, machine)))
},
)?;
@ -790,9 +792,9 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
if !validation_in_progress {
M::before_memory_write(tcx, machine, &mut alloc.extra, (alloc_id, prov), range)?;
}
Ok(Some(AllocRefMut { alloc, range, tcx: *tcx, alloc_id }))
interp_ok(Some(AllocRefMut { alloc, range, tcx: *tcx, alloc_id }))
} else {
Ok(None)
interp_ok(None)
}
}
@ -802,7 +804,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
id: AllocId,
) -> InterpResult<'tcx, (&'a mut M::AllocExtra, &'a mut M)> {
let (alloc, machine) = self.get_alloc_raw_mut(id)?;
Ok((&mut alloc.extra, machine))
interp_ok((&mut alloc.extra, machine))
}
/// Check whether an allocation is live. This is faster than calling
@ -904,7 +906,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
if matches!(kind, AllocKind::Dead) {
throw_ub!(PointerUseAfterFree(id, msg))
}
Ok((size, align))
interp_ok((size, align))
}
fn get_fn_alloc(&self, id: AllocId) -> Option<FnVal<'tcx, M::ExtraFnVal>> {
@ -928,7 +930,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
throw_ub!(InvalidFunctionPointer(Pointer::new(alloc_id, offset)))
}
self.get_fn_alloc(alloc_id)
.ok_or_else(|| err_ub!(InvalidFunctionPointer(Pointer::new(alloc_id, offset))).into())
.ok_or_else(|| err_ub!(InvalidFunctionPointer(Pointer::new(alloc_id, offset))))
.into()
}
/// Get the dynamic type of the given vtable pointer.
@ -951,12 +954,12 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
if let Some(expected_dyn_type) = expected_trait {
self.check_vtable_for_type(vtable_dyn_type, expected_dyn_type)?;
}
Ok(ty)
interp_ok(ty)
}
pub fn alloc_mark_immutable(&mut self, id: AllocId) -> InterpResult<'tcx> {
self.get_alloc_raw_mut(id)?.0.mutability = Mutability::Not;
Ok(())
interp_ok(())
}
/// Create a lazy debug printer that prints the given allocation and all allocations it points
@ -1144,10 +1147,11 @@ impl<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>
pub fn write_scalar(&mut self, range: AllocRange, val: Scalar<Prov>) -> InterpResult<'tcx> {
let range = self.range.subrange(range);
debug!("write_scalar at {:?}{range:?}: {val:?}", self.alloc_id);
Ok(self
.alloc
self.alloc
.write_scalar(&self.tcx, range, val)
.map_err(|e| e.to_interp_error(self.alloc_id))?)
.map_err(|e| e.to_interp_error(self.alloc_id))
.into()
}
/// `offset` is relative to this allocation reference, not the base of the allocation.
@ -1158,26 +1162,27 @@ impl<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>
/// Mark the given sub-range (relative to this allocation reference) as uninitialized.
pub fn write_uninit(&mut self, range: AllocRange) -> InterpResult<'tcx> {
let range = self.range.subrange(range);
Ok(self
.alloc
self.alloc
.write_uninit(&self.tcx, range)
.map_err(|e| e.to_interp_error(self.alloc_id))?)
.map_err(|e| e.to_interp_error(self.alloc_id))
.into()
}
/// Mark the entire referenced range as uninitialized
pub fn write_uninit_full(&mut self) -> InterpResult<'tcx> {
Ok(self
.alloc
self.alloc
.write_uninit(&self.tcx, self.range)
.map_err(|e| e.to_interp_error(self.alloc_id))?)
.map_err(|e| e.to_interp_error(self.alloc_id))
.into()
}
/// Remove all provenance in the reference range.
pub fn clear_provenance(&mut self) -> InterpResult<'tcx> {
Ok(self
.alloc
self.alloc
.clear_provenance(&self.tcx, self.range)
.map_err(|e| e.to_interp_error(self.alloc_id))?)
.map_err(|e| e.to_interp_error(self.alloc_id))
.into()
}
}
@ -1189,12 +1194,10 @@ impl<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes> AllocRef<'a, 'tcx, Pr
read_provenance: bool,
) -> InterpResult<'tcx, Scalar<Prov>> {
let range = self.range.subrange(range);
let res = self
.alloc
self.alloc
.read_scalar(&self.tcx, range, read_provenance)
.map_err(|e| e.to_interp_error(self.alloc_id))?;
debug!("read_scalar at {:?}{range:?}: {res:?}", self.alloc_id);
Ok(res)
.map_err(|e| e.to_interp_error(self.alloc_id))
.into()
}
/// `range` is relative to this allocation reference, not the base of the allocation.
@ -1212,10 +1215,10 @@ impl<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes> AllocRef<'a, 'tcx, Pr
/// `range` is relative to this allocation reference, not the base of the allocation.
pub fn get_bytes_strip_provenance<'b>(&'b self) -> InterpResult<'tcx, &'a [u8]> {
Ok(self
.alloc
self.alloc
.get_bytes_strip_provenance(&self.tcx, self.range)
.map_err(|e| e.to_interp_error(self.alloc_id))?)
.map_err(|e| e.to_interp_error(self.alloc_id))
.into()
}
/// Returns whether the allocation has provenance anywhere in the range of the `AllocRef`.
@ -1236,14 +1239,16 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
) -> InterpResult<'tcx, &[u8]> {
let Some(alloc_ref) = self.get_ptr_alloc(ptr, size)? else {
// zero-sized access
return Ok(&[]);
return interp_ok(&[]);
};
// Side-step AllocRef and directly access the underlying bytes more efficiently.
// (We are staying inside the bounds here so all is good.)
Ok(alloc_ref
interp_ok(
alloc_ref
.alloc
.get_bytes_strip_provenance(&alloc_ref.tcx, alloc_ref.range)
.map_err(|e| e.to_interp_error(alloc_ref.alloc_id))?)
.map_err(|e| e.to_interp_error(alloc_ref.alloc_id))?,
)
}
/// Writes the given stream of bytes into memory.
@ -1263,7 +1268,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
let Some(alloc_ref) = self.get_ptr_alloc_mut(ptr, size)? else {
// zero-sized access
assert_matches!(src.next(), None, "iterator said it was empty but returned an element");
return Ok(());
return interp_ok(());
};
// Side-step AllocRef and directly access the underlying bytes more efficiently.
@ -1279,7 +1284,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
*dest = src.next().expect("iterator was shorter than it said it would be");
}
assert_matches!(src.next(), None, "iterator was longer than it said it would be");
Ok(())
interp_ok(())
}
pub fn mem_copy(
@ -1316,7 +1321,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// Source alloc preparations and access hooks.
let Some((src_alloc_id, src_offset, src_prov)) = src_parts else {
// Zero-sized *source*, that means dest is also zero-sized and we have nothing to do.
return Ok(());
return interp_ok(());
};
let src_alloc = self.get_alloc_raw(src_alloc_id)?;
let src_range = alloc_range(src_offset, size);
@ -1332,7 +1337,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// We already did the source checks and called the hooks so we are good to return early.
let Some((dest_alloc_id, dest_offset, dest_prov)) = dest_parts else {
// Zero-sized *destination*.
return Ok(());
return interp_ok(());
};
// Prepare getting source provenance.
@ -1375,7 +1380,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
.write_uninit(&tcx, dest_range)
.map_err(|e| e.to_interp_error(dest_alloc_id))?;
// We can forget about the provenance, this is all not initialized anyway.
return Ok(());
return interp_ok(());
}
// SAFE: The above indexing would have panicked if there weren't at least `size` bytes
@ -1432,7 +1437,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// copy the provenance to the destination
dest_alloc.provenance_apply_copy(provenance);
Ok(())
interp_ok(())
}
}
@ -1441,7 +1446,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
/// Test if this value might be null.
/// If the machine does not support ptr-to-int casts, this is conservative.
pub fn scalar_may_be_null(&self, scalar: Scalar<M::Provenance>) -> InterpResult<'tcx, bool> {
Ok(match scalar.try_to_scalar_int() {
interp_ok(match scalar.try_to_scalar_int() {
Ok(int) => int.is_null(),
Err(_) => {
// Can only happen during CTFE.
@ -1508,13 +1513,14 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
ptr: Pointer<Option<M::Provenance>>,
size: i64,
) -> InterpResult<'tcx, (AllocId, Size, M::ProvenanceExtra)> {
self.ptr_try_get_alloc_id(ptr, size).map_err(|offset| {
self.ptr_try_get_alloc_id(ptr, size)
.map_err(|offset| {
err_ub!(DanglingIntPointer {
addr: offset,
inbounds_size: size,
msg: CheckInAllocMsg::InboundsTest
})
.into()
})
.into()
}
}

View File

@ -16,7 +16,7 @@ use tracing::trace;
use super::{
CtfeProvenance, InterpCx, InterpResult, MPlaceTy, Machine, MemPlace, MemPlaceMeta, OffsetMode,
PlaceTy, Pointer, Projectable, Provenance, Scalar, alloc_range, err_ub, from_known_layout,
mir_assign_valid_types, throw_ub,
interp_ok, mir_assign_valid_types, throw_ub,
};
/// An `Immediate` represents a single immediate self-contained Rust value.
@ -149,7 +149,7 @@ impl<Prov: Provenance> Immediate<Prov> {
}
Immediate::Uninit => {}
}
Ok(())
interp_ok(())
}
}
@ -307,7 +307,7 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
data_size: s.size().bytes(),
}));
}
Ok(s)
interp_ok(s)
}
#[inline]
@ -430,7 +430,7 @@ impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for ImmTy<'tcx, Prov> {
ecx: &InterpCx<'tcx, M>,
) -> InterpResult<'tcx, Self> {
assert_matches!(meta, MemPlaceMeta::None); // we can't store this anywhere anyway
Ok(self.offset_(offset, layout, ecx))
interp_ok(self.offset_(offset, layout, ecx))
}
#[inline(always)]
@ -438,7 +438,7 @@ impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for ImmTy<'tcx, Prov> {
&self,
_ecx: &InterpCx<'tcx, M>,
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
Ok(self.clone().into())
interp_ok(self.clone().into())
}
}
@ -514,11 +514,13 @@ impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for OpTy<'tcx, Prov> {
ecx: &InterpCx<'tcx, M>,
) -> InterpResult<'tcx, Self> {
match self.as_mplace_or_imm() {
Left(mplace) => Ok(mplace.offset_with_meta(offset, mode, meta, layout, ecx)?.into()),
Left(mplace) => {
interp_ok(mplace.offset_with_meta(offset, mode, meta, layout, ecx)?.into())
}
Right(imm) => {
assert_matches!(meta, MemPlaceMeta::None); // no place to store metadata here
// Every part of an uninit is uninit.
Ok(imm.offset_(offset, layout, ecx).into())
interp_ok(imm.offset_(offset, layout, ecx).into())
}
}
}
@ -528,7 +530,7 @@ impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for OpTy<'tcx, Prov> {
&self,
_ecx: &InterpCx<'tcx, M>,
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
Ok(self.clone())
interp_ok(self.clone())
}
}
@ -543,12 +545,12 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
) -> InterpResult<'tcx, Option<ImmTy<'tcx, M::Provenance>>> {
if mplace.layout.is_unsized() {
// Don't touch unsized
return Ok(None);
return interp_ok(None);
}
let Some(alloc) = self.get_place_alloc(mplace)? else {
// zero-sized type can be left uninit
return Ok(Some(ImmTy::uninit(mplace.layout)));
return interp_ok(Some(ImmTy::uninit(mplace.layout)));
};
// It may seem like all types with `Scalar` or `ScalarPair` ABI are fair game at this point.
@ -557,7 +559,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// case where some of the bytes are initialized and others are not. So, we need an extra
// check that walks over the type of `mplace` to make sure it is truly correct to treat this
// like a `Scalar` (or `ScalarPair`).
Ok(match mplace.layout.abi {
interp_ok(match mplace.layout.abi {
Abi::Scalar(abi::Scalar::Initialized { value: s, .. }) => {
let size = s.size(self);
assert_eq!(size, mplace.layout.size, "abi::Scalar size does not match layout size");
@ -606,7 +608,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
&self,
src: &impl Projectable<'tcx, M::Provenance>,
) -> InterpResult<'tcx, Either<MPlaceTy<'tcx, M::Provenance>, ImmTy<'tcx, M::Provenance>>> {
Ok(match src.to_op(self)?.as_mplace_or_imm() {
interp_ok(match src.to_op(self)?.as_mplace_or_imm() {
Left(ref mplace) => {
if let Some(val) = self.read_immediate_from_mplace_raw(mplace)? {
Right(val)
@ -637,7 +639,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
if matches!(*imm, Immediate::Uninit) {
throw_ub!(InvalidUninitBytes(None));
}
Ok(imm)
interp_ok(imm)
}
/// Read a scalar from a place
@ -645,7 +647,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
&self,
op: &impl Projectable<'tcx, M::Provenance>,
) -> InterpResult<'tcx, Scalar<M::Provenance>> {
Ok(self.read_immediate(op)?.to_scalar())
interp_ok(self.read_immediate(op)?.to_scalar())
}
// Pointer-sized reads are fairly common and need target layout access, so we wrap them in
@ -678,7 +680,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
let len = mplace.len(self)?;
let bytes = self.read_bytes_ptr_strip_provenance(mplace.ptr(), Size::from_bytes(len))?;
let str = std::str::from_utf8(bytes).map_err(|err| err_ub!(InvalidStr(err)))?;
Ok(str)
interp_ok(str)
}
/// Read from a local of the current frame.
@ -698,7 +700,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
assert!(!layout.is_unsized());
}
M::after_local_read(self, local)?;
Ok(OpTy { op, layout })
interp_ok(OpTy { op, layout })
}
/// Every place can be read from, so we can turn them into an operand.
@ -709,12 +711,12 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
place: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
match place.as_mplace_or_local() {
Left(mplace) => Ok(mplace.into()),
Left(mplace) => interp_ok(mplace.into()),
Right((local, offset, locals_addr, _)) => {
debug_assert!(place.layout.is_sized()); // only sized locals can ever be `Place::Local`.
debug_assert_eq!(locals_addr, self.frame().locals_addr());
let base = self.local_to_op(local, None)?;
Ok(match offset {
interp_ok(match offset {
Some(offset) => base.offset(offset, place.layout, self)?,
None => {
// In the common case this hasn't been projected.
@ -764,7 +766,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
)
}
}
Ok(op)
interp_ok(op)
}
/// Evaluate the operand, returning a place where you can then find the data.
@ -794,7 +796,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
}
};
trace!("{:?}: {:?}", mir_op, op);
Ok(op)
interp_ok(op)
}
pub(crate) fn const_val_to_op(
@ -805,12 +807,13 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
// Other cases need layout.
let adjust_scalar = |scalar| -> InterpResult<'tcx, _> {
Ok(match scalar {
interp_ok(match scalar {
Scalar::Ptr(ptr, size) => Scalar::Ptr(self.global_root_pointer(ptr)?, size),
Scalar::Int(int) => Scalar::Int(int),
})
};
let layout = from_known_layout(self.tcx, self.param_env, layout, || self.layout_of(ty))?;
let layout =
from_known_layout(self.tcx, self.param_env, layout, || self.layout_of(ty).into())?;
let imm = match val_val {
mir::ConstValue::Indirect { alloc_id, offset } => {
// This is const data, no mutation allowed.
@ -818,7 +821,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
CtfeProvenance::from(alloc_id).as_immutable(),
offset,
))?;
return Ok(self.ptr_to_mplace(ptr.into(), layout).into());
return interp_ok(self.ptr_to_mplace(ptr.into(), layout).into());
}
mir::ConstValue::Scalar(x) => adjust_scalar(x)?.into(),
mir::ConstValue::ZeroSized => Immediate::Uninit,
@ -829,7 +832,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
Immediate::new_slice(self.global_root_pointer(ptr)?.into(), meta, self)
}
};
Ok(OpTy { op: Operand::Immediate(imm), layout })
interp_ok(OpTy { op: Operand::Immediate(imm), layout })
}
}

View File

@ -9,7 +9,7 @@ use rustc_span::symbol::sym;
use rustc_target::abi::Size;
use tracing::trace;
use super::{ImmTy, InterpCx, Machine, MemPlaceMeta, throw_ub};
use super::{ImmTy, InterpCx, Machine, MemPlaceMeta, interp_ok, throw_ub};
impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
fn three_way_compare<T: Ord>(&self, lhs: T, rhs: T) -> ImmTy<'tcx, M::Provenance> {
@ -156,7 +156,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
});
}
return Ok(ImmTy::from_scalar_int(result, left.layout));
return interp_ok(ImmTy::from_scalar_int(result, left.layout));
}
// For the remaining ops, the types must be the same on both sides
@ -181,10 +181,10 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
_ => None,
};
if let Some(op) = op {
return Ok(ImmTy::from_bool(op(&l_signed(), &r_signed()), *self.tcx));
return interp_ok(ImmTy::from_bool(op(&l_signed(), &r_signed()), *self.tcx));
}
if bin_op == Cmp {
return Ok(self.three_way_compare(l_signed(), r_signed()));
return interp_ok(self.three_way_compare(l_signed(), r_signed()));
}
let op: Option<fn(i128, i128) -> (i128, bool)> = match bin_op {
Div if r.is_null() => throw_ub!(DivisionByZero),
@ -221,7 +221,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
throw_ub!(ArithOverflow { intrinsic });
}
let res = ImmTy::from_scalar_int(result, left.layout);
return Ok(if with_overflow {
return interp_ok(if with_overflow {
let overflow = ImmTy::from_bool(overflow, *self.tcx);
ImmTy::from_pair(res, overflow, *self.tcx)
} else {
@ -234,10 +234,10 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
let r = r_unsigned();
if bin_op == Cmp {
return Ok(self.three_way_compare(l, r));
return interp_ok(self.three_way_compare(l, r));
}
Ok(match bin_op {
interp_ok(match bin_op {
Eq => ImmTy::from_bool(l == r, *self.tcx),
Ne => ImmTy::from_bool(l != r, *self.tcx),
@ -339,7 +339,10 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
throw_ub!(PointerArithOverflow)
}
let offset_ptr = self.ptr_offset_inbounds(ptr, offset_bytes)?;
Ok(ImmTy::from_scalar(Scalar::from_maybe_pointer(offset_ptr, self), left.layout))
interp_ok(ImmTy::from_scalar(
Scalar::from_maybe_pointer(offset_ptr, self),
left.layout,
))
}
// Fall back to machine hook so Miri can support more pointer ops.
@ -366,20 +369,20 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
assert_eq!(left.layout.ty, right.layout.ty);
let left = left.to_scalar();
let right = right.to_scalar();
Ok(self.binary_char_op(bin_op, left.to_char()?, right.to_char()?))
interp_ok(self.binary_char_op(bin_op, left.to_char()?, right.to_char()?))
}
ty::Bool => {
assert_eq!(left.layout.ty, right.layout.ty);
let left = left.to_scalar();
let right = right.to_scalar();
Ok(self.binary_bool_op(bin_op, left.to_bool()?, right.to_bool()?))
interp_ok(self.binary_bool_op(bin_op, left.to_bool()?, right.to_bool()?))
}
ty::Float(fty) => {
assert_eq!(left.layout.ty, right.layout.ty);
let layout = left.layout;
let left = left.to_scalar();
let right = right.to_scalar();
Ok(match fty {
interp_ok(match fty {
FloatTy::F16 => {
self.binary_float_op(bin_op, layout, left.to_f16()?, right.to_f16()?)
}
@ -447,7 +450,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
Not => !val,
_ => span_bug!(self.cur_span(), "Invalid bool op {:?}", un_op),
};
Ok(ImmTy::from_bool(res, *self.tcx))
interp_ok(ImmTy::from_bool(res, *self.tcx))
}
ty::Float(fty) => {
let val = val.to_scalar();
@ -462,7 +465,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
FloatTy::F64 => Scalar::from_f64(-val.to_f64()?),
FloatTy::F128 => Scalar::from_f128(-val.to_f128()?),
};
Ok(ImmTy::from_scalar(res, layout))
interp_ok(ImmTy::from_scalar(res, layout))
}
ty::Int(..) => {
let val = val.to_scalar().to_int(layout.size)?;
@ -472,7 +475,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
_ => span_bug!(self.cur_span(), "Invalid integer op {:?}", un_op),
};
let res = ScalarInt::truncate_from_int(res, layout.size).0;
Ok(ImmTy::from_scalar(res.into(), layout))
interp_ok(ImmTy::from_scalar(res.into(), layout))
}
ty::Uint(..) => {
let val = val.to_scalar().to_uint(layout.size)?;
@ -481,12 +484,12 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
_ => span_bug!(self.cur_span(), "Invalid unsigned integer op {:?}", un_op),
};
let res = ScalarInt::truncate_from_uint(res, layout.size).0;
Ok(ImmTy::from_scalar(res.into(), layout))
interp_ok(ImmTy::from_scalar(res.into(), layout))
}
ty::RawPtr(..) | ty::Ref(..) => {
assert_eq!(un_op, PtrMetadata);
let (_, meta) = val.to_scalar_and_meta();
Ok(match meta {
interp_ok(match meta {
MemPlaceMeta::Meta(scalar) => {
let ty = un_op.ty(*self.tcx, val.layout.ty);
let layout = self.layout_of(ty)?;
@ -514,7 +517,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
let layout = self.layout_of(arg_ty)?;
let usize_layout = || self.layout_of(self.tcx.types.usize).unwrap();
Ok(match null_op {
interp_ok(match null_op {
SizeOf => {
if !layout.abi.is_sized() {
span_bug!(self.cur_span(), "unsized type for `NullaryOp::SizeOf`");

View File

@ -15,7 +15,7 @@ use tracing::{instrument, trace};
use super::{
AllocRef, AllocRefMut, CheckAlignMsg, CtfeProvenance, ImmTy, Immediate, InterpCx, InterpResult,
Machine, MemoryKind, Misalignment, OffsetMode, OpTy, Operand, Pointer, Projectable, Provenance,
Scalar, alloc_range, mir_assign_valid_types,
Scalar, alloc_range, interp_ok, mir_assign_valid_types,
};
#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
@ -90,7 +90,7 @@ impl<Prov: Provenance> MemPlace<Prov> {
}
OffsetMode::Wrapping => self.ptr.wrapping_offset(offset, ecx),
};
Ok(MemPlace { ptr, meta, misaligned: self.misaligned })
interp_ok(MemPlace { ptr, meta, misaligned: self.misaligned })
}
}
@ -163,7 +163,10 @@ impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for MPlaceTy<'tcx, Prov> {
layout: TyAndLayout<'tcx>,
ecx: &InterpCx<'tcx, M>,
) -> InterpResult<'tcx, Self> {
Ok(MPlaceTy { mplace: self.mplace.offset_with_meta_(offset, mode, meta, ecx)?, layout })
interp_ok(MPlaceTy {
mplace: self.mplace.offset_with_meta_(offset, mode, meta, ecx)?,
layout,
})
}
#[inline(always)]
@ -171,7 +174,7 @@ impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for MPlaceTy<'tcx, Prov> {
&self,
_ecx: &InterpCx<'tcx, M>,
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
Ok(self.clone().into())
interp_ok(self.clone().into())
}
}
@ -279,7 +282,7 @@ impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for PlaceTy<'tcx, Prov> {
layout: TyAndLayout<'tcx>,
ecx: &InterpCx<'tcx, M>,
) -> InterpResult<'tcx, Self> {
Ok(match self.as_mplace_or_local() {
interp_ok(match self.as_mplace_or_local() {
Left(mplace) => mplace.offset_with_meta(offset, mode, meta, layout, ecx)?.into(),
Right((local, old_offset, locals_addr, _)) => {
debug_assert!(layout.is_sized(), "unsized locals should live in memory");
@ -367,7 +370,7 @@ impl<'tcx, Prov: Provenance> Writeable<'tcx, Prov> for MPlaceTy<'tcx, Prov> {
&self,
_ecx: &mut InterpCx<'tcx, M>,
) -> InterpResult<'tcx, MPlaceTy<'tcx, Prov>> {
Ok(self.clone())
interp_ok(self.clone())
}
}
@ -425,7 +428,7 @@ where
// `ref_to_mplace` is called on raw pointers even if they don't actually get dereferenced;
// we hence can't call `size_and_align_of` since that asserts more validity than we want.
let ptr = ptr.to_pointer(self)?;
Ok(self.ptr_with_meta_to_mplace(ptr, meta, layout, /*unaligned*/ false))
interp_ok(self.ptr_with_meta_to_mplace(ptr, meta, layout, /*unaligned*/ false))
}
/// Turn a mplace into a (thin or wide) mutable raw pointer, pointing to the same space.
@ -437,7 +440,7 @@ where
) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
let imm = mplace.mplace.to_ref(self);
let layout = self.layout_of(Ty::new_mut_ptr(self.tcx.tcx, mplace.layout.ty))?;
Ok(ImmTy::from_immediate(imm, layout))
interp_ok(ImmTy::from_immediate(imm, layout))
}
/// Take an operand, representing a pointer, and dereference it to a place.
@ -458,7 +461,7 @@ where
trace!("deref to {} on {:?}", val.layout.ty, *val);
let mplace = self.ref_to_mplace(&val)?;
Ok(mplace)
interp_ok(mplace)
}
#[inline]
@ -474,7 +477,7 @@ where
// If an access is both OOB and misaligned, we want to see the bounds error.
let a = self.get_ptr_alloc(mplace.ptr(), size)?;
self.check_misalign(mplace.mplace.misaligned, CheckAlignMsg::BasedOn)?;
Ok(a)
interp_ok(a)
}
#[inline]
@ -489,10 +492,10 @@ where
// We check alignment separately, and raise that error *after* checking everything else.
// If an access is both OOB and misaligned, we want to see the bounds error.
// However we have to call `check_misalign` first to make the borrow checker happy.
let misalign_err = self.check_misalign(mplace.mplace.misaligned, CheckAlignMsg::BasedOn);
let a = self.get_ptr_alloc_mut(mplace.ptr(), size)?;
misalign_err?;
Ok(a)
let misalign_res = self.check_misalign(mplace.mplace.misaligned, CheckAlignMsg::BasedOn);
// An error from get_ptr_alloc_mut takes precedence.
let (a, ()) = self.get_ptr_alloc_mut(mplace.ptr(), size).and(misalign_res)?;
interp_ok(a)
}
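// Sketch of the precedence rule above, in plain-`Result` terms (illustrative
// helper, not the rustc `InterpResult::and`): both computations have already
// run; if both failed, the first error wins and the second is dropped
// explicitly rather than being silently ignored.
fn first_error_wins<T, U, E>(a: Result<T, E>, b: Result<U, E>) -> Result<(T, U), E> {
    match a {
        Ok(t) => b.map(|u| (t, u)),
        Err(e) => {
            drop(b); // deliberately discard the (possible) second error
            Err(e)
        }
    }
}

fn main() {
    let r = first_error_wins::<u8, (), &str>(Err("out of bounds"), Err("misaligned"));
    assert_eq!(r.unwrap_err(), "out of bounds");
}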
/// Turn a local in the current frame into a place.
@ -512,7 +515,7 @@ where
Operand::Indirect(mplace) => Place::Ptr(*mplace),
}
};
Ok(PlaceTy { place, layout })
interp_ok(PlaceTy { place, layout })
}
/// Computes a place. You should only use this if you intend to write into this
@ -549,7 +552,7 @@ where
)
}
}
Ok(place)
interp_ok(place)
}
/// Given a place, returns either the underlying mplace or a reference to where the value of
@ -565,7 +568,7 @@ where
(&mut Immediate<M::Provenance>, TyAndLayout<'tcx>, mir::Local),
>,
> {
Ok(match place.to_place().as_mplace_or_local() {
interp_ok(match place.to_place().as_mplace_or_local() {
Left(mplace) => Left(mplace),
Right((local, offset, locals_addr, layout)) => {
if offset.is_some() {
@ -610,7 +613,7 @@ where
)?;
}
Ok(())
interp_ok(())
}
/// Write a scalar to a place
@ -660,7 +663,7 @@ where
self.write_immediate_to_mplace_no_validate(src, mplace.layout, mplace.mplace)?;
}
}
Ok(())
interp_ok(())
}
/// Write an immediate to memory.
@ -683,7 +686,7 @@ where
let tcx = *self.tcx;
let Some(mut alloc) = self.get_place_alloc_mut(&MPlaceTy { mplace: dest, layout })? else {
// zero-sized access
return Ok(());
return interp_ok(());
};
match value {
@ -708,7 +711,7 @@ where
alloc.write_scalar(alloc_range(Size::ZERO, a_val.size()), a_val)?;
alloc.write_scalar(alloc_range(b_offset, b_val.size()), b_val)?;
// We don't have to reset padding here, `write_immediate` will anyway do a validation run.
Ok(())
interp_ok(())
}
Immediate::Uninit => alloc.write_uninit_full(),
}
@ -729,12 +732,12 @@ where
Left(mplace) => {
let Some(mut alloc) = self.get_place_alloc_mut(&mplace)? else {
// Zero-sized access
return Ok(());
return interp_ok(());
};
alloc.write_uninit_full()?;
}
}
Ok(())
interp_ok(())
}
/// Remove all provenance in the given place.
@ -753,12 +756,12 @@ where
Left(mplace) => {
let Some(mut alloc) = self.get_place_alloc_mut(&mplace)? else {
// Zero-sized access
return Ok(());
return interp_ok(());
};
alloc.clear_provenance()?;
}
}
Ok(())
interp_ok(())
}
/// Copies the data from an operand to a place.
@ -841,7 +844,7 @@ where
)?;
}
Ok(())
interp_ok(())
}
/// Copies the data from an operand to a place.
@ -918,7 +921,7 @@ where
self.mem_copy(src.ptr(), dest.ptr(), dest_size, /*nonoverlapping*/ true)?;
self.check_misalign(src.mplace.misaligned, CheckAlignMsg::BasedOn)?;
self.check_misalign(dest.mplace.misaligned, CheckAlignMsg::BasedOn)?;
Ok(())
interp_ok(())
}
/// Ensures that a place is in memory, and returns where it is.
@ -980,7 +983,7 @@ where
Place::Ptr(mplace) => mplace,
};
// Return with the original layout and align, so that the caller can go on
Ok(MPlaceTy { mplace, layout: place.layout })
interp_ok(MPlaceTy { mplace, layout: place.layout })
}
pub fn allocate_dyn(
@ -993,7 +996,7 @@ where
span_bug!(self.cur_span(), "cannot allocate space for `extern` type, size is not known")
};
let ptr = self.allocate_ptr(size, align, kind)?;
Ok(self.ptr_with_meta_to_mplace(ptr.into(), meta, layout, /*unaligned*/ false))
interp_ok(self.ptr_with_meta_to_mplace(ptr.into(), meta, layout, /*unaligned*/ false))
}
pub fn allocate(
@ -1028,7 +1031,7 @@ where
};
let meta = Scalar::from_target_usize(u64::try_from(str.len()).unwrap(), self);
let layout = self.layout_of(self.tcx.types.str_).unwrap();
Ok(self.ptr_with_meta_to_mplace(
interp_ok(self.ptr_with_meta_to_mplace(
ptr.into(),
MemPlaceMeta::Meta(meta),
layout,
@ -1044,7 +1047,7 @@ where
let _ = self.tcx.global_alloc(raw.alloc_id);
let ptr = self.global_root_pointer(Pointer::from(raw.alloc_id))?;
let layout = self.layout_of(raw.ty)?;
Ok(self.ptr_to_mplace(ptr.into(), layout))
interp_ok(self.ptr_to_mplace(ptr.into(), layout))
}
}

View File

@ -18,7 +18,7 @@ use tracing::{debug, instrument};
use super::{
InterpCx, InterpResult, MPlaceTy, Machine, MemPlaceMeta, OpTy, Provenance, Scalar, err_ub,
throw_ub, throw_unsup,
interp_ok, throw_ub, throw_unsup,
};
/// Describes the constraints placed on offset-projections.
@ -54,7 +54,7 @@ pub trait Projectable<'tcx, Prov: Provenance>: Sized + std::fmt::Debug {
// Go through the layout. There are lots of types that support a length,
// e.g., SIMD types. (But not all repr(simd) types even have FieldsShape::Array!)
match layout.fields {
abi::FieldsShape::Array { count, .. } => Ok(count),
abi::FieldsShape::Array { count, .. } => interp_ok(count),
_ => bug!("len not supported on sized type {:?}", layout.ty),
}
}
@ -115,9 +115,9 @@ impl<'a, 'tcx, Prov: Provenance, P: Projectable<'tcx, Prov>> ArrayIterator<'a, '
&mut self,
ecx: &InterpCx<'tcx, M>,
) -> InterpResult<'tcx, Option<(u64, P)>> {
let Some(idx) = self.range.next() else { return Ok(None) };
let Some(idx) = self.range.next() else { return interp_ok(None) };
// We use `Wrapping` here since the offset has already been checked when the iterator was created.
Ok(Some((
interp_ok(Some((
idx,
self.base.offset_with_meta(
self.stride * idx,
@ -258,7 +258,7 @@ where
// SIMD types must be newtypes around arrays, so all we have to do is project to their only field.
let array = self.project_field(base, 0)?;
let len = array.len(self)?;
Ok((array, len))
interp_ok((array, len))
}
fn project_constant_index<P: Projectable<'tcx, M::Provenance>>(
@ -300,7 +300,13 @@ where
debug!("project_array_fields: {base:?} {len}");
base.offset(len * stride, self.layout_of(self.tcx.types.unit).unwrap(), self)?;
// Create the iterator.
Ok(ArrayIterator { base, range: 0..len, stride, field_layout, _phantom: PhantomData })
interp_ok(ArrayIterator {
base,
range: 0..len,
stride,
field_layout,
_phantom: PhantomData,
})
}
/// Subslicing
@ -367,7 +373,7 @@ where
P: Projectable<'tcx, M::Provenance> + From<MPlaceTy<'tcx, M::Provenance>> + std::fmt::Debug,
{
use rustc_middle::mir::ProjectionElem::*;
Ok(match proj_elem {
interp_ok(match proj_elem {
OpaqueCast(ty) => {
span_bug!(self.cur_span(), "OpaqueCast({ty}) encountered after borrowck")
}

View File

@ -17,7 +17,7 @@ use tracing::{info_span, instrument, trace};
use super::{
AllocId, CtfeProvenance, Immediate, InterpCx, InterpResult, MPlaceTy, Machine, MemPlace,
MemPlaceMeta, MemoryKind, Operand, Pointer, Provenance, ReturnAction, Scalar,
from_known_layout, throw_ub, throw_unsup,
from_known_layout, interp_ok, throw_ub, throw_unsup,
};
use crate::errors;
@ -189,7 +189,7 @@ impl<'tcx, Prov: Provenance> LocalState<'tcx, Prov> {
pub(super) fn access(&self) -> InterpResult<'tcx, &Operand<Prov>> {
match &self.value {
LocalValue::Dead => throw_ub!(DeadLocal), // could even be "invalid program"?
LocalValue::Live(val) => Ok(val),
LocalValue::Live(val) => interp_ok(val),
}
}
@ -199,7 +199,7 @@ impl<'tcx, Prov: Provenance> LocalState<'tcx, Prov> {
pub(super) fn access_mut(&mut self) -> InterpResult<'tcx, &mut Operand<Prov>> {
match &mut self.value {
LocalValue::Dead => throw_ub!(DeadLocal), // could even be "invalid program"?
LocalValue::Live(val) => Ok(val),
LocalValue::Live(val) => interp_ok(val),
}
}
}
@ -391,7 +391,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
let span = info_span!("frame", "{}", instance);
self.frame_mut().tracing_span.enter(span);
Ok(())
interp_ok(())
}
/// Low-level helper that pops a stack frame from the stack and returns some information about
@ -426,7 +426,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
return_action = ReturnAction::NoCleanup;
};
Ok(StackPopInfo { return_action, return_to_block, return_place })
interp_ok(StackPopInfo { return_action, return_to_block, return_place })
}
/// A private helper for [`pop_stack_frame_raw`](InterpCx::pop_stack_frame_raw).
@ -449,7 +449,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
}
}
Ok(cleanup)
interp_ok(cleanup)
}
/// In the current stack frame, mark all locals as live that are not arguments and don't have
@ -464,7 +464,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
self.storage_live(local)?;
}
}
Ok(())
interp_ok(())
}
pub fn storage_live_dyn(
@ -550,7 +550,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// If the local is already live, deallocate its old memory.
let old = mem::replace(&mut self.frame_mut().locals[local].value, local_val);
self.deallocate_local(old)?;
Ok(())
interp_ok(())
}
/// Mark a storage as live, killing the previous content.
@ -566,7 +566,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// If the local is already dead, this is a NOP.
let old = mem::replace(&mut self.frame_mut().locals[local].value, LocalValue::Dead);
self.deallocate_local(old)?;
Ok(())
interp_ok(())
}
fn deallocate_local(&mut self, local: LocalValue<M::Provenance>) -> InterpResult<'tcx> {
@ -581,7 +581,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
);
self.deallocate_ptr(ptr, None, MemoryKind::Stack)?;
};
Ok(())
interp_ok(())
}
#[inline(always)]
@ -593,19 +593,19 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
) -> InterpResult<'tcx, TyAndLayout<'tcx>> {
let state = &frame.locals[local];
if let Some(layout) = state.layout.get() {
return Ok(layout);
return interp_ok(layout);
}
let layout = from_known_layout(self.tcx, self.param_env, layout, || {
let local_ty = frame.body.local_decls[local].ty;
let local_ty =
self.instantiate_from_frame_and_normalize_erasing_regions(frame, local_ty)?;
self.layout_of(local_ty)
self.layout_of(local_ty).into()
})?;
// Layouts of locals are requested a lot, so we cache them.
state.layout.set(Some(layout));
Ok(layout)
interp_ok(layout)
}
}

View File

@ -14,7 +14,7 @@ use tracing::{info, instrument, trace};
use super::{
FnArg, FnVal, ImmTy, Immediate, InterpCx, InterpResult, Machine, MemPlaceMeta, PlaceTy,
Projectable, Scalar, throw_ub,
Projectable, Scalar, interp_ok, throw_ub,
};
use crate::util;
@ -36,7 +36,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
#[inline(always)]
pub fn step(&mut self) -> InterpResult<'tcx, bool> {
if self.stack().is_empty() {
return Ok(false);
return interp_ok(false);
}
let Either::Left(loc) = self.frame().loc else {
@ -44,7 +44,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// Just go on unwinding.
trace!("unwinding: skipping frame");
self.return_from_current_stack_frame(/* unwinding */ true)?;
return Ok(true);
return interp_ok(true);
};
let basic_block = &self.body().basic_blocks[loc.block];
@ -55,7 +55,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
assert_eq!(old_frames, self.frame_idx());
// Advance the program counter.
self.frame_mut().loc.as_mut().left().unwrap().statement_index += 1;
return Ok(true);
return interp_ok(true);
}
M::before_terminator(self)?;
@ -67,7 +67,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
info!("// executing {:?}", loc.block);
}
}
Ok(true)
interp_ok(true)
}
/// Runs the interpretation logic for the given `mir::Statement` at the current frame and
@ -145,7 +145,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
Nop => {}
}
Ok(())
interp_ok(())
}
/// Evaluate an assignment statement.
@ -277,7 +277,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
trace!("{:?}", self.dump_place(&dest));
Ok(())
interp_ok(())
}
/// Writes the aggregate to the destination.
@ -313,7 +313,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
let ptr_imm = Immediate::new_pointer_with_meta(data, meta, self);
let ptr = ImmTy::from_immediate(ptr_imm, dest.layout);
self.copy_op(&ptr, dest)?;
return Ok(());
return interp_ok(());
}
_ => (FIRST_VARIANT, dest.clone(), None),
};
@ -365,7 +365,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
)?;
}
Ok(())
interp_ok(())
}
/// Evaluate the arguments of a function call
@ -373,7 +373,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
&self,
op: &mir::Operand<'tcx>,
) -> InterpResult<'tcx, FnArg<'tcx, M::Provenance>> {
Ok(match op {
interp_ok(match op {
mir::Operand::Copy(_) | mir::Operand::Constant(_) => {
// Make a regular copy.
let op = self.eval_operand(op, None)?;
@ -442,7 +442,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
}
};
Ok(EvaluatedCalleeAndArgs { callee, args, fn_sig, fn_abi, with_caller_location })
interp_ok(EvaluatedCalleeAndArgs { callee, args, fn_sig, fn_abi, with_caller_location })
}
fn eval_terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> InterpResult<'tcx> {
@ -537,7 +537,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// generic. In order to make sure that generic and non-generic code behaves
// roughly the same (and in keeping with Mir semantics) we do nothing here.
self.go_to_block(target);
return Ok(());
return interp_ok(());
}
trace!("TerminatorKind::drop: {:?}, type {}", place, place.layout.ty);
self.init_drop_in_place_call(&place, instance, target, unwind)?;
@ -566,7 +566,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// By definition, a Resume terminator means
// that we're unwinding
self.return_from_current_stack_frame(/* unwinding */ true)?;
return Ok(());
return interp_ok(());
}
// It is UB to ever encounter this.
@ -584,6 +584,6 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
}
}
Ok(())
interp_ok(())
}
}

View File

@ -5,7 +5,9 @@ use rustc_target::abi::{Align, Size};
use tracing::trace;
use super::util::ensure_monomorphic_enough;
use super::{InterpCx, MPlaceTy, Machine, MemPlaceMeta, OffsetMode, Projectable, throw_ub};
use super::{
InterpCx, MPlaceTy, Machine, MemPlaceMeta, OffsetMode, Projectable, interp_ok, throw_ub,
};
impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
/// Creates a dynamic vtable for the given type and vtable origin. This is used only for
@ -31,7 +33,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
let salt = M::get_global_alloc_salt(self, None);
let vtable_symbolic_allocation = self.tcx.reserve_and_set_vtable_alloc(ty, dyn_ty, salt);
let vtable_ptr = self.global_root_pointer(Pointer::from(vtable_symbolic_allocation))?;
Ok(vtable_ptr.into())
interp_ok(vtable_ptr.into())
}
pub fn get_vtable_size_and_align(
@ -42,7 +44,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
let ty = self.get_ptr_vtable_ty(vtable, expected_trait)?;
let layout = self.layout_of(ty)?;
assert!(layout.is_sized(), "there are no vtables for unsized types");
Ok((layout.size, layout.align.abi))
interp_ok((layout.size, layout.align.abi))
}
pub(super) fn vtable_entries(
@ -102,7 +104,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
}
}
Ok(())
interp_ok(())
}
/// Turn a place with a `dyn Trait` type into a place with the actual dynamic type.
@ -127,7 +129,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
layout,
self,
)?;
Ok(mplace)
interp_ok(mplace)
}
/// Turn a `dyn* Trait` type into a value with the actual dynamic type.

@ -147,6 +149,6 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// `data` is already the right thing but has the wrong type. So we transmute it.
let layout = self.layout_of(ty)?;
let data = data.transmute(layout, self)?;
Ok(data)
interp_ok(data)
}
}

View File

@ -9,7 +9,7 @@ use rustc_middle::ty::{
};
use tracing::debug;
use super::{InterpCx, MPlaceTy, MemoryKind, throw_inval};
use super::{InterpCx, MPlaceTy, MemoryKind, interp_ok, throw_inval};
use crate::const_eval::{CompileTimeInterpCx, CompileTimeMachine, InterpretationResult};
/// Checks whether a type contains generic parameters which must be instantiated.
@ -23,7 +23,7 @@ where
{
debug!("ensure_monomorphic_enough: ty={:?}", ty);
if !ty.has_param() {
return Ok(());
return interp_ok(());
}
struct FoundParam;
@ -78,7 +78,7 @@ where
if matches!(ty.visit_with(&mut vis), ControlFlow::Break(FoundParam)) {
throw_inval!(TooGeneric);
} else {
Ok(())
interp_ok(())
}
}
@ -103,5 +103,5 @@ pub(crate) fn create_static_alloc<'tcx>(
assert_eq!(ecx.machine.static_root_ids, None);
ecx.machine.static_root_ids = Some((alloc_id, static_def_id));
assert!(ecx.memory.alloc_map.insert(alloc_id, (MemoryKind::Stack, alloc)).is_none());
Ok(ecx.ptr_to_mplace(Pointer::from(alloc_id).into(), layout))
interp_ok(ecx.ptr_to_mplace(Pointer::from(alloc_id).into(), layout))
}

View File

@ -17,8 +17,8 @@ use rustc_hir as hir;
use rustc_middle::bug;
use rustc_middle::mir::interpret::ValidationErrorKind::{self, *};
use rustc_middle::mir::interpret::{
ExpectedKind, InterpError, InvalidMetaKind, Misalignment, PointerKind, Provenance,
UnsupportedOpInfo, ValidationErrorInfo, alloc_range,
ExpectedKind, InterpError, InterpErrorInfo, InvalidMetaKind, Misalignment, PointerKind,
Provenance, UnsupportedOpInfo, ValidationErrorInfo, alloc_range, interp_ok,
};
use rustc_middle::ty::layout::{LayoutCx, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, Ty};
@ -32,7 +32,7 @@ use super::machine::AllocMap;
use super::{
AllocId, AllocKind, CheckInAllocMsg, GlobalAlloc, ImmTy, Immediate, InterpCx, InterpResult,
MPlaceTy, Machine, MemPlaceMeta, PlaceTy, Pointer, Projectable, Scalar, ValueVisitor, err_ub,
format_interp_error, throw_ub,
format_interp_error,
};
// for the validation errors
@ -42,7 +42,7 @@ use super::InterpError::Unsupported as Unsup;
use super::UndefinedBehaviorInfo::*;
use super::UnsupportedOpInfo::*;
macro_rules! throw_validation_failure {
macro_rules! err_validation_failure {
($where:expr, $kind: expr) => {{
let where_ = &$where;
let path = if !where_.is_empty() {
@ -53,10 +53,16 @@ macro_rules! throw_validation_failure {
None
};
throw_ub!(ValidationError(ValidationErrorInfo { path, kind: $kind }))
err_ub!(ValidationError(ValidationErrorInfo { path, kind: $kind }))
}};
}
macro_rules! throw_validation_failure {
($where:expr, $kind: expr) => {
do yeet err_validation_failure!($where, $kind)
};
}
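// Stable-Rust sketch of the split above (the real `throw_validation_failure!`
// uses the nightly `do yeet` operator from `#![feature(yeet_expr)]`): one
// macro only *builds* the error so callers can decide what to do with it,
// the other builds it and returns it immediately.
macro_rules! err_failure {
    ($msg:expr) => {
        format!("validation failure: {}", $msg)
    };
}
macro_rules! throw_failure {
    ($msg:expr) => {
        return Err(err_failure!($msg))
    };
}

fn check(ok: bool) -> Result<(), String> {
    if !ok {
        throw_failure!("uninitialized byte");
    }
    Ok(())
}

fn main() {
    assert!(check(true).is_ok());
    assert!(check(false).is_err());
}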
/// If $e throws an error matching the pattern, throw a validation failure.
/// Other errors are passed back to the caller, unchanged -- and if they reach the root of
/// the visitor, we make sure only validation errors and `InvalidProgram` errors are left.
@ -91,22 +97,22 @@ macro_rules! try_validation {
($e:expr, $where:expr,
$( $( $p:pat_param )|+ => $kind: expr ),+ $(,)?
) => {{
match $e {
Ok(x) => x,
$e.map_err(|e| {
// We catch the error and turn it into a validation failure. We are okay with
// allocation here as this can only slow down builds that fail anyway.
Err(e) => match e.kind() {
let (kind, backtrace) = e.into_parts();
match kind {
$(
$($p)|+ =>
throw_validation_failure!(
$($p)|+ => {
err_validation_failure!(
$where,
$kind
)
).into()
}
),+,
#[allow(unreachable_patterns)]
_ => Err::<!, _>(e)?,
}
_ => InterpErrorInfo::from_parts(kind, backtrace),
}
})?
}};
}
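// Simplified sketch of what the rewritten macro body does (illustrative types,
// not rustc's): instead of matching on `Ok`/`Err`, the error is mapped in
// place -- listed kinds become a validation failure, everything else keeps its
// original message and is propagated.
#[derive(Debug)]
enum Kind {
    Uninit,
    Other(&'static str),
}

fn reclassify(e: Kind) -> String {
    match e {
        Kind::Uninit => "validation failure: uninitialized memory".to_string(),
        Kind::Other(msg) => msg.to_string(), // pass the original message through
    }
}

fn read() -> Result<u8, Kind> {
    Err(Kind::Uninit)
}

fn validate() -> Result<u8, String> {
    read().map_err(reclassify)
}

fn main() {
    assert!(validate().unwrap_err().starts_with("validation failure"));
}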
@ -378,7 +384,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
// Undo changes
self.path.truncate(path_len);
// Done
Ok(r)
interp_ok(r)
}
fn read_immediate(
@ -386,7 +392,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
val: &PlaceTy<'tcx, M::Provenance>,
expected: ExpectedKind,
) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
Ok(try_validation!(
interp_ok(try_validation!(
self.ecx.read_immediate(val),
self.path,
Ub(InvalidUninitBytes(None)) =>
@ -404,7 +410,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
val: &PlaceTy<'tcx, M::Provenance>,
expected: ExpectedKind,
) -> InterpResult<'tcx, Scalar<M::Provenance>> {
Ok(self.read_immediate(val, expected)?.to_scalar())
interp_ok(self.read_immediate(val, expected)?.to_scalar())
}
fn deref_pointer(
@ -469,7 +475,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
_ => bug!("Unexpected unsized type tail: {:?}", tail),
}
Ok(())
interp_ok(())
}
/// Check a reference or `Box`.
@ -510,7 +516,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
Ub(DanglingIntPointer { addr: i, .. }) => DanglingPtrNoProvenance {
ptr_kind,
// FIXME this says "null pointer" when null but we need translate
pointer: format!("{}", Pointer::<Option<AllocId>>::from_addr_invalid(*i))
pointer: format!("{}", Pointer::<Option<AllocId>>::from_addr_invalid(i))
},
Ub(PointerOutOfBounds { .. }) => DanglingPtrOutOfBounds {
ptr_kind
@ -632,7 +638,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
}
// Potentially skip recursive check.
if skip_recursive_check {
return Ok(());
return interp_ok(());
}
} else {
// This is not CTFE, so it's Miri with recursive checking.
@ -641,7 +647,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
// FIXME: should we also skip `UnsafeCell` behind shared references? Currently that is not
// needed since validation reads bypass Stacked Borrows and data race checks.
if matches!(ptr_kind, PointerKind::Box) {
return Ok(());
return interp_ok(());
}
}
let path = &self.path;
@ -654,7 +660,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
new_path
});
}
Ok(())
interp_ok(())
}
/// Check if this is a value of primitive type, and if yes check the validity of the value
@ -681,7 +687,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
self.ecx.clear_provenance(value)?;
self.add_data_range_place(value);
}
Ok(true)
interp_ok(true)
}
ty::Char => {
let scalar = self.read_scalar(value, ExpectedKind::Char)?;
@ -696,7 +702,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
self.ecx.clear_provenance(value)?;
self.add_data_range_place(value);
}
Ok(true)
interp_ok(true)
}
ty::Float(_) | ty::Int(_) | ty::Uint(_) => {
// NOTE: Keep this in sync with the array optimization for int/float
@ -713,18 +719,18 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
self.ecx.clear_provenance(value)?;
self.add_data_range_place(value);
}
Ok(true)
interp_ok(true)
}
ty::RawPtr(..) => {
let place = self.deref_pointer(value, ExpectedKind::RawPtr)?;
if place.layout.is_unsized() {
self.check_wide_ptr_meta(place.meta(), place.layout)?;
}
Ok(true)
interp_ok(true)
}
ty::Ref(_, _ty, mutbl) => {
self.check_safe_pointer(value, PointerKind::Ref(*mutbl))?;
Ok(true)
interp_ok(true)
}
ty::FnPtr(..) => {
let scalar = self.read_scalar(value, ExpectedKind::FnPtr)?;
@ -753,12 +759,12 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
}
self.add_data_range_place(value);
}
Ok(true)
interp_ok(true)
}
ty::Never => throw_validation_failure!(self.path, NeverVal),
ty::Foreign(..) | ty::FnDef(..) => {
// Nothing to check.
Ok(true)
interp_ok(true)
}
// The above should be all the primitive types. The rest is compound, we
// check them by visiting their fields/variants.
@ -771,7 +777,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
| ty::Closure(..)
| ty::Pat(..)
| ty::CoroutineClosure(..)
| ty::Coroutine(..) => Ok(false),
| ty::Coroutine(..) => interp_ok(false),
// Some types only occur during typechecking, they have no layout.
// We should not see them here and we could not check them anyway.
ty::Error(_)
@ -808,11 +814,11 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
max_value
})
} else {
return Ok(());
return interp_ok(());
}
} else if scalar_layout.is_always_valid(self.ecx) {
// Easy. (This is reachable if `enforce_number_validity` is set.)
return Ok(());
return interp_ok(());
} else {
// Conservatively, we reject, because the pointer *could* have a bad
// value.
@ -825,7 +831,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
};
// Now compare.
if valid_range.contains(bits) {
Ok(())
interp_ok(())
} else {
throw_validation_failure!(self.path, OutOfRange {
value: format!("{bits}"),
@ -884,7 +890,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
}
fn reset_padding(&mut self, place: &PlaceTy<'tcx, M::Provenance>) -> InterpResult<'tcx> {
let Some(data_bytes) = self.data_bytes.as_mut() else { return Ok(()) };
let Some(data_bytes) = self.data_bytes.as_mut() else { return interp_ok(()) };
// Our value must be in memory, otherwise we would not have set up `data_bytes`.
let mplace = self.ecx.force_allocation(place)?;
// Determine starting offset and size.
@ -896,14 +902,14 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
// If there is no padding at all, we can skip the rest: check for
// a single data range covering the entire value.
if data_bytes.0 == &[(start_offset, size)] {
return Ok(());
return interp_ok(());
}
// Get a handle for the allocation. Do this only once, to avoid looking up the same
// allocation over and over again. (Though to be fair, iterating the value already does
// exactly that.)
let Some(mut alloc) = self.ecx.get_ptr_alloc_mut(mplace.ptr(), size)? else {
// A ZST, no padding to clear.
return Ok(());
return interp_ok(());
};
// Add a "finalizer" data range at the end, so that the iteration below finds all gaps
// between ranges.
@ -930,7 +936,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
padding_cleared_until = offset + size;
}
assert!(padding_cleared_until == start_offset + size);
Ok(())
interp_ok(())
}
/// Computes the data range of this union type:
@ -1070,7 +1076,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
val: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, VariantIdx> {
self.with_elem(PathElem::EnumTag, move |this| {
Ok(try_validation!(
interp_ok(try_validation!(
this.ecx.read_discriminant(val),
this.path,
Ub(InvalidTag(val)) => InvalidEnumTag {
@ -1134,7 +1140,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
data_bytes.add_range(base_offset + offset, size);
}
}
Ok(())
interp_ok(())
}
#[inline]
@ -1144,7 +1150,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
val: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
self.check_safe_pointer(val, PointerKind::Box)?;
Ok(())
interp_ok(())
}
#[inline]
@ -1157,7 +1163,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
// We assume that the Scalar validity range does not restrict these values
// any further than `try_visit_primitive` does!
if self.try_visit_primitive(val)? {
return Ok(());
return interp_ok(());
}
// Special check preventing `UnsafeCell` in the inner part of constants
@ -1204,7 +1210,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
// If the size is 0, there is nothing to check.
// (`size` can only be 0 if `len` is 0, and empty arrays are always valid.)
if size == Size::ZERO {
return Ok(());
return interp_ok(());
}
// Now that we definitely have a non-ZST array, we know it lives in memory -- except it may
// be an uninitialized local variable, those are also "immediate".
@ -1224,14 +1230,12 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
// No need for an alignment check here, this is not an actual memory access.
let alloc = self.ecx.get_ptr_alloc(mplace.ptr(), size)?.expect("we already excluded size 0");
match alloc.get_bytes_strip_provenance() {
// In the happy case, we needn't check anything else.
Ok(_) => {}
alloc.get_bytes_strip_provenance().map_err(|err| {
// Some error happened, try to provide a more detailed description.
Err(err) => {
// For some errors we might be able to provide extra information.
// (This custom logic does not fit the `try_validation!` macro.)
match err.kind() {
let (kind, backtrace) = err.into_parts();
match kind {
Ub(InvalidUninitBytes(Some((_alloc_id, access)))) | Unsup(ReadPointerAsInt(Some((_alloc_id, access)))) => {
// Some byte was uninitialized, determine which
// element that byte belongs to so we can
@ -1242,18 +1246,17 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
.unwrap();
self.path.push(PathElem::ArrayElem(i));
if matches!(err.kind(), Ub(InvalidUninitBytes(_))) {
throw_validation_failure!(self.path, Uninit { expected })
if matches!(kind, Ub(InvalidUninitBytes(_))) {
err_validation_failure!(self.path, Uninit { expected }).into()
} else {
throw_validation_failure!(self.path, PointerAsInt { expected })
err_validation_failure!(self.path, PointerAsInt { expected }).into()
}
}
// Propagate upwards (that will also check for unexpected errors).
_ => return Err(err),
}
}
_ => return InterpErrorInfo::from_parts(kind, backtrace),
}
})?;
// Don't forget that these are all non-pointer types, and thus do not preserve
// provenance.
@ -1282,7 +1285,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
// It's not great to catch errors here, since we can't give a very good path,
// but it's better than ICEing.
Ub(InvalidVTableTrait { vtable_dyn_type, expected_dyn_type }) => {
InvalidMetaWrongTrait { vtable_dyn_type, expected_dyn_type: *expected_dyn_type }
InvalidMetaWrongTrait { vtable_dyn_type, expected_dyn_type }
},
);
}
@ -1331,7 +1334,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
}
}
Ok(())
interp_ok(())
}
}
@ -1347,7 +1350,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
trace!("validate_operand_internal: {:?}, {:?}", *val, val.layout.ty);
// Run the visitor.
match self.run_for_validation(|ecx| {
self.run_for_validation(|ecx| {
let reset_padding = reset_provenance_and_padding && {
// Check if `val` is actually stored in memory. If not, padding is not even
// represented and we need not reset it.
@ -1363,29 +1366,22 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
};
v.visit_value(val)?;
v.reset_padding(val)?;
InterpResult::Ok(())
}) {
Ok(()) => Ok(()),
// Pass through validation failures and "invalid program" issues.
Err(err)
if matches!(
interp_ok(())
})
.map_err(|err| {
if !matches!(
err.kind(),
err_ub!(ValidationError { .. })
| InterpError::InvalidProgram(_)
| InterpError::Unsupported(UnsupportedOpInfo::ExternTypeField)
) =>
{
Err(err)
}
// Complain about any other kind of error -- those are bad because we'd like to
// report them in a way that shows *where* in the value the issue lies.
Err(err) => {
) {
bug!(
"Unexpected error during validation: {}",
format_interp_error(self.tcx.dcx(), err)
);
}
}
err
})
}
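// Plain sketch of the filtering above (illustrative types): expected error
// classes are propagated to the caller, anything else is treated as an
// interpreter bug and aborts loudly instead of being reported as if it were a
// property of the value being validated.
#[derive(Debug)]
enum VErr {
    Validation(String),
    InvalidProgram(String),
    Machine(String),
}

fn filter_unexpected(err: VErr) -> VErr {
    if !matches!(err, VErr::Validation(_) | VErr::InvalidProgram(_)) {
        panic!("Unexpected error during validation: {err:?}");
    }
    err
}

fn main() {
    let e = filter_unexpected(VErr::Validation("oops".into()));
    assert!(matches!(e, VErr::Validation(_)));
}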
/// This function checks the data at `op` to be const-valid.
@ -1456,6 +1452,6 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
/*reset_provenance_and_padding*/ false,
)?;
}
Ok(())
interp_ok(())
}
}

View File

@ -10,7 +10,7 @@ use rustc_middle::ty::{self, Ty};
use rustc_target::abi::{FieldIdx, FieldsShape, VariantIdx, Variants};
use tracing::trace;
use super::{InterpCx, MPlaceTy, Machine, Projectable, throw_inval};
use super::{InterpCx, MPlaceTy, Machine, Projectable, interp_ok, throw_inval};
/// How to traverse a value and what to do when we are at the leaves.
pub trait ValueVisitor<'tcx, M: Machine<'tcx>>: Sized {
@ -46,14 +46,14 @@ pub trait ValueVisitor<'tcx, M: Machine<'tcx>>: Sized {
/// Visits the given value as a union. No automatic recursion can happen here.
#[inline(always)]
fn visit_union(&mut self, _v: &Self::V, _fields: NonZero<usize>) -> InterpResult<'tcx> {
Ok(())
interp_ok(())
}
/// Visits the given value as the pointer of a `Box`. There is nothing to recurse into.
/// The type of `v` will be a raw pointer to `T`, but this is a field of `Box<T>` and the
/// pointee type is the actual `T`. `box_ty` provides the full type of the `Box` itself.
#[inline(always)]
fn visit_box(&mut self, _box_ty: Ty<'tcx>, _v: &Self::V) -> InterpResult<'tcx> {
Ok(())
interp_ok(())
}
/// Called each time we recurse down to a field of a "product-like" aggregate
@ -165,7 +165,7 @@ pub trait ValueVisitor<'tcx, M: Machine<'tcx>>: Sized {
self.visit_field(v, 1, &alloc)?;
// We visited all parts of this one.
return Ok(());
return interp_ok(());
}
// Non-normalized types should never show up here.
@ -222,6 +222,6 @@ pub trait ValueVisitor<'tcx, M: Machine<'tcx>>: Sized {
Variants::Single { .. } => {}
}
Ok(())
interp_ok(())
}
}

View File

@ -75,7 +75,8 @@ fn check_validity_requirement_strict<'tcx>(
/*recursive*/ false,
/*reset_provenance_and_padding*/ false,
)
.is_ok())
.discard_err()
.is_some())
}
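// What `.discard_err().is_some()` spells out, in plain-`Result` terms: the old
// `.is_ok()` silently threw the error away, the new form goes through an
// explicit "I really mean to drop this error" step (illustrative helper, not
// the rustc method).
fn discard_err<T, E>(res: Result<T, E>) -> Option<T> {
    // Dropping the error is the stated intent here, so `.ok()` is fine.
    res.ok()
}

fn main() {
    let valid: Result<(), &str> = Ok(());
    let invalid: Result<(), &str> = Err("ub: uninitialized bytes");
    assert!(discard_err(valid).is_some());
    assert!(discard_err(invalid).is_none());
}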
/// Implements the 'lax' (default) version of the [`check_validity_requirement`] checks; see that

View File

@ -61,6 +61,8 @@
#![feature(trait_upcasting)]
#![feature(trusted_len)]
#![feature(try_blocks)]
#![feature(try_trait_v2)]
#![feature(try_trait_v2_yeet)]
#![feature(type_alias_impl_trait)]
#![feature(yeet_expr)]
#![warn(unreachable_pub)]

View File

@ -148,7 +148,7 @@ impl<'tcx> ConstValue<'tcx> {
/* read_provenance */ true,
)
.ok()?;
let ptr = ptr.to_pointer(&tcx).ok()?;
let ptr = ptr.to_pointer(&tcx).discard_err()?;
let len = a
.read_scalar(
&tcx,
@ -156,7 +156,7 @@ impl<'tcx> ConstValue<'tcx> {
/* read_provenance */ false,
)
.ok()?;
let len = len.to_target_usize(&tcx).ok()?;
let len = len.to_target_usize(&tcx).discard_err()?;
if len == 0 {
return Some(&[]);
}

View File

@ -20,7 +20,7 @@ use rustc_target::abi::{Align, HasDataLayout, Size};
use super::{
AllocId, BadBytesAccess, CtfeProvenance, InterpError, InterpResult, Pointer, PointerArithmetic,
Provenance, ResourceExhaustionInfo, Scalar, ScalarSizeMismatch, UndefinedBehaviorInfo,
UnsupportedOpInfo, read_target_uint, write_target_uint,
UnsupportedOpInfo, interp_ok, read_target_uint, write_target_uint,
};
use crate::ty;
@ -318,8 +318,9 @@ impl<Prov: Provenance, Bytes: AllocBytes> Allocation<Prov, (), Bytes> {
pub fn try_uninit<'tcx>(size: Size, align: Align) -> InterpResult<'tcx, Self> {
Self::uninit_inner(size, align, || {
ty::tls::with(|tcx| tcx.dcx().delayed_bug("exhausted memory during interpretation"));
InterpError::ResourceExhaustion(ResourceExhaustionInfo::MemoryExhausted).into()
InterpError::ResourceExhaustion(ResourceExhaustionInfo::MemoryExhausted)
})
.into()
}
/// Try to create an Allocation of `size` bytes, panics if there is not enough memory
@ -355,12 +356,12 @@ impl<Prov: Provenance, Bytes: AllocBytes> Allocation<Prov, (), Bytes> {
impl Allocation {
/// Adjust allocation from the ones in `tcx` to a custom Machine instance
/// with a different `Provenance` and `Byte` type.
pub fn adjust_from_tcx<Prov: Provenance, Bytes: AllocBytes, Err>(
pub fn adjust_from_tcx<'tcx, Prov: Provenance, Bytes: AllocBytes>(
&self,
cx: &impl HasDataLayout,
mut alloc_bytes: impl FnMut(&[u8], Align) -> Result<Bytes, Err>,
mut adjust_ptr: impl FnMut(Pointer<CtfeProvenance>) -> Result<Pointer<Prov>, Err>,
) -> Result<Allocation<Prov, (), Bytes>, Err> {
mut alloc_bytes: impl FnMut(&[u8], Align) -> InterpResult<'tcx, Bytes>,
mut adjust_ptr: impl FnMut(Pointer<CtfeProvenance>) -> InterpResult<'tcx, Pointer<Prov>>,
) -> InterpResult<'tcx, Allocation<Prov, (), Bytes>> {
// Copy the data.
let mut bytes = alloc_bytes(&*self.bytes, self.align)?;
// Adjust provenance of pointers stored in this allocation.
@ -377,7 +378,7 @@ impl Allocation {
new_provenance.push((offset, ptr_prov));
}
// Create allocation.
Ok(Allocation {
interp_ok(Allocation {
bytes,
provenance: ProvenanceMap::from_presorted_ptrs(new_provenance),
init_mask: self.init_mask.clone(),

View File

@ -1,7 +1,7 @@
use std::any::Any;
use std::backtrace::Backtrace;
use std::borrow::Cow;
use std::fmt;
use std::{convert, fmt, mem, ops};
use either::Either;
use rustc_ast_ir::Mutability;
@ -104,6 +104,10 @@ rustc_data_structures::static_assert_size!(InterpErrorInfo<'_>, 8);
/// These should always be constructed by calling `.into()` on
/// an `InterpError`. In `rustc_mir::interpret`, we have `throw_err_*`
/// macros for this.
///
/// Interpreter errors must *not* be silently discarded (that will lead to a panic). Instead,
/// explicitly call `discard_err` if this is really the right thing to do. Note that if
/// this happens during const-eval or in Miri, it could lead to a UB error being lost!
#[derive(Debug)]
pub struct InterpErrorInfo<'tcx>(Box<InterpErrorInfoInner<'tcx>>);
@ -156,8 +160,11 @@ impl<'tcx> InterpErrorInfo<'tcx> {
}
pub fn into_kind(self) -> InterpError<'tcx> {
let InterpErrorInfo(box InterpErrorInfoInner { kind, .. }) = self;
kind
self.0.kind
}
pub fn from_parts(kind: InterpError<'tcx>, backtrace: InterpErrorBacktrace) -> Self {
Self(Box::new(InterpErrorInfoInner { kind, backtrace }))
}
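// Sketch of the `into_parts`/`from_parts` round trip that the reworked
// `try_validation!` macro relies on (illustrative types): take the error
// apart, possibly rewrite its kind, and rebuild it with the original
// backtrace preserved.
struct Backtrace(Vec<String>);
struct ErrorInfo {
    kind: String,
    backtrace: Backtrace,
}

impl ErrorInfo {
    fn into_parts(self) -> (String, Backtrace) {
        (self.kind, self.backtrace)
    }
    fn from_parts(kind: String, backtrace: Backtrace) -> Self {
        ErrorInfo { kind, backtrace }
    }
}

fn reclassify(err: ErrorInfo) -> ErrorInfo {
    let (kind, backtrace) = err.into_parts();
    let kind = if kind == "uninit" { format!("validation failure: {kind}") } else { kind };
    ErrorInfo::from_parts(kind, backtrace)
}

fn main() {
    let err = ErrorInfo { kind: "uninit".to_string(), backtrace: Backtrace(Vec::new()) };
    assert_eq!(reclassify(err).kind, "validation failure: uninit");
}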
#[inline]
@ -599,8 +606,6 @@ pub enum InterpError<'tcx> {
MachineStop(Box<dyn MachineStopType>),
}
pub type InterpResult<'tcx, T = ()> = Result<T, InterpErrorInfo<'tcx>>;
impl InterpError<'_> {
/// Some errors do string formatting even if the error is never printed.
/// To avoid performance issues, there are places where we want to be sure to never raise these formatting errors,
@ -728,3 +733,182 @@ macro_rules! throw_exhaust {
macro_rules! throw_machine_stop {
($($tt:tt)*) => { do yeet $crate::err_machine_stop!($($tt)*) };
}
/// Guard type that panics on drop.
#[derive(Debug)]
struct Guard;
impl Drop for Guard {
fn drop(&mut self) {
// We silence the guard if we are already panicking, to avoid double-panics.
if !std::thread::panicking() {
panic!(
"an interpreter error got improperly discarded; use `discard_err()` if this is intentional"
);
}
}
}
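// Self-contained miniature of the guard mechanism (simplified names, not the
// rustc types): a result that panics in its destructor unless it is consumed,
// which is exactly what makes a silently discarded interpreter error loud.
use std::mem;

struct DropBomb;
impl Drop for DropBomb {
    fn drop(&mut self) {
        if !std::thread::panicking() {
            panic!("a result was dropped without being checked");
        }
    }
}

struct MustUse<T, E> {
    res: Result<T, E>,
    guard: DropBomb,
}

impl<T, E> MustUse<T, E> {
    fn new(res: Result<T, E>) -> Self {
        MustUse { res, guard: DropBomb }
    }
    /// Defuse the guard and hand back the plain `Result`.
    fn disarm(self) -> Result<T, E> {
        mem::forget(self.guard);
        self.res
    }
}

fn main() {
    let r: MustUse<u32, &str> = MustUse::new(Err("undefined behavior"));
    // `drop(r)` here would panic via `DropBomb::drop`; consuming it is fine:
    assert!(r.disarm().is_err());
}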
/// The result type used by the interpreter. This is a newtype around `Result`
/// to block access to operations like `ok()` that discard UB errors.
///
/// We also make things panic if this type is ever implicitly dropped.
#[derive(Debug)]
pub struct InterpResult_<'tcx, T> {
res: Result<T, InterpErrorInfo<'tcx>>,
guard: Guard,
}
// Type alias to be able to set a default type argument.
pub type InterpResult<'tcx, T = ()> = InterpResult_<'tcx, T>;
impl<'tcx, T> ops::Try for InterpResult_<'tcx, T> {
type Output = T;
type Residual = InterpResult_<'tcx, convert::Infallible>;
#[inline]
fn from_output(output: Self::Output) -> Self {
InterpResult_::new(Ok(output))
}
#[inline]
fn branch(self) -> ops::ControlFlow<Self::Residual, Self::Output> {
match self.disarm() {
Ok(v) => ops::ControlFlow::Continue(v),
Err(e) => ops::ControlFlow::Break(InterpResult_::new(Err(e))),
}
}
}
impl<'tcx, T> ops::FromResidual for InterpResult_<'tcx, T> {
#[inline]
#[track_caller]
fn from_residual(residual: InterpResult_<'tcx, convert::Infallible>) -> Self {
match residual.disarm() {
Err(e) => Self::new(Err(e)),
}
}
}
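
With `Try` and `FromResidual` in place, `?` keeps working transparently: `branch` disarms the guard before inspecting the result and re-arms it inside the residual, and `from_output`/`from_residual` arm a fresh guard in whatever they build, so propagation never trips the panic. A hypothetical helper, shown for shape only (it uses interpreter-internal types and is not compilable on its own):

    fn add_one<'tcx>(x: InterpResult<'tcx, u64>) -> InterpResult<'tcx, u64> {
        let v = x?;          // on error, the guard travels on inside the propagated residual
        interp_ok(v + 1)     // success values are rebuilt with `interp_ok`, not a bare `Ok`
    }
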
// Allow `yeet`ing `InterpError` in functions returning `InterpResult_`.
impl<'tcx, T> ops::FromResidual<ops::Yeet<InterpError<'tcx>>> for InterpResult_<'tcx, T> {
#[inline]
fn from_residual(ops::Yeet(e): ops::Yeet<InterpError<'tcx>>) -> Self {
Self::new(Err(e.into()))
}
}
// Allow `?` on `Result<_, InterpError>` in functions returning `InterpResult_`.
// This is useful e.g. for `option.ok_or_else(|| err_ub!(...))`.
impl<'tcx, T, E: Into<InterpErrorInfo<'tcx>>> ops::FromResidual<Result<convert::Infallible, E>>
for InterpResult_<'tcx, T>
{
#[inline]
fn from_residual(residual: Result<convert::Infallible, E>) -> Self {
match residual {
Err(e) => Self::new(Err(e.into())),
}
}
}
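
The `Yeet` impl is what the `throw_*` macros above rely on (they expand to `do yeet err_*!(...)`), and the `Result` impl keeps the usual `Option`-to-error idiom working, as the comment notes. A hypothetical helper (`parse_bool` is not real compiler code) exercising both paths, for shape only:

    fn parse_bool<'tcx>(byte: Option<u8>) -> InterpResult<'tcx, bool> {
        // `?` on a plain `Result` is converted via `FromResidual<Result<_, E>>`:
        let byte = byte.ok_or_else(|| err_ub!(InvalidUninitBytes(None)))?;
        match byte {
            0 => interp_ok(false),
            1 => interp_ok(true),
            // the `throw_*` macros go through the `Yeet` impl above:
            _ => throw_ub!(InvalidBool(byte)),
        }
    }
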
impl<'tcx, T, E: Into<InterpErrorInfo<'tcx>>> From<Result<T, E>> for InterpResult<'tcx, T> {
#[inline]
fn from(value: Result<T, E>) -> Self {
Self::new(value.map_err(|e| e.into()))
}
}
impl<'tcx, T, V: FromIterator<T>> FromIterator<InterpResult<'tcx, T>> for InterpResult<'tcx, V> {
fn from_iter<I: IntoIterator<Item = InterpResult<'tcx, T>>>(iter: I) -> Self {
Self::new(iter.into_iter().map(|x| x.disarm()).collect())
}
}
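
Mirroring std's `FromIterator` for `Result`, this lets per-element interpreter work collect directly, with each element's guard disarmed as it is folded in and the usual short-circuit on the first error. A hypothetical helper, for shape only:

    fn total_len<'tcx>(
        lens: impl Iterator<Item = InterpResult<'tcx, u64>>,
    ) -> InterpResult<'tcx, u64> {
        let lens = lens.collect::<InterpResult<'tcx, Vec<u64>>>()?;
        interp_ok(lens.into_iter().sum())
    }
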
impl<'tcx, T> InterpResult_<'tcx, T> {
#[inline(always)]
fn new(res: Result<T, InterpErrorInfo<'tcx>>) -> Self {
Self { res, guard: Guard }
}
#[inline(always)]
fn disarm(self) -> Result<T, InterpErrorInfo<'tcx>> {
mem::forget(self.guard);
self.res
}
/// Discard the error information in this result. Only use this if ignoring Undefined Behavior is okay!
#[inline]
pub fn discard_err(self) -> Option<T> {
self.disarm().ok()
}
/// Look at the `Result` wrapped inside of this.
/// Must only be used to report the error!
#[inline]
pub fn report_err(self) -> Result<T, InterpErrorInfo<'tcx>> {
self.disarm()
}
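
`discard_err` and `report_err` are the only sanctioned exits from the guarded world: the first for best-effort consumers where a failure legitimately means "don't know" (as in the MIR optimization changes further down), the second for callers that take ownership of the error and surface it. Hypothetical call sites, for shape only (`op`, `process`, and `emit` are stand-ins):

    // Best-effort: the error is dropped on purpose, a failed read just yields `None`.
    let maybe_val = ecx.read_scalar(&op).discard_err();

    // Reporting: the plain `Result` comes back, and the caller must do something with the error.
    match ecx.read_scalar(&op).report_err() {
        Ok(v) => process(v),
        Err(err) => emit(err),
    }
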
#[inline]
pub fn map<U>(self, f: impl FnOnce(T) -> U) -> InterpResult<'tcx, U> {
InterpResult_::new(self.disarm().map(f))
}
#[inline]
pub fn map_err(
self,
f: impl FnOnce(InterpErrorInfo<'tcx>) -> InterpErrorInfo<'tcx>,
) -> InterpResult<'tcx, T> {
InterpResult_::new(self.disarm().map_err(f))
}
#[inline]
pub fn inspect_err(self, f: impl FnOnce(&InterpErrorInfo<'tcx>)) -> InterpResult<'tcx, T> {
InterpResult_::new(self.disarm().inspect_err(f))
}
#[inline]
#[track_caller]
pub fn unwrap(self) -> T {
self.disarm().unwrap()
}
#[inline]
#[track_caller]
pub fn unwrap_or_else(self, f: impl FnOnce(InterpErrorInfo<'tcx>) -> T) -> T {
self.disarm().unwrap_or_else(f)
}
#[inline]
#[track_caller]
pub fn expect(self, msg: &str) -> T {
self.disarm().expect(msg)
}
#[inline]
pub fn and_then<U>(self, f: impl FnOnce(T) -> InterpResult<'tcx, U>) -> InterpResult<'tcx, U> {
InterpResult_::new(self.disarm().and_then(|t| f(t).disarm()))
}
/// Returns success if both `self` and `other` succeed, while ensuring we don't
/// accidentally drop an error.
///
/// If both are an error, `self` will be reported.
#[inline]
pub fn and<U>(self, other: InterpResult<'tcx, U>) -> InterpResult<'tcx, (T, U)> {
match self.disarm() {
Ok(t) => interp_ok((t, other?)),
Err(e) => {
// Discard the other error.
drop(other.disarm());
// Return `self`.
InterpResult_::new(Err(e))
}
}
}
}
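
`and` exists for the case where two results are already in hand: bailing out of one with `?` would drop the other while its guard is still armed, so the combinator disarms whichever error it does not report. Hypothetical use, for shape only (`a` and `b` are stand-in operands):

    // Both operands are evaluated; if both fail, the left error is the one reported.
    let (lhs, rhs) = ecx.read_immediate(&a).and(ecx.read_immediate(&b))?;
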
#[inline(always)]
pub fn interp_ok<'tcx, T>(x: T) -> InterpResult<'tcx, T> {
InterpResult_::new(Ok(x))
}
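
`interp_ok` replaces bare `Ok(...)` at every success site, which is why the bulk of the remaining hunks in this PR are mechanical; the recurring patterns, roughly:

    Ok(())                                       =>  interp_ok(())
    self.ecx.read_scalar(op).ok()?               =>  self.ecx.read_scalar(op).discard_err()?
    if let Ok(imm) = ecx.read_immediate_raw(op)  =>  if let Some(imm) = ecx.read_immediate_raw(op).discard_err()
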

View File

@ -39,7 +39,7 @@ pub use self::error::{
InterpError, InterpErrorInfo, InterpResult, InvalidMetaKind, InvalidProgramInfo,
MachineStopType, Misalignment, PointerKind, ReportedErrorInfo, ResourceExhaustionInfo,
ScalarSizeMismatch, UndefinedBehaviorInfo, UnsupportedOpInfo, ValidationErrorInfo,
ValidationErrorKind,
ValidationErrorKind, interp_ok,
};
pub use self::pointer::{CtfeProvenance, Pointer, PointerArithmetic, Provenance};
pub use self::value::Scalar;

View File

@ -8,7 +8,7 @@ use rustc_target::abi::{HasDataLayout, Size};
use super::{
AllocId, CtfeProvenance, InterpResult, Pointer, PointerArithmetic, Provenance,
ScalarSizeMismatch,
ScalarSizeMismatch, interp_ok,
};
use crate::ty::ScalarInt;
@ -273,10 +273,10 @@ impl<'tcx, Prov: Provenance> Scalar<Prov> {
.to_bits_or_ptr_internal(cx.pointer_size())
.map_err(|s| err_ub!(ScalarSizeMismatch(s)))?
{
Right(ptr) => Ok(ptr.into()),
Right(ptr) => interp_ok(ptr.into()),
Left(bits) => {
let addr = u64::try_from(bits).unwrap();
Ok(Pointer::from_addr_invalid(addr))
interp_ok(Pointer::from_addr_invalid(addr))
}
}
}
@ -311,12 +311,12 @@ impl<'tcx, Prov: Provenance> Scalar<Prov> {
if matches!(self, Scalar::Ptr(..)) {
*self = self.to_scalar_int()?.into();
}
Ok(())
interp_ok(())
}
#[inline(always)]
pub fn to_scalar_int(self) -> InterpResult<'tcx, ScalarInt> {
self.try_to_scalar_int().map_err(|_| err_unsup!(ReadPointerAsInt(None)).into())
self.try_to_scalar_int().map_err(|_| err_unsup!(ReadPointerAsInt(None))).into()
}
#[inline(always)]
@ -330,20 +330,22 @@ impl<'tcx, Prov: Provenance> Scalar<Prov> {
#[inline]
pub fn to_bits(self, target_size: Size) -> InterpResult<'tcx, u128> {
assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST");
self.to_scalar_int()?.try_to_bits(target_size).map_err(|size| {
self.to_scalar_int()?
.try_to_bits(target_size)
.map_err(|size| {
err_ub!(ScalarSizeMismatch(ScalarSizeMismatch {
target_size: target_size.bytes(),
data_size: size.bytes(),
}))
.into()
})
.into()
}
pub fn to_bool(self) -> InterpResult<'tcx, bool> {
let val = self.to_u8()?;
match val {
0 => Ok(false),
1 => Ok(true),
0 => interp_ok(false),
1 => interp_ok(true),
_ => throw_ub!(InvalidBool(val)),
}
}
@ -351,7 +353,7 @@ impl<'tcx, Prov: Provenance> Scalar<Prov> {
pub fn to_char(self) -> InterpResult<'tcx, char> {
let val = self.to_u32()?;
match std::char::from_u32(val) {
Some(c) => Ok(c),
Some(c) => interp_ok(c),
None => throw_ub!(InvalidChar(val)),
}
}
@ -392,7 +394,7 @@ impl<'tcx, Prov: Provenance> Scalar<Prov> {
/// Fails if the scalar is a pointer.
pub fn to_target_usize(self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> {
let b = self.to_uint(cx.data_layout().pointer_size)?;
Ok(u64::try_from(b).unwrap())
interp_ok(u64::try_from(b).unwrap())
}
/// Converts the scalar to produce a signed integer of the given size.
@ -400,7 +402,7 @@ impl<'tcx, Prov: Provenance> Scalar<Prov> {
#[inline]
pub fn to_int(self, size: Size) -> InterpResult<'tcx, i128> {
let b = self.to_bits(size)?;
Ok(size.sign_extend(b))
interp_ok(size.sign_extend(b))
}
/// Converts the scalar to produce an `i8`. Fails if the scalar is a pointer.
@ -432,13 +434,13 @@ impl<'tcx, Prov: Provenance> Scalar<Prov> {
/// Fails if the scalar is a pointer.
pub fn to_target_isize(self, cx: &impl HasDataLayout) -> InterpResult<'tcx, i64> {
let b = self.to_int(cx.data_layout().pointer_size)?;
Ok(i64::try_from(b).unwrap())
interp_ok(i64::try_from(b).unwrap())
}
#[inline]
pub fn to_float<F: Float>(self) -> InterpResult<'tcx, F> {
// Going through `to_bits` to check size and truncation.
Ok(F::from_bits(self.to_bits(Size::from_bits(F::BITS))?))
interp_ok(F::from_bits(self.to_bits(Size::from_bits(F::BITS))?))
}
#[inline]

View File

@ -519,7 +519,7 @@ impl<'tcx> Const<'tcx> {
}
pub fn try_to_bool(self) -> Option<bool> {
self.try_to_scalar()?.to_bool().ok()
self.try_to_valtree()?.try_to_scalar_int()?.try_to_bool().ok()
}
#[inline]

View File

@ -3,7 +3,9 @@
//! Currently, this pass only propagates scalar values.
use rustc_const_eval::const_eval::{DummyMachine, throw_machine_stop_str};
use rustc_const_eval::interpret::{ImmTy, Immediate, InterpCx, OpTy, PlaceTy, Projectable};
use rustc_const_eval::interpret::{
ImmTy, Immediate, InterpCx, OpTy, PlaceTy, Projectable, interp_ok,
};
use rustc_data_structures::fx::FxHashMap;
use rustc_hir::def::DefKind;
use rustc_middle::bug;
@ -236,6 +238,7 @@ impl<'tcx> ValueAnalysis<'tcx> for ConstAnalysis<'_, 'tcx> {
FlatSet::Elem(op) => self
.ecx
.int_to_int_or_float(&op, layout)
.discard_err()
.map_or(FlatSet::Top, |result| self.wrap_immediate(*result)),
FlatSet::Bottom => FlatSet::Bottom,
FlatSet::Top => FlatSet::Top,
@ -249,6 +252,7 @@ impl<'tcx> ValueAnalysis<'tcx> for ConstAnalysis<'_, 'tcx> {
FlatSet::Elem(op) => self
.ecx
.float_to_float_or_int(&op, layout)
.discard_err()
.map_or(FlatSet::Top, |result| self.wrap_immediate(*result)),
FlatSet::Bottom => FlatSet::Bottom,
FlatSet::Top => FlatSet::Top,
@ -271,6 +275,7 @@ impl<'tcx> ValueAnalysis<'tcx> for ConstAnalysis<'_, 'tcx> {
FlatSet::Elem(value) => self
.ecx
.unary_op(*op, &value)
.discard_err()
.map_or(FlatSet::Top, |val| self.wrap_immediate(*val)),
FlatSet::Bottom => FlatSet::Bottom,
FlatSet::Top => FlatSet::Top,
@ -364,8 +369,8 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> {
}
}
Operand::Constant(box constant) => {
if let Ok(constant) =
self.ecx.eval_mir_constant(&constant.const_, constant.span, None)
if let Some(constant) =
self.ecx.eval_mir_constant(&constant.const_, constant.span, None).discard_err()
{
self.assign_constant(state, place, constant, &[]);
}
@ -387,7 +392,7 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> {
for &(mut proj_elem) in projection {
if let PlaceElem::Index(index) = proj_elem {
if let FlatSet::Elem(index) = state.get(index.into(), &self.map)
&& let Ok(offset) = index.to_target_usize(&self.tcx)
&& let Some(offset) = index.to_target_usize(&self.tcx).discard_err()
&& let Some(min_length) = offset.checked_add(1)
{
proj_elem = PlaceElem::ConstantIndex { offset, min_length, from_end: false };
@ -395,7 +400,7 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> {
return;
}
}
operand = if let Ok(operand) = self.ecx.project(&operand, proj_elem) {
operand = if let Some(operand) = self.ecx.project(&operand, proj_elem).discard_err() {
operand
} else {
return;
@ -406,24 +411,24 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> {
place,
operand,
&mut |elem, op| match elem {
TrackElem::Field(idx) => self.ecx.project_field(op, idx.as_usize()).ok(),
TrackElem::Variant(idx) => self.ecx.project_downcast(op, idx).ok(),
TrackElem::Field(idx) => self.ecx.project_field(op, idx.as_usize()).discard_err(),
TrackElem::Variant(idx) => self.ecx.project_downcast(op, idx).discard_err(),
TrackElem::Discriminant => {
let variant = self.ecx.read_discriminant(op).ok()?;
let variant = self.ecx.read_discriminant(op).discard_err()?;
let discr_value =
self.ecx.discriminant_for_variant(op.layout.ty, variant).ok()?;
self.ecx.discriminant_for_variant(op.layout.ty, variant).discard_err()?;
Some(discr_value.into())
}
TrackElem::DerefLen => {
let op: OpTy<'_> = self.ecx.deref_pointer(op).ok()?.into();
let len_usize = op.len(&self.ecx).ok()?;
let op: OpTy<'_> = self.ecx.deref_pointer(op).discard_err()?.into();
let len_usize = op.len(&self.ecx).discard_err()?;
let layout =
self.tcx.layout_of(self.param_env.and(self.tcx.types.usize)).unwrap();
Some(ImmTy::from_uint(len_usize, layout).into())
}
},
&mut |place, op| {
if let Ok(imm) = self.ecx.read_immediate_raw(op)
if let Some(imm) = self.ecx.read_immediate_raw(op).discard_err()
&& let Some(imm) = imm.right()
{
let elem = self.wrap_immediate(*imm);
@ -447,11 +452,11 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> {
(FlatSet::Bottom, _) | (_, FlatSet::Bottom) => (FlatSet::Bottom, FlatSet::Bottom),
// Both sides are known, do the actual computation.
(FlatSet::Elem(left), FlatSet::Elem(right)) => {
match self.ecx.binary_op(op, &left, &right) {
match self.ecx.binary_op(op, &left, &right).discard_err() {
// Ideally this would return an Immediate, since it's sometimes
// a pair and sometimes not. But as a hack we always return a pair
// and just make the 2nd component `Bottom` when it does not exist.
Ok(val) => {
Some(val) => {
if matches!(val.layout.abi, Abi::ScalarPair(..)) {
let (val, overflow) = val.to_scalar_pair();
(FlatSet::Elem(val), FlatSet::Elem(overflow))
@ -470,7 +475,7 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> {
}
let arg_scalar = const_arg.to_scalar();
let Ok(arg_value) = arg_scalar.to_bits(layout.size) else {
let Some(arg_value) = arg_scalar.to_bits(layout.size).discard_err() else {
return (FlatSet::Top, FlatSet::Top);
};
@ -519,7 +524,7 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> {
}
let enum_ty_layout = self.tcx.layout_of(self.param_env.and(enum_ty)).ok()?;
let discr_value =
self.ecx.discriminant_for_variant(enum_ty_layout.ty, variant_index).ok()?;
self.ecx.discriminant_for_variant(enum_ty_layout.ty, variant_index).discard_err()?;
Some(discr_value.to_scalar())
}
@ -595,7 +600,7 @@ impl<'a, 'tcx> Collector<'a, 'tcx> {
.intern_with_temp_alloc(layout, |ecx, dest| {
try_write_constant(ecx, dest, place, ty, state, map)
})
.ok()?;
.discard_err()?;
return Some(Const::Val(ConstValue::Indirect { alloc_id, offset: Size::ZERO }, ty));
}
@ -632,7 +637,7 @@ fn try_write_constant<'tcx>(
// Fast path for ZSTs.
if layout.is_zst() {
return Ok(());
return interp_ok(());
}
// Fast path for scalars.
@ -717,7 +722,7 @@ fn try_write_constant<'tcx>(
ty::Error(_) | ty::Infer(..) | ty::CoroutineWitness(..) => bug!(),
}
Ok(())
interp_ok(())
}
impl<'mir, 'tcx>
@ -830,7 +835,7 @@ impl<'tcx> MutVisitor<'tcx> for Patch<'tcx> {
if let PlaceElem::Index(local) = elem {
let offset = self.before_effect.get(&(location, local.into()))?;
let offset = offset.try_to_scalar()?;
let offset = offset.to_target_usize(&self.tcx).ok()?;
let offset = offset.to_target_usize(&self.tcx).discard_err()?;
let min_length = offset.checked_add(1)?;
Some(PlaceElem::ConstantIndex { offset, min_length, from_end: false })
} else {

View File

@ -393,7 +393,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
Repeat(..) => return None,
Constant { ref value, disambiguator: _ } => {
self.ecx.eval_mir_constant(value, DUMMY_SP, None).ok()?
self.ecx.eval_mir_constant(value, DUMMY_SP, None).discard_err()?
}
Aggregate(kind, variant, ref fields) => {
let fields = fields
@ -419,29 +419,32 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
ImmTy::uninit(ty).into()
} else if matches!(kind, AggregateTy::RawPtr { .. }) {
// Pointers don't have fields, so don't `project_field` them.
let data = self.ecx.read_pointer(fields[0]).ok()?;
let data = self.ecx.read_pointer(fields[0]).discard_err()?;
let meta = if fields[1].layout.is_zst() {
MemPlaceMeta::None
} else {
MemPlaceMeta::Meta(self.ecx.read_scalar(fields[1]).ok()?)
MemPlaceMeta::Meta(self.ecx.read_scalar(fields[1]).discard_err()?)
};
let ptr_imm = Immediate::new_pointer_with_meta(data, meta, &self.ecx);
ImmTy::from_immediate(ptr_imm, ty).into()
} else if matches!(ty.abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
let dest = self.ecx.allocate(ty, MemoryKind::Stack).ok()?;
let dest = self.ecx.allocate(ty, MemoryKind::Stack).discard_err()?;
let variant_dest = if let Some(variant) = variant {
self.ecx.project_downcast(&dest, variant).ok()?
self.ecx.project_downcast(&dest, variant).discard_err()?
} else {
dest.clone()
};
for (field_index, op) in fields.into_iter().enumerate() {
let field_dest = self.ecx.project_field(&variant_dest, field_index).ok()?;
self.ecx.copy_op(op, &field_dest).ok()?;
let field_dest =
self.ecx.project_field(&variant_dest, field_index).discard_err()?;
self.ecx.copy_op(op, &field_dest).discard_err()?;
}
self.ecx.write_discriminant(variant.unwrap_or(FIRST_VARIANT), &dest).ok()?;
self.ecx
.write_discriminant(variant.unwrap_or(FIRST_VARIANT), &dest)
.discard_err()?;
self.ecx
.alloc_mark_immutable(dest.ptr().provenance.unwrap().alloc_id())
.ok()?;
.discard_err()?;
dest.into()
} else {
return None;
@ -467,7 +470,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
// This should have been replaced by a `ConstantIndex` earlier.
ProjectionElem::Index(_) => return None,
};
self.ecx.project(value, elem).ok()?
self.ecx.project(value, elem).discard_err()?
}
Address { place, kind, provenance: _ } => {
if !place.is_indirect_first_projection() {
@ -475,14 +478,14 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
}
let local = self.locals[place.local]?;
let pointer = self.evaluated[local].as_ref()?;
let mut mplace = self.ecx.deref_pointer(pointer).ok()?;
let mut mplace = self.ecx.deref_pointer(pointer).discard_err()?;
for proj in place.projection.iter().skip(1) {
// We have no call stack to associate a local with a value, so we cannot
// interpret indexing.
if matches!(proj, ProjectionElem::Index(_)) {
return None;
}
mplace = self.ecx.project(&mplace, proj).ok()?;
mplace = self.ecx.project(&mplace, proj).discard_err()?;
}
let pointer = mplace.to_ref(&self.ecx);
let ty = match kind {
@ -500,15 +503,15 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
Discriminant(base) => {
let base = self.evaluated[base].as_ref()?;
let variant = self.ecx.read_discriminant(base).ok()?;
let variant = self.ecx.read_discriminant(base).discard_err()?;
let discr_value =
self.ecx.discriminant_for_variant(base.layout.ty, variant).ok()?;
self.ecx.discriminant_for_variant(base.layout.ty, variant).discard_err()?;
discr_value.into()
}
Len(slice) => {
let slice = self.evaluated[slice].as_ref()?;
let usize_layout = self.ecx.layout_of(self.tcx.types.usize).unwrap();
let len = slice.len(&self.ecx).ok()?;
let len = slice.len(&self.ecx).discard_err()?;
let imm = ImmTy::from_uint(len, usize_layout);
imm.into()
}
@ -535,31 +538,31 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
}
UnaryOp(un_op, operand) => {
let operand = self.evaluated[operand].as_ref()?;
let operand = self.ecx.read_immediate(operand).ok()?;
let val = self.ecx.unary_op(un_op, &operand).ok()?;
let operand = self.ecx.read_immediate(operand).discard_err()?;
let val = self.ecx.unary_op(un_op, &operand).discard_err()?;
val.into()
}
BinaryOp(bin_op, lhs, rhs) => {
let lhs = self.evaluated[lhs].as_ref()?;
let lhs = self.ecx.read_immediate(lhs).ok()?;
let lhs = self.ecx.read_immediate(lhs).discard_err()?;
let rhs = self.evaluated[rhs].as_ref()?;
let rhs = self.ecx.read_immediate(rhs).ok()?;
let val = self.ecx.binary_op(bin_op, &lhs, &rhs).ok()?;
let rhs = self.ecx.read_immediate(rhs).discard_err()?;
let val = self.ecx.binary_op(bin_op, &lhs, &rhs).discard_err()?;
val.into()
}
Cast { kind, value, from: _, to } => match kind {
CastKind::IntToInt | CastKind::IntToFloat => {
let value = self.evaluated[value].as_ref()?;
let value = self.ecx.read_immediate(value).ok()?;
let value = self.ecx.read_immediate(value).discard_err()?;
let to = self.ecx.layout_of(to).ok()?;
let res = self.ecx.int_to_int_or_float(&value, to).ok()?;
let res = self.ecx.int_to_int_or_float(&value, to).discard_err()?;
res.into()
}
CastKind::FloatToFloat | CastKind::FloatToInt => {
let value = self.evaluated[value].as_ref()?;
let value = self.ecx.read_immediate(value).ok()?;
let value = self.ecx.read_immediate(value).discard_err()?;
let to = self.ecx.layout_of(to).ok()?;
let res = self.ecx.float_to_float_or_int(&value, to).ok()?;
let res = self.ecx.float_to_float_or_int(&value, to).discard_err()?;
res.into()
}
CastKind::Transmute => {
@ -574,28 +577,28 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
_ => return None,
}
}
value.offset(Size::ZERO, to, &self.ecx).ok()?
value.offset(Size::ZERO, to, &self.ecx).discard_err()?
}
CastKind::PointerCoercion(ty::adjustment::PointerCoercion::Unsize, _) => {
let src = self.evaluated[value].as_ref()?;
let to = self.ecx.layout_of(to).ok()?;
let dest = self.ecx.allocate(to, MemoryKind::Stack).ok()?;
self.ecx.unsize_into(src, to, &dest.clone().into()).ok()?;
let dest = self.ecx.allocate(to, MemoryKind::Stack).discard_err()?;
self.ecx.unsize_into(src, to, &dest.clone().into()).discard_err()?;
self.ecx
.alloc_mark_immutable(dest.ptr().provenance.unwrap().alloc_id())
.ok()?;
.discard_err()?;
dest.into()
}
CastKind::FnPtrToPtr | CastKind::PtrToPtr => {
let src = self.evaluated[value].as_ref()?;
let src = self.ecx.read_immediate(src).ok()?;
let src = self.ecx.read_immediate(src).discard_err()?;
let to = self.ecx.layout_of(to).ok()?;
let ret = self.ecx.ptr_to_ptr(&src, to).ok()?;
let ret = self.ecx.ptr_to_ptr(&src, to).discard_err()?;
ret.into()
}
CastKind::PointerCoercion(ty::adjustment::PointerCoercion::UnsafeFnPointer, _) => {
let src = self.evaluated[value].as_ref()?;
let src = self.ecx.read_immediate(src).ok()?;
let src = self.ecx.read_immediate(src).discard_err()?;
let to = self.ecx.layout_of(to).ok()?;
ImmTy::from_immediate(*src, to).into()
}
@ -708,7 +711,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
&& let Some(idx) = self.locals[idx_local]
{
if let Some(offset) = self.evaluated[idx].as_ref()
&& let Ok(offset) = self.ecx.read_target_usize(offset)
&& let Some(offset) = self.ecx.read_target_usize(offset).discard_err()
&& let Some(min_length) = offset.checked_add(1)
{
projection.to_mut()[i] =
@ -868,7 +871,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
&& let DefKind::Enum = self.tcx.def_kind(enum_did)
{
let enum_ty = self.tcx.type_of(enum_did).instantiate(self.tcx, enum_args);
let discr = self.ecx.discriminant_for_variant(enum_ty, variant).ok()?;
let discr = self.ecx.discriminant_for_variant(enum_ty, variant).discard_err()?;
return Some(self.insert_scalar(discr.to_scalar(), discr.layout.ty));
}
@ -1223,8 +1226,8 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
let as_bits = |value| {
let constant = self.evaluated[value].as_ref()?;
if layout.abi.is_scalar() {
let scalar = self.ecx.read_scalar(constant).ok()?;
scalar.to_bits(constant.layout.size).ok()
let scalar = self.ecx.read_scalar(constant).discard_err()?;
scalar.to_bits(constant.layout.size).discard_err()
} else {
// `constant` is a wide pointer. Do not evaluate to bits.
None
@ -1484,7 +1487,7 @@ fn op_to_prop_const<'tcx>(
// If this constant has scalar ABI, return it as a `ConstValue::Scalar`.
if let Abi::Scalar(abi::Scalar::Initialized { .. }) = op.layout.abi
&& let Ok(scalar) = ecx.read_scalar(op)
&& let Some(scalar) = ecx.read_scalar(op).discard_err()
{
if !scalar.try_to_scalar_int().is_ok() {
// Check that we do not leak a pointer.
@ -1498,12 +1501,12 @@ fn op_to_prop_const<'tcx>(
// If this constant is already represented as an `Allocation`,
// try putting it into global memory to return it.
if let Either::Left(mplace) = op.as_mplace_or_imm() {
let (size, _align) = ecx.size_and_align_of_mplace(&mplace).ok()??;
let (size, _align) = ecx.size_and_align_of_mplace(&mplace).discard_err()??;
// Do not try interning a value that contains provenance.
// Due to https://github.com/rust-lang/rust/issues/79738, doing so could lead to bugs.
// FIXME: remove this hack once that issue is fixed.
let alloc_ref = ecx.get_ptr_alloc(mplace.ptr(), size).ok()??;
let alloc_ref = ecx.get_ptr_alloc(mplace.ptr(), size).discard_err()??;
if alloc_ref.has_provenance() {
return None;
}
@ -1511,7 +1514,7 @@ fn op_to_prop_const<'tcx>(
let pointer = mplace.ptr().into_pointer_or_addr().ok()?;
let (prov, offset) = pointer.into_parts();
let alloc_id = prov.alloc_id();
intern_const_alloc_for_constprop(ecx, alloc_id).ok()?;
intern_const_alloc_for_constprop(ecx, alloc_id).discard_err()?;
// `alloc_id` may point to a static. Codegen will choke on an `Indirect` with anything
// but `GlobalAlloc::Memory`, so do fall through to copying if needed.
@ -1526,7 +1529,8 @@ fn op_to_prop_const<'tcx>(
}
// Everything failed: create a new allocation to hold the data.
let alloc_id = ecx.intern_with_temp_alloc(op.layout, |ecx, dest| ecx.copy_op(op, dest)).ok()?;
let alloc_id =
ecx.intern_with_temp_alloc(op.layout, |ecx, dest| ecx.copy_op(op, dest)).discard_err()?;
let value = ConstValue::Indirect { alloc_id, offset: Size::ZERO };
// Check that we do not leak a pointer.

View File

@ -200,7 +200,9 @@ impl<'a, 'tcx> TOFinder<'a, 'tcx> {
debug!(?discr, ?bb);
let discr_ty = discr.ty(self.body, self.tcx).ty;
let Ok(discr_layout) = self.ecx.layout_of(discr_ty) else { return };
let Ok(discr_layout) = self.ecx.layout_of(discr_ty) else {
return;
};
let Some(discr) = self.map.find(discr.as_ref()) else { return };
debug!(?discr);
@ -388,24 +390,24 @@ impl<'a, 'tcx> TOFinder<'a, 'tcx> {
lhs,
constant,
&mut |elem, op| match elem {
TrackElem::Field(idx) => self.ecx.project_field(op, idx.as_usize()).ok(),
TrackElem::Variant(idx) => self.ecx.project_downcast(op, idx).ok(),
TrackElem::Field(idx) => self.ecx.project_field(op, idx.as_usize()).discard_err(),
TrackElem::Variant(idx) => self.ecx.project_downcast(op, idx).discard_err(),
TrackElem::Discriminant => {
let variant = self.ecx.read_discriminant(op).ok()?;
let variant = self.ecx.read_discriminant(op).discard_err()?;
let discr_value =
self.ecx.discriminant_for_variant(op.layout.ty, variant).ok()?;
self.ecx.discriminant_for_variant(op.layout.ty, variant).discard_err()?;
Some(discr_value.into())
}
TrackElem::DerefLen => {
let op: OpTy<'_> = self.ecx.deref_pointer(op).ok()?.into();
let len_usize = op.len(&self.ecx).ok()?;
let op: OpTy<'_> = self.ecx.deref_pointer(op).discard_err()?.into();
let len_usize = op.len(&self.ecx).discard_err()?;
let layout = self.ecx.layout_of(self.tcx.types.usize).unwrap();
Some(ImmTy::from_uint(len_usize, layout).into())
}
},
&mut |place, op| {
if let Some(conditions) = state.try_get_idx(place, &self.map)
&& let Ok(imm) = self.ecx.read_immediate_raw(op)
&& let Some(imm) = self.ecx.read_immediate_raw(op).discard_err()
&& let Some(imm) = imm.right()
&& let Immediate::Scalar(Scalar::Int(int)) = *imm
{
@ -429,8 +431,8 @@ impl<'a, 'tcx> TOFinder<'a, 'tcx> {
match rhs {
// If we expect `lhs ?= A`, we have an opportunity if we assume `constant == A`.
Operand::Constant(constant) => {
let Ok(constant) =
self.ecx.eval_mir_constant(&constant.const_, constant.span, None)
let Some(constant) =
self.ecx.eval_mir_constant(&constant.const_, constant.span, None).discard_err()
else {
return;
};
@ -469,8 +471,10 @@ impl<'a, 'tcx> TOFinder<'a, 'tcx> {
AggregateKind::Adt(.., Some(_)) => return,
AggregateKind::Adt(_, variant_index, ..) if agg_ty.is_enum() => {
if let Some(discr_target) = self.map.apply(lhs, TrackElem::Discriminant)
&& let Ok(discr_value) =
self.ecx.discriminant_for_variant(agg_ty, *variant_index)
&& let Some(discr_value) = self
.ecx
.discriminant_for_variant(agg_ty, *variant_index)
.discard_err()
{
self.process_immediate(bb, discr_target, discr_value, state);
}
@ -555,7 +559,9 @@ impl<'a, 'tcx> TOFinder<'a, 'tcx> {
// `SetDiscriminant` may be a no-op if the assigned variant is the untagged variant
// of a niche encoding. If we cannot ensure that we write to the discriminant, do
// nothing.
let Ok(enum_layout) = self.ecx.layout_of(enum_ty) else { return };
let Ok(enum_layout) = self.ecx.layout_of(enum_ty) else {
return;
};
let writes_discriminant = match enum_layout.variants {
Variants::Single { index } => {
assert_eq!(index, *variant_index);
@ -568,7 +574,8 @@ impl<'a, 'tcx> TOFinder<'a, 'tcx> {
} => *variant_index != untagged_variant,
};
if writes_discriminant {
let Ok(discr) = self.ecx.discriminant_for_variant(enum_ty, *variant_index)
let Some(discr) =
self.ecx.discriminant_for_variant(enum_ty, *variant_index).discard_err()
else {
return;
};
@ -645,7 +652,9 @@ impl<'a, 'tcx> TOFinder<'a, 'tcx> {
let Some(discr) = discr.place() else { return };
let discr_ty = discr.ty(self.body, self.tcx).ty;
let Ok(discr_layout) = self.ecx.layout_of(discr_ty) else { return };
let Ok(discr_layout) = self.ecx.layout_of(discr_ty) else {
return;
};
let Some(conditions) = state.try_get(discr.as_ref(), &self.map) else { return };
if let Some((value, _)) = targets.iter().find(|&(_, target)| target == target_bb) {

View File

@ -6,7 +6,7 @@ use std::fmt::Debug;
use rustc_const_eval::const_eval::DummyMachine;
use rustc_const_eval::interpret::{
ImmTy, InterpCx, InterpResult, Projectable, Scalar, format_interp_error,
ImmTy, InterpCx, InterpResult, Projectable, Scalar, format_interp_error, interp_ok,
};
use rustc_data_structures::fx::FxHashSet;
use rustc_hir::HirId;
@ -101,7 +101,7 @@ impl<'tcx> Value<'tcx> {
}
(PlaceElem::Index(idx), Value::Aggregate { fields, .. }) => {
let idx = prop.get_const(idx.into())?.immediate()?;
let idx = prop.ecx.read_target_usize(idx).ok()?.try_into().ok()?;
let idx = prop.ecx.read_target_usize(idx).discard_err()?.try_into().ok()?;
if idx <= FieldIdx::MAX_AS_U32 {
fields.get(FieldIdx::from_u32(idx)).unwrap_or(&Value::Uninit)
} else {
@ -231,21 +231,20 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
where
F: FnOnce(&mut Self) -> InterpResult<'tcx, T>,
{
match f(self) {
Ok(val) => Some(val),
Err(error) => {
trace!("InterpCx operation failed: {:?}", error);
f(self)
.map_err(|err| {
trace!("InterpCx operation failed: {:?}", err);
// Some errors shouldn't come up because creating them causes
// an allocation, which we should avoid. When that happens,
// dedicated error variants should be introduced instead.
assert!(
!error.kind().formatted_string(),
!err.kind().formatted_string(),
"known panics lint encountered formatting error: {}",
format_interp_error(self.ecx.tcx.dcx(), error),
format_interp_error(self.ecx.tcx.dcx(), err),
);
None
}
}
err
})
.discard_err()
}
/// Returns the value, if any, of evaluating `c`.
@ -315,7 +314,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
.ecx
.binary_op(BinOp::SubWithOverflow, &ImmTy::from_int(0, arg.layout), &arg)?
.to_scalar_pair();
Ok((arg, overflow.to_bool()?))
interp_ok((arg, overflow.to_bool()?))
})?;
if overflow {
self.report_assert_as_lint(
@ -349,7 +348,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
let left_ty = left.ty(self.local_decls(), self.tcx);
let left_size = self.ecx.layout_of(left_ty).ok()?.size;
let right_size = r.layout.size;
let r_bits = r.to_scalar().to_bits(right_size).ok();
let r_bits = r.to_scalar().to_bits(right_size).discard_err();
if r_bits.is_some_and(|b| b >= left_size.bits() as u128) {
debug!("check_binary_op: reporting assert for {:?}", location);
let panic = AssertKind::Overflow(
@ -496,7 +495,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
// This can be `None` if the lhs wasn't const propagated and we just
// triggered the assert on the value of the rhs.
self.eval_operand(op)
.and_then(|op| self.ecx.read_immediate(&op).ok())
.and_then(|op| self.ecx.read_immediate(&op).discard_err())
.map_or(DbgVal::Underscore, |op| DbgVal::Val(op.to_const_int()))
};
let msg = match msg {
@ -602,7 +601,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
Len(place) => {
let len = match self.get_const(place)? {
Value::Immediate(src) => src.len(&self.ecx).ok()?,
Value::Immediate(src) => src.len(&self.ecx).discard_err()?,
Value::Aggregate { fields, .. } => fields.len() as u64,
Value::Uninit => match place.ty(self.local_decls(), self.tcx).ty.kind() {
ty::Array(_, n) => n.try_eval_target_usize(self.tcx, self.param_env)?,
@ -615,7 +614,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
Ref(..) | RawPtr(..) => return None,
NullaryOp(ref null_op, ty) => {
let op_layout = self.use_ecx(|this| this.ecx.layout_of(ty))?;
let op_layout = self.ecx.layout_of(ty).ok()?;
let val = match null_op {
NullOp::SizeOf => op_layout.size.bytes(),
NullOp::AlignOf => op_layout.align.abi.bytes(),
@ -633,16 +632,16 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
Cast(ref kind, ref value, to) => match kind {
CastKind::IntToInt | CastKind::IntToFloat => {
let value = self.eval_operand(value)?;
let value = self.ecx.read_immediate(&value).ok()?;
let value = self.ecx.read_immediate(&value).discard_err()?;
let to = self.ecx.layout_of(to).ok()?;
let res = self.ecx.int_to_int_or_float(&value, to).ok()?;
let res = self.ecx.int_to_int_or_float(&value, to).discard_err()?;
res.into()
}
CastKind::FloatToFloat | CastKind::FloatToInt => {
let value = self.eval_operand(value)?;
let value = self.ecx.read_immediate(&value).ok()?;
let value = self.ecx.read_immediate(&value).discard_err()?;
let to = self.ecx.layout_of(to).ok()?;
let res = self.ecx.float_to_float_or_int(&value, to).ok()?;
let res = self.ecx.float_to_float_or_int(&value, to).discard_err()?;
res.into()
}
CastKind::Transmute => {
@ -656,7 +655,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
_ => return None,
}
value.offset(Size::ZERO, to, &self.ecx).ok()?.into()
value.offset(Size::ZERO, to, &self.ecx).discard_err()?.into()
}
_ => return None,
},
@ -781,7 +780,7 @@ impl<'tcx> Visitor<'tcx> for ConstPropagator<'_, 'tcx> {
TerminatorKind::SwitchInt { ref discr, ref targets } => {
if let Some(ref value) = self.eval_operand(discr)
&& let Some(value_const) = self.use_ecx(|this| this.ecx.read_scalar(value))
&& let Ok(constant) = value_const.to_bits(value_const.size())
&& let Some(constant) = value_const.to_bits(value_const.size()).discard_err()
{
// We managed to evaluate the discriminant, so we know we only need to visit
// one target.

View File

@ -870,10 +870,10 @@ pub fn mir_to_const<'tcx>(tcx: TyCtxt<'tcx>, result: mir::Const<'tcx>) -> Option
let range = alloc_range(offset + size * idx, size);
let val = alloc.read_scalar(&tcx, range, /* read_provenance */ false).ok()?;
res.push(match flt {
FloatTy::F16 => Constant::F16(f16::from_bits(val.to_u16().ok()?)),
FloatTy::F32 => Constant::F32(f32::from_bits(val.to_u32().ok()?)),
FloatTy::F64 => Constant::F64(f64::from_bits(val.to_u64().ok()?)),
FloatTy::F128 => Constant::F128(f128::from_bits(val.to_u128().ok()?)),
FloatTy::F16 => Constant::F16(f16::from_bits(val.to_u16().discard_err()?)),
FloatTy::F32 => Constant::F32(f32::from_bits(val.to_u32().discard_err()?)),
FloatTy::F64 => Constant::F64(f64::from_bits(val.to_u64().discard_err()?)),
FloatTy::F128 => Constant::F128(f128::from_bits(val.to_u128().discard_err()?)),
});
}
Some(Constant::Vec(res))
@ -903,7 +903,7 @@ fn mir_is_empty<'tcx>(tcx: TyCtxt<'tcx>, result: mir::Const<'tcx>) -> Option<boo
.read_scalar(&tcx, alloc_range(offset + ptr_size, ptr_size), false)
.ok()?
.to_target_usize(&tcx)
.ok()?;
.discard_err()?;
Some(len == 0)
} else {
None

View File

@ -200,7 +200,7 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
AllocKind::Dead => unreachable!(),
};
// Ensure this pointer's provenance is exposed, so that it can be used by FFI code.
return Ok(base_ptr.expose_provenance().try_into().unwrap());
return interp_ok(base_ptr.expose_provenance().try_into().unwrap());
}
// We are not in native lib mode, so we control the addresses ourselves.
if let Some((reuse_addr, clock)) =
@ -209,7 +209,7 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
if let Some(clock) = clock {
ecx.acquire_clock(&clock);
}
Ok(reuse_addr)
interp_ok(reuse_addr)
} else {
// We have to pick a fresh address.
// Leave some space to the previous allocation, to give it some chance to be less aligned.
@ -234,7 +234,7 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
throw_exhaust!(AddressSpaceFull);
}
Ok(base_addr)
interp_ok(base_addr)
}
}
@ -248,7 +248,7 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
let global_state = &mut *global_state;
match global_state.base_addr.get(&alloc_id) {
Some(&addr) => Ok(addr),
Some(&addr) => interp_ok(addr),
None => {
// First time we're looking for the absolute address of this allocation.
let base_addr =
@ -274,7 +274,7 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
};
global_state.int_to_ptr_map.insert(pos, (base_addr, alloc_id));
Ok(base_addr)
interp_ok(base_addr)
}
}
}
@ -287,12 +287,12 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let global_state = ecx.machine.alloc_addresses.get_mut();
// In strict mode, we don't need this, so we can save some cycles by not tracking it.
if global_state.provenance_mode == ProvenanceMode::Strict {
return Ok(());
return interp_ok(());
}
// Exposing a dead alloc is a no-op, because it's not possible to get a dead allocation
// via int2ptr.
if !ecx.is_alloc_live(alloc_id) {
return Ok(());
return interp_ok(());
}
trace!("Exposing allocation id {alloc_id:?}");
let global_state = ecx.machine.alloc_addresses.get_mut();
@ -300,7 +300,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
if ecx.machine.borrow_tracker.is_some() {
ecx.expose_tag(alloc_id, tag)?;
}
Ok(())
interp_ok(())
}
fn ptr_from_addr_cast(&self, addr: u64) -> InterpResult<'tcx, Pointer> {
@ -337,7 +337,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// cast is fairly irrelevant. Instead we generate this as a "wildcard" pointer, such that
// *every time the pointer is used*, we do an `AllocId` lookup to find the (exposed)
// allocation it might be referencing.
Ok(Pointer::new(Some(Provenance::Wildcard), Size::from_bytes(addr)))
interp_ok(Pointer::new(Some(Provenance::Wildcard), Size::from_bytes(addr)))
}
/// Convert a relative (tcx) pointer to a Miri pointer.
@ -359,7 +359,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
Size::from_bytes(base_addr),
);
// Add offset with the right kind of pointer-overflowing arithmetic.
Ok(base_ptr.wrapping_offset(offset, ecx))
interp_ok(base_ptr.wrapping_offset(offset, ecx))
}
// This returns some prepared `MiriAllocBytes`, either because `addr_from_alloc_id` reserved
@ -390,9 +390,9 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
assert_eq!(prepared_alloc_bytes.len(), bytes.len());
// Copy allocation contents into prepared memory.
prepared_alloc_bytes.copy_from_slice(bytes);
Ok(prepared_alloc_bytes)
interp_ok(prepared_alloc_bytes)
} else {
Ok(MiriAllocBytes::from_bytes(std::borrow::Cow::Borrowed(bytes), align))
interp_ok(MiriAllocBytes::from_bytes(std::borrow::Cow::Borrowed(bytes), align))
}
}

View File

@ -322,7 +322,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
match method {
BorrowTrackerMethod::StackedBorrows => {
this.tcx.tcx.dcx().warn("Stacked Borrows does not support named pointers; `miri_pointer_name` is a no-op");
Ok(())
interp_ok(())
}
BorrowTrackerMethod::TreeBorrows =>
this.tb_give_pointer_debug_name(ptr, nth_parent, name),
@ -333,7 +333,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let this = self.eval_context_mut();
let Some(borrow_tracker) = &this.machine.borrow_tracker else {
eprintln!("attempted to print borrow state, but no borrow state is being tracked");
return Ok(());
return interp_ok(());
};
let method = borrow_tracker.borrow().borrow_tracker_method;
match method {
@ -376,7 +376,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
}
}
borrow_tracker.borrow_mut().end_call(&frame.extra);
Ok(())
interp_ok(())
}
}
@ -489,7 +489,7 @@ impl AllocState {
alloc_id: AllocId, // diagnostics
) -> InterpResult<'tcx> {
match self {
AllocState::StackedBorrows(_sb) => Ok(()),
AllocState::StackedBorrows(_sb) => interp_ok(()),
AllocState::TreeBorrows(tb) =>
tb.borrow_mut().release_protector(machine, global, tag, alloc_id),
}

View File

@ -230,7 +230,7 @@ impl<'tcx> Stack {
}
if !item.protected() {
return Ok(());
return interp_ok(());
}
// We store tags twice, once in global.protected_tags and once in each call frame.
@ -252,10 +252,10 @@ impl<'tcx> Stack {
let allowed = matches!(cause, ItemInvalidationCause::Dealloc)
&& matches!(protector_kind, ProtectorKind::WeakProtector);
if !allowed {
return Err(dcx.protector_error(item, protector_kind).into());
return Err(dcx.protector_error(item, protector_kind)).into();
}
}
Ok(())
interp_ok(())
}
/// Test if a memory `access` using pointer tagged `tag` is granted.
@ -295,7 +295,7 @@ impl<'tcx> Stack {
self.pop_items_after(first_incompatible_idx, |item| {
Stack::item_invalidated(&item, global, dcx, ItemInvalidationCause::Conflict)?;
dcx.log_invalidation(item.tag());
Ok(())
interp_ok(())
})?;
} else {
// On a read, *disable* all `Unique` above the granting item. This ensures U2 for read accesses.
@ -316,7 +316,7 @@ impl<'tcx> Stack {
self.disable_uniques_starting_at(first_incompatible_idx, |item| {
Stack::item_invalidated(&item, global, dcx, ItemInvalidationCause::Conflict)?;
dcx.log_invalidation(item.tag());
Ok(())
interp_ok(())
})?;
}
@ -345,7 +345,7 @@ impl<'tcx> Stack {
}
// Done.
Ok(())
interp_ok(())
}
/// Deallocate a location: Like a write access, but also there must be no
@ -367,7 +367,7 @@ impl<'tcx> Stack {
Stack::item_invalidated(&item, global, dcx, ItemInvalidationCause::Dealloc)?;
}
Ok(())
interp_ok(())
}
/// Derive a new pointer from one with the given tag.
@ -418,7 +418,7 @@ impl<'tcx> Stack {
"reborrow: forgetting stack entirely due to SharedReadWrite reborrow from wildcard or unknown"
);
self.set_unknown_bottom(global.next_ptr_tag);
return Ok(());
return interp_ok(());
};
// SharedReadWrite can coexist with "existing loans", meaning they don't act like a write
@ -431,7 +431,7 @@ impl<'tcx> Stack {
// Put the new item there.
trace!("reborrow: adding item {:?}", new);
self.insert(new_idx, new);
Ok(())
interp_ok(())
}
}
// # Stacked Borrows Core End
@ -491,7 +491,7 @@ impl<'tcx> Stacks {
f(stack, &mut dcx, &mut self.exposed_tags)?;
dcx_builder = dcx.unbuild();
}
Ok(())
interp_ok(())
}
}
@ -576,7 +576,7 @@ impl Stacks {
self.for_each(alloc_range(Size::ZERO, size), dcx, |stack, dcx, exposed_tags| {
stack.dealloc(tag, &state, dcx, exposed_tags)
})?;
Ok(())
interp_ok(())
}
}
@ -623,7 +623,7 @@ trait EvalContextPrivExt<'tcx, 'ecx>: crate::MiriInterpCxExt<'tcx> {
drop(global); // don't hold that reference any longer than we have to
let Some((alloc_id, base_offset, orig_tag)) = loc else {
return Ok(())
return interp_ok(())
};
let (_size, _align, alloc_kind) = this.get_alloc_info(alloc_id);
@ -655,7 +655,7 @@ trait EvalContextPrivExt<'tcx, 'ecx>: crate::MiriInterpCxExt<'tcx> {
// No stacked borrows on these allocations.
}
}
Ok(())
interp_ok(())
};
if size == Size::ZERO {
@ -676,12 +676,12 @@ trait EvalContextPrivExt<'tcx, 'ecx>: crate::MiriInterpCxExt<'tcx> {
{
log_creation(this, Some((alloc_id, base_offset, orig_tag)))?;
// Still give it the new provenance, it got retagged after all.
return Ok(Some(Provenance::Concrete { alloc_id, tag: new_tag }));
return interp_ok(Some(Provenance::Concrete { alloc_id, tag: new_tag }));
} else {
// This pointer doesn't come with an AllocId. :shrug:
log_creation(this, None)?;
// Provenance unchanged.
return Ok(place.ptr().provenance);
return interp_ok(place.ptr().provenance);
}
}
@ -800,12 +800,12 @@ trait EvalContextPrivExt<'tcx, 'ecx>: crate::MiriInterpCxExt<'tcx> {
)?;
}
}
Ok(())
interp_ok(())
})?;
}
}
Ok(Some(Provenance::Concrete { alloc_id, tag: new_tag }))
interp_ok(Some(Provenance::Concrete { alloc_id, tag: new_tag }))
}
fn sb_retag_place(
@ -832,7 +832,7 @@ trait EvalContextPrivExt<'tcx, 'ecx>: crate::MiriInterpCxExt<'tcx> {
*shown = true;
this.emit_diagnostic(NonHaltingDiagnostic::ExternTypeReborrow);
});
return Ok(place.clone());
return interp_ok(place.clone());
}
};
@ -845,7 +845,7 @@ trait EvalContextPrivExt<'tcx, 'ecx>: crate::MiriInterpCxExt<'tcx> {
// Adjust place.
// (If the closure gets called, that means the old provenance was `Some`, and hence the new
// one must also be `Some`.)
Ok(place.clone().map_provenance(|_| new_prov.unwrap()))
interp_ok(place.clone().map_provenance(|_| new_prov.unwrap()))
}
/// Retags an individual pointer, returning the retagged version.
@ -859,7 +859,7 @@ trait EvalContextPrivExt<'tcx, 'ecx>: crate::MiriInterpCxExt<'tcx> {
let this = self.eval_context_mut();
let place = this.ref_to_mplace(val)?;
let new_place = this.sb_retag_place(&place, new_perm, info)?;
Ok(ImmTy::from_immediate(new_place.to_ref(this), val.layout))
interp_ok(ImmTy::from_immediate(new_place.to_ref(this), val.layout))
}
}
@ -917,7 +917,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
in_field: self.in_field,
})?;
self.ecx.write_immediate(*val, place)?;
Ok(())
interp_ok(())
}
}
impl<'ecx, 'tcx> ValueVisitor<'tcx, MiriMachine<'tcx>> for RetagVisitor<'ecx, 'tcx> {
@ -935,7 +935,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let new_perm = NewPermission::from_box_ty(place.layout.ty, self.kind, self.ecx);
self.retag_ptr_inplace(place, new_perm)?;
}
Ok(())
interp_ok(())
}
fn visit_value(&mut self, place: &PlaceTy<'tcx>) -> InterpResult<'tcx> {
@ -944,7 +944,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// This optimization is crucial for ZSTs, because they can contain way more fields
// than we can ever visit.
if place.layout.is_sized() && place.layout.size < self.ecx.pointer_size() {
return Ok(());
return interp_ok(());
}
// Check the type of this value to see what to do with it (retag, or recurse).
@ -983,7 +983,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
}
}
Ok(())
interp_ok(())
}
}
}
@ -1028,7 +1028,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// No stacked borrows on these allocations.
}
}
Ok(())
interp_ok(())
}
fn print_stacks(&mut self, alloc_id: AllocId) -> InterpResult<'tcx> {
@ -1046,6 +1046,6 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
}
println!(" ]");
}
Ok(())
interp_ok(())
}
}

View File

@ -4,9 +4,9 @@ use std::ops::Range;
use rustc_data_structures::fx::FxHashSet;
use tracing::trace;
use crate::ProvenanceExtra;
use crate::borrow_tracker::stacked_borrows::{Item, Permission};
use crate::borrow_tracker::{AccessKind, BorTag};
use crate::{InterpResult, ProvenanceExtra, interp_ok};
/// Exactly what cache size we should use is a difficult trade-off. There will always be some
/// workload which has a `BorTag` working set which exceeds the size of the cache, and ends up
@ -380,8 +380,8 @@ impl<'tcx> Stack {
pub fn disable_uniques_starting_at(
&mut self,
disable_start: usize,
mut visitor: impl FnMut(Item) -> crate::InterpResult<'tcx>,
) -> crate::InterpResult<'tcx> {
mut visitor: impl FnMut(Item) -> InterpResult<'tcx>,
) -> InterpResult<'tcx> {
#[cfg(feature = "stack-cache")]
let unique_range = self.unique_range.clone();
#[cfg(not(feature = "stack-cache"))]
@ -420,16 +420,16 @@ impl<'tcx> Stack {
#[cfg(feature = "stack-cache-consistency-check")]
self.verify_cache_consistency();
Ok(())
interp_ok(())
}
/// Produces an iterator which iterates over `range` in reverse, and when dropped removes that
/// range of `Item`s from this `Stack`.
pub fn pop_items_after<V: FnMut(Item) -> crate::InterpResult<'tcx>>(
pub fn pop_items_after<V: FnMut(Item) -> InterpResult<'tcx>>(
&mut self,
start: usize,
mut visitor: V,
) -> crate::InterpResult<'tcx> {
) -> InterpResult<'tcx> {
while self.borrows.len() > start {
let item = self.borrows.pop().unwrap();
visitor(item)?;
@ -474,6 +474,6 @@ impl<'tcx> Stack {
#[cfg(feature = "stack-cache-consistency-check")]
self.verify_cache_consistency();
Ok(())
interp_ok(())
}
}

View File

@ -226,7 +226,7 @@ impl<'tcx> Tree {
} else {
eprintln!("Tag {tag:?} (to be named '{name}') not found!");
}
Ok(())
interp_ok(())
}
/// Debug helper: determines if the tree contains a tag.
@ -798,6 +798,6 @@ impl<'tcx> Tree {
/* print warning message about tags not shown */ !show_unnamed,
);
}
Ok(())
interp_ok(())
}
}

View File

@ -56,7 +56,7 @@ impl<'tcx> Tree {
// handle them as much as we can.
let tag = match prov {
ProvenanceExtra::Concrete(tag) => tag,
ProvenanceExtra::Wildcard => return Ok(()),
ProvenanceExtra::Wildcard => return interp_ok(()),
};
let global = machine.borrow_tracker.as_ref().unwrap();
let span = machine.current_span();
@ -81,7 +81,7 @@ impl<'tcx> Tree {
// handle them as much as we can.
let tag = match prov {
ProvenanceExtra::Concrete(tag) => tag,
ProvenanceExtra::Wildcard => return Ok(()),
ProvenanceExtra::Wildcard => return interp_ok(()),
};
let global = machine.borrow_tracker.as_ref().unwrap();
let span = machine.current_span();
@ -213,7 +213,7 @@ trait EvalContextPrivExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
));
}
drop(global); // don't hold that reference any longer than we have to
Ok(())
interp_ok(())
};
trace!("Reborrow of size {:?}", ptr_size);
@ -235,13 +235,13 @@ trait EvalContextPrivExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
);
log_creation(this, None)?;
// Keep original provenance.
return Ok(place.ptr().provenance);
return interp_ok(place.ptr().provenance);
}
};
log_creation(this, Some((alloc_id, base_offset, parent_prov)))?;
let orig_tag = match parent_prov {
ProvenanceExtra::Wildcard => return Ok(place.ptr().provenance), // TODO: handle wildcard pointers
ProvenanceExtra::Wildcard => return interp_ok(place.ptr().provenance), // TODO: handle wildcard pointers
ProvenanceExtra::Concrete(tag) => tag,
};
@ -279,7 +279,7 @@ trait EvalContextPrivExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
assert_eq!(ptr_size, Size::ZERO); // we did the deref check above, size has to be 0 here
// There's not actually any bytes here where accesses could even be tracked.
// Just produce the new provenance, nothing else to do.
return Ok(Some(Provenance::Concrete { alloc_id, tag: new_tag }));
return interp_ok(Some(Provenance::Concrete { alloc_id, tag: new_tag }));
}
let span = this.machine.current_span();
@ -312,7 +312,7 @@ trait EvalContextPrivExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
}
}
Ok(Some(Provenance::Concrete { alloc_id, tag: new_tag }))
interp_ok(Some(Provenance::Concrete { alloc_id, tag: new_tag }))
}
fn tb_retag_place(
@ -350,7 +350,7 @@ trait EvalContextPrivExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// Adjust place.
// (If the closure gets called, that means the old provenance was `Some`, and hence the new
// one must also be `Some`.)
Ok(place.clone().map_provenance(|_| new_prov.unwrap()))
interp_ok(place.clone().map_provenance(|_| new_prov.unwrap()))
}
/// Retags an individual pointer, returning the retagged version.
@ -362,7 +362,7 @@ trait EvalContextPrivExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let this = self.eval_context_mut();
let place = this.ref_to_mplace(val)?;
let new_place = this.tb_retag_place(&place, new_perm)?;
Ok(ImmTy::from_immediate(new_place.to_ref(this), val.layout))
interp_ok(ImmTy::from_immediate(new_place.to_ref(this), val.layout))
}
}
@ -384,7 +384,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
if let Some(new_perm) = new_perm {
this.tb_retag_reference(val, new_perm)
} else {
Ok(val.clone())
interp_ok(val.clone())
}
}
@ -421,7 +421,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let val = self.ecx.tb_retag_reference(&val, new_perm)?;
self.ecx.write_immediate(*val, place)?;
}
Ok(())
interp_ok(())
}
}
impl<'ecx, 'tcx> ValueVisitor<'tcx, MiriMachine<'tcx>> for RetagVisitor<'ecx, 'tcx> {
@ -446,7 +446,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
);
self.retag_ptr_inplace(place, new_perm)?;
}
Ok(())
interp_ok(())
}
fn visit_value(&mut self, place: &PlaceTy<'tcx>) -> InterpResult<'tcx> {
@ -455,7 +455,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// This optimization is crucial for ZSTs, because they can contain way more fields
// than we can ever visit.
if place.layout.is_sized() && place.layout.size < self.ecx.pointer_size() {
return Ok(());
return interp_ok(());
}
// Check the type of this value to see what to do with it (retag, or recurse).
@ -503,7 +503,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
}
}
}
Ok(())
interp_ok(())
}
}
}
@ -549,7 +549,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// No tree borrows on these allocations.
}
}
Ok(())
interp_ok(())
}
/// Display the tree.
@ -575,7 +575,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
Some(Provenance::Concrete { tag, alloc_id }) => (tag, alloc_id),
_ => {
eprintln!("Can't give the name {name} to Wildcard pointer");
return Ok(());
return interp_ok(());
}
};
let alloc_extra = this.get_alloc_extra(alloc_id)?;
@ -605,5 +605,5 @@ fn inner_ptr_of_unique<'tcx>(
assert_eq!(nonnull.layout.fields.count(), 1, "NonNull must have exactly 1 field");
let ptr = ecx.project_field(&nonnull, 0)?;
// Finally a plain `*mut`
Ok(ptr)
interp_ok(ptr)
}

View File

@ -637,7 +637,7 @@ impl<'tcx> Tree {
{
perms.insert(idx, perm);
}
Ok(())
interp_ok(())
}
/// Deallocation requires
@ -688,7 +688,7 @@ impl<'tcx> Tree {
},
)?;
}
Ok(())
interp_ok(())
}
/// Map the per-node and per-location `LocationState::perform_access`
@ -827,7 +827,7 @@ impl<'tcx> Tree {
}
}
}
Ok(())
interp_ok(())
}
}

View File

@ -624,7 +624,7 @@ pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> {
let buffered_scalar = this.buffered_atomic_read(place, atomic, scalar, || {
this.validate_atomic_load(place, atomic)
})?;
Ok(buffered_scalar.ok_or_else(|| err_ub!(InvalidUninitBytes(None)))?)
interp_ok(buffered_scalar.ok_or_else(|| err_ub!(InvalidUninitBytes(None)))?)
}
/// Perform an atomic write operation at the memory location.
@ -641,7 +641,7 @@ pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> {
// The program didn't actually do a read, so suppress the memory access hooks.
// This is also a very special exception where we just ignore an error -- if this read
// was UB e.g. because the memory is uninitialized, we don't want to know!
let old_val = this.run_for_validation(|this| this.read_scalar(dest)).ok();
let old_val = this.run_for_validation(|this| this.read_scalar(dest)).discard_err();
this.allow_data_races_mut(move |this| this.write_scalar(val, dest))?;
this.validate_atomic_store(dest, atomic)?;
this.buffered_atomic_write(val, dest, atomic, old_val)
@ -668,7 +668,7 @@ pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> {
this.validate_atomic_rmw(place, atomic)?;
this.buffered_atomic_rmw(val.to_scalar(), place, atomic, old.to_scalar())?;
Ok(old)
interp_ok(old)
}
/// Perform an atomic exchange with a memory place and a new
@ -688,7 +688,7 @@ pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> {
this.validate_atomic_rmw(place, atomic)?;
this.buffered_atomic_rmw(new, place, atomic, old)?;
Ok(old)
interp_ok(old)
}
/// Perform an conditional atomic exchange with a memory place and a new
@ -720,7 +720,7 @@ pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> {
this.buffered_atomic_rmw(new_val.to_scalar(), place, atomic, old.to_scalar())?;
// Return the old value.
Ok(old)
interp_ok(old)
}
/// Perform an atomic compare and exchange at a given memory location.
@ -777,7 +777,7 @@ pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> {
}
// Return the old value.
Ok(res)
interp_ok(res)
}
/// Update the data-race detector for an atomic fence on the current thread.
@ -809,11 +809,11 @@ pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> {
}
// Increment timestamp in case of release semantics.
Ok(atomic != AtomicFenceOrd::Acquire)
interp_ok(atomic != AtomicFenceOrd::Acquire)
},
)
} else {
Ok(())
interp_ok(())
}
}
@ -1047,7 +1047,7 @@ impl VClockAlloc {
let current_span = machine.current_span();
let global = machine.data_race.as_ref().unwrap();
if !global.race_detecting() {
return Ok(());
return interp_ok(());
}
let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
let mut alloc_ranges = self.alloc_ranges.borrow_mut();
@ -1070,7 +1070,7 @@ impl VClockAlloc {
);
}
}
Ok(())
interp_ok(())
}
/// Detect data-races for an unsynchronized write operation. It will not perform
@ -1089,7 +1089,7 @@ impl VClockAlloc {
let current_span = machine.current_span();
let global = machine.data_race.as_mut().unwrap();
if !global.race_detecting() {
return Ok(());
return interp_ok(());
}
let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
for (mem_clocks_range, mem_clocks) in
@ -1111,7 +1111,7 @@ impl VClockAlloc {
);
}
}
Ok(())
interp_ok(())
}
}
@ -1307,7 +1307,7 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> {
}
}
}
Ok(())
interp_ok(())
}
/// Update the data-race detector for an atomic read occurring at the
@ -1399,9 +1399,9 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> {
) -> InterpResult<'tcx> {
let this = self.eval_context_ref();
assert!(access.is_atomic());
let Some(data_race) = &this.machine.data_race else { return Ok(()) };
let Some(data_race) = &this.machine.data_race else { return interp_ok(()) };
if !data_race.race_detecting() {
return Ok(());
return interp_ok(());
}
let size = place.layout.size;
let (alloc_id, base_offset, _prov) = this.ptr_get_alloc_id(place.ptr(), 0)?;
@ -1444,7 +1444,7 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> {
}
// This conservatively assumes all operations have release semantics
Ok(true)
interp_ok(true)
},
)?;
@ -1460,7 +1460,7 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> {
}
}
Ok(())
interp_ok(())
}
}
@ -1757,7 +1757,7 @@ impl GlobalState {
clocks.increment_clock(index, current_span);
}
}
Ok(())
interp_ok(())
}
/// Internal utility to identify a thread stored internally

View File

@ -37,9 +37,9 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
lock,
offset,
|ecx| &mut ecx.machine.sync.init_onces,
|_| Ok(Default::default()),
|_| interp_ok(Default::default()),
)?
.ok_or_else(|| err_ub_format!("init_once has invalid ID").into())
.ok_or_else(|| err_ub_format!("init_once has invalid ID")).into()
}
#[inline]
@ -101,7 +101,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.unblock_thread(waiter, BlockReason::InitOnce(id))?;
}
Ok(())
interp_ok(())
}
#[inline]
@ -126,7 +126,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.unblock_thread(waiter, BlockReason::InitOnce(id))?;
}
Ok(())
interp_ok(())
}
/// Synchronize with the previous completion of an InitOnce.

View File

@ -206,7 +206,7 @@ pub(super) trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
)?
.to_scalar_pair();
Ok(if success.to_bool().expect("compare_exchange's second return value is a bool") {
interp_ok(if success.to_bool().expect("compare_exchange's second return value is a bool") {
// We set the in-memory ID to `next_index`, now also create this object in the machine
// state.
let obj = create_obj(this)?;
@ -247,7 +247,7 @@ pub(super) trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
let new_index = get_objs(this).push(obj);
this.write_scalar(Scalar::from_u32(new_index.to_u32()), &id_place)?;
Ok(new_index)
interp_ok(new_index)
}
fn condvar_reacquire_mutex(
@ -266,7 +266,7 @@ pub(super) trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
// Don't forget to write the return value.
this.write_scalar(retval, &dest)?;
}
Ok(())
interp_ok(())
}
}
@ -307,7 +307,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|ecx| &mut ecx.machine.sync.mutexes,
|ecx| initialize_data(ecx).map(|data| Mutex { data, ..Default::default() }),
)?
.ok_or_else(|| err_ub_format!("mutex has invalid ID").into())
.ok_or_else(|| err_ub_format!("mutex has invalid ID")).into()
}
/// Retrieve the additional data stored for a mutex.
@ -334,7 +334,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|ecx| &mut ecx.machine.sync.rwlocks,
|ecx| initialize_data(ecx).map(|data| RwLock { data, ..Default::default() }),
)?
.ok_or_else(|| err_ub_format!("rwlock has invalid ID").into())
.ok_or_else(|| err_ub_format!("rwlock has invalid ID")).into()
}
/// Retrieve the additional data stored for a rwlock.
@ -375,7 +375,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|ecx| &mut ecx.machine.sync.condvars,
|ecx| initialize_data(ecx).map(|data| Condvar { data, ..Default::default() }),
)?
.ok_or_else(|| err_ub_format!("condvar has invalid ID").into())
.ok_or_else(|| err_ub_format!("condvar has invalid ID")).into()
}
/// Retrieve the additional data stored for a condvar.
@ -428,11 +428,11 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
fn mutex_unlock(&mut self, id: MutexId) -> InterpResult<'tcx, Option<usize>> {
let this = self.eval_context_mut();
let mutex = &mut this.machine.sync.mutexes[id];
Ok(if let Some(current_owner) = mutex.owner {
interp_ok(if let Some(current_owner) = mutex.owner {
// Mutex is locked.
if current_owner != this.machine.threads.active_thread() {
// Only the owner can unlock the mutex.
return Ok(None);
return interp_ok(None);
}
let old_lock_count = mutex.lock_count;
mutex.lock_count = old_lock_count.strict_sub(1);
@ -484,7 +484,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.write_scalar(retval, &dest)?;
}
Ok(())
interp_ok(())
}
),
);
@ -546,7 +546,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
trace!("rwlock_reader_unlock: {:?} held one less time by {:?}", id, thread);
}
}
Entry::Vacant(_) => return Ok(false), // we did not even own this lock
Entry::Vacant(_) => return interp_ok(false), // we did not even own this lock
}
if let Some(data_race) = &this.machine.data_race {
// Add this to the shared-release clock of all concurrent readers.
@ -565,7 +565,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.unblock_thread(writer, BlockReason::RwLock(id))?;
}
}
Ok(true)
interp_ok(true)
}
/// Put the reader in the queue waiting for the lock and block it.
@ -593,7 +593,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
@unblock = |this| {
this.rwlock_reader_lock(id);
this.write_scalar(retval, &dest)?;
Ok(())
interp_ok(())
}
),
);
@ -620,10 +620,10 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let this = self.eval_context_mut();
let thread = this.active_thread();
let rwlock = &mut this.machine.sync.rwlocks[id];
Ok(if let Some(current_writer) = rwlock.writer {
interp_ok(if let Some(current_writer) = rwlock.writer {
if current_writer != thread {
// Only the owner can unlock the rwlock.
return Ok(false);
return interp_ok(false);
}
rwlock.writer = None;
trace!("rwlock_writer_unlock: {:?} unlocked by {:?}", id, thread);
@ -676,7 +676,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
@unblock = |this| {
this.rwlock_writer_lock(id);
this.write_scalar(retval, &dest)?;
Ok(())
interp_ok(())
}
),
);
@ -749,7 +749,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
}
),
);
Ok(())
interp_ok(())
}
/// Wake up some thread (if there is any) sleeping on the conditional
@ -764,10 +764,10 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
condvar.clock.clone_from(&*data_race.release_clock(&this.machine.threads));
}
let Some(waiter) = condvar.waiters.pop_front() else {
return Ok(false);
return interp_ok(false);
};
this.unblock_thread(waiter, BlockReason::Condvar(id))?;
Ok(true)
interp_ok(true)
}
/// Wait for the futex to be signaled, or a timeout.
@ -808,7 +808,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
}
// Write the return value.
this.write_scalar(retval_succ, &dest)?;
Ok(())
interp_ok(())
}
@timeout = |this| {
// Remove the waiter from the futex.
@ -818,7 +818,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// Set errno and write return value.
this.set_last_error(errno_timeout)?;
this.write_scalar(retval_timeout, &dest)?;
Ok(())
interp_ok(())
}
),
);
@ -828,7 +828,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
fn futex_wake(&mut self, addr: u64, bitset: u32) -> InterpResult<'tcx, bool> {
let this = self.eval_context_mut();
let Some(futex) = this.machine.sync.futexes.get_mut(&addr) else {
return Ok(false);
return interp_ok(false);
};
let data_race = &this.machine.data_race;
@ -839,10 +839,10 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// Wake up the first thread in the queue that matches any of the bits in the bitset.
let Some(i) = futex.waiters.iter().position(|w| w.bitset & bitset != 0) else {
return Ok(false);
return interp_ok(false);
};
let waiter = futex.waiters.remove(i).unwrap();
this.unblock_thread(waiter.thread, BlockReason::Futex { addr })?;
Ok(true)
interp_ok(true)
}
}

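The blocking hunks above also show that the `@unblock`/`@timeout` closures passed to `callback!` are themselves fallible, so their bodies now end in `interp_ok(())` too. A condensed sketch; the capture list is reconstructed for illustration and abridged:

    callback!(
        @capture<'tcx> { retval: Scalar, dest: MPlaceTy<'tcx> }
        @unblock = |this| {
            // Deliver the return value to the woken thread, then report success.
            this.write_scalar(retval, &dest)?;
            interp_ok(())
        }
        @timeout = |_this| { interp_ok(()) }
    )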
View File

@ -622,7 +622,7 @@ impl<'tcx> ThreadManager<'tcx> {
}
self.threads[id].join_status = ThreadJoinStatus::Detached;
Ok(())
interp_ok(())
}
/// Mark that the active thread tries to join the thread with `joined_thread_id`.
@ -657,7 +657,7 @@ impl<'tcx> ThreadManager<'tcx> {
if let Some(data_race) = &mut this.machine.data_race {
data_race.thread_joined(&this.machine.threads, joined_thread_id);
}
Ok(())
interp_ok(())
}
),
);
@ -667,7 +667,7 @@ impl<'tcx> ThreadManager<'tcx> {
data_race.thread_joined(self, joined_thread_id);
}
}
Ok(())
interp_ok(())
}
/// Mark that the active thread tries to exclusively join the thread with `joined_thread_id`.
@ -754,7 +754,7 @@ impl<'tcx> ThreadManager<'tcx> {
// This thread and the program can keep going.
if self.threads[self.active_thread].state.is_enabled() && !self.yield_active_thread {
// The currently active thread is still enabled, just continue with it.
return Ok(SchedulingAction::ExecuteStep);
return interp_ok(SchedulingAction::ExecuteStep);
}
// The active thread yielded or got terminated. Let's see if there are any timeouts to take
// care of. We do this *before* running any other thread, to ensure that timeouts "in the
@ -764,7 +764,7 @@ impl<'tcx> ThreadManager<'tcx> {
// <https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_cond_timedwait.html>
let potential_sleep_time = self.next_callback_wait_time(clock);
if potential_sleep_time == Some(Duration::ZERO) {
return Ok(SchedulingAction::ExecuteTimeoutCallback);
return interp_ok(SchedulingAction::ExecuteTimeoutCallback);
}
// No callbacks immediately scheduled, pick a regular thread to execute.
// The active thread blocked or yielded. So we go search for another enabled thread.
@ -793,7 +793,7 @@ impl<'tcx> ThreadManager<'tcx> {
}
self.yield_active_thread = false;
if self.threads[self.active_thread].state.is_enabled() {
return Ok(SchedulingAction::ExecuteStep);
return interp_ok(SchedulingAction::ExecuteStep);
}
// We have not found a thread to execute.
if self.threads.iter().all(|thread| thread.state.is_terminated()) {
@ -802,7 +802,7 @@ impl<'tcx> ThreadManager<'tcx> {
// All threads are currently blocked, but we have unexecuted
// timeout_callbacks, which may unblock some of the threads. Hence,
// sleep until the first callback.
Ok(SchedulingAction::Sleep(sleep_time))
interp_ok(SchedulingAction::Sleep(sleep_time))
} else {
throw_machine_stop!(TerminationInfo::Deadlock);
}
@ -848,7 +848,7 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> {
// https://github.com/rust-lang/miri/issues/1763). In this case,
// just do nothing, which effectively just returns to the
// scheduler.
Ok(())
interp_ok(())
}
#[inline]
@ -861,7 +861,7 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> {
.expect("`on_stack_empty` not set up, or already running");
let res = callback(this)?;
this.active_thread_mut().on_stack_empty = Some(callback);
Ok(res)
interp_ok(res)
}
}
@ -879,7 +879,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
if let Some(old_alloc) = this.machine.threads.get_thread_local_alloc_id(def_id) {
// We already have a thread-specific allocation id for this
// thread-local static.
Ok(old_alloc)
interp_ok(old_alloc)
} else {
// We need to allocate a thread-specific allocation id for this
// thread-local static.
@ -892,7 +892,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let mut alloc = alloc.inner().adjust_from_tcx(
&this.tcx,
|bytes, align| {
Ok(MiriAllocBytes::from_bytes(std::borrow::Cow::Borrowed(bytes), align))
interp_ok(MiriAllocBytes::from_bytes(std::borrow::Cow::Borrowed(bytes), align))
},
|ptr| this.global_root_pointer(ptr),
)?;
@ -901,7 +901,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// Create a fresh allocation with this content.
let ptr = this.insert_allocation(alloc, MiriMemoryKind::Tls.into())?;
this.machine.threads.set_thread_local_alloc(def_id, ptr);
Ok(ptr)
interp_ok(ptr)
}
}
@ -964,7 +964,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// Restore the old active thread frame.
this.machine.threads.set_active_thread_id(old_thread_id);
Ok(new_thread_id)
interp_ok(new_thread_id)
}
/// Handles thread termination of the active thread: wakes up threads joining on this one,
@ -1022,7 +1022,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.unblock_thread(thread, unblock_reason)?;
}
Ok(())
interp_ok(())
}
/// Block the current thread, with an optional timeout.
@ -1078,7 +1078,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let old_thread = this.machine.threads.set_active_thread_id(thread);
callback.unblock(this)?;
this.machine.threads.set_active_thread_id(old_thread);
Ok(())
interp_ok(())
}
#[inline]
@ -1095,7 +1095,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
fn join_thread(&mut self, joined_thread_id: ThreadId) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
this.machine.threads.join_thread(joined_thread_id, this.machine.data_race.as_mut())?;
Ok(())
interp_ok(())
}
#[inline]
@ -1104,7 +1104,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.machine
.threads
.join_thread_exclusive(joined_thread_id, this.machine.data_race.as_mut())?;
Ok(())
interp_ok(())
}
#[inline]

View File

@ -195,10 +195,10 @@ impl StoreBufferAlloc {
AccessType::PerfectlyOverlapping(pos) => pos,
// If there is nothing here yet, that means there wasn't an atomic write yet so
// we can't return anything outdated.
_ => return Ok(None),
_ => return interp_ok(None),
};
let store_buffer = Ref::map(self.store_buffers.borrow(), |buffer| &buffer[pos]);
Ok(Some(store_buffer))
interp_ok(Some(store_buffer))
}
/// Gets a mutable store buffer associated with an atomic object in this allocation,
@ -223,7 +223,7 @@ impl StoreBufferAlloc {
pos_range.start
}
};
Ok(&mut buffers[pos])
interp_ok(&mut buffers[pos])
}
}
@ -284,7 +284,7 @@ impl<'tcx> StoreBuffer {
let (index, clocks) = global.active_thread_state(thread_mgr);
let loaded = store_elem.load_impl(index, &clocks, is_seqcst);
Ok((loaded, recency))
interp_ok((loaded, recency))
}
fn buffered_write(
@ -297,7 +297,7 @@ impl<'tcx> StoreBuffer {
let (index, clocks) = global.active_thread_state(thread_mgr);
self.store_impl(val, index, &clocks.clock, is_seqcst);
Ok(())
interp_ok(())
}
#[allow(clippy::if_same_then_else, clippy::needless_bool)]
@ -470,7 +470,7 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
buffer.read_from_last_store(global, threads, atomic == AtomicRwOrd::SeqCst);
buffer.buffered_write(new_val, global, threads, atomic == AtomicRwOrd::SeqCst)?;
}
Ok(())
interp_ok(())
}
fn buffered_atomic_read(
@ -508,14 +508,14 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
});
}
return Ok(loaded);
return interp_ok(loaded);
}
}
}
// Race detector or weak memory disabled, simply read the latest value
validate()?;
Ok(Some(latest_in_mo))
interp_ok(Some(latest_in_mo))
}
/// Add the given write to the store buffer. (Does not change machine memory.)
@ -546,7 +546,7 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
}
// Caller should've written to dest with the vanilla scalar write, we do nothing here
Ok(())
interp_ok(())
}
/// Caller should never need to consult the store buffer for the latest value.
@ -570,7 +570,7 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
alloc_buffers.get_store_buffer(alloc_range(base_offset, size))?
else {
// No store buffer, nothing to do.
return Ok(());
return interp_ok(());
};
buffer.read_from_last_store(
global,
@ -579,6 +579,6 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
);
}
}
Ok(())
interp_ok(())
}
}

View File

@ -223,7 +223,7 @@ pub fn report_error<'tcx>(
let info = info.downcast_ref::<TerminationInfo>().expect("invalid MachineStop payload");
use TerminationInfo::*;
let title = match info {
Exit { code, leak_check } => return Some((*code, *leak_check)),
&Exit { code, leak_check } => return Some((code, leak_check)),
Abort(_) => Some("abnormal termination"),
UnsupportedInIsolation(_) | Int2PtrWithStrictProvenance | UnsupportedForeignItem(_) =>
Some("unsupported operation"),

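The `Exit` arm above is a small binding-mode cleanup rather than an `interp_ok` conversion: matching on `&Exit { code, leak_check }` destructures through the reference, so the two `Copy` fields are bound by value and the old `(*code, *leak_check)` dereferences are no longer needed. A self-contained illustration of the same difference, outside of Miri:

    struct Info { code: i32, leak_check: bool }

    fn read(info: &Info) -> (i32, bool) {
        match info {
            // Binds `code: i32` and `leak_check: bool` directly; without the
            // leading `&`, both would be bound as references and the body
            // would need `(*code, *leak_check)`.
            &Info { code, leak_check } => (code, leak_check),
        }
    }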
View File

@ -259,7 +259,7 @@ impl<'tcx> MainThreadState<'tcx> {
throw_machine_stop!(TerminationInfo::Exit { code: exit_code, leak_check: true });
}
}
Ok(Poll::Pending)
interp_ok(Poll::Pending)
}
}
@ -420,7 +420,7 @@ pub fn create_ecx<'tcx>(
}
}
Ok(ecx)
interp_ok(ecx)
}
/// Evaluates the entry function specified by `entry_id`.
@ -436,7 +436,7 @@ pub fn eval_entry<'tcx>(
// Copy setting before we move `config`.
let ignore_leaks = config.ignore_leaks;
let mut ecx = match create_ecx(tcx, entry_id, entry_type, &config) {
let mut ecx = match create_ecx(tcx, entry_id, entry_type, &config).report_err() {
Ok(v) => v,
Err(err) => {
let (kind, backtrace) = err.into_parts();
@ -453,7 +453,7 @@ pub fn eval_entry<'tcx>(
panic::resume_unwind(panic_payload)
});
// `Ok` can never happen.
let Err(res) = res;
let Err(err) = res.report_err();
// Machine cleanup. Only do this if all threads have terminated; threads that are still running
// might cause Stacked Borrows errors (https://github.com/rust-lang/miri/issues/2396).
@ -466,7 +466,7 @@ pub fn eval_entry<'tcx>(
}
// Process the result.
let (return_code, leak_check) = report_error(&ecx, res)?;
let (return_code, leak_check) = report_error(&ecx, err)?;
if leak_check && !ignore_leaks {
// Check for thread leaks.
if !ecx.have_all_terminated() {

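The driver hunks above show the boundary where interpreter results are turned back into ordinary `Result`s: `report_err()` is called on the outcome of `create_ecx` and of the evaluation loop, and the resulting error is taken apart with `into_parts()` before being handed to `report_error`. A condensed sketch of that boundary (the error-reporting body is elided in the hunk and stays elided here):

    match create_ecx(tcx, entry_id, entry_type, &config).report_err() {
        Ok(ecx) => {
            // ... run the interpreter with `ecx` ...
        }
        Err(err) => {
            let (kind, backtrace) = err.into_parts();
            // ... report `kind` and `backtrace` through the usual diagnostics ...
        }
    }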
View File

@ -228,7 +228,7 @@ pub fn iter_exported_symbols<'tcx>(
}
}
}
Ok(())
interp_ok(())
}
/// Convert a softfloat type to its corresponding hostfloat type.
@ -431,7 +431,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let field = this.project_field(dest, idx)?;
this.write_int(val, &field)?;
}
Ok(())
interp_ok(())
}
/// Write the given fields of the given place.
@ -445,7 +445,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let field = this.project_field_named(dest, name)?;
this.write_int(val, &field)?;
}
Ok(())
interp_ok(())
}
/// Write a 0 of the appropriate size to `dest`.
@ -455,7 +455,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
/// Test if this pointer equals 0.
fn ptr_is_null(&self, ptr: Pointer) -> InterpResult<'tcx, bool> {
Ok(ptr.addr().bytes() == 0)
interp_ok(ptr.addr().bytes() == 0)
}
/// Generate some random bytes, and write them to `dest`.
@ -466,7 +466,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// any additional checks - it's okay if the pointer is invalid,
// since we wouldn't actually be writing to it.
if len == 0 {
return Ok(());
return interp_ok(());
}
let this = self.eval_context_mut();
@ -571,7 +571,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
}
cur_addr += unsafe_cell_size;
// Done
Ok(())
interp_ok(())
};
// Run a visitor
{
@ -589,7 +589,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
if unsafe_cell_size != Size::ZERO {
unsafe_cell_action(&place.ptr(), unsafe_cell_size)
} else {
Ok(())
interp_ok(())
}
},
};
@ -599,7 +599,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// So pretend there is a 0-sized `UnsafeCell` at the end.
unsafe_cell_action(&place.ptr().wrapping_offset(size, this), Size::ZERO)?;
// Done!
return Ok(());
return interp_ok(());
/// Visiting the memory covered by a `MemPlace`, being aware of
/// whether we are inside an `UnsafeCell` or not.
@ -642,7 +642,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
(self.unsafe_cell_action)(v)
} else if self.ecx.type_is_freeze(v.layout.ty) {
// This is `Freeze`, there cannot be an `UnsafeCell`
Ok(())
interp_ok(())
} else if matches!(v.layout.fields, FieldsShape::Union(..)) {
// A (non-frozen) union. We fall back to whatever the type says.
(self.unsafe_cell_action)(v)
@ -691,7 +691,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
if !self.eval_context_ref().machine.communicate() {
self.reject_in_isolation(name, RejectOpWith::Abort)?;
}
Ok(())
interp_ok(())
}
/// Helper function used inside the shims of foreign functions which reject the op
@ -713,13 +713,13 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
.dcx()
.warn(format!("{op_name} was made to return an error due to isolation"));
}
Ok(())
interp_ok(())
}
RejectOpWith::Warning => {
this.emit_diagnostic(NonHaltingDiagnostic::RejectedIsolatedOp(op_name.to_string()));
Ok(())
interp_ok(())
}
RejectOpWith::NoWarning => Ok(()), // no warning
RejectOpWith::NoWarning => interp_ok(()), // no warning
}
}
@ -750,14 +750,14 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
fn last_error_place(&mut self) -> InterpResult<'tcx, MPlaceTy<'tcx>> {
let this = self.eval_context_mut();
if let Some(errno_place) = this.active_thread_ref().last_error.as_ref() {
Ok(errno_place.clone())
interp_ok(errno_place.clone())
} else {
// Allocate new place, set initial value to 0.
let errno_layout = this.machine.layouts.u32;
let errno_place = this.allocate(errno_layout, MiriMemoryKind::Machine.into())?;
this.write_scalar(Scalar::from_u32(0), &errno_place)?;
this.active_thread_mut().last_error = Some(errno_place.clone());
Ok(errno_place)
interp_ok(errno_place)
}
}
@ -783,14 +783,14 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
if target.families.iter().any(|f| f == "unix") {
for &(name, kind) in UNIX_IO_ERROR_TABLE {
if err.kind() == kind {
return Ok(this.eval_libc(name));
return interp_ok(this.eval_libc(name));
}
}
throw_unsup_format!("unsupported io error: {err}")
} else if target.families.iter().any(|f| f == "windows") {
for &(name, kind) in WINDOWS_IO_ERROR_TABLE {
if err.kind() == kind {
return Ok(this.eval_windows("c", name));
return interp_ok(this.eval_windows("c", name));
}
}
throw_unsup_format!("unsupported io error: {err}");
@ -814,18 +814,18 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let errnum = errnum.to_i32()?;
for &(name, kind) in UNIX_IO_ERROR_TABLE {
if errnum == this.eval_libc_i32(name) {
return Ok(Some(kind));
return interp_ok(Some(kind));
}
}
return Ok(None);
return interp_ok(None);
} else if target.families.iter().any(|f| f == "windows") {
let errnum = errnum.to_u32()?;
for &(name, kind) in WINDOWS_IO_ERROR_TABLE {
if errnum == this.eval_windows("c", name).to_u32()? {
return Ok(Some(kind));
return interp_ok(Some(kind));
}
}
return Ok(None);
return interp_ok(None);
} else {
throw_unsup_format!(
"converting errnum into io::Error is unsupported for OS {}",
@ -839,8 +839,8 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
self.set_last_error(self.io_error_to_errnum(err)?)
}
/// Helper function that consumes an `std::io::Result<T>` and returns an
/// `InterpResult<'tcx,T>::Ok` instead. In case the result is an error, this function returns
/// Helper function that consumes a `std::io::Result<T>` and returns a
/// `InterpResult<'tcx, T>` instead. In case the result is an error, this function returns
/// `Ok(-1)` and sets the last OS error accordingly.
///
/// This function uses `T: From<i32>` instead of `i32` directly because some IO related
@ -850,10 +850,10 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
result: std::io::Result<T>,
) -> InterpResult<'tcx, T> {
match result {
Ok(ok) => Ok(ok),
Ok(ok) => interp_ok(ok),
Err(e) => {
self.eval_context_mut().set_last_error_from_io_error(e)?;
Ok((-1).into())
interp_ok((-1).into())
}
}
}
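The doc-comment fix above describes a helper whose pieces are spread over two hunks, so here is its overall shape in one place; the method name is not visible in the diff and is illustrative, while the body matches the lines shown:

    /// Consume a `std::io::Result<T>`; on error, set the target's last OS error
    /// and return `-1` converted into `T`.
    fn io_result_to_value<T: From<i32>>(   // illustrative name
        &mut self,
        result: std::io::Result<T>,
    ) -> InterpResult<'tcx, T> {
        match result {
            Ok(ok) => interp_ok(ok),
            Err(e) => {
                self.eval_context_mut().set_last_error_from_io_error(e)?;
                interp_ok((-1).into())
            }
        }
    }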
@ -866,7 +866,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
) -> InterpResult<'tcx, MPlaceTy<'tcx>> {
let this = self.eval_context_ref();
let ptr = this.read_pointer(op)?;
Ok(this.ptr_to_mplace(ptr, layout))
interp_ok(this.ptr_to_mplace(ptr, layout))
}
/// Calculates the MPlaceTy given the offset and layout of an access on an operand
@ -884,7 +884,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// Ensure that the access is within bounds.
assert!(base_layout.size >= offset + value_layout.size);
let value_place = op_place.offset(offset, value_layout, this)?;
Ok(value_place)
interp_ok(value_place)
}
fn deref_pointer_and_read(
@ -924,7 +924,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let nanoseconds_scalar = this.read_scalar(&nanoseconds_place)?;
let nanoseconds = nanoseconds_scalar.to_target_isize(this)?;
Ok(try {
interp_ok(try {
// tv_sec must be non-negative.
let seconds: u64 = seconds.try_into().ok()?;
// tv_nsec must be non-negative.
@ -947,7 +947,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let ptr = ptr.to_pointer(this)?;
let len = len.to_target_usize(this)?;
let bytes = this.read_bytes_ptr_strip_provenance(ptr, Size::from_bytes(len))?;
Ok(bytes)
interp_ok(bytes)
}
/// Read a sequence of bytes until the first null terminator.
@ -992,11 +992,11 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let string_length = u64::try_from(c_str.len()).unwrap();
let string_length = string_length.strict_add(1);
if size < string_length {
return Ok((false, string_length));
return interp_ok((false, string_length));
}
self.eval_context_mut()
.write_bytes_ptr(ptr, c_str.iter().copied().chain(iter::once(0u8)))?;
Ok((true, string_length))
interp_ok((true, string_length))
}
/// Helper function to read a sequence of unsigned integers of the given size and alignment
@ -1031,7 +1031,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
}
}
Ok(wchars)
interp_ok(wchars)
}
/// Read a sequence of u16 until the first null terminator.
@ -1056,7 +1056,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let string_length = u64::try_from(wide_str.len()).unwrap();
let string_length = string_length.strict_add(1);
if size < string_length {
return Ok((false, string_length));
return interp_ok((false, string_length));
}
// Store the UTF-16 string.
@ -1068,7 +1068,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let offset = u64::try_from(offset).unwrap();
alloc.write_scalar(alloc_range(size2 * offset, size2), Scalar::from_u16(wchar))?;
}
Ok((true, string_length))
interp_ok((true, string_length))
}
/// Read a sequence of wchar_t until the first null terminator.
@ -1093,7 +1093,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
abi.name()
)
}
Ok(())
interp_ok(())
}
fn frame_in_std(&self) -> bool {
@ -1128,7 +1128,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// message is slightly different here to make automated analysis easier
let error_msg = format!("unsupported Miri functionality: {error_msg}");
this.start_panic(error_msg.as_ref(), mir::UnwindAction::Continue)?;
Ok(())
interp_ok(())
} else {
throw_machine_stop!(TerminationInfo::UnsupportedForeignItem(error_msg));
}
@ -1148,7 +1148,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// compiler-builtins when running other crates, but Miri can still be run on
// compiler-builtins itself (or any crate that uses it as a normal dependency)
if self.eval_context_ref().tcx.is_compiler_builtins(instance.def_id().krate) {
return Ok(());
return interp_ok(());
}
throw_machine_stop!(TerminationInfo::SymbolShimClashing {
@ -1156,7 +1156,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
span: body.span.data(),
})
}
Ok(())
interp_ok(())
}
fn check_shim<'a, const N: usize>(
@ -1242,11 +1242,11 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
) {
// Floating point value is NaN (flagged with INVALID_OP) or outside the range
// of values of the integer type (flagged with OVERFLOW or UNDERFLOW).
Ok(None)
interp_ok(None)
} else {
// Floating point value can be represented by the integer type after rounding.
// The INEXACT flag is ignored on purpose to allow rounding.
Ok(Some(ImmTy::from_scalar(val, cast_to)))
interp_ok(Some(ImmTy::from_scalar(val, cast_to)))
}
}
@ -1283,7 +1283,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
"attempted to call intrinsic `{intrinsic}` that requires missing target feature {target_feature}"
);
}
Ok(())
interp_ok(())
}
/// Lookup an array of immediates stored as a linker section of name `name`.
@ -1296,7 +1296,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
iter_exported_symbols(tcx, |_cnum, def_id| {
let attrs = tcx.codegen_fn_attrs(def_id);
let Some(link_section) = attrs.link_section else {
return Ok(());
return interp_ok(());
};
if link_section.as_str() == name {
let instance = ty::Instance::mono(tcx, def_id);
@ -1308,10 +1308,10 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let val = this.read_immediate(&const_val)?;
array.push(val);
}
Ok(())
interp_ok(())
})?;
Ok(array)
interp_ok(array)
}
}
@ -1361,7 +1361,7 @@ where
&'a [OpTy<'tcx>; N]: TryFrom<&'a [OpTy<'tcx>]>,
{
if let Ok(ops) = args.try_into() {
return Ok(ops);
return interp_ok(ops);
}
throw_ub_format!("incorrect number of arguments: got {}, expected {}", args.len(), N)
}
@ -1401,7 +1401,7 @@ pub(crate) fn bool_to_simd_element(b: bool, size: Size) -> Scalar {
pub(crate) fn simd_element_to_bool(elem: ImmTy<'_>) -> InterpResult<'_, bool> {
let val = elem.to_scalar().to_int(elem.layout.size)?;
Ok(match val {
interp_ok(match val {
0 => false,
-1 => true,
_ => throw_ub_format!("each element of a SIMD mask must be all-0-bits or all-1-bits"),

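One detail worth noting in `simd_element_to_bool` above: the `throw_*!` macros diverge (they immediately return an error from the enclosing function), which is why an error arm can sit inside an expression that is otherwise wrapped in `interp_ok(...)`. Reassembled from the hunk above, with only the closing braces added:

    pub(crate) fn simd_element_to_bool(elem: ImmTy<'_>) -> InterpResult<'_, bool> {
        let val = elem.to_scalar().to_int(elem.layout.size)?;
        interp_ok(match val {
            0 => false,
            -1 => true,
            // Diverges, so this arm does not need to produce a `bool`.
            _ => throw_ub_format!("each element of a SIMD mask must be all-0-bits or all-1-bits"),
        })
    }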
View File

@ -115,9 +115,9 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.atomic_rmw_op(args, dest, AtomicOp::Max, rw_ord(ord))?;
}
_ => return Ok(EmulateItemResult::NotSupported),
_ => return interp_ok(EmulateItemResult::NotSupported),
}
Ok(EmulateItemResult::NeedsReturn)
interp_ok(EmulateItemResult::NeedsReturn)
}
}
@ -138,7 +138,7 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> {
let val = this.read_scalar_atomic(&place, atomic)?;
// Perform regular store.
this.write_scalar(val, dest)?;
Ok(())
interp_ok(())
}
fn atomic_store(&mut self, args: &[OpTy<'tcx>], atomic: AtomicWriteOrd) -> InterpResult<'tcx> {
@ -151,7 +151,7 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> {
let val = this.read_scalar(val)?;
// Perform atomic store
this.write_scalar_atomic(val, &place, atomic)?;
Ok(())
interp_ok(())
}
fn compiler_fence_intrinsic(
@ -162,7 +162,7 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> {
let [] = check_arg_count(args)?;
let _ = atomic;
//FIXME: compiler fences are currently ignored
Ok(())
interp_ok(())
}
fn atomic_fence_intrinsic(
@ -173,7 +173,7 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> {
let this = self.eval_context_mut();
let [] = check_arg_count(args)?;
this.atomic_fence(atomic)?;
Ok(())
interp_ok(())
}
fn atomic_rmw_op(
@ -203,17 +203,17 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> {
AtomicOp::Min => {
let old = this.atomic_min_max_scalar(&place, rhs, true, atomic)?;
this.write_immediate(*old, dest)?; // old value is returned
Ok(())
interp_ok(())
}
AtomicOp::Max => {
let old = this.atomic_min_max_scalar(&place, rhs, false, atomic)?;
this.write_immediate(*old, dest)?; // old value is returned
Ok(())
interp_ok(())
}
AtomicOp::MirOp(op, not) => {
let old = this.atomic_rmw_op_immediate(&place, &rhs, op, not, atomic)?;
this.write_immediate(*old, dest)?; // old value is returned
Ok(())
interp_ok(())
}
}
}
@ -232,7 +232,7 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> {
let old = this.atomic_exchange_scalar(&place, new, atomic)?;
this.write_scalar(old, dest)?; // old value is returned
Ok(())
interp_ok(())
}
fn atomic_compare_exchange_impl(
@ -261,7 +261,7 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> {
// Return old value.
this.write_immediate(old, dest)?;
Ok(())
interp_ok(())
}
fn atomic_compare_exchange(

View File

@ -29,7 +29,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// See if the core engine can handle this intrinsic.
if this.eval_intrinsic(instance, args, dest, ret)? {
return Ok(None);
return interp_ok(None);
}
let intrinsic_name = this.tcx.item_name(instance.def_id());
let intrinsic_name = intrinsic_name.as_str();
@ -51,7 +51,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
"Miri can only use intrinsic fallback bodies that exactly reflect the specification: they fully check for UB and are as non-deterministic as possible. After verifying that `{intrinsic_name}` does so, add the `#[miri::intrinsic_fallback_is_spec]` attribute to it; also ping @rust-lang/miri when you do that"
);
}
Ok(Some(ty::Instance {
interp_ok(Some(ty::Instance {
def: ty::InstanceKind::Item(instance.def_id()),
args: instance.args,
}))
@ -59,14 +59,14 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
EmulateItemResult::NeedsReturn => {
trace!("{:?}", this.dump_place(&dest.clone().into()));
this.return_to_block(ret)?;
Ok(None)
interp_ok(None)
}
EmulateItemResult::NeedsUnwind => {
// Jump to the unwind block to begin unwinding.
this.unwind_to_block(unwind)?;
Ok(None)
interp_ok(None)
}
EmulateItemResult::AlreadyJumped => Ok(None),
EmulateItemResult::AlreadyJumped => interp_ok(None),
}
}
@ -99,7 +99,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
"catch_unwind" => {
this.handle_catch_unwind(args, dest, ret)?;
// This pushed a stack frame, don't jump to `ret`.
return Ok(EmulateItemResult::AlreadyJumped);
return interp_ok(EmulateItemResult::AlreadyJumped);
}
// Raw memory accesses
@ -378,7 +378,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let ty::Float(fty) = x.layout.ty.kind() else {
bug!("float_finite: non-float input type {}", x.layout.ty)
};
Ok(match fty {
interp_ok(match fty {
FloatTy::F16 => x.to_scalar().to_f16()?.is_finite(),
FloatTy::F32 => x.to_scalar().to_f32()?.is_finite(),
FloatTy::F64 => x.to_scalar().to_f64()?.is_finite(),
@ -429,9 +429,9 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
throw_machine_stop!(TerminationInfo::Abort(format!("trace/breakpoint trap")))
}
_ => return Ok(EmulateItemResult::NotSupported),
_ => return interp_ok(EmulateItemResult::NotSupported),
}
Ok(EmulateItemResult::NeedsReturn)
interp_ok(EmulateItemResult::NeedsReturn)
}
}

View File

@ -247,7 +247,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// This does NaN adjustments.
let val = this.binary_op(mir_op, &left, &right).map_err(|err| {
match err.kind() {
InterpError::UndefinedBehavior(UndefinedBehaviorInfo::ShiftOverflow { shift_amount, .. }) => {
&InterpError::UndefinedBehavior(UndefinedBehaviorInfo::ShiftOverflow { shift_amount, .. }) => {
// This resets the interpreter backtrace, but it's not worth avoiding that.
let shift_amount = match shift_amount {
Either::Left(v) => v.to_string(),
@ -786,9 +786,9 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
}
}
_ => return Ok(EmulateItemResult::NotSupported),
_ => return interp_ok(EmulateItemResult::NotSupported),
}
Ok(EmulateItemResult::NeedsReturn)
interp_ok(EmulateItemResult::NeedsReturn)
}
fn fminmax_op(
@ -804,7 +804,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
};
let left = left.to_scalar();
let right = right.to_scalar();
Ok(match float_ty {
interp_ok(match float_ty {
FloatTy::F16 => unimplemented!("f16_f128"),
FloatTy::F32 => {
let left = left.to_f32()?;

View File

@ -730,7 +730,7 @@ impl<'tcx> MiriMachine<'tcx> {
EnvVars::init(this, config)?;
MiriMachine::init_extern_statics(this)?;
ThreadManager::init(this, on_main_stack_empty);
Ok(())
interp_ok(())
}
pub(crate) fn add_extern_static(this: &mut MiriInterpCx<'tcx>, name: &str, ptr: Pointer) {
@ -992,7 +992,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
throw_ub_format!("{msg}");
}
}
Ok(())
interp_ok(())
}
#[inline(always)]
@ -1019,7 +1019,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
}
// Otherwise, load the MIR.
Ok(Some((ecx.load_mir(instance.def, None)?, instance)))
interp_ok(Some((ecx.load_mir(instance.def, None)?, instance)))
}
#[inline(always)]
@ -1072,7 +1072,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
ret: None,
unwind: mir::UnwindAction::Unreachable,
})?;
Ok(())
interp_ok(())
}
#[inline(always)]
@ -1097,7 +1097,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
}
fn ub_checks(ecx: &InterpCx<'tcx, Self>) -> InterpResult<'tcx, bool> {
Ok(ecx.tcx.sess.ub_checks())
interp_ok(ecx.tcx.sess.ub_checks())
}
fn thread_local_static_pointer(
@ -1136,7 +1136,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
shim_align = shim_align.bytes(),
)
}
Ok(ptr)
interp_ok(ptr)
} else {
throw_unsup_format!("extern static `{link_name}` is not supported by Miri",)
}
@ -1186,7 +1186,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
.insert(id, (ecx.machine.current_span(), None));
}
Ok(AllocExtra { borrow_tracker, data_race, weak_memory, backtrace })
interp_ok(AllocExtra { borrow_tracker, data_race, weak_memory, backtrace })
}
fn adjust_alloc_root_pointer(
@ -1233,7 +1233,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
Provenance::Wildcard => {
// No need to do anything for wildcard pointers as
// their provenances have already been previously exposed.
Ok(())
interp_ok(())
}
}
}
@ -1286,7 +1286,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
|ptr| ecx.global_root_pointer(ptr),
)?;
let extra = Self::init_alloc_extra(ecx, id, kind, alloc.size(), alloc.align)?;
Ok(Cow::Owned(alloc.with_extra(extra)))
interp_ok(Cow::Owned(alloc.with_extra(extra)))
}
#[inline(always)]
@ -1310,7 +1310,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
if let Some(weak_memory) = &alloc_extra.weak_memory {
weak_memory.memory_accessed(range, machine.data_race.as_ref().unwrap());
}
Ok(())
interp_ok(())
}
#[inline(always)]
@ -1334,7 +1334,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
if let Some(weak_memory) = &alloc_extra.weak_memory {
weak_memory.memory_accessed(range, machine.data_race.as_ref().unwrap());
}
Ok(())
interp_ok(())
}
#[inline(always)]
@ -1367,7 +1367,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
*deallocated_at = Some(machine.current_span());
}
machine.free_alloc_id(alloc_id, size, align, kind);
Ok(())
interp_ok(())
}
#[inline(always)]
@ -1379,7 +1379,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
if ecx.machine.borrow_tracker.is_some() {
ecx.retag_ptr_value(kind, val)
} else {
Ok(val.clone())
interp_ok(val.clone())
}
}
@ -1392,7 +1392,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
if ecx.machine.borrow_tracker.is_some() {
ecx.retag_place_contents(kind, place)?;
}
Ok(())
interp_ok(())
}
fn protect_in_place_function_argument(
@ -1413,7 +1413,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
// Conveniently this also ensures that the place actually points to suitable memory.
ecx.write_uninit(&protected_place)?;
// Now we throw away the protected place, ensuring its tag is never used again.
Ok(())
interp_ok(())
}
#[inline(always)]
@ -1447,7 +1447,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
data_race: ecx.machine.data_race.as_ref().map(|_| data_race::FrameState::default()),
};
Ok(frame.with_extra(extra))
interp_ok(frame.with_extra(extra))
}
fn stack<'a>(
@ -1489,7 +1489,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
// Make sure some time passes.
ecx.machine.clock.tick();
Ok(())
interp_ok(())
}
#[inline(always)]
@ -1500,7 +1500,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
let stack_len = ecx.active_thread_stack().len();
ecx.active_thread_mut().set_top_user_relevant_frame(stack_len - 1);
}
Ok(())
interp_ok(())
}
fn before_stack_pop(
@ -1516,7 +1516,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
// concurrency and what it prints is just plain wrong. So we print our own information
// instead. (Cc https://github.com/rust-lang/miri/issues/2266)
info!("Leaving {}", ecx.frame().instance());
Ok(())
interp_ok(())
}
#[inline(always)]
@ -1554,7 +1554,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
if let Some(data_race) = &ecx.frame().extra.data_race {
data_race.local_read(local, &ecx.machine);
}
Ok(())
interp_ok(())
}
fn after_local_write(
@ -1565,7 +1565,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
if let Some(data_race) = &ecx.frame().extra.data_race {
data_race.local_write(local, storage_live, &ecx.machine);
}
Ok(())
interp_ok(())
}
fn after_local_moved_to_memory(
@ -1587,7 +1587,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
{
data_race.local_moved_to_memory(local, alloc_info.data_race.as_mut().unwrap(), machine);
}
Ok(())
interp_ok(())
}
fn eval_mir_constant<F>(
@ -1611,9 +1611,9 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
Entry::Vacant(ve) => {
let op = eval(ecx, val, span, layout)?;
ve.insert(op.clone());
Ok(op)
interp_ok(op)
}
Entry::Occupied(oe) => Ok(oe.get().clone()),
Entry::Occupied(oe) => interp_ok(oe.get().clone()),
}
}

View File

@ -21,7 +21,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let this = self.eval_context_ref();
trace!("ptr_op: {:?} {:?} {:?}", *left, bin_op, *right);
Ok(match bin_op {
interp_ok(match bin_op {
Eq | Ne | Lt | Le | Gt | Ge => {
assert_eq!(left.layout.abi, right.layout.abi); // types can differ, e.g. fn ptrs with different `for`
let size = this.pointer_size();

View File

@ -61,7 +61,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let Some(allocator_kind) = this.tcx.allocator_kind(()) else {
// in real code, this symbol does not exist without an allocator
return Ok(EmulateItemResult::NotSupported);
return interp_ok(EmulateItemResult::NotSupported);
};
match allocator_kind {
@ -71,11 +71,11 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// and not execute any Miri shim. Somewhat unintuitively doing so is done
// by returning `NotSupported`, which triggers the `lookup_exported_symbol`
// fallback case in `emulate_foreign_item`.
Ok(EmulateItemResult::NotSupported)
interp_ok(EmulateItemResult::NotSupported)
}
AllocatorKind::Default => {
default(this)?;
Ok(EmulateItemResult::NeedsReturn)
interp_ok(EmulateItemResult::NeedsReturn)
}
}
}
@ -92,7 +92,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
)
.unwrap();
}
Ok(ptr.into())
interp_ok(ptr.into())
}
fn posix_memalign(
@ -109,7 +109,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// Align must be power of 2, and also at least ptr-sized (POSIX rules).
// But failure to adhere to this is not UB, it's an error condition.
if !align.is_power_of_two() || align < this.pointer_size().bytes() {
Ok(this.eval_libc("EINVAL"))
interp_ok(this.eval_libc("EINVAL"))
} else {
let ptr = this.allocate_ptr(
Size::from_bytes(size),
@ -117,7 +117,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
MiriMemoryKind::C.into(),
)?;
this.write_pointer(ptr, &memptr)?;
Ok(Scalar::from_i32(0))
interp_ok(Scalar::from_i32(0))
}
}
@ -126,7 +126,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
if !this.ptr_is_null(ptr)? {
this.deallocate_ptr(ptr, None, MiriMemoryKind::C.into())?;
}
Ok(())
interp_ok(())
}
fn realloc(&mut self, old_ptr: Pointer, new_size: u64) -> InterpResult<'tcx, Pointer> {
@ -148,7 +148,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
new_align,
MiriMemoryKind::C.into(),
)?;
Ok(new_ptr.into())
interp_ok(new_ptr.into())
}
}
}
@ -188,9 +188,9 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
Align::from_bytes(align).unwrap(),
MiriMemoryKind::C.into(),
)?;
Ok(ptr.into())
interp_ok(ptr.into())
}
_ => Ok(Pointer::null()),
_ => interp_ok(Pointer::null()),
}
}
}

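The `posix_memalign` shim above also shows how non-UB failures are surfaced: an invalid alignment is an ordinary errno condition for the interpreted program, not an interpreter error, so both branches produce a successful interpreter result, just with different scalars. Condensed from the hunks above (the allocation arguments are gathered from the neighbouring `malloc`/`aligned_alloc` hunks):

    // Align must be a power of two and at least pointer-sized (POSIX rules);
    // violating that is an error condition, not UB.
    if !align.is_power_of_two() || align < this.pointer_size().bytes() {
        interp_ok(this.eval_libc("EINVAL"))
    } else {
        let ptr = this.allocate_ptr(
            Size::from_bytes(size),
            Align::from_bytes(align).unwrap(),
            MiriMemoryKind::C.into(),
        )?;
        this.write_pointer(ptr, &memptr)?;
        interp_ok(Scalar::from_i32(0))
    }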
View File

@ -107,7 +107,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
_ => throw_unsup_format!("unknown `miri_get_backtrace` flags {}", flags),
};
Ok(())
interp_ok(())
}
fn resolve_frame_pointer(
@ -135,7 +135,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let name = fn_instance.to_string();
let filename = lo.file.name.prefer_remapped_unconditionaly().to_string();
Ok((fn_instance, lo, name, filename))
interp_ok((fn_instance, lo, name, filename))
}
fn handle_miri_resolve_frame(
@ -213,7 +213,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.write_pointer(fn_ptr, &this.project_field(dest, 4)?)?;
}
Ok(())
interp_ok(())
}
fn handle_miri_resolve_frame_names(
@ -237,6 +237,6 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.write_bytes_ptr(this.read_pointer(name_ptr)?, name.bytes())?;
this.write_bytes_ptr(this.read_pointer(filename_ptr)?, filename.bytes())?;
Ok(())
interp_ok(())
}
}

View File

@ -56,15 +56,15 @@ impl<'tcx> EnvVars<'tcx> {
};
ecx.machine.env_vars = env_vars;
Ok(())
interp_ok(())
}
pub(crate) fn cleanup(ecx: &mut InterpCx<'tcx, MiriMachine<'tcx>>) -> InterpResult<'tcx> {
let this = ecx.eval_context_mut();
match this.machine.env_vars {
EnvVars::Unix(_) => UnixEnvVars::cleanup(this),
EnvVars::Windows(_) => Ok(()), // no cleanup needed
EnvVars::Uninit => Ok(()),
EnvVars::Windows(_) => interp_ok(()), // no cleanup needed
EnvVars::Uninit => interp_ok(()),
}
}
@ -104,7 +104,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
fn get_env_var(&mut self, name: &OsStr) -> InterpResult<'tcx, Option<OsString>> {
let this = self.eval_context_ref();
match &this.machine.env_vars {
EnvVars::Uninit => Ok(None),
EnvVars::Uninit => interp_ok(None),
EnvVars::Unix(vars) => vars.get(this, name),
EnvVars::Windows(vars) => vars.get(name),
}

View File

@ -11,7 +11,7 @@ impl<'tcx> MiriMachine<'tcx> {
let place = this.allocate(val.layout, MiriMemoryKind::ExternStatic.into())?;
this.write_immediate(*val, &place)?;
Self::add_extern_static(this, name, place.ptr());
Ok(())
interp_ok(())
}
/// Zero-initialized pointer-sized extern statics are pretty common.
@ -26,7 +26,7 @@ impl<'tcx> MiriMachine<'tcx> {
let val = ImmTy::from_int(0, this.machine.layouts.usize);
Self::alloc_extern_static(this, name, val)?;
}
Ok(())
interp_ok(())
}
/// Extern statics that are initialized with function pointers to the symbols of the same name.
@ -41,7 +41,7 @@ impl<'tcx> MiriMachine<'tcx> {
let val = ImmTy::from_scalar(Scalar::from_pointer(ptr, this), layout);
Self::alloc_extern_static(this, name, val)?;
}
Ok(())
interp_ok(())
}
/// Sets up the "extern statics" for this machine.
@ -87,6 +87,6 @@ impl<'tcx> MiriMachine<'tcx> {
}
_ => {} // No "extern statics" supported on this target
}
Ok(())
interp_ok(())
}
}

View File

@ -62,7 +62,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let handler = this
.lookup_exported_symbol(Symbol::intern(name))?
.expect("missing alloc error handler symbol");
return Ok(Some(handler));
return interp_ok(Some(handler));
}
_ => {}
}
@ -80,18 +80,18 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
EmulateItemResult::AlreadyJumped => (),
EmulateItemResult::NotSupported => {
if let Some(body) = this.lookup_exported_symbol(link_name)? {
return Ok(Some(body));
return interp_ok(Some(body));
}
this.handle_unsupported_foreign_item(format!(
"can't call foreign function `{link_name}` on OS `{os}`",
os = this.tcx.sess.target.os,
))?;
return Ok(None);
return interp_ok(None);
}
}
Ok(None)
interp_ok(None)
}
fn is_dyn_sym(&self, name: &str) -> bool {
@ -116,7 +116,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
) -> InterpResult<'tcx> {
let res = self.emulate_foreign_item(sym.0, abi, args, dest, ret, unwind)?;
assert!(res.is_none(), "DynSyms that delegate are not supported");
Ok(())
interp_ok(())
}
/// Lookup the body of a function that has `link_name` as the symbol name.
@ -143,7 +143,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
tcx.item_name(def_id)
} else {
// Skip over items without an explicitly defined symbol name.
return Ok(());
return interp_ok(());
};
if symbol_name == link_name {
if let Some((original_instance, original_cnum)) = instance_and_crate {
@ -175,15 +175,15 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
}
instance_and_crate = Some((ty::Instance::mono(tcx, def_id), cnum));
}
Ok(())
interp_ok(())
})?;
e.insert(instance_and_crate.map(|ic| ic.0))
}
};
match instance {
None => Ok(None), // no symbol with this name
Some(instance) => Ok(Some((this.load_mir(instance.def, None)?, instance))),
None => interp_ok(None), // no symbol with this name
Some(instance) => interp_ok(Some((this.load_mir(instance.def, None)?, instance))),
}
}
}
@ -214,7 +214,7 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
}
}
Ok(())
interp_ok(())
}
fn emulate_foreign_item_inner(
@ -234,7 +234,7 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
// by the specified `.so` file; we should continue and check if it corresponds to
// a provided shim.
if this.call_native_fn(link_name, dest, args)? {
return Ok(EmulateItemResult::NeedsReturn);
return interp_ok(EmulateItemResult::NeedsReturn);
}
}
@ -268,7 +268,7 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
//
// // ...
//
// Ok(Scalar::from_u32(42))
// interp_ok(Scalar::from_u32(42))
// }
// ```
// You might find existing shims not following this pattern, most
@ -281,7 +281,7 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
"miri_start_unwind" => {
let [payload] = this.check_shim(abi, Abi::Rust, link_name, args)?;
this.handle_miri_start_unwind(payload)?;
return Ok(EmulateItemResult::NeedsUnwind);
return interp_ok(EmulateItemResult::NeedsUnwind);
}
"miri_run_provenance_gc" => {
let [] = this.check_shim(abi, Abi::Rust, link_name, args)?;
@ -294,6 +294,7 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
err_machine_stop!(TerminationInfo::Abort(format!(
"pointer passed to `miri_get_alloc_id` must not be dangling, got {ptr:?}"
)))
.into()
})?;
this.write_scalar(Scalar::from_u64(alloc_id.0.get()), dest)?;
}
@ -524,7 +525,7 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
"__rust_alloc" => return this.emulate_allocator(default),
"miri_alloc" => {
default(this)?;
return Ok(EmulateItemResult::NeedsReturn);
return interp_ok(EmulateItemResult::NeedsReturn);
}
_ => unreachable!(),
}
@ -584,7 +585,7 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
}
"miri_dealloc" => {
default(this)?;
return Ok(EmulateItemResult::NeedsReturn);
return interp_ok(EmulateItemResult::NeedsReturn);
}
_ => unreachable!(),
}
@ -1000,11 +1001,11 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
shims::windows::foreign_items::EvalContextExt::emulate_foreign_item_inner(
this, link_name, abi, args, dest,
),
_ => Ok(EmulateItemResult::NotSupported),
_ => interp_ok(EmulateItemResult::NotSupported),
},
};
// We only fall through to here if we did *not* hit the `_` arm above,
// i.e., if we actually emulated the function with one of the shims.
Ok(EmulateItemResult::NeedsReturn)
interp_ok(EmulateItemResult::NeedsReturn)
}
}

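The long comment in the foreign-item dispatcher (partially visible above) documents the template new shims should follow, and its inline example now ends in `interp_ok(...)` as well. A sketch of a helper written to that template; the name and the computed value are illustrative, only the return shape is taken from the hunks above:

    fn forty_two<'tcx>(this: &mut MiriInterpCx<'tcx>) -> InterpResult<'tcx, Scalar> {
        // ... do whatever work the shim needs ...
        interp_ok(Scalar::from_u32(42))
    }

The dispatch arm then checks the shim's ABI and arguments with `check_shim`, writes the returned scalar to `dest`, and falls through to the final `interp_ok(EmulateItemResult::NeedsReturn)` shown at the end of the last hunk.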
View File

@ -73,11 +73,11 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
// have the output_type `Tuple([])`.
ty::Tuple(t_list) if t_list.len() == 0 => {
unsafe { ffi::call::<()>(ptr, libffi_args.as_slice()) };
return Ok(ImmTy::uninit(dest.layout));
return interp_ok(ImmTy::uninit(dest.layout));
}
_ => throw_unsup_format!("unsupported return type for native call: {:?}", link_name),
};
Ok(ImmTy::from_scalar(scalar, dest.layout))
interp_ok(ImmTy::from_scalar(scalar, dest.layout))
}
/// Get the pointer to the function of the specified name in the shared object file,
@ -142,7 +142,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
Some(ptr) => ptr,
None => {
// Shared object file does not export this function -- try the shims next.
return Ok(false);
return interp_ok(false);
}
};
@ -164,7 +164,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// Call the function and store output, depending on return type in the function signature.
let ret = this.call_native_with_args(link_name, dest, code_ptr, libffi_args)?;
this.write_immediate(*ret, dest)?;
Ok(true)
interp_ok(true)
}
}
@ -221,7 +221,7 @@ impl<'a> CArg {
/// Extract the scalar value from the result of reading a scalar from the machine,
/// and convert it to a `CArg`.
fn imm_to_carg<'tcx>(v: ImmTy<'tcx>, cx: &impl HasDataLayout) -> InterpResult<'tcx, CArg> {
Ok(match v.layout.ty.kind() {
interp_ok(match v.layout.ty.kind() {
// If the primitive provided can be converted to a type matching the type pattern
// then create a `CArg` of this primitive value with the corresponding `CArg` constructor.
// the ints

View File

@ -19,14 +19,14 @@ pub enum PathConversion {
#[cfg(unix)]
pub fn bytes_to_os_str<'tcx>(bytes: &[u8]) -> InterpResult<'tcx, &OsStr> {
Ok(OsStr::from_bytes(bytes))
interp_ok(OsStr::from_bytes(bytes))
}
#[cfg(not(unix))]
pub fn bytes_to_os_str<'tcx>(bytes: &[u8]) -> InterpResult<'tcx, &OsStr> {
// We cannot use `from_encoded_bytes_unchecked` here since we can't trust `bytes`.
let s = std::str::from_utf8(bytes)
.map_err(|_| err_unsup_format!("{:?} is not a valid utf-8 string", bytes))?;
Ok(OsStr::new(s))
interp_ok(OsStr::new(s))
}
impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
@ -50,13 +50,13 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
{
#[cfg(windows)]
pub fn u16vec_to_osstring<'tcx>(u16_vec: Vec<u16>) -> InterpResult<'tcx, OsString> {
Ok(OsString::from_wide(&u16_vec[..]))
interp_ok(OsString::from_wide(&u16_vec[..]))
}
#[cfg(not(windows))]
pub fn u16vec_to_osstring<'tcx>(u16_vec: Vec<u16>) -> InterpResult<'tcx, OsString> {
let s = String::from_utf16(&u16_vec[..])
.map_err(|_| err_unsup_format!("{:?} is not a valid utf-16 string", u16_vec))?;
Ok(s.into())
interp_ok(s.into())
}
let u16_vec = self.eval_context_ref().read_wide_str(ptr)?;
@ -87,7 +87,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
) -> InterpResult<'tcx, (bool, u64)> {
#[cfg(windows)]
fn os_str_to_u16vec<'tcx>(os_str: &OsStr) -> InterpResult<'tcx, Vec<u16>> {
Ok(os_str.encode_wide().collect())
interp_ok(os_str.encode_wide().collect())
}
#[cfg(not(windows))]
fn os_str_to_u16vec<'tcx>(os_str: &OsStr) -> InterpResult<'tcx, Vec<u16>> {
@ -97,7 +97,8 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
os_str
.to_str()
.map(|s| s.encode_utf16().collect())
.ok_or_else(|| err_unsup_format!("{:?} is not a valid utf-8 string", os_str).into())
.ok_or_else(|| err_unsup_format!("{:?} is not a valid utf-8 string", os_str))
.into()
}
let u16_vec = os_str_to_u16vec(os_str)?;
@ -109,7 +110,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
self.eval_context_mut().write_wide_str(truncated_data, ptr, size)?;
assert!(written && written_len == size);
}
Ok((written, size_needed))
interp_ok((written, size_needed))
}
/// Helper function to write an OsStr as a 0x0000-terminated u16-sequence, which is what the
@ -148,7 +149,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let arg_place = this.allocate(this.layout_of(arg_type).unwrap(), memkind)?;
let (written, _) = self.write_os_str_to_c_str(os_str, arg_place.ptr(), size).unwrap();
assert!(written);
Ok(arg_place.ptr())
interp_ok(arg_place.ptr())
}
/// Allocate enough memory to store the given `OsStr` as a null-terminated sequence of `u16`.
@ -164,7 +165,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let arg_place = this.allocate(this.layout_of(arg_type).unwrap(), memkind)?;
let (written, _) = self.write_os_str_to_wide_str(os_str, arg_place.ptr(), size).unwrap();
assert!(written);
Ok(arg_place.ptr())
interp_ok(arg_place.ptr())
}
/// Read a null-terminated sequence of bytes, and perform path separator conversion if needed.
@ -175,7 +176,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let this = self.eval_context_ref();
let os_str = this.read_os_str_from_c_str(ptr)?;
Ok(match this.convert_path(Cow::Borrowed(os_str), PathConversion::TargetToHost) {
interp_ok(match this.convert_path(Cow::Borrowed(os_str), PathConversion::TargetToHost) {
Cow::Borrowed(x) => Cow::Borrowed(Path::new(x)),
Cow::Owned(y) => Cow::Owned(PathBuf::from(y)),
})
@ -186,7 +187,9 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let this = self.eval_context_ref();
let os_str = this.read_os_str_from_wide_str(ptr)?;
Ok(this.convert_path(Cow::Owned(os_str), PathConversion::TargetToHost).into_owned().into())
interp_ok(
this.convert_path(Cow::Owned(os_str), PathConversion::TargetToHost).into_owned().into(),
)
}
/// Write a Path to the machine memory (as a null-terminated sequence of bytes),

View File

@ -54,7 +54,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let thread = this.active_thread_mut();
thread.panic_payloads.push(payload);
Ok(())
interp_ok(())
}
/// Handles the `try` intrinsic, the underlying implementation of `std::panicking::try`.
@ -106,7 +106,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
Some(CatchUnwindData { catch_fn, data, dest: dest.clone(), ret });
}
Ok(())
interp_ok(())
}
fn handle_stack_pop_unwind(
@ -150,9 +150,9 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
)?;
// We pushed a new stack frame, the engine should not do any jumping now!
Ok(ReturnAction::NoJump)
interp_ok(ReturnAction::NoJump)
} else {
Ok(ReturnAction::Normal)
interp_ok(ReturnAction::Normal)
}
}
@ -254,6 +254,6 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
})?;
}
}
Ok(())
interp_ok(())
}
}

View File

@ -11,7 +11,7 @@ use crate::*;
/// Returns the time elapsed between the provided time and the unix epoch as a `Duration`.
pub fn system_time_to_duration<'tcx>(time: &SystemTime) -> InterpResult<'tcx, Duration> {
time.duration_since(SystemTime::UNIX_EPOCH)
.map_err(|_| err_unsup_format!("times before the Unix epoch are not supported").into())
.map_err(|_| err_unsup_format!("times before the Unix epoch are not supported")).into()
}
impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
@ -82,7 +82,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
} else {
let einval = this.eval_libc("EINVAL");
this.set_last_error(einval)?;
return Ok(Scalar::from_i32(-1));
return interp_ok(Scalar::from_i32(-1));
};
let tv_sec = duration.as_secs();
@ -90,7 +90,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.write_int_fields(&[tv_sec.into(), tv_nsec.into()], &tp)?;
Ok(Scalar::from_i32(0))
interp_ok(Scalar::from_i32(0))
}
fn gettimeofday(
@ -110,7 +110,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
if !this.ptr_is_null(tz)? {
let einval = this.eval_libc("EINVAL");
this.set_last_error(einval)?;
return Ok(Scalar::from_i32(-1));
return interp_ok(Scalar::from_i32(-1));
}
let duration = system_time_to_duration(&SystemTime::now())?;
@ -119,7 +119,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.write_int_fields(&[tv_sec.into(), tv_usec.into()], &tv)?;
Ok(Scalar::from_i32(0))
interp_ok(Scalar::from_i32(0))
}
// The localtime() function shall convert the time in seconds since the Epoch pointed to by
@ -206,7 +206,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.write_pointer(tm_zone_ptr, &this.project_field_named(&result, "tm_zone")?)?;
this.write_int_fields_named(&[("tm_gmtoff", tm_gmtoff.into())], &result)?;
}
Ok(result.ptr())
interp_ok(result.ptr())
}
#[allow(non_snake_case, clippy::arithmetic_side_effects)]
fn GetSystemTimeAsFileTime(
@ -236,7 +236,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let dwHighDateTime = u32::try_from((duration_ticks & 0xFFFFFFFF00000000) >> 32).unwrap();
this.write_int_fields(&[dwLowDateTime.into(), dwHighDateTime.into()], &filetime)?;
Ok(())
interp_ok(())
}
#[allow(non_snake_case)]
@ -255,7 +255,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
err_unsup_format!("programs running longer than 2^63 nanoseconds are not supported")
})?;
this.write_scalar(Scalar::from_i64(qpc), &this.deref_pointer(lpPerformanceCount_op)?)?;
Ok(Scalar::from_i32(-1)) // return non-zero on success
interp_ok(Scalar::from_i32(-1)) // return non-zero on success
}
#[allow(non_snake_case)]
@ -276,7 +276,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
Scalar::from_i64(1_000_000_000),
&this.deref_pointer_as(lpFrequency_op, this.machine.layouts.u64)?,
)?;
Ok(Scalar::from_i32(-1)) // Return non-zero on success
interp_ok(Scalar::from_i32(-1)) // Return non-zero on success
}
fn mach_absolute_time(&self) -> InterpResult<'tcx, Scalar> {
@ -290,7 +290,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let res = u64::try_from(duration.as_nanos()).map_err(|_| {
err_unsup_format!("programs running longer than 2^64 nanoseconds are not supported")
})?;
Ok(Scalar::from_u64(res))
interp_ok(Scalar::from_u64(res))
}
fn mach_timebase_info(&mut self, info_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
@ -305,7 +305,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let (numer, denom) = (1, 1);
this.write_int_fields(&[numer.into(), denom.into()], &info)?;
Ok(Scalar::from_i32(0)) // KERN_SUCCESS
interp_ok(Scalar::from_i32(0)) // KERN_SUCCESS
}
fn nanosleep(
@ -324,7 +324,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
None => {
let einval = this.eval_libc("EINVAL");
this.set_last_error(einval)?;
return Ok(Scalar::from_i32(-1));
return interp_ok(Scalar::from_i32(-1));
}
};
@ -334,10 +334,10 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
callback!(
@capture<'tcx> {}
@unblock = |_this| { panic!("sleeping thread unblocked before time is up") }
@timeout = |_this| { Ok(()) }
@timeout = |_this| { interp_ok(()) }
),
);
Ok(Scalar::from_i32(0))
interp_ok(Scalar::from_i32(0))
}
#[allow(non_snake_case)]
@ -356,9 +356,9 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
callback!(
@capture<'tcx> {}
@unblock = |_this| { panic!("sleeping thread unblocked before time is up") }
@timeout = |_this| { Ok(()) }
@timeout = |_this| { interp_ok(()) }
),
);
Ok(())
interp_ok(())
}
}

View File

@ -68,14 +68,14 @@ impl<'tcx> TlsData<'tcx> {
if max_size.bits() < 128 && new_key >= (1u128 << max_size.bits()) {
throw_unsup_format!("we ran out of TLS key space");
}
Ok(new_key)
interp_ok(new_key)
}
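
As a side note, the key-space check above can be read as follows; a standalone model (hypothetical name, a plain bool instead of Miri's error machinery):

// A freshly allocated key must fit in the target's key width; for widths of
// 128 bits or more every u128 value fits.
fn key_fits(new_key: u128, key_bits: u32) -> bool {
    key_bits >= 128 || new_key < (1u128 << key_bits)
}

fn main() {
    assert!(key_fits(0, 32));
    assert!(!key_fits(1u128 << 32, 32)); // 2^32 does not fit in a 32-bit key space
}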
pub fn delete_tls_key(&mut self, key: TlsKey) -> InterpResult<'tcx> {
match self.keys.remove(&key) {
Some(_) => {
trace!("TLS key {} removed", key);
Ok(())
interp_ok(())
}
None => throw_ub_format!("removing a nonexistent TLS key: {}", key),
}
@ -91,7 +91,7 @@ impl<'tcx> TlsData<'tcx> {
Some(TlsEntry { data, .. }) => {
let value = data.get(&thread_id).copied();
trace!("TLS key {} for thread {:?} loaded: {:?}", key, thread_id, value);
Ok(value.unwrap_or_else(|| Scalar::null_ptr(cx)))
interp_ok(value.unwrap_or_else(|| Scalar::null_ptr(cx)))
}
None => throw_ub_format!("loading from a non-existing TLS key: {}", key),
}
@ -113,7 +113,7 @@ impl<'tcx> TlsData<'tcx> {
trace!("TLS key {} for thread {:?} removed", key, thread_id);
data.remove(&thread_id);
}
Ok(())
interp_ok(())
}
None => throw_ub_format!("storing to a non-existing TLS key: {}", key),
}
@ -128,7 +128,7 @@ impl<'tcx> TlsData<'tcx> {
data: Scalar,
) -> InterpResult<'tcx> {
self.macos_thread_dtors.entry(thread).or_default().push((dtor, data));
Ok(())
interp_ok(())
}
/// Returns a dtor, its argument and its index, if one is supposed to run.
@ -261,7 +261,7 @@ impl<'tcx> TlsDtorsState<'tcx> {
}
MacOsDtors => {
match this.schedule_macos_tls_dtor()? {
Poll::Pending => return Ok(Poll::Pending),
Poll::Pending => return interp_ok(Poll::Pending),
// After all macOS destructors are run, the system switches
// to destroying the pthread destructors.
Poll::Ready(()) => break 'new_state PthreadDtors(Default::default()),
@ -269,14 +269,14 @@ impl<'tcx> TlsDtorsState<'tcx> {
}
PthreadDtors(state) => {
match this.schedule_next_pthread_tls_dtor(state)? {
Poll::Pending => return Ok(Poll::Pending), // just keep going
Poll::Pending => return interp_ok(Poll::Pending), // just keep going
Poll::Ready(()) => break 'new_state Done,
}
}
WindowsDtors(dtors) => {
if let Some(dtor) = dtors.pop() {
this.schedule_windows_tls_dtor(dtor)?;
return Ok(Poll::Pending); // we stay in this state (but `dtors` got shorter)
return interp_ok(Poll::Pending); // we stay in this state (but `dtors` got shorter)
} else {
// No more destructors to run.
break 'new_state Done;
@ -284,13 +284,13 @@ impl<'tcx> TlsDtorsState<'tcx> {
}
Done => {
this.machine.tls.delete_all_thread_tls(this.active_thread());
return Ok(Poll::Ready(()));
return interp_ok(Poll::Ready(()));
}
}
};
self.0 = new_state;
Ok(Poll::Pending)
interp_ok(Poll::Pending)
}
}
@ -303,7 +303,7 @@ trait EvalContextPrivExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// Windows has a special magic linker section that is run on certain events.
// We don't support most of that, but just enough to make thread-local dtors in `std` work.
Ok(this.lookup_link_section(".CRT$XLB")?)
interp_ok(this.lookup_link_section(".CRT$XLB")?)
}
fn schedule_windows_tls_dtor(&mut self, dtor: ImmTy<'tcx>) -> InterpResult<'tcx> {
@ -328,7 +328,7 @@ trait EvalContextPrivExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
None,
StackPopCleanup::Root { cleanup: true },
)?;
Ok(())
interp_ok(())
}
/// Schedule the macOS thread local storage destructors to be executed.
@ -350,10 +350,10 @@ trait EvalContextPrivExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
StackPopCleanup::Root { cleanup: true },
)?;
return Ok(Poll::Pending);
return interp_ok(Poll::Pending);
}
Ok(Poll::Ready(()))
interp_ok(Poll::Ready(()))
}
/// Schedule a pthread TLS destructor. Returns `true` if found
@ -387,9 +387,9 @@ trait EvalContextPrivExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
StackPopCleanup::Root { cleanup: true },
)?;
return Ok(Poll::Pending);
return interp_ok(Poll::Pending);
}
Ok(Poll::Ready(()))
interp_ok(Poll::Ready(()))
}
}
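
The TLS destructor driver above runs at most one destructor per call and reports `Poll::Pending` until everything has been drained. A hypothetical standalone model of that shape (plain function pointers instead of interpreter frames), not Miri's actual types:

use std::task::Poll;

// Each call to `step` makes a little progress and reports Pending until done.
enum DtorState {
    Remaining(Vec<fn()>),
    Done,
}

impl DtorState {
    fn step(&mut self) -> Poll<()> {
        match self {
            DtorState::Remaining(dtors) => {
                if let Some(dtor) = dtors.pop() {
                    dtor();
                    // We stay in this state, but `dtors` got shorter.
                    Poll::Pending
                } else {
                    *self = DtorState::Done;
                    Poll::Ready(())
                }
            }
            DtorState::Done => Poll::Ready(()),
        }
    }
}

fn dtor_one() { println!("dtor 1"); }
fn dtor_two() { println!("dtor 2"); }

fn main() {
    let dtors: Vec<fn()> = vec![dtor_one, dtor_two];
    let mut state = DtorState::Remaining(dtors);
    while state.step().is_pending() {}
}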

View File

@ -25,8 +25,8 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.write_scalar(errno_place.to_ref(this).to_scalar(), dest)?;
}
_ => return Ok(EmulateItemResult::NotSupported),
_ => return interp_ok(EmulateItemResult::NotSupported),
}
Ok(EmulateItemResult::NeedsReturn)
interp_ok(EmulateItemResult::NeedsReturn)
}
}

View File

@ -47,7 +47,7 @@ impl<'tcx> UnixEnvVars<'tcx> {
let environ_block = alloc_environ_block(ecx, env_vars_machine.values().copied().collect())?;
ecx.write_pointer(environ_block, &environ)?;
Ok(UnixEnvVars { map: env_vars_machine, environ })
interp_ok(UnixEnvVars { map: env_vars_machine, environ })
}
pub(crate) fn cleanup(ecx: &mut InterpCx<'tcx, MiriMachine<'tcx>>) -> InterpResult<'tcx> {
@ -61,7 +61,7 @@ impl<'tcx> UnixEnvVars<'tcx> {
let old_vars_ptr = ecx.read_pointer(environ)?;
ecx.deallocate_ptr(old_vars_ptr, None, MiriMemoryKind::Runtime.into())?;
Ok(())
interp_ok(())
}
pub(crate) fn environ(&self) -> Pointer {
@ -77,14 +77,14 @@ impl<'tcx> UnixEnvVars<'tcx> {
// but we do want to do this read so it shows up as a data race.
let _vars_ptr = ecx.read_pointer(&self.environ)?;
let Some(var_ptr) = self.map.get(name) else {
return Ok(None);
return interp_ok(None);
};
// The offset is used to strip the "{name}=" part of the string.
let var_ptr = var_ptr.wrapping_offset(
Size::from_bytes(u64::try_from(name.len()).unwrap().strict_add(1)),
ecx,
);
Ok(Some(var_ptr))
interp_ok(Some(var_ptr))
}
/// Implementation detail for [`InterpCx::get_env_var`]. This basically does `getenv`, complete
@ -97,9 +97,9 @@ impl<'tcx> UnixEnvVars<'tcx> {
let var_ptr = self.get_ptr(ecx, name)?;
if let Some(ptr) = var_ptr {
let var = ecx.read_os_str_from_c_str(ptr)?;
Ok(Some(var.to_owned()))
interp_ok(Some(var.to_owned()))
} else {
Ok(None)
interp_ok(None)
}
}
}
@ -133,7 +133,7 @@ fn alloc_environ_block<'tcx>(
let place = ecx.project_field(&vars_place, idx)?;
ecx.write_pointer(var, &place)?;
}
Ok(vars_place.ptr())
interp_ok(vars_place.ptr())
}
impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
@ -146,7 +146,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let name = this.read_os_str_from_c_str(name_ptr)?;
let var_ptr = this.machine.env_vars.unix().get_ptr(this, name)?;
Ok(var_ptr.unwrap_or_else(Pointer::null))
interp_ok(var_ptr.unwrap_or_else(Pointer::null))
}
fn setenv(
@ -174,12 +174,12 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.deallocate_ptr(var, None, MiriMemoryKind::Runtime.into())?;
}
this.update_environ()?;
Ok(Scalar::from_i32(0)) // return zero on success
interp_ok(Scalar::from_i32(0)) // return zero on success
} else {
// name argument is a null pointer, points to an empty string, or points to a string containing an '=' character.
let einval = this.eval_libc("EINVAL");
this.set_last_error(einval)?;
Ok(Scalar::from_i32(-1))
interp_ok(Scalar::from_i32(-1))
}
}
@ -200,12 +200,12 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.deallocate_ptr(var, None, MiriMemoryKind::Runtime.into())?;
}
this.update_environ()?;
Ok(Scalar::from_i32(0))
interp_ok(Scalar::from_i32(0))
} else {
// name argument is a null pointer, points to an empty string, or points to a string containing an '=' character.
let einval = this.eval_libc("EINVAL");
this.set_last_error(einval)?;
Ok(Scalar::from_i32(-1))
interp_ok(Scalar::from_i32(-1))
}
}
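
The `setenv`/`unsetenv` shims above follow the usual libc convention: return 0 on success, or record the emulated errno and return -1 on failure. A minimal standalone model of that convention (hypothetical names, a plain struct standing in for Miri's machine state):

struct Machine {
    last_error: i32, // stands in for the emulated errno
}

const EINVAL: i32 = 22; // illustrative value (Linux)

fn setenv_model(machine: &mut Machine, name: &str) -> i32 {
    // Mirror the check in the shim: reject empty names or names containing '='.
    if name.is_empty() || name.contains('=') {
        machine.last_error = EINVAL;
        return -1;
    }
    // ... the real shim updates the environment map and the `environ` block here ...
    0
}

fn main() {
    let mut machine = Machine { last_error: 0 };
    assert_eq!(setenv_model(&mut machine, "KEY"), 0);
    assert_eq!(setenv_model(&mut machine, "BAD=NAME"), -1);
    assert_eq!(machine.last_error, EINVAL);
}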
@ -219,14 +219,14 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
if let IsolatedOp::Reject(reject_with) = this.machine.isolated_op {
this.reject_in_isolation("`getcwd`", reject_with)?;
this.set_last_error_from_io_error(ErrorKind::PermissionDenied.into())?;
return Ok(Pointer::null());
return interp_ok(Pointer::null());
}
// If we cannot get the current directory, we return null
match env::current_dir() {
Ok(cwd) => {
if this.write_path_to_c_str(&cwd, buf, size)?.0 {
return Ok(buf);
return interp_ok(buf);
}
let erange = this.eval_libc("ERANGE");
this.set_last_error(erange)?;
@ -234,7 +234,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
Err(e) => this.set_last_error_from_io_error(e)?,
}
Ok(Pointer::null())
interp_ok(Pointer::null())
}
fn chdir(&mut self, path_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
@ -247,11 +247,11 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.reject_in_isolation("`chdir`", reject_with)?;
this.set_last_error_from_io_error(ErrorKind::PermissionDenied.into())?;
return Ok(Scalar::from_i32(-1));
return interp_ok(Scalar::from_i32(-1));
}
let result = env::set_current_dir(path).map(|()| 0);
Ok(Scalar::from_i32(this.try_unwrap_io_result(result)?))
interp_ok(Scalar::from_i32(this.try_unwrap_io_result(result)?))
}
/// Updates the `environ` static.
@ -267,7 +267,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let environ_block = alloc_environ_block(this, vals)?;
this.write_pointer(environ_block, &environ)?;
Ok(())
interp_ok(())
}
fn getpid(&mut self) -> InterpResult<'tcx, Scalar> {
@ -278,7 +278,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// `libc::getpid` returns an i32; however, `std::process::id()` returns a u32.
// So we undo the conversion that stdlib does and turn it back into an i32.
// In `Scalar` representation, these are the same, so we don't need to do anything else.
Ok(Scalar::from_u32(this.get_pid()))
interp_ok(Scalar::from_u32(this.get_pid()))
}
fn linux_gettid(&mut self) -> InterpResult<'tcx, Scalar> {
@ -290,6 +290,6 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// Compute a TID for this thread, ensuring that the main thread has PID == TID.
let tid = this.get_pid().strict_add(index);
Ok(Scalar::from_u32(tid))
interp_ok(Scalar::from_u32(tid))
}
}

View File

@ -277,7 +277,7 @@ impl FileDescriptionRef {
fd.file_description.close(communicate_allowed, ecx)
}
None => Ok(Ok(())),
None => interp_ok(Ok(())),
}
}
@ -414,16 +414,16 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let this = self.eval_context_mut();
let Some(fd) = this.machine.fds.get(old_fd_num) else {
return Ok(Scalar::from_i32(this.fd_not_found()?));
return interp_ok(Scalar::from_i32(this.fd_not_found()?));
};
Ok(Scalar::from_i32(this.machine.fds.insert(fd)))
interp_ok(Scalar::from_i32(this.machine.fds.insert(fd)))
}
fn dup2(&mut self, old_fd_num: i32, new_fd_num: i32) -> InterpResult<'tcx, Scalar> {
let this = self.eval_context_mut();
let Some(fd) = this.machine.fds.get(old_fd_num) else {
return Ok(Scalar::from_i32(this.fd_not_found()?));
return interp_ok(Scalar::from_i32(this.fd_not_found()?));
};
if new_fd_num != old_fd_num {
// Close new_fd if it was previously opened.
@ -433,13 +433,13 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
old_new_fd.close(this.machine.communicate(), this)?.ok();
}
}
Ok(Scalar::from_i32(new_fd_num))
interp_ok(Scalar::from_i32(new_fd_num))
}
fn flock(&mut self, fd_num: i32, op: i32) -> InterpResult<'tcx, Scalar> {
let this = self.eval_context_mut();
let Some(fd) = this.machine.fds.get(fd_num) else {
return Ok(Scalar::from_i32(this.fd_not_found()?));
return interp_ok(Scalar::from_i32(this.fd_not_found()?));
};
// We need to check that there aren't unsupported options in `op`.
@ -467,7 +467,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
drop(fd);
// return `0` if flock is successful
let result = result.map(|()| 0i32);
Ok(Scalar::from_i32(this.try_unwrap_io_result(result)?))
interp_ok(Scalar::from_i32(this.try_unwrap_io_result(result)?))
}
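
Most of these fd shims funnel a host `io::Result` through `try_unwrap_io_result` to obtain a C-style return code. A standalone sketch of that mapping (hypothetical function, std types only):

use std::io;

// Ok becomes the success code, Err records the OS error and becomes -1.
fn unwrap_io_result(result: io::Result<()>, last_error: &mut Option<i32>) -> i32 {
    match result.map(|()| 0i32) {
        Ok(code) => code,
        Err(e) => {
            *last_error = e.raw_os_error();
            -1
        }
    }
}

fn main() {
    let mut errno = None;
    assert_eq!(unwrap_io_result(Ok(()), &mut errno), 0);
    let err = io::Error::from_raw_os_error(9); // EBADF on most Unixes
    assert_eq!(unwrap_io_result(Err(err), &mut errno), -1);
    assert_eq!(errno, Some(9));
}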
fn fcntl(&mut self, args: &[OpTy<'tcx>]) -> InterpResult<'tcx, Scalar> {
@ -488,7 +488,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// `FD_CLOEXEC` value without checking if the flag is set for the file because `std`
// always sets this flag when opening a file. However we still need to check that the
// file itself is open.
Ok(Scalar::from_i32(if this.machine.fds.is_fd_num(fd_num) {
interp_ok(Scalar::from_i32(if this.machine.fds.is_fd_num(fd_num) {
this.eval_libc_i32("FD_CLOEXEC")
} else {
this.fd_not_found()?
@ -509,15 +509,15 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let start = this.read_scalar(&args[2])?.to_i32()?;
match this.machine.fds.get(fd_num) {
Some(fd) => Ok(Scalar::from_i32(this.machine.fds.insert_with_min_num(fd, start))),
None => Ok(Scalar::from_i32(this.fd_not_found()?)),
Some(fd) => interp_ok(Scalar::from_i32(this.machine.fds.insert_with_min_num(fd, start))),
None => interp_ok(Scalar::from_i32(this.fd_not_found()?)),
}
} else if this.tcx.sess.target.os == "macos" && cmd == this.eval_libc_i32("F_FULLFSYNC") {
// Reject if isolation is enabled.
if let IsolatedOp::Reject(reject_with) = this.machine.isolated_op {
this.reject_in_isolation("`fcntl`", reject_with)?;
this.set_last_error_from_io_error(ErrorKind::PermissionDenied.into())?;
return Ok(Scalar::from_i32(-1));
return interp_ok(Scalar::from_i32(-1));
}
this.ffullsync_fd(fd_num)
@ -532,12 +532,12 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let fd_num = this.read_scalar(fd_op)?.to_i32()?;
let Some(fd) = this.machine.fds.remove(fd_num) else {
return Ok(Scalar::from_i32(this.fd_not_found()?));
return interp_ok(Scalar::from_i32(this.fd_not_found()?));
};
let result = fd.close(this.machine.communicate(), this)?;
// return `0` if close is successful
let result = result.map(|()| 0i32);
Ok(Scalar::from_i32(this.try_unwrap_io_result(result)?))
interp_ok(Scalar::from_i32(this.try_unwrap_io_result(result)?))
}
/// Function used when a file descriptor does not exist. It returns `Ok(-1)` and sets
@ -548,7 +548,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let this = self.eval_context_mut();
let ebadf = this.eval_libc("EBADF");
this.set_last_error(ebadf)?;
Ok((-1).into())
interp_ok((-1).into())
}
/// Read data from `fd` into buffer specified by `buf` and `count`.
@ -586,7 +586,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
trace!("read: FD not found");
let res: i32 = this.fd_not_found()?;
this.write_int(res, dest)?;
return Ok(());
return interp_ok(());
};
trace!("read: FD mapped to {fd:?}");
@ -601,12 +601,12 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let einval = this.eval_libc("EINVAL");
this.set_last_error(einval)?;
this.write_int(-1, dest)?;
return Ok(());
return interp_ok(());
};
fd.pread(communicate, offset, buf, count, dest, this)?
}
};
Ok(())
interp_ok(())
}
fn write(
@ -636,7 +636,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let Some(fd) = this.machine.fds.get(fd_num) else {
let res: i32 = this.fd_not_found()?;
this.write_int(res, dest)?;
return Ok(());
return interp_ok(());
};
match offset {
@ -646,12 +646,12 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let einval = this.eval_libc("EINVAL");
this.set_last_error(einval)?;
this.write_int(-1, dest)?;
return Ok(());
return interp_ok(());
};
fd.pwrite(communicate, buf, count, offset, dest, this)?
}
};
Ok(())
interp_ok(())
}
/// Helper to implement `FileDescription::read`:
@ -675,12 +675,12 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.write_bytes_ptr(buf, bytes[..read_bytes].iter().copied())?;
// The actual read size is always less than what got originally requested so this cannot fail.
this.write_int(u64::try_from(read_bytes).unwrap(), dest)?;
Ok(())
interp_ok(())
}
Err(e) => {
this.set_last_error_from_io_error(e)?;
this.write_int(-1, dest)?;
Ok(())
interp_ok(())
}
}
}
@ -695,6 +695,6 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let this = self.eval_context_mut();
let result = this.try_unwrap_io_result(result.map(|c| i64::try_from(c).unwrap()))?;
this.write_int(result, dest)?;
Ok(())
interp_ok(())
}
}

View File

@ -828,7 +828,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// This function looks and behaves exactly like miri_start_unwind.
let [payload] = this.check_shim(abi, Abi::C { unwind: true }, link_name, args)?;
this.handle_miri_start_unwind(payload)?;
return Ok(EmulateItemResult::NeedsUnwind);
return interp_ok(EmulateItemResult::NeedsUnwind);
}
"getuid" => {
let [] = this.check_shim(abi, Abi::C { unwind: false }, link_name, args)?;
@ -944,11 +944,11 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
"linux" => linux::EvalContextExt::emulate_foreign_item_inner(this, link_name, abi, args, dest),
"macos" => macos::EvalContextExt::emulate_foreign_item_inner(this, link_name, abi, args, dest),
"solaris" | "illumos" => solarish::EvalContextExt::emulate_foreign_item_inner(this, link_name, abi, args, dest),
_ => Ok(EmulateItemResult::NotSupported),
_ => interp_ok(EmulateItemResult::NotSupported),
};
}
};
Ok(EmulateItemResult::NeedsReturn)
interp_ok(EmulateItemResult::NeedsReturn)
}
}

View File

@ -84,8 +84,8 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.write_null(dest)?;
}
_ => return Ok(EmulateItemResult::NotSupported),
_ => return interp_ok(EmulateItemResult::NotSupported),
}
Ok(EmulateItemResult::NeedsReturn)
interp_ok(EmulateItemResult::NeedsReturn)
}
}

View File

@ -121,7 +121,7 @@ impl FileDescription for FileHandle {
offset: SeekFrom,
) -> InterpResult<'tcx, io::Result<u64>> {
assert!(communicate_allowed, "isolation should have prevented even opening a file");
Ok((&mut &self.file).seek(offset))
interp_ok((&mut &self.file).seek(offset))
}
fn close<'tcx>(
@ -136,7 +136,7 @@ impl FileDescription for FileHandle {
// to handle possible errors correctly.
let result = self.file.sync_all();
// Now we actually close the file and return the result.
Ok(result)
interp_ok(result)
} else {
// We drop the file, this closes it but ignores any errors
// produced when closing it. This is done because
@ -144,7 +144,7 @@ impl FileDescription for FileHandle {
// `/dev/urandom` which are read-only. Check
// https://github.com/rust-lang/miri/issues/999#issuecomment-568920439
// for a deeper discussion.
Ok(Ok(()))
interp_ok(Ok(()))
}
}
@ -179,7 +179,7 @@ impl FileDescription for FileHandle {
}
ret => panic!("Unexpected return value from flock: {ret}"),
};
Ok(res)
interp_ok(res)
}
#[cfg(target_family = "windows")]
@ -231,7 +231,7 @@ impl FileDescription for FileHandle {
}
_ => panic!("Unexpected return value: {ret}"),
};
Ok(res)
interp_ok(res)
}
#[cfg(not(any(target_family = "unix", target_family = "windows")))]
@ -289,7 +289,7 @@ trait EvalContextExtPrivate<'tcx>: crate::MiriInterpCxExt<'tcx> {
&buf,
)?;
Ok(0)
interp_ok(0)
}
fn file_type_to_d_type(
@ -303,26 +303,30 @@ trait EvalContextExtPrivate<'tcx>: crate::MiriInterpCxExt<'tcx> {
match file_type {
Ok(file_type) => {
match () {
_ if file_type.is_dir() => Ok(this.eval_libc("DT_DIR").to_u8()?.into()),
_ if file_type.is_file() => Ok(this.eval_libc("DT_REG").to_u8()?.into()),
_ if file_type.is_symlink() => Ok(this.eval_libc("DT_LNK").to_u8()?.into()),
_ if file_type.is_dir() => interp_ok(this.eval_libc("DT_DIR").to_u8()?.into()),
_ if file_type.is_file() => interp_ok(this.eval_libc("DT_REG").to_u8()?.into()),
_ if file_type.is_symlink() =>
interp_ok(this.eval_libc("DT_LNK").to_u8()?.into()),
// Certain file types are only supported when the host is a Unix system.
#[cfg(unix)]
_ if file_type.is_block_device() =>
Ok(this.eval_libc("DT_BLK").to_u8()?.into()),
interp_ok(this.eval_libc("DT_BLK").to_u8()?.into()),
#[cfg(unix)]
_ if file_type.is_char_device() => Ok(this.eval_libc("DT_CHR").to_u8()?.into()),
_ if file_type.is_char_device() =>
interp_ok(this.eval_libc("DT_CHR").to_u8()?.into()),
#[cfg(unix)]
_ if file_type.is_fifo() => Ok(this.eval_libc("DT_FIFO").to_u8()?.into()),
_ if file_type.is_fifo() =>
interp_ok(this.eval_libc("DT_FIFO").to_u8()?.into()),
#[cfg(unix)]
_ if file_type.is_socket() => Ok(this.eval_libc("DT_SOCK").to_u8()?.into()),
_ if file_type.is_socket() =>
interp_ok(this.eval_libc("DT_SOCK").to_u8()?.into()),
// Fallback
_ => Ok(this.eval_libc("DT_UNKNOWN").to_u8()?.into()),
_ => interp_ok(this.eval_libc("DT_UNKNOWN").to_u8()?.into()),
}
}
Err(e) =>
match e.raw_os_error() {
Some(error) => Ok(error),
Some(error) => interp_ok(error),
None =>
throw_unsup_format!(
"the error {} couldn't be converted to a return value",
@ -524,7 +528,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// if the flag contains `O_TMPFILE` then we return a graceful error
let eopnotsupp = this.eval_libc("EOPNOTSUPP");
this.set_last_error(eopnotsupp)?;
return Ok(Scalar::from_i32(-1));
return interp_ok(Scalar::from_i32(-1));
}
}
@ -545,7 +549,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
if path.is_symlink() {
let eloop = this.eval_libc("ELOOP");
this.set_last_error(eloop)?;
return Ok(Scalar::from_i32(-1));
return interp_ok(Scalar::from_i32(-1));
}
}
mirror |= o_nofollow;
@ -561,14 +565,14 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
if let IsolatedOp::Reject(reject_with) = this.machine.isolated_op {
this.reject_in_isolation("`open`", reject_with)?;
this.set_last_error_from_io_error(ErrorKind::PermissionDenied.into())?;
return Ok(Scalar::from_i32(-1));
return interp_ok(Scalar::from_i32(-1));
}
let fd = options
.open(path)
.map(|file| this.machine.fds.insert_new(FileHandle { file, writable }));
Ok(Scalar::from_i32(this.try_unwrap_io_result(fd)?))
interp_ok(Scalar::from_i32(this.try_unwrap_io_result(fd)?))
}
fn lseek64(&mut self, fd_num: i32, offset: i128, whence: i32) -> InterpResult<'tcx, Scalar> {
@ -581,7 +585,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// Negative offsets return `EINVAL`.
let einval = this.eval_libc("EINVAL");
this.set_last_error(einval)?;
return Ok(Scalar::from_i64(-1));
return interp_ok(Scalar::from_i64(-1));
} else {
SeekFrom::Start(u64::try_from(offset).unwrap())
}
@ -592,19 +596,19 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
} else {
let einval = this.eval_libc("EINVAL");
this.set_last_error(einval)?;
return Ok(Scalar::from_i64(-1));
return interp_ok(Scalar::from_i64(-1));
};
let communicate = this.machine.communicate();
let Some(fd) = this.machine.fds.get(fd_num) else {
return Ok(Scalar::from_i64(this.fd_not_found()?));
return interp_ok(Scalar::from_i64(this.fd_not_found()?));
};
let result = fd.seek(communicate, seek_from)?.map(|offset| i64::try_from(offset).unwrap());
drop(fd);
let result = this.try_unwrap_io_result(result)?;
Ok(Scalar::from_i64(result))
interp_ok(Scalar::from_i64(result))
}
fn unlink(&mut self, path_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
@ -616,11 +620,11 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
if let IsolatedOp::Reject(reject_with) = this.machine.isolated_op {
this.reject_in_isolation("`unlink`", reject_with)?;
this.set_last_error_from_io_error(ErrorKind::PermissionDenied.into())?;
return Ok(Scalar::from_i32(-1));
return interp_ok(Scalar::from_i32(-1));
}
let result = remove_file(path).map(|_| 0);
Ok(Scalar::from_i32(this.try_unwrap_io_result(result)?))
interp_ok(Scalar::from_i32(this.try_unwrap_io_result(result)?))
}
fn symlink(
@ -647,11 +651,11 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
if let IsolatedOp::Reject(reject_with) = this.machine.isolated_op {
this.reject_in_isolation("`symlink`", reject_with)?;
this.set_last_error_from_io_error(ErrorKind::PermissionDenied.into())?;
return Ok(Scalar::from_i32(-1));
return interp_ok(Scalar::from_i32(-1));
}
let result = create_link(&target, &linkpath).map(|_| 0);
Ok(Scalar::from_i32(this.try_unwrap_io_result(result)?))
interp_ok(Scalar::from_i32(this.try_unwrap_io_result(result)?))
}
fn macos_fbsd_stat(
@ -673,16 +677,16 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.reject_in_isolation("`stat`", reject_with)?;
let eacc = this.eval_libc("EACCES");
this.set_last_error(eacc)?;
return Ok(Scalar::from_i32(-1));
return interp_ok(Scalar::from_i32(-1));
}
// `stat` always follows symlinks.
let metadata = match FileMetadata::from_path(this, &path, true)? {
Some(metadata) => metadata,
None => return Ok(Scalar::from_i32(-1)), // `FileMetadata` has set errno
None => return interp_ok(Scalar::from_i32(-1)), // `FileMetadata` has set errno
};
Ok(Scalar::from_i32(this.macos_stat_write_buf(metadata, buf_op)?))
interp_ok(Scalar::from_i32(this.macos_stat_write_buf(metadata, buf_op)?))
}
// `lstat` is used to get symlink metadata.
@ -705,15 +709,15 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.reject_in_isolation("`lstat`", reject_with)?;
let eacc = this.eval_libc("EACCES");
this.set_last_error(eacc)?;
return Ok(Scalar::from_i32(-1));
return interp_ok(Scalar::from_i32(-1));
}
let metadata = match FileMetadata::from_path(this, &path, false)? {
Some(metadata) => metadata,
None => return Ok(Scalar::from_i32(-1)), // `FileMetadata` has set errno
None => return interp_ok(Scalar::from_i32(-1)), // `FileMetadata` has set errno
};
Ok(Scalar::from_i32(this.macos_stat_write_buf(metadata, buf_op)?))
interp_ok(Scalar::from_i32(this.macos_stat_write_buf(metadata, buf_op)?))
}
fn macos_fbsd_fstat(
@ -733,14 +737,14 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
if let IsolatedOp::Reject(reject_with) = this.machine.isolated_op {
this.reject_in_isolation("`fstat`", reject_with)?;
// Set error code as "EBADF" (bad fd)
return Ok(Scalar::from_i32(this.fd_not_found()?));
return interp_ok(Scalar::from_i32(this.fd_not_found()?));
}
let metadata = match FileMetadata::from_fd_num(this, fd)? {
Some(metadata) => metadata,
None => return Ok(Scalar::from_i32(-1)),
None => return interp_ok(Scalar::from_i32(-1)),
};
Ok(Scalar::from_i32(this.macos_stat_write_buf(metadata, buf_op)?))
interp_ok(Scalar::from_i32(this.macos_stat_write_buf(metadata, buf_op)?))
}
fn linux_statx(
@ -765,7 +769,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
if this.ptr_is_null(statxbuf_ptr)? || this.ptr_is_null(pathname_ptr)? {
let efault = this.eval_libc("EFAULT");
this.set_last_error(efault)?;
return Ok(Scalar::from_i32(-1));
return interp_ok(Scalar::from_i32(-1));
}
let statxbuf = this.deref_pointer_as(statxbuf_op, this.libc_ty_layout("statx"))?;
@ -807,7 +811,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.eval_libc("EBADF")
};
this.set_last_error(ecode)?;
return Ok(Scalar::from_i32(-1));
return interp_ok(Scalar::from_i32(-1));
}
// the `_mask_op` parameter specifies the file information that the caller requested.
@ -829,7 +833,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
};
let metadata = match metadata {
Some(metadata) => metadata,
None => return Ok(Scalar::from_i32(-1)),
None => return interp_ok(Scalar::from_i32(-1)),
};
// The `mode` field specifies the type of the file and the permissions over the file for
@ -848,25 +852,25 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
.accessed
.map(|tup| {
mask |= this.eval_libc_u32("STATX_ATIME");
InterpResult::Ok(tup)
interp_ok(tup)
})
.unwrap_or_else(|| Ok((0, 0)))?;
.unwrap_or_else(|| interp_ok((0, 0)))?;
let (created_sec, created_nsec) = metadata
.created
.map(|tup| {
mask |= this.eval_libc_u32("STATX_BTIME");
InterpResult::Ok(tup)
interp_ok(tup)
})
.unwrap_or_else(|| Ok((0, 0)))?;
.unwrap_or_else(|| interp_ok((0, 0)))?;
let (modified_sec, modified_nsec) = metadata
.modified
.map(|tup| {
mask |= this.eval_libc_u32("STATX_MTIME");
InterpResult::Ok(tup)
interp_ok(tup)
})
.unwrap_or_else(|| Ok((0, 0)))?;
.unwrap_or_else(|| interp_ok((0, 0)))?;
// Now we write everything to `statxbuf`. We write a zero for the unavailable fields.
this.write_int_fields_named(
@ -922,7 +926,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
&this.project_field_named(&statxbuf, "stx_mtime")?,
)?;
Ok(Scalar::from_i32(0))
interp_ok(Scalar::from_i32(0))
}
fn rename(
@ -938,7 +942,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
if this.ptr_is_null(oldpath_ptr)? || this.ptr_is_null(newpath_ptr)? {
let efault = this.eval_libc("EFAULT");
this.set_last_error(efault)?;
return Ok(Scalar::from_i32(-1));
return interp_ok(Scalar::from_i32(-1));
}
let oldpath = this.read_path_from_c_str(oldpath_ptr)?;
@ -948,12 +952,12 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
if let IsolatedOp::Reject(reject_with) = this.machine.isolated_op {
this.reject_in_isolation("`rename`", reject_with)?;
this.set_last_error_from_io_error(ErrorKind::PermissionDenied.into())?;
return Ok(Scalar::from_i32(-1));
return interp_ok(Scalar::from_i32(-1));
}
let result = rename(oldpath, newpath).map(|_| 0);
Ok(Scalar::from_i32(this.try_unwrap_io_result(result)?))
interp_ok(Scalar::from_i32(this.try_unwrap_io_result(result)?))
}
fn mkdir(&mut self, path_op: &OpTy<'tcx>, mode_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
@ -972,7 +976,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
if let IsolatedOp::Reject(reject_with) = this.machine.isolated_op {
this.reject_in_isolation("`mkdir`", reject_with)?;
this.set_last_error_from_io_error(ErrorKind::PermissionDenied.into())?;
return Ok(Scalar::from_i32(-1));
return interp_ok(Scalar::from_i32(-1));
}
#[cfg_attr(not(unix), allow(unused_mut))]
@ -988,7 +992,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let result = builder.create(path).map(|_| 0i32);
Ok(Scalar::from_i32(this.try_unwrap_io_result(result)?))
interp_ok(Scalar::from_i32(this.try_unwrap_io_result(result)?))
}
fn rmdir(&mut self, path_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
@ -1000,12 +1004,12 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
if let IsolatedOp::Reject(reject_with) = this.machine.isolated_op {
this.reject_in_isolation("`rmdir`", reject_with)?;
this.set_last_error_from_io_error(ErrorKind::PermissionDenied.into())?;
return Ok(Scalar::from_i32(-1));
return interp_ok(Scalar::from_i32(-1));
}
let result = remove_dir(path).map(|_| 0i32);
Ok(Scalar::from_i32(this.try_unwrap_io_result(result)?))
interp_ok(Scalar::from_i32(this.try_unwrap_io_result(result)?))
}
fn opendir(&mut self, name_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
@ -1018,7 +1022,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.reject_in_isolation("`opendir`", reject_with)?;
let eacc = this.eval_libc("EACCES");
this.set_last_error(eacc)?;
return Ok(Scalar::null_ptr(this));
return interp_ok(Scalar::null_ptr(this));
}
let result = read_dir(name);
@ -1030,11 +1034,11 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// The libc API for opendir says that this method returns a pointer to an opaque
// structure, but we are returning an ID number. Thus, pass it as a scalar of
// pointer width.
Ok(Scalar::from_target_usize(id, this))
interp_ok(Scalar::from_target_usize(id, this))
}
Err(e) => {
this.set_last_error_from_io_error(e)?;
Ok(Scalar::null_ptr(this))
interp_ok(Scalar::null_ptr(this))
}
}
}
@ -1051,7 +1055,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.reject_in_isolation("`readdir`", reject_with)?;
let eacc = this.eval_libc("EBADF");
this.set_last_error(eacc)?;
return Ok(Scalar::null_ptr(this));
return interp_ok(Scalar::null_ptr(this));
}
let open_dir = this.machine.dirs.streams.get_mut(&dirp).ok_or_else(|| {
@ -1129,7 +1133,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.deallocate_ptr(old_entry, None, MiriMemoryKind::Runtime.into())?;
}
Ok(Scalar::from_maybe_pointer(entry.unwrap_or_else(Pointer::null), this))
interp_ok(Scalar::from_maybe_pointer(entry.unwrap_or_else(Pointer::null), this))
}
fn macos_fbsd_readdir_r(
@ -1150,13 +1154,13 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
if let IsolatedOp::Reject(reject_with) = this.machine.isolated_op {
this.reject_in_isolation("`readdir_r`", reject_with)?;
// Set error code as "EBADF" (bad fd)
return Ok(Scalar::from_i32(this.fd_not_found()?));
return interp_ok(Scalar::from_i32(this.fd_not_found()?));
}
let open_dir = this.machine.dirs.streams.get_mut(&dirp).ok_or_else(|| {
err_unsup_format!("the DIR pointer passed to readdir_r did not come from opendir")
})?;
Ok(Scalar::from_i32(match open_dir.read_dir.next() {
interp_ok(Scalar::from_i32(match open_dir.read_dir.next() {
Some(Ok(dir_entry)) => {
// Write into entry, write pointer to result, return 0 on success.
// The name is written with write_os_str_to_c_str, while the rest of the
@ -1261,7 +1265,8 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let dirp = this.read_target_usize(dirp_op)?;
// Reject if isolation is enabled.
Ok(Scalar::from_i32(if let IsolatedOp::Reject(reject_with) = this.machine.isolated_op {
interp_ok(Scalar::from_i32(
if let IsolatedOp::Reject(reject_with) = this.machine.isolated_op {
this.reject_in_isolation("`closedir`", reject_with)?;
this.fd_not_found()?
} else if let Some(open_dir) = this.machine.dirs.streams.remove(&dirp) {
@ -1272,7 +1277,8 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
0
} else {
this.fd_not_found()?
}))
},
))
}
fn ftruncate64(&mut self, fd_num: i32, length: i128) -> InterpResult<'tcx, Scalar> {
@ -1282,11 +1288,11 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
if let IsolatedOp::Reject(reject_with) = this.machine.isolated_op {
this.reject_in_isolation("`ftruncate64`", reject_with)?;
// Set error code as "EBADF" (bad fd)
return Ok(Scalar::from_i32(this.fd_not_found()?));
return interp_ok(Scalar::from_i32(this.fd_not_found()?));
}
let Some(fd) = this.machine.fds.get(fd_num) else {
return Ok(Scalar::from_i32(this.fd_not_found()?));
return interp_ok(Scalar::from_i32(this.fd_not_found()?));
};
// FIXME: Support ftruncate64 for all FDs
@ -1299,19 +1305,19 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let result = file.set_len(length);
drop(fd);
let result = this.try_unwrap_io_result(result.map(|_| 0i32))?;
Ok(Scalar::from_i32(result))
interp_ok(Scalar::from_i32(result))
} else {
drop(fd);
let einval = this.eval_libc("EINVAL");
this.set_last_error(einval)?;
Ok(Scalar::from_i32(-1))
interp_ok(Scalar::from_i32(-1))
}
} else {
drop(fd);
// The file is not writable
let einval = this.eval_libc("EINVAL");
this.set_last_error(einval)?;
Ok(Scalar::from_i32(-1))
interp_ok(Scalar::from_i32(-1))
}
}
@ -1329,7 +1335,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
if let IsolatedOp::Reject(reject_with) = this.machine.isolated_op {
this.reject_in_isolation("`fsync`", reject_with)?;
// Set error code as "EBADF" (bad fd)
return Ok(Scalar::from_i32(this.fd_not_found()?));
return interp_ok(Scalar::from_i32(this.fd_not_found()?));
}
self.ffullsync_fd(fd)
@ -1338,7 +1344,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
fn ffullsync_fd(&mut self, fd_num: i32) -> InterpResult<'tcx, Scalar> {
let this = self.eval_context_mut();
let Some(fd) = this.machine.fds.get(fd_num) else {
return Ok(Scalar::from_i32(this.fd_not_found()?));
return interp_ok(Scalar::from_i32(this.fd_not_found()?));
};
// Only regular files support synchronization.
let FileHandle { file, writable } = fd.downcast::<FileHandle>().ok_or_else(|| {
@ -1346,7 +1352,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
})?;
let io_result = maybe_sync_file(file, *writable, File::sync_all);
drop(fd);
Ok(Scalar::from_i32(this.try_unwrap_io_result(io_result)?))
interp_ok(Scalar::from_i32(this.try_unwrap_io_result(io_result)?))
}
fn fdatasync(&mut self, fd_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
@ -1358,11 +1364,11 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
if let IsolatedOp::Reject(reject_with) = this.machine.isolated_op {
this.reject_in_isolation("`fdatasync`", reject_with)?;
// Set error code as "EBADF" (bad fd)
return Ok(Scalar::from_i32(this.fd_not_found()?));
return interp_ok(Scalar::from_i32(this.fd_not_found()?));
}
let Some(fd) = this.machine.fds.get(fd) else {
return Ok(Scalar::from_i32(this.fd_not_found()?));
return interp_ok(Scalar::from_i32(this.fd_not_found()?));
};
// Only regular files support synchronization.
let FileHandle { file, writable } = fd.downcast::<FileHandle>().ok_or_else(|| {
@ -1370,7 +1376,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
})?;
let io_result = maybe_sync_file(file, *writable, File::sync_data);
drop(fd);
Ok(Scalar::from_i32(this.try_unwrap_io_result(io_result)?))
interp_ok(Scalar::from_i32(this.try_unwrap_io_result(io_result)?))
}
fn sync_file_range(
@ -1390,7 +1396,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
if offset < 0 || nbytes < 0 {
let einval = this.eval_libc("EINVAL");
this.set_last_error(einval)?;
return Ok(Scalar::from_i32(-1));
return interp_ok(Scalar::from_i32(-1));
}
let allowed_flags = this.eval_libc_i32("SYNC_FILE_RANGE_WAIT_BEFORE")
| this.eval_libc_i32("SYNC_FILE_RANGE_WRITE")
@ -1398,18 +1404,18 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
if flags & allowed_flags != flags {
let einval = this.eval_libc("EINVAL");
this.set_last_error(einval)?;
return Ok(Scalar::from_i32(-1));
return interp_ok(Scalar::from_i32(-1));
}
// Reject if isolation is enabled.
if let IsolatedOp::Reject(reject_with) = this.machine.isolated_op {
this.reject_in_isolation("`sync_file_range`", reject_with)?;
// Set error code as "EBADF" (bad fd)
return Ok(Scalar::from_i32(this.fd_not_found()?));
return interp_ok(Scalar::from_i32(this.fd_not_found()?));
}
let Some(fd) = this.machine.fds.get(fd) else {
return Ok(Scalar::from_i32(this.fd_not_found()?));
return interp_ok(Scalar::from_i32(this.fd_not_found()?));
};
// Only regular files support synchronization.
let FileHandle { file, writable } = fd.downcast::<FileHandle>().ok_or_else(|| {
@ -1417,7 +1423,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
})?;
let io_result = maybe_sync_file(file, *writable, File::sync_data);
drop(fd);
Ok(Scalar::from_i32(this.try_unwrap_io_result(io_result)?))
interp_ok(Scalar::from_i32(this.try_unwrap_io_result(io_result)?))
}
fn readlink(
@ -1437,7 +1443,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.reject_in_isolation("`readlink`", reject_with)?;
let eacc = this.eval_libc("EACCES");
this.set_last_error(eacc)?;
return Ok(-1);
return interp_ok(-1);
}
let result = std::fs::read_link(pathname);
@ -1456,11 +1462,11 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
path_bytes = &path_bytes[..bufsize]
}
this.write_bytes_ptr(buf, path_bytes.iter().copied())?;
Ok(path_bytes.len().try_into().unwrap())
interp_ok(path_bytes.len().try_into().unwrap())
}
Err(e) => {
this.set_last_error_from_io_error(e)?;
Ok(-1)
interp_ok(-1)
}
}
}
@ -1472,7 +1478,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let fd = this.read_scalar(miri_fd)?.to_i32()?;
let error = if let Some(fd) = this.machine.fds.get(fd) {
if fd.is_tty(this.machine.communicate()) {
return Ok(Scalar::from_i32(1));
return interp_ok(Scalar::from_i32(1));
} else {
this.eval_libc("ENOTTY")
}
@ -1481,7 +1487,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.eval_libc("EBADF")
};
this.set_last_error(error)?;
Ok(Scalar::from_i32(0))
interp_ok(Scalar::from_i32(0))
}
fn realpath(
@ -1500,7 +1506,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.reject_in_isolation("`realpath`", reject_with)?;
let eacc = this.eval_libc("EACCES");
this.set_last_error(eacc)?;
return Ok(Scalar::from_target_usize(0, this));
return interp_ok(Scalar::from_target_usize(0, this));
}
let result = std::fs::canonicalize(pathname);
@ -1531,16 +1537,16 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// seems like a bit of a mess anyway: <https://eklitzke.org/path-max-is-tricky>.
let enametoolong = this.eval_libc("ENAMETOOLONG");
this.set_last_error(enametoolong)?;
return Ok(Scalar::from_target_usize(0, this));
return interp_ok(Scalar::from_target_usize(0, this));
}
processed_ptr
};
Ok(Scalar::from_maybe_pointer(dest, this))
interp_ok(Scalar::from_maybe_pointer(dest, this))
}
Err(e) => {
this.set_last_error_from_io_error(e)?;
Ok(Scalar::from_target_usize(0, this))
interp_ok(Scalar::from_target_usize(0, this))
}
}
}
@ -1575,7 +1581,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.reject_in_isolation("`mkstemp`", reject_with)?;
let eacc = this.eval_libc("EACCES");
this.set_last_error(eacc)?;
return Ok(Scalar::from_i32(-1));
return interp_ok(Scalar::from_i32(-1));
}
// Get the bytes of the suffix we expect in _target_ encoding.
@ -1593,7 +1599,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
if last_six_char_bytes != suffix_bytes {
let einval = this.eval_libc("EINVAL");
this.set_last_error(einval)?;
return Ok(Scalar::from_i32(-1));
return interp_ok(Scalar::from_i32(-1));
}
// At this point we know we have 6 ASCII 'X' characters as a suffix.
@ -1648,7 +1654,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
match file {
Ok(f) => {
let fd = this.machine.fds.insert_new(FileHandle { file: f, writable: true });
return Ok(Scalar::from_i32(fd));
return interp_ok(Scalar::from_i32(fd));
}
Err(e) =>
match e.kind() {
@ -1659,7 +1665,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// "On error, -1 is returned, and errno is set to
// indicate the error"
this.set_last_error_from_io_error(e)?;
return Ok(Scalar::from_i32(-1));
return interp_ok(Scalar::from_i32(-1));
}
},
}
@ -1668,7 +1674,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// We ran out of attempts to create the file, return an error.
let eexist = this.eval_libc("EEXIST");
this.set_last_error(eexist)?;
Ok(Scalar::from_i32(-1))
interp_ok(Scalar::from_i32(-1))
}
}
@ -1678,12 +1684,13 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
fn extract_sec_and_nsec<'tcx>(
time: std::io::Result<SystemTime>,
) -> InterpResult<'tcx, Option<(u64, u32)>> {
time.ok()
.map(|time| {
match time.ok() {
Some(time) => {
let duration = system_time_to_duration(&time)?;
Ok((duration.as_secs(), duration.subsec_nanos()))
})
.transpose()
interp_ok(Some((duration.as_secs(), duration.subsec_nanos())))
}
None => interp_ok(None),
}
}
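
The rewrite of `extract_sec_and_nsec` above trades an `Option::map` + `transpose` chain for an explicit `match`. For a plain `Result` the two shapes are interchangeable, as this small std-only comparison shows (the switch here is presumably motivated by the new `InterpResult` type, which this sketch does not model):

// Standalone illustration (std types only) of the two equivalent shapes above.
fn secs_via_transpose(time: Option<u64>) -> Result<Option<(u64, u32)>, String> {
    time.map(|t| Ok((t, 0u32))).transpose()
}

fn secs_via_match(time: Option<u64>) -> Result<Option<(u64, u32)>, String> {
    match time {
        Some(t) => Ok(Some((t, 0u32))),
        None => Ok(None),
    }
}

fn main() {
    assert_eq!(secs_via_transpose(Some(5)), secs_via_match(Some(5)));
    assert_eq!(secs_via_transpose(None), secs_via_match(None));
}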
/// Stores a file's metadata in order to avoid code duplication in the different metadata related
@ -1738,7 +1745,7 @@ impl FileMetadata {
Ok(metadata) => metadata,
Err(e) => {
ecx.set_last_error_from_io_error(e)?;
return Ok(None);
return interp_ok(None);
}
};
@ -1761,6 +1768,6 @@ impl FileMetadata {
let modified = extract_sec_and_nsec(metadata.modified())?;
// FIXME: Provide more fields using platform specific methods.
Ok(Some(FileMetadata { mode, size, created, accessed, modified }))
interp_ok(Some(FileMetadata { mode, size, created, accessed, modified }))
}
}

View File

@ -142,7 +142,7 @@ impl FileDescription for Epoll {
_communicate_allowed: bool,
_ecx: &mut MiriInterpCx<'tcx>,
) -> InterpResult<'tcx, io::Result<()>> {
Ok(Ok(()))
interp_ok(Ok(()))
}
}
@ -211,7 +211,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
epoll_instance.ready_list = Rc::new(RefCell::new(BTreeMap::new()));
let fd = this.machine.fds.insert_new(Epoll::default());
Ok(Scalar::from_i32(fd))
interp_ok(Scalar::from_i32(fd))
}
/// This function performs control operations on the `Epoll` instance referred to by the file
@ -263,12 +263,12 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
if epfd_value == fd {
let einval = this.eval_libc("EINVAL");
this.set_last_error(einval)?;
return Ok(Scalar::from_i32(-1));
return interp_ok(Scalar::from_i32(-1));
}
// Check if epfd is a valid epoll file descriptor.
let Some(epfd) = this.machine.fds.get(epfd_value) else {
return Ok(Scalar::from_i32(this.fd_not_found()?));
return interp_ok(Scalar::from_i32(this.fd_not_found()?));
};
let epoll_file_description = epfd
.downcast::<Epoll>()
@ -278,7 +278,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let ready_list = &epoll_file_description.ready_list;
let Some(fd_ref) = this.machine.fds.get(fd) else {
return Ok(Scalar::from_i32(this.fd_not_found()?));
return interp_ok(Scalar::from_i32(this.fd_not_found()?));
};
let id = fd_ref.get_id();
@ -330,13 +330,13 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
if interest_list.contains_key(&epoll_key) {
let eexist = this.eval_libc("EEXIST");
this.set_last_error(eexist)?;
return Ok(Scalar::from_i32(-1));
return interp_ok(Scalar::from_i32(-1));
}
} else {
if !interest_list.contains_key(&epoll_key) {
let enoent = this.eval_libc("ENOENT");
this.set_last_error(enoent)?;
return Ok(Scalar::from_i32(-1));
return interp_ok(Scalar::from_i32(-1));
}
}
@ -364,7 +364,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// Notification will be returned for current epfd if there is event in the file
// descriptor we registered.
check_and_update_one_event_interest(&fd_ref, interest, id, this)?;
return Ok(Scalar::from_i32(0));
return interp_ok(Scalar::from_i32(0));
} else if op == epoll_ctl_del {
let epoll_key = (id, fd);
@ -372,7 +372,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let Some(epoll_interest) = interest_list.remove(&epoll_key) else {
let enoent = this.eval_libc("ENOENT");
this.set_last_error(enoent)?;
return Ok(Scalar::from_i32(-1));
return interp_ok(Scalar::from_i32(-1));
};
// All related Weak<EpollEventInterest> will fail to upgrade after the drop.
drop(epoll_interest);
@ -390,9 +390,9 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
.unwrap()
.retain(|event| event.upgrade().is_some());
return Ok(Scalar::from_i32(0));
return interp_ok(Scalar::from_i32(0));
}
Ok(Scalar::from_i32(-1))
interp_ok(Scalar::from_i32(-1))
}
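
The EEXIST/ENOENT checks in `epoll_ctl` above boil down to: adding an already registered fd fails, modifying an unregistered fd fails. A hypothetical standalone model with simplified key and event types:

use std::collections::HashMap;

#[derive(Debug, PartialEq)]
enum CtlError {
    Exist, // EEXIST
    NoEnt, // ENOENT
}

fn ctl_add_or_mod(
    interest: &mut HashMap<(u64, i32), u32>,
    add: bool,
    key: (u64, i32),
    events: u32,
) -> Result<(), CtlError> {
    if add && interest.contains_key(&key) {
        return Err(CtlError::Exist);
    }
    if !add && !interest.contains_key(&key) {
        return Err(CtlError::NoEnt);
    }
    interest.insert(key, events);
    Ok(())
}

fn main() {
    let mut interest = HashMap::new();
    assert_eq!(ctl_add_or_mod(&mut interest, true, (1, 5), 0x1), Ok(()));
    assert_eq!(ctl_add_or_mod(&mut interest, true, (1, 5), 0x1), Err(CtlError::Exist));
    assert_eq!(ctl_add_or_mod(&mut interest, false, (1, 5), 0x3), Ok(()));
}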
/// The `epoll_wait()` system call waits for events on the `Epoll`
@ -446,7 +446,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let einval = this.eval_libc("EINVAL");
this.set_last_error(einval)?;
this.write_int(-1, dest)?;
return Ok(());
return interp_ok(());
}
// This needs to come after the maxevents value check, or else maxevents.try_into().unwrap()
@ -459,7 +459,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let Some(epfd) = this.machine.fds.get(epfd_value) else {
let result_value: i32 = this.fd_not_found()?;
this.write_int(result_value, dest)?;
return Ok(());
return interp_ok(());
};
// Create a weak ref of epfd and pass it to callback so we will make sure that epfd
// is not close after the thread unblocks.
@ -508,7 +508,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
}
@unblock = |this| {
blocking_epoll_callback(epfd_value, weak_epfd, &dest, &event, this)?;
Ok(())
interp_ok(())
}
@timeout = |this| {
// No notification after blocking timeout.
@ -521,12 +521,12 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
.thread_id.borrow_mut()
.retain(|&id| id != this.active_thread());
this.write_int(0, &dest)?;
Ok(())
interp_ok(())
}
),
);
}
Ok(())
interp_ok(())
}
/// For a specific file description, get its ready events and update the corresponding ready
@ -577,7 +577,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
for thread_id in waiter {
this.unblock_thread(thread_id, BlockReason::Epoll)?;
}
Ok(())
interp_ok(())
}
}
@ -623,9 +623,9 @@ fn check_and_update_one_event_interest<'tcx>(
let event_instance = EpollEventInstance::new(flags, epoll_event_interest.data);
// Triggers the notification by inserting it into the ready list.
ready_list.insert(epoll_key, event_instance);
Ok(true)
interp_ok(true)
} else {
Ok(false)
interp_ok(false)
}
}
@ -665,5 +665,5 @@ fn blocking_epoll_callback<'tcx>(
}
}
ecx.write_int(num_of_events, dest)?;
Ok(())
interp_ok(())
}

View File

@ -37,7 +37,7 @@ impl FileDescription for Event {
// We only check the status of EPOLLIN and EPOLLOUT flags for eventfd. If other event flags
// need to be supported in the future, the check should be added here.
Ok(EpollReadyEvents {
interp_ok(EpollReadyEvents {
epollin: self.counter.get() != 0,
epollout: self.counter.get() != MAX_COUNTER,
..EpollReadyEvents::new()
@ -49,7 +49,7 @@ impl FileDescription for Event {
_communicate_allowed: bool,
_ecx: &mut MiriInterpCx<'tcx>,
) -> InterpResult<'tcx, io::Result<()>> {
Ok(Ok(()))
interp_ok(Ok(()))
}
/// Read the counter in the buffer and return the counter if succeeded.
@ -68,7 +68,7 @@ impl FileDescription for Event {
if len < ty.size.bytes_usize() {
ecx.set_last_error_from_io_error(Error::from(ErrorKind::InvalidInput))?;
ecx.write_int(-1, dest)?;
return Ok(());
return interp_ok(());
}
// eventfd read at the size of u64.
@ -80,7 +80,7 @@ impl FileDescription for Event {
if self.is_nonblock {
ecx.set_last_error_from_io_error(Error::from(ErrorKind::WouldBlock))?;
ecx.write_int(-1, dest)?;
return Ok(());
return interp_ok(());
}
throw_unsup_format!("eventfd: blocking is unsupported");
@ -100,7 +100,7 @@ impl FileDescription for Event {
ecx.write_int(buf_place.layout.size.bytes(), dest)?;
}
Ok(())
interp_ok(())
}
/// A write call adds the 8-byte integer value supplied in
@ -226,6 +226,6 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
clock: RefCell::new(VClock::default()),
});
Ok(Scalar::from_i32(fd_value))
interp_ok(Scalar::from_i32(fd_value))
}
}

View File

@ -134,7 +134,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// The only supported flags are GRND_RANDOM and GRND_NONBLOCK,
// neither of which has any effect on our current PRNG.
// See <https://github.com/rust-lang/rust/pull/79196> for a discussion of argument sizes.
let _flags = this.read_scalar(&args[3])?.to_i32();
let _flags = this.read_scalar(&args[3])?.to_i32()?;
this.gen_random(ptr, len)?;
this.write_scalar(Scalar::from_target_usize(len, this), dest)?;
@ -147,7 +147,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.handle_unsupported_foreign_item(format!(
"can't execute syscall with ID {id}"
))?;
return Ok(EmulateItemResult::AlreadyJumped);
return interp_ok(EmulateItemResult::AlreadyJumped);
}
}
}
@ -190,9 +190,9 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.write_null(dest)?;
}
_ => return Ok(EmulateItemResult::NotSupported),
_ => return interp_ok(EmulateItemResult::NotSupported),
};
Ok(EmulateItemResult::NeedsReturn)
interp_ok(EmulateItemResult::NeedsReturn)
}
}

View File

@ -25,7 +25,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
#[allow(clippy::arithmetic_side_effects)] // PAGE_SIZE is nonzero
if old_address.addr().bytes() % this.machine.page_size != 0 || new_size == 0 {
this.set_last_error(this.eval_libc("EINVAL"))?;
return Ok(this.eval_libc("MAP_FAILED"));
return interp_ok(this.eval_libc("MAP_FAILED"));
}
if flags & this.eval_libc_i32("MREMAP_FIXED") != 0 {
@ -39,7 +39,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
if flags & this.eval_libc_i32("MREMAP_MAYMOVE") == 0 {
// We only support MREMAP_MAYMOVE, so not passing the flag is just a failure
this.set_last_error(this.eval_libc("EINVAL"))?;
return Ok(this.eval_libc("MAP_FAILED"));
return interp_ok(this.eval_libc("MAP_FAILED"));
}
let align = this.machine.page_align();
@ -60,6 +60,6 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
.unwrap();
}
Ok(Scalar::from_pointer(ptr, this))
interp_ok(Scalar::from_pointer(ptr, this))
}
}

View File

@ -78,7 +78,7 @@ pub fn futex<'tcx>(
let einval = this.eval_libc("EINVAL");
this.set_last_error(einval)?;
this.write_scalar(Scalar::from_target_isize(-1, this), dest)?;
return Ok(());
return interp_ok(());
}
let timeout = this.deref_pointer_as(&args[3], this.libc_ty_layout("timespec"))?;
@ -91,7 +91,7 @@ pub fn futex<'tcx>(
let einval = this.eval_libc("EINVAL");
this.set_last_error(einval)?;
this.write_scalar(Scalar::from_target_isize(-1, this), dest)?;
return Ok(());
return interp_ok(());
}
};
let timeout_clock = if op & futex_realtime == futex_realtime {
@ -201,7 +201,7 @@ pub fn futex<'tcx>(
let einval = this.eval_libc("EINVAL");
this.set_last_error(einval)?;
this.write_scalar(Scalar::from_target_isize(-1, this), dest)?;
return Ok(());
return interp_ok(());
}
// Together with the SeqCst fence in futex_wait, this makes sure that futex_wait
// will see the latest value on addr which could be changed by our caller
@ -221,5 +221,5 @@ pub fn futex<'tcx>(
op => throw_unsup_format!("Miri does not support `futex` syscall with op={}", op),
}
Ok(())
interp_ok(())
}

View File

@ -207,9 +207,9 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.os_unfair_lock_assert_not_owner(lock_op)?;
}
_ => return Ok(EmulateItemResult::NotSupported),
_ => return interp_ok(EmulateItemResult::NotSupported),
};
Ok(EmulateItemResult::NeedsReturn)
interp_ok(EmulateItemResult::NeedsReturn)
}
}

View File

@ -20,7 +20,7 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
// os_unfair_lock holds a 32-bit value, is initialized with zero and
// must be assumed to be opaque. Therefore, we can just store our
// internal mutex ID in the structure without anyone noticing.
this.mutex_get_or_create_id(&lock, 0, |_| Ok(None))
this.mutex_get_or_create_id(&lock, 0, |_| interp_ok(None))
}
}
@ -43,7 +43,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.mutex_lock(id);
}
Ok(())
interp_ok(())
}
fn os_unfair_lock_trylock(
@ -63,7 +63,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.write_scalar(Scalar::from_bool(true), dest)?;
}
Ok(())
interp_ok(())
}
fn os_unfair_lock_unlock(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
@ -77,7 +77,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
));
}
Ok(())
interp_ok(())
}
fn os_unfair_lock_assert_owner(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
@ -90,7 +90,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
));
}
Ok(())
interp_ok(())
}
fn os_unfair_lock_assert_not_owner(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
@ -103,6 +103,6 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
));
}
Ok(())
interp_ok(())
}
}

View File

@ -49,7 +49,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
&& matches!(&*this.tcx.sess.target.os, "macos" | "solaris" | "illumos")
&& (flags & map_fixed) != 0
{
return Ok(Scalar::from_maybe_pointer(Pointer::from_addr_invalid(addr), this));
return interp_ok(Scalar::from_maybe_pointer(Pointer::from_addr_invalid(addr), this));
}
let prot_read = this.eval_libc_i32("PROT_READ");
@ -58,11 +58,11 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// First, we do some basic argument validation as required by mmap
if (flags & (map_private | map_shared)).count_ones() != 1 {
this.set_last_error(this.eval_libc("EINVAL"))?;
return Ok(this.eval_libc("MAP_FAILED"));
return interp_ok(this.eval_libc("MAP_FAILED"));
}
if length == 0 {
this.set_last_error(this.eval_libc("EINVAL"))?;
return Ok(this.eval_libc("MAP_FAILED"));
return interp_ok(this.eval_libc("MAP_FAILED"));
}
// If a user tries to map a file, we want to loudly inform them that this is not going
@ -104,11 +104,11 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let align = this.machine.page_align();
let Some(map_length) = length.checked_next_multiple_of(this.machine.page_size) else {
this.set_last_error(this.eval_libc("EINVAL"))?;
return Ok(this.eval_libc("MAP_FAILED"));
return interp_ok(this.eval_libc("MAP_FAILED"));
};
if map_length > this.target_usize_max() {
this.set_last_error(this.eval_libc("EINVAL"))?;
return Ok(this.eval_libc("MAP_FAILED"));
return interp_ok(this.eval_libc("MAP_FAILED"));
}
let ptr =
@ -121,7 +121,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
)
.unwrap();
Ok(Scalar::from_pointer(ptr, this))
interp_ok(Scalar::from_pointer(ptr, this))
}
fn munmap(&mut self, addr: &OpTy<'tcx>, length: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
@ -135,16 +135,16 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
#[allow(clippy::arithmetic_side_effects)] // PAGE_SIZE is nonzero
if addr.addr().bytes() % this.machine.page_size != 0 {
this.set_last_error(this.eval_libc("EINVAL"))?;
return Ok(Scalar::from_i32(-1));
return interp_ok(Scalar::from_i32(-1));
}
let Some(length) = length.checked_next_multiple_of(this.machine.page_size) else {
this.set_last_error(this.eval_libc("EINVAL"))?;
return Ok(Scalar::from_i32(-1));
return interp_ok(Scalar::from_i32(-1));
};
if length > this.target_usize_max() {
this.set_last_error(this.eval_libc("EINVAL"))?;
return Ok(this.eval_libc("MAP_FAILED"));
return interp_ok(this.eval_libc("MAP_FAILED"));
}
let length = Size::from_bytes(length);
@ -154,6 +154,6 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
MemoryKind::Machine(MiriMemoryKind::Mmap),
)?;
Ok(Scalar::from_i32(0))
interp_ok(Scalar::from_i32(0))
}
}

View File

@ -99,8 +99,8 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.write_null(dest)?;
}
_ => return Ok(EmulateItemResult::NotSupported),
_ => return interp_ok(EmulateItemResult::NotSupported),
}
Ok(EmulateItemResult::NeedsReturn)
interp_ok(EmulateItemResult::NeedsReturn)
}
}

View File

@ -10,7 +10,7 @@ use crate::*;
#[inline]
fn mutexattr_kind_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, u64> {
Ok(match &*ecx.tcx.sess.target.os {
interp_ok(match &*ecx.tcx.sess.target.os {
"linux" | "illumos" | "solaris" | "macos" | "freebsd" | "android" => 0,
os => throw_unsup_format!("`pthread_mutexattr` is not supported on {os}"),
})
@ -112,7 +112,7 @@ fn mutex_id_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, u64> {
}
}
Ok(offset)
interp_ok(offset)
}
/// Eagerly create and initialize a new mutex.
@ -125,7 +125,7 @@ fn mutex_create<'tcx>(
let address = mutex.ptr().addr().bytes();
let data = Box::new(AdditionalMutexData { address, kind });
ecx.mutex_create(&mutex, mutex_id_offset(ecx)?, Some(data))?;
Ok(())
interp_ok(())
}
/// Returns the `MutexId` of the mutex stored at `mutex_op`.
@ -144,7 +144,7 @@ fn mutex_get_id<'tcx>(
// an ID yet. We have to determine the mutex kind from the static initializer.
let kind = mutex_kind_from_static_initializer(ecx, &mutex)?;
Ok(Some(Box::new(AdditionalMutexData { kind, address })))
interp_ok(Some(Box::new(AdditionalMutexData { kind, address })))
})?;
// Check that the mutex has not been moved since last use.
@ -155,7 +155,7 @@ fn mutex_get_id<'tcx>(
throw_ub_format!("pthread_mutex_t can't be moved after first use")
}
Ok(id)
interp_ok(id)
}
/// Returns the kind of a static initializer.
@ -163,7 +163,7 @@ fn mutex_kind_from_static_initializer<'tcx>(
ecx: &MiriInterpCx<'tcx>,
mutex: &MPlaceTy<'tcx>,
) -> InterpResult<'tcx, MutexKind> {
Ok(match &*ecx.tcx.sess.target.os {
interp_ok(match &*ecx.tcx.sess.target.os {
// Only linux has static initializers other than PTHREAD_MUTEX_DEFAULT.
"linux" => {
let offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
@ -186,7 +186,7 @@ fn mutex_translate_kind<'tcx>(
ecx: &MiriInterpCx<'tcx>,
kind: i32,
) -> InterpResult<'tcx, MutexKind> {
Ok(if kind == (ecx.eval_libc_i32("PTHREAD_MUTEX_NORMAL")) {
interp_ok(if kind == (ecx.eval_libc_i32("PTHREAD_MUTEX_NORMAL")) {
MutexKind::Normal
} else if kind == ecx.eval_libc_i32("PTHREAD_MUTEX_ERRORCHECK") {
MutexKind::ErrorCheck
@ -237,7 +237,7 @@ fn rwlock_id_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, u64> {
);
}
Ok(offset)
interp_ok(offset)
}
fn rwlock_get_id<'tcx>(
@ -248,7 +248,7 @@ fn rwlock_get_id<'tcx>(
let address = rwlock.ptr().addr().bytes();
let id = ecx.rwlock_get_or_create_id(&rwlock, rwlock_id_offset(ecx)?, |_| {
Ok(Some(Box::new(AdditionalRwLockData { address })))
interp_ok(Some(Box::new(AdditionalRwLockData { address })))
})?;
// Check that the rwlock has not been moved since last use.
@ -259,7 +259,7 @@ fn rwlock_get_id<'tcx>(
throw_ub_format!("pthread_rwlock_t can't be moved after first use")
}
Ok(id)
interp_ok(id)
}
// pthread_condattr_t.
@ -268,7 +268,7 @@ fn rwlock_get_id<'tcx>(
#[inline]
fn condattr_clock_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, u64> {
Ok(match &*ecx.tcx.sess.target.os {
interp_ok(match &*ecx.tcx.sess.target.os {
"linux" | "illumos" | "solaris" | "freebsd" | "android" => 0,
// macOS does not have a clock attribute.
os => throw_unsup_format!("`pthread_condattr` clock field is not supported on {os}"),
@ -292,7 +292,7 @@ fn cond_translate_clock_id<'tcx>(
ecx: &MiriInterpCx<'tcx>,
raw_id: i32,
) -> InterpResult<'tcx, ClockId> {
Ok(if raw_id == ecx.eval_libc_i32("CLOCK_REALTIME") {
interp_ok(if raw_id == ecx.eval_libc_i32("CLOCK_REALTIME") {
ClockId::Realtime
} else if raw_id == ecx.eval_libc_i32("CLOCK_MONOTONIC") {
ClockId::Monotonic
@ -342,7 +342,7 @@ fn cond_id_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, u64> {
);
}
Ok(offset)
interp_ok(offset)
}
#[derive(Debug, Clone, Copy)]
@ -369,7 +369,7 @@ fn cond_get_id<'tcx>(
let address = cond.ptr().addr().bytes();
let id = ecx.condvar_get_or_create_id(&cond, cond_id_offset(ecx)?, |_ecx| {
// This used the static initializer. The clock there is always CLOCK_REALTIME.
Ok(Some(Box::new(AdditionalCondData { address, clock_id: ClockId::Realtime })))
interp_ok(Some(Box::new(AdditionalCondData { address, clock_id: ClockId::Realtime })))
})?;
// Check that the mutex has not been moved since last use.
// (The check is on the condvar; the wording above follows the mutex code it was copied from.)
@ -380,7 +380,7 @@ fn cond_get_id<'tcx>(
throw_ub_format!("pthread_cond_t can't be moved after first use")
}
Ok(id)
interp_ok(id)
}
impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
@ -390,7 +390,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
mutexattr_set_kind(this, attr_op, PTHREAD_MUTEX_KIND_UNCHANGED)?;
Ok(())
interp_ok(())
}
fn pthread_mutexattr_settype(
@ -411,10 +411,10 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
mutexattr_set_kind(this, attr_op, kind)?;
} else {
let einval = this.eval_libc_i32("EINVAL");
return Ok(Scalar::from_i32(einval));
return interp_ok(Scalar::from_i32(einval));
}
Ok(Scalar::from_i32(0))
interp_ok(Scalar::from_i32(0))
}
fn pthread_mutexattr_destroy(&mut self, attr_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
@ -439,7 +439,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
&this.deref_pointer_as(attr_op, this.libc_ty_layout("pthread_mutexattr_t"))?,
)?;
Ok(())
interp_ok(())
}
fn pthread_mutex_init(
@ -458,7 +458,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
mutex_create(this, mutex_op, kind)?;
Ok(())
interp_ok(())
}
fn pthread_mutex_lock(
@ -478,7 +478,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let owner_thread = this.mutex_get_owner(id);
if owner_thread != this.active_thread() {
this.mutex_enqueue_and_block(id, Some((Scalar::from_i32(0), dest.clone())));
return Ok(());
return interp_ok(());
} else {
// Trying to acquire the same mutex again.
match kind {
@ -498,7 +498,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
0
};
this.write_scalar(Scalar::from_i32(ret), dest)?;
Ok(())
interp_ok(())
}
fn pthread_mutex_trylock(&mut self, mutex_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
@ -510,7 +510,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
.expect("data should always exist for pthread mutexes")
.kind;
Ok(Scalar::from_i32(if this.mutex_is_locked(id) {
interp_ok(Scalar::from_i32(if this.mutex_is_locked(id) {
let owner_thread = this.mutex_get_owner(id);
if owner_thread != this.active_thread() {
this.eval_libc_i32("EBUSY")
@ -542,7 +542,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
if let Some(_old_locked_count) = this.mutex_unlock(id)? {
// The mutex was locked by the current thread.
Ok(Scalar::from_i32(0))
interp_ok(Scalar::from_i32(0))
} else {
// The mutex was locked by another thread or not locked at all. See
// the “Unlock When Not Owner” column in
@ -557,7 +557,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
"unlocked a PTHREAD_MUTEX_NORMAL mutex that was not locked by the current thread"
),
MutexKind::ErrorCheck | MutexKind::Recursive =>
Ok(Scalar::from_i32(this.eval_libc_i32("EPERM"))),
interp_ok(Scalar::from_i32(this.eval_libc_i32("EPERM"))),
}
}
}
@ -579,7 +579,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
)?;
// FIXME: delete interpreter state associated with this mutex.
Ok(())
interp_ok(())
}
fn pthread_rwlock_rdlock(
@ -598,7 +598,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.write_null(dest)?;
}
Ok(())
interp_ok(())
}
fn pthread_rwlock_tryrdlock(&mut self, rwlock_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
@ -607,10 +607,10 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let id = rwlock_get_id(this, rwlock_op)?;
if this.rwlock_is_write_locked(id) {
Ok(Scalar::from_i32(this.eval_libc_i32("EBUSY")))
interp_ok(Scalar::from_i32(this.eval_libc_i32("EBUSY")))
} else {
this.rwlock_reader_lock(id);
Ok(Scalar::from_i32(0))
interp_ok(Scalar::from_i32(0))
}
}
@ -642,7 +642,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.write_null(dest)?;
}
Ok(())
interp_ok(())
}
fn pthread_rwlock_trywrlock(&mut self, rwlock_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
@ -651,10 +651,10 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let id = rwlock_get_id(this, rwlock_op)?;
if this.rwlock_is_locked(id) {
Ok(Scalar::from_i32(this.eval_libc_i32("EBUSY")))
interp_ok(Scalar::from_i32(this.eval_libc_i32("EBUSY")))
} else {
this.rwlock_writer_lock(id);
Ok(Scalar::from_i32(0))
interp_ok(Scalar::from_i32(0))
}
}
@ -665,7 +665,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
#[allow(clippy::if_same_then_else)]
if this.rwlock_reader_unlock(id)? || this.rwlock_writer_unlock(id)? {
Ok(())
interp_ok(())
} else {
throw_ub_format!("unlocked an rwlock that was not locked by the active thread");
}
@ -688,7 +688,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
)?;
// FIXME: delete interpreter state associated with this rwlock.
Ok(())
interp_ok(())
}
fn pthread_condattr_init(&mut self, attr_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
@ -703,7 +703,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
condattr_set_clock_id(this, attr_op, default_clock_id)?;
}
Ok(())
interp_ok(())
}
fn pthread_condattr_setclock(
@ -720,10 +720,10 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
condattr_set_clock_id(this, attr_op, clock_id)?;
} else {
let einval = this.eval_libc_i32("EINVAL");
return Ok(Scalar::from_i32(einval));
return interp_ok(Scalar::from_i32(einval));
}
Ok(Scalar::from_i32(0))
interp_ok(Scalar::from_i32(0))
}
fn pthread_condattr_getclock(
@ -736,7 +736,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let clock_id = condattr_get_clock_id(this, attr_op)?;
this.write_scalar(Scalar::from_i32(clock_id), &this.deref_pointer(clk_id_op)?)?;
Ok(())
interp_ok(())
}
fn pthread_condattr_destroy(&mut self, attr_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
@ -754,7 +754,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
&this.deref_pointer_as(attr_op, this.libc_ty_layout("pthread_condattr_t"))?,
)?;
Ok(())
interp_ok(())
}
fn pthread_cond_init(
@ -781,21 +781,21 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
Some(Box::new(AdditionalCondData { address, clock_id })),
)?;
Ok(())
interp_ok(())
}
fn pthread_cond_signal(&mut self, cond_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
let this = self.eval_context_mut();
let id = cond_get_id(this, cond_op)?;
this.condvar_signal(id)?;
Ok(())
interp_ok(())
}
fn pthread_cond_broadcast(&mut self, cond_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
let this = self.eval_context_mut();
let id = cond_get_id(this, cond_op)?;
while this.condvar_signal(id)? {}
Ok(())
interp_ok(())
}
fn pthread_cond_wait(
@ -818,7 +818,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
dest.clone(),
)?;
Ok(())
interp_ok(())
}
fn pthread_cond_timedwait(
@ -845,7 +845,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
None => {
let einval = this.eval_libc("EINVAL");
this.write_scalar(einval, dest)?;
return Ok(());
return interp_ok(());
}
};
let timeout_clock = match clock_id {
@ -865,7 +865,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
dest.clone(),
)?;
Ok(())
interp_ok(())
}
fn pthread_cond_destroy(&mut self, cond_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
@ -882,6 +882,6 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.write_uninit(&this.deref_pointer_as(cond_op, this.libc_ty_layout("pthread_cond_t"))?)?;
// FIXME: delete interpreter state associated with this condvar.
Ok(())
interp_ok(())
}
}

View File

@ -27,7 +27,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.machine.layouts.mut_raw_ptr,
)?;
Ok(())
interp_ok(())
}
fn pthread_join(&mut self, thread: &OpTy<'tcx>, retval: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
@ -41,7 +41,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let thread_id = this.read_scalar(thread)?.to_int(this.libc_ty_layout("pthread_t").size)?;
this.join_thread_exclusive(thread_id.try_into().expect("thread ID should fit in u32"))?;
Ok(())
interp_ok(())
}
fn pthread_detach(&mut self, thread: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
@ -53,14 +53,14 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
/*allow_terminated_joined*/ false,
)?;
Ok(())
interp_ok(())
}
fn pthread_self(&mut self) -> InterpResult<'tcx, Scalar> {
let this = self.eval_context_mut();
let thread_id = this.active_thread();
Ok(Scalar::from_uint(thread_id.to_u32(), this.libc_ty_layout("pthread_t").size))
interp_ok(Scalar::from_uint(thread_id.to_u32(), this.libc_ty_layout("pthread_t").size))
}
/// Set the name of the current thread. `max_name_len` is the maximal length of the name
@ -81,12 +81,12 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// Comparing with `>=` to account for null terminator.
if name.len() >= max_name_len {
return Ok(this.eval_libc("ERANGE"));
return interp_ok(this.eval_libc("ERANGE"));
}
this.set_thread_name(thread, name);
Ok(Scalar::from_u32(0))
interp_ok(Scalar::from_u32(0))
}
fn pthread_getname_np(
@ -106,7 +106,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let name = this.get_thread_name(thread).unwrap_or(b"<unnamed>").to_owned();
let (success, _written) = this.write_c_str(&name, name_out, len)?;
Ok(if success { Scalar::from_u32(0) } else { this.eval_libc("ERANGE") })
interp_ok(if success { Scalar::from_u32(0) } else { this.eval_libc("ERANGE") })
}
fn sched_yield(&mut self) -> InterpResult<'tcx, ()> {
@ -114,6 +114,6 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.yield_active_thread();
Ok(())
interp_ok(())
}
}

View File

@ -103,7 +103,7 @@ impl FileDescription for AnonSocket {
epoll_ready_events.epollerr = true;
}
}
Ok(epoll_ready_events)
interp_ok(epoll_ready_events)
}
fn close<'tcx>(
@ -122,7 +122,7 @@ impl FileDescription for AnonSocket {
// Notify peer fd that close has happened, since that can unblock reads and writes.
ecx.check_and_update_readiness(&peer_fd)?;
}
Ok(Ok(()))
interp_ok(Ok(()))
}
fn read<'tcx>(
@ -344,7 +344,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.write_scalar(sv0, &sv)?;
this.write_scalar(sv1, &sv.offset(sv.layout.size, sv.layout, this)?)?;
Ok(Scalar::from_i32(0))
interp_ok(Scalar::from_i32(0))
}
fn pipe2(
@ -396,6 +396,6 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.write_scalar(pipefd0, &pipefd)?;
this.write_scalar(pipefd1, &pipefd.offset(pipefd.layout.size, pipefd.layout, this)?)?;
Ok(Scalar::from_i32(0))
interp_ok(Scalar::from_i32(0))
}
}

View File

@ -33,8 +33,8 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.write_pointer(res, dest)?;
}
_ => return Ok(EmulateItemResult::NotSupported),
_ => return interp_ok(EmulateItemResult::NotSupported),
}
Ok(EmulateItemResult::NeedsReturn)
interp_ok(EmulateItemResult::NeedsReturn)
}
}

View File

@ -24,12 +24,12 @@ impl WindowsEnvVars {
_ecx: &mut InterpCx<'tcx, MiriMachine<'tcx>>,
env_vars: FxHashMap<OsString, OsString>,
) -> InterpResult<'tcx, Self> {
Ok(Self { map: env_vars })
interp_ok(Self { map: env_vars })
}
/// Implementation detail for [`InterpCx::get_env_var`].
pub(crate) fn get<'tcx>(&self, name: &OsStr) -> InterpResult<'tcx, Option<OsString>> {
Ok(self.map.get(name).cloned())
interp_ok(self.map.get(name).cloned())
}
}
@ -52,7 +52,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let buf_size = this.read_scalar(size_op)?.to_u32()?; // in characters
let name = this.read_os_str_from_wide_str(name_ptr)?;
Ok(match this.machine.env_vars.windows().map.get(&name).cloned() {
interp_ok(match this.machine.env_vars.windows().map.get(&name).cloned() {
Some(val) => {
Scalar::from_u32(windows_check_buffer_size(this.write_os_str_to_wide_str(
&val,
@ -89,7 +89,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let envblock_ptr =
this.alloc_os_str_as_wide_str(&env_vars, MiriMemoryKind::Runtime.into())?;
// If the function succeeds, the return value is a pointer to the environment block of the current process.
Ok(envblock_ptr)
interp_ok(envblock_ptr)
}
#[allow(non_snake_case)]
@ -100,7 +100,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let env_block_ptr = this.read_pointer(env_block_op)?;
this.deallocate_ptr(env_block_ptr, None, MiriMemoryKind::Runtime.into())?;
// If the function succeeds, the return value is nonzero.
Ok(Scalar::from_i32(1))
interp_ok(Scalar::from_i32(1))
}
#[allow(non_snake_case)]
@ -128,11 +128,11 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
} else if this.ptr_is_null(value_ptr)? {
// Delete environment variable `{name}` if it exists.
this.machine.env_vars.windows_mut().map.remove(&name);
Ok(this.eval_windows("c", "TRUE"))
interp_ok(this.eval_windows("c", "TRUE"))
} else {
let value = this.read_os_str_from_wide_str(value_ptr)?;
this.machine.env_vars.windows_mut().map.insert(name, value);
Ok(this.eval_windows("c", "TRUE"))
interp_ok(this.eval_windows("c", "TRUE"))
}
}
@ -151,7 +151,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
if let IsolatedOp::Reject(reject_with) = this.machine.isolated_op {
this.reject_in_isolation("`GetCurrentDirectoryW`", reject_with)?;
this.set_last_error_from_io_error(ErrorKind::PermissionDenied.into())?;
return Ok(Scalar::from_u32(0));
return interp_ok(Scalar::from_u32(0));
}
// If we cannot get the current directory, we return 0
@ -159,13 +159,13 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
Ok(cwd) => {
// This can in fact return 0. It is up to the caller to set last_error to 0
// beforehand and check it afterwards to exclude that case.
return Ok(Scalar::from_u32(windows_check_buffer_size(
return interp_ok(Scalar::from_u32(windows_check_buffer_size(
this.write_path_to_wide_str(&cwd, buf, size)?,
)));
}
Err(e) => this.set_last_error_from_io_error(e)?,
}
Ok(Scalar::from_u32(0))
interp_ok(Scalar::from_u32(0))
}
#[allow(non_snake_case)]
@ -184,14 +184,14 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.reject_in_isolation("`SetCurrentDirectoryW`", reject_with)?;
this.set_last_error_from_io_error(ErrorKind::PermissionDenied.into())?;
return Ok(this.eval_windows("c", "FALSE"));
return interp_ok(this.eval_windows("c", "FALSE"));
}
match env::set_current_dir(path) {
Ok(()) => Ok(this.eval_windows("c", "TRUE")),
Ok(()) => interp_ok(this.eval_windows("c", "TRUE")),
Err(e) => {
this.set_last_error_from_io_error(e)?;
Ok(this.eval_windows("c", "FALSE"))
interp_ok(this.eval_windows("c", "FALSE"))
}
}
}
@ -201,7 +201,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let this = self.eval_context_mut();
this.assert_target_os("windows", "GetCurrentProcessId");
Ok(Scalar::from_u32(this.get_pid()))
interp_ok(Scalar::from_u32(this.get_pid()))
}
#[allow(non_snake_case)]
@ -227,7 +227,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
}
// See <https://learn.microsoft.com/en-us/windows/win32/api/userenv/nf-userenv-getuserprofiledirectoryw> for docs.
Ok(match directories::UserDirs::new() {
interp_ok(match directories::UserDirs::new() {
Some(dirs) => {
let home = dirs.home_dir();
let size_avail = if this.ptr_is_null(size.ptr())? {

View File

@ -22,7 +22,7 @@ pub fn is_dyn_sym(name: &str) -> bool {
#[cfg(windows)]
fn win_absolute<'tcx>(path: &Path) -> InterpResult<'tcx, io::Result<PathBuf>> {
// We are on Windows so we can simply let the host do this.
Ok(path::absolute(path))
interp_ok(path::absolute(path))
}
#[cfg(unix)]
@ -33,7 +33,7 @@ fn win_absolute<'tcx>(path: &Path) -> InterpResult<'tcx, io::Result<PathBuf>> {
// If it starts with `//` (these were backslashes but are already converted)
// then this is a magic special path, we just leave it unchanged.
if bytes.get(0).copied() == Some(b'/') && bytes.get(1).copied() == Some(b'/') {
return Ok(Ok(path.into()));
return interp_ok(Ok(path.into()));
};
// Special treatment for Windows' magic filenames: they are treated as being relative to `\\.\`.
let magic_filenames = &[
@ -43,7 +43,7 @@ fn win_absolute<'tcx>(path: &Path) -> InterpResult<'tcx, io::Result<PathBuf>> {
if magic_filenames.iter().any(|m| m.as_bytes() == bytes) {
let mut result: Vec<u8> = br"//./".into();
result.extend(bytes);
return Ok(Ok(bytes_to_os_str(&result)?.into()));
return interp_ok(Ok(bytes_to_os_str(&result)?.into()));
}
// Otherwise we try to do something kind of close to what Windows does, but this is probably not
// right in all cases. We iterate over the components between `/`, and remove trailing `.`,
@ -71,7 +71,7 @@ fn win_absolute<'tcx>(path: &Path) -> InterpResult<'tcx, io::Result<PathBuf>> {
}
}
// Let the host `absolute` function do working-dir handling
Ok(path::absolute(bytes_to_os_str(&result)?))
interp_ok(path::absolute(bytes_to_os_str(&result)?))
}
impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
@ -769,12 +769,12 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// This function looks and behaves exactly like miri_start_unwind.
let [payload] = this.check_shim(abi, Abi::C { unwind: true }, link_name, args)?;
this.handle_miri_start_unwind(payload)?;
return Ok(EmulateItemResult::NeedsUnwind);
return interp_ok(EmulateItemResult::NeedsUnwind);
}
_ => return Ok(EmulateItemResult::NotSupported),
_ => return interp_ok(EmulateItemResult::NotSupported),
}
Ok(EmulateItemResult::NeedsReturn)
interp_ok(EmulateItemResult::NeedsReturn)
}
}

View File

@ -139,10 +139,10 @@ impl Handle {
signed_handle as u32
} else {
// if a handle doesn't fit in an i32, it isn't valid.
return Ok(None);
return interp_ok(None);
};
Ok(Self::from_packed(handle))
interp_ok(Self::from_packed(handle))
}
}
@ -167,6 +167,6 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
_ => this.invalid_handle("CloseHandle")?,
}
Ok(())
interp_ok(())
}
}

View File

@ -24,7 +24,7 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
dest: &MPlaceTy<'tcx>,
) -> InterpResult<'tcx, bool> {
let this = self.eval_context_mut();
Ok(match this.init_once_status(id) {
interp_ok(match this.init_once_status(id) {
InitOnceStatus::Uninitialized => {
this.init_once_begin(id);
this.write_scalar(this.eval_windows("c", "TRUE"), pending_place)?;
@ -70,7 +70,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
if this.init_once_try_begin(id, &pending_place, dest)? {
// Done!
return Ok(());
return interp_ok(());
}
// We have to block, and then try again when we are woken up.
@ -86,11 +86,11 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
@unblock = |this| {
let ret = this.init_once_try_begin(id, &pending_place, &dest)?;
assert!(ret, "we were woken up but init_once_try_begin still failed");
Ok(())
interp_ok(())
}
),
);
Ok(())
interp_ok(())
}
fn InitOnceComplete(
@ -130,7 +130,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.init_once_fail(id)?;
}
Ok(this.eval_windows("c", "TRUE"))
interp_ok(this.eval_windows("c", "TRUE"))
}
fn WaitOnAddress(
@ -154,7 +154,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let invalid_param = this.eval_windows("c", "ERROR_INVALID_PARAMETER");
this.set_last_error(invalid_param)?;
this.write_scalar(Scalar::from_i32(0), dest)?;
return Ok(());
return interp_ok(());
};
let size = Size::from_bytes(size);
@ -188,7 +188,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.write_scalar(Scalar::from_i32(1), dest)?;
Ok(())
interp_ok(())
}
fn WakeByAddressSingle(&mut self, ptr_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
@ -202,7 +202,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let addr = ptr.addr().bytes();
this.futex_wake(addr, u32::MAX)?;
Ok(())
interp_ok(())
}
fn WakeByAddressAll(&mut self, ptr_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
@ -215,6 +215,6 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let addr = ptr.addr().bytes();
while this.futex_wake(addr, u32::MAX)? {}
Ok(())
interp_ok(())
}
}

View File

@ -79,6 +79,6 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.join_thread(thread)?;
Ok(0)
interp_ok(0)
}
}

View File

@ -123,9 +123,9 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
}
// TODO: Implement the `llvm.x86.aesni.aeskeygenassist` when possible
// with an external crate.
_ => return Ok(EmulateItemResult::NotSupported),
_ => return interp_ok(EmulateItemResult::NotSupported),
}
Ok(EmulateItemResult::NeedsReturn)
interp_ok(EmulateItemResult::NeedsReturn)
}
}
@ -162,5 +162,5 @@ fn aes_round<'tcx>(
this.write_scalar(Scalar::from_u128(res), &dest)?;
}
Ok(())
interp_ok(())
}

View File

@ -348,8 +348,8 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// The only thing that needs to be ensured is the correct calling convention.
let [] = this.check_shim(abi, Abi::C { unwind: false }, link_name, args)?;
}
_ => return Ok(EmulateItemResult::NotSupported),
_ => return interp_ok(EmulateItemResult::NotSupported),
}
Ok(EmulateItemResult::NeedsReturn)
interp_ok(EmulateItemResult::NeedsReturn)
}
}

View File

@ -435,8 +435,8 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
shift_simd_by_simd(this, left, right, which, dest)?;
}
_ => return Ok(EmulateItemResult::NotSupported),
_ => return interp_ok(EmulateItemResult::NotSupported),
}
Ok(EmulateItemResult::NeedsReturn)
interp_ok(EmulateItemResult::NeedsReturn)
}
}

View File

@ -30,7 +30,7 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.expect_target_feature_for_intrinsic(link_name, target_feature)?;
if is_64_bit && this.tcx.sess.target.arch != "x86_64" {
return Ok(EmulateItemResult::NotSupported);
return interp_ok(EmulateItemResult::NotSupported);
}
let [left, right] = this.check_shim(abi, Abi::C { unwind: false }, link_name, args)?;
@ -93,7 +93,7 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
}
result
}
_ => return Ok(EmulateItemResult::NotSupported),
_ => return interp_ok(EmulateItemResult::NotSupported),
};
let result = if is_64_bit {
@ -103,6 +103,6 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
};
this.write_scalar(result, dest)?;
Ok(EmulateItemResult::NeedsReturn)
interp_ok(EmulateItemResult::NeedsReturn)
}
}

View File

@ -43,7 +43,7 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// https://www.intel.com/content/www/us/en/docs/cpp-compiler/developer-guide-reference/2021-8/subborrow-u32-subborrow-u64.html
"addcarry.32" | "addcarry.64" | "subborrow.32" | "subborrow.64" => {
if unprefixed_name.ends_with("64") && this.tcx.sess.target.arch != "x86_64" {
return Ok(EmulateItemResult::NotSupported);
return interp_ok(EmulateItemResult::NotSupported);
}
let [cb_in, a, b] = this.check_shim(abi, Abi::Unadjusted, link_name, args)?;
@ -67,7 +67,7 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let is_u64 = unprefixed_name.ends_with("64");
if is_u64 && this.tcx.sess.target.arch != "x86_64" {
return Ok(EmulateItemResult::NotSupported);
return interp_ok(EmulateItemResult::NotSupported);
}
let [c_in, a, b, out] = this.check_shim(abi, Abi::Unadjusted, link_name, args)?;
@ -157,9 +157,9 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
);
}
_ => return Ok(EmulateItemResult::NotSupported),
_ => return interp_ok(EmulateItemResult::NotSupported),
}
Ok(EmulateItemResult::NeedsReturn)
interp_ok(EmulateItemResult::NeedsReturn)
}
}
@ -244,7 +244,7 @@ impl FloatBinOp {
this.expect_target_feature_for_intrinsic(intrinsic, "avx")?;
unord = !unord;
}
Ok(Self::Cmp { gt, lt, eq, unord })
interp_ok(Self::Cmp { gt, lt, eq, unord })
}
}
@ -266,7 +266,7 @@ fn bin_op_float<'tcx, F: rustc_apfloat::Float>(
Some(std::cmp::Ordering::Equal) => eq,
Some(std::cmp::Ordering::Greater) => gt,
};
Ok(bool_to_simd_element(res, Size::from_bits(F::BITS)))
interp_ok(bool_to_simd_element(res, Size::from_bits(F::BITS)))
}
FloatBinOp::Min => {
let left_scalar = left.to_scalar();
@ -280,9 +280,9 @@ fn bin_op_float<'tcx, F: rustc_apfloat::Float>(
|| right.is_nan()
|| left >= right
{
Ok(right_scalar)
interp_ok(right_scalar)
} else {
Ok(left_scalar)
interp_ok(left_scalar)
}
}
FloatBinOp::Max => {
@ -297,9 +297,9 @@ fn bin_op_float<'tcx, F: rustc_apfloat::Float>(
|| right.is_nan()
|| left <= right
{
Ok(right_scalar)
interp_ok(right_scalar)
} else {
Ok(left_scalar)
interp_ok(left_scalar)
}
}
}
@ -332,7 +332,7 @@ fn bin_op_simd_float_first<'tcx, F: rustc_apfloat::Float>(
this.copy_op(&this.project_index(&left, i)?, &this.project_index(&dest, i)?)?;
}
Ok(())
interp_ok(())
}
/// Performs `which` operation on each component of `left` and
@ -360,7 +360,7 @@ fn bin_op_simd_float_all<'tcx, F: rustc_apfloat::Float>(
this.write_scalar(res, &dest)?;
}
Ok(())
interp_ok(())
}
#[derive(Copy, Clone)]
@ -391,7 +391,7 @@ fn unary_op_f32<'tcx>(
// Apply a relative error with a magnitude on the order of 2^-12 to simulate the
// inaccuracy of RCP.
let res = apply_random_float_error(this, div, -12);
Ok(Scalar::from_f32(res))
interp_ok(Scalar::from_f32(res))
}
FloatUnaryOp::Rsqrt => {
let op = op.to_scalar().to_u32()?;
@ -401,7 +401,7 @@ fn unary_op_f32<'tcx>(
// Apply a relative error with a magnitude on the order of 2^-12 to simulate the
// inaccuracy of RSQRT.
let res = apply_random_float_error(this, rsqrt, -12);
Ok(Scalar::from_f32(res))
interp_ok(Scalar::from_f32(res))
}
}
}
@ -442,7 +442,7 @@ fn unary_op_ss<'tcx>(
this.copy_op(&this.project_index(&op, i)?, &this.project_index(&dest, i)?)?;
}
Ok(())
interp_ok(())
}
/// Performs `which` operation on each component of `op`, storing the
@ -466,7 +466,7 @@ fn unary_op_ps<'tcx>(
this.write_scalar(res, &dest)?;
}
Ok(())
interp_ok(())
}
enum ShiftOp {
@ -532,7 +532,7 @@ fn shift_simd_by_scalar<'tcx>(
this.write_scalar(res, &dest)?;
}
Ok(())
interp_ok(())
}
/// Shifts each element of `left` by the corresponding element of `right`.
@ -587,7 +587,7 @@ fn shift_simd_by_simd<'tcx>(
this.write_scalar(res, &dest)?;
}
Ok(())
interp_ok(())
}
/// Takes a 128-bit vector, transmutes it to `[u64; 2]` and extracts
@ -633,7 +633,7 @@ fn round_first<'tcx, F: rustc_apfloat::Float>(
this.copy_op(&this.project_index(&left, i)?, &this.project_index(&dest, i)?)?;
}
Ok(())
interp_ok(())
}
// Rounds all elements of `op` according to `rounding`.
@ -659,7 +659,7 @@ fn round_all<'tcx, F: rustc_apfloat::Float>(
)?;
}
Ok(())
interp_ok(())
}
/// Gets equivalent `rustc_apfloat::Round` from rounding mode immediate of
@ -671,14 +671,14 @@ fn rounding_from_imm<'tcx>(rounding: i32) -> InterpResult<'tcx, rustc_apfloat::R
match rounding & !0b1000 {
// When the third bit is 0, the rounding mode is determined by the
// first two bits.
0b000 => Ok(rustc_apfloat::Round::NearestTiesToEven),
0b001 => Ok(rustc_apfloat::Round::TowardNegative),
0b010 => Ok(rustc_apfloat::Round::TowardPositive),
0b011 => Ok(rustc_apfloat::Round::TowardZero),
0b000 => interp_ok(rustc_apfloat::Round::NearestTiesToEven),
0b001 => interp_ok(rustc_apfloat::Round::TowardNegative),
0b010 => interp_ok(rustc_apfloat::Round::TowardPositive),
0b011 => interp_ok(rustc_apfloat::Round::TowardZero),
// When the third bit is 1, the rounding mode is determined by the
// SSE status register. Since we do not support modifying it from
// Miri (or Rust), we assume it to be at its default mode (round-to-nearest).
0b100..=0b111 => Ok(rustc_apfloat::Round::NearestTiesToEven),
0b100..=0b111 => interp_ok(rustc_apfloat::Round::NearestTiesToEven),
rounding => panic!("invalid rounding mode 0x{rounding:02x}"),
}
}
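The `rounding_from_imm` hunk above decodes an SSE rounding-control immediate: bit 3 is masked off, a set bit 2 defers to the status register's default, and otherwise the low two bits select the mode. A small self-contained sketch of that decoding with a local `Round` enum standing in for `rustc_apfloat::Round` (illustrative only, not Miri's API):

```rust
// Toy decoder mirroring the masking and match arms in the hunk above.
#[derive(Debug, PartialEq)]
enum Round {
    NearestTiesToEven,
    TowardNegative,
    TowardPositive,
    TowardZero,
}

fn rounding_from_imm(rounding: i32) -> Round {
    // Bit 3 is masked off before matching, as in the hunk above.
    match rounding & !0b1000 {
        0b000 => Round::NearestTiesToEven,
        0b001 => Round::TowardNegative,
        0b010 => Round::TowardPositive,
        0b011 => Round::TowardZero,
        // Bit 2 set: fall back to the register default (round-to-nearest).
        0b100..=0b111 => Round::NearestTiesToEven,
        other => panic!("invalid rounding mode 0x{other:02x}"),
    }
}

fn main() {
    assert_eq!(rounding_from_imm(0b0001), Round::TowardNegative);
    // Bit 3 is ignored by the masking step.
    assert_eq!(rounding_from_imm(0b1010), Round::TowardPositive);
    // Bit 2 set: the register default wins regardless of the low bits.
    assert_eq!(rounding_from_imm(0b0101), Round::NearestTiesToEven);
}
```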
@ -717,7 +717,7 @@ fn convert_float_to_int<'tcx>(
this.write_scalar(Scalar::from_int(0, dest.layout.size), &dest)?;
}
Ok(())
interp_ok(())
}
/// Calculates absolute value of integers in `op` and stores the result in `dest`.
@ -747,7 +747,7 @@ fn int_abs<'tcx>(
this.write_immediate(*res, &dest)?;
}
Ok(())
interp_ok(())
}
/// Splits `op` (which must be a SIMD vector) into 128-bit chunks.
@ -778,7 +778,7 @@ fn split_simd_to_128bit_chunks<'tcx, P: Projectable<'tcx, Provenance>>(
.unwrap();
let chunked_op = op.transmute(chunked_layout, this)?;
Ok((num_chunks, items_per_chunk, chunked_op))
interp_ok((num_chunks, items_per_chunk, chunked_op))
}
/// Horizontally performs `which` operation on adjacent values of
@ -830,7 +830,7 @@ fn horizontal_bin_op<'tcx>(
}
}
Ok(())
interp_ok(())
}
/// Conditionally multiplies the packed floating-point elements in
@ -892,7 +892,7 @@ fn conditional_dot_product<'tcx>(
}
}
Ok(())
interp_ok(())
}
/// Calculates two booleans.
@ -923,7 +923,7 @@ fn test_bits_masked<'tcx>(
masked_set &= (op & mask) == mask;
}
Ok((all_zero, masked_set))
interp_ok((all_zero, masked_set))
}
/// Calculates two booleans.
@ -956,7 +956,7 @@ fn test_high_bits_masked<'tcx>(
negated &= (!op & mask) >> high_bit_offset == 0;
}
Ok((direct, negated))
interp_ok((direct, negated))
}
/// Conditionally loads from `ptr` according to the high bit of each
@ -989,7 +989,7 @@ fn mask_load<'tcx>(
}
}
Ok(())
interp_ok(())
}
/// Conditionally stores into `ptr` according to the high bit of each
@ -1023,7 +1023,7 @@ fn mask_store<'tcx>(
}
}
Ok(())
interp_ok(())
}
/// Compute the sum of absolute differences of quadruplets of unsigned
@ -1082,7 +1082,7 @@ fn mpsadbw<'tcx>(
}
}
Ok(())
interp_ok(())
}
/// Multiplies packed 16-bit signed integer values, truncates the 32-bit
@ -1120,7 +1120,7 @@ fn pmulhrsw<'tcx>(
this.write_scalar(Scalar::from_i16(res), &dest)?;
}
Ok(())
interp_ok(())
}
/// Perform a carry-less multiplication of two 64-bit integers, selected from `left` and `right` according to `imm8`,
@ -1182,7 +1182,7 @@ fn pclmulqdq<'tcx>(
let dest_high = this.project_index(&dest, 1)?;
this.write_scalar(Scalar::from_u64(result_high), &dest_high)?;
Ok(())
interp_ok(())
}
/// Packs two vectors of N-bit integers into a single vector of N/2-bit integers.
@ -1227,7 +1227,7 @@ fn pack_generic<'tcx>(
}
}
Ok(())
interp_ok(())
}
/// Converts two 16-bit integer vectors to a single 8-bit integer
@ -1245,7 +1245,7 @@ fn packsswb<'tcx>(
pack_generic(this, left, right, dest, |op| {
let op = op.to_i16()?;
let res = i8::try_from(op).unwrap_or(if op < 0 { i8::MIN } else { i8::MAX });
Ok(Scalar::from_i8(res))
interp_ok(Scalar::from_i8(res))
})
}
@ -1264,7 +1264,7 @@ fn packuswb<'tcx>(
pack_generic(this, left, right, dest, |op| {
let op = op.to_i16()?;
let res = u8::try_from(op).unwrap_or(if op < 0 { 0 } else { u8::MAX });
Ok(Scalar::from_u8(res))
interp_ok(Scalar::from_u8(res))
})
}
@ -1283,7 +1283,7 @@ fn packssdw<'tcx>(
pack_generic(this, left, right, dest, |op| {
let op = op.to_i32()?;
let res = i16::try_from(op).unwrap_or(if op < 0 { i16::MIN } else { i16::MAX });
Ok(Scalar::from_i16(res))
interp_ok(Scalar::from_i16(res))
})
}
@ -1302,7 +1302,7 @@ fn packusdw<'tcx>(
pack_generic(this, left, right, dest, |op| {
let op = op.to_i32()?;
let res = u16::try_from(op).unwrap_or(if op < 0 { 0 } else { u16::MAX });
Ok(Scalar::from_u16(res))
interp_ok(Scalar::from_u16(res))
})
}
@ -1334,7 +1334,7 @@ fn psign<'tcx>(
this.write_immediate(*res, &dest)?;
}
Ok(())
interp_ok(())
}
/// Calculates either `a + b + cb_in` or `a - b - cb_in` depending on the value
@ -1358,5 +1358,5 @@ fn carrying_add<'tcx>(
this.binary_op(op, &sum, &ImmTy::from_uint(cb_in, a.layout))?.to_pair(this);
let cb_out = overflow1.to_scalar().to_bool()? | overflow2.to_scalar().to_bool()?;
Ok((sum, Scalar::from_u8(cb_out.into())))
interp_ok((sum, Scalar::from_u8(cb_out.into())))
}
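The `carrying_add` helper above backs the `addcarry.*` and `subborrow.*` arms earlier in this file's diff by combining two overflow flags. A minimal self-contained model of the 32-bit add case, under the assumption that the semantics are a wrapping add with a 1-bit carry in and carry out (the `addcarry_u32` name is illustrative, not Miri's API):

```rust
// Toy model of add-with-carry: add `b` and the incoming carry bit to `a`,
// returning the wrapped sum and the outgoing carry. Two overflow checks are
// combined because either addition can wrap, mirroring the two ORed overflow
// flags in the hunk above.
fn addcarry_u32(cb_in: u8, a: u32, b: u32) -> (u32, u8) {
    let (sum1, overflow1) = a.overflowing_add(b);
    let (sum2, overflow2) = sum1.overflowing_add(u32::from(cb_in));
    (sum2, u8::from(overflow1 | overflow2))
}

fn main() {
    // u32::MAX + 1 wraps to 0 and sets the carry-out.
    assert_eq!(addcarry_u32(0, u32::MAX, 1), (0, 1));
    // The incoming carry propagates: u32::MAX + 0 + carry also wraps.
    assert_eq!(addcarry_u32(1, u32::MAX, 0), (0, 1));
    // No wrap-around: carry-out stays 0.
    assert_eq!(addcarry_u32(1, 2, 3), (6, 0));
}
```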

View File

@ -31,7 +31,7 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let projected = &this.project_index(reg, i.try_into().unwrap())?;
*dst = this.read_scalar(projected)?.to_u32()?
}
Ok(res)
interp_ok(res)
}
fn write<'c>(
@ -45,7 +45,7 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let projected = &this.project_index(dest, i.try_into().unwrap())?;
this.write_scalar(Scalar::from_u32(part), projected)?;
}
Ok(())
interp_ok(())
}
match unprefixed_name {
@ -106,9 +106,9 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let result = sha256msg2(a, b);
write(this, &dest, result)?;
}
_ => return Ok(EmulateItemResult::NotSupported),
_ => return interp_ok(EmulateItemResult::NotSupported),
}
Ok(EmulateItemResult::NeedsReturn)
interp_ok(EmulateItemResult::NeedsReturn)
}
}

View File

@ -201,8 +201,8 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.copy_op(&this.project_index(&left, i)?, &this.project_index(&dest, i)?)?;
}
}
_ => return Ok(EmulateItemResult::NotSupported),
_ => return interp_ok(EmulateItemResult::NotSupported),
}
Ok(EmulateItemResult::NeedsReturn)
interp_ok(EmulateItemResult::NeedsReturn)
}
}

View File

@ -344,8 +344,8 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.copy_op(&this.project_index(&left, i)?, &this.project_index(&dest, i)?)?;
}
}
_ => return Ok(EmulateItemResult::NotSupported),
_ => return interp_ok(EmulateItemResult::NotSupported),
}
Ok(EmulateItemResult::NeedsReturn)
interp_ok(EmulateItemResult::NeedsReturn)
}
}

View File

@ -47,8 +47,8 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.mem_copy(src_ptr, dest.ptr(), dest.layout.size, /*nonoverlapping*/ true)?;
}
_ => return Ok(EmulateItemResult::NotSupported),
_ => return interp_ok(EmulateItemResult::NotSupported),
}
Ok(EmulateItemResult::NeedsReturn)
interp_ok(EmulateItemResult::NeedsReturn)
}
}

View File

@ -172,8 +172,8 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.write_scalar(Scalar::from_i32(res.into()), dest)?;
}
_ => return Ok(EmulateItemResult::NotSupported),
_ => return interp_ok(EmulateItemResult::NotSupported),
}
Ok(EmulateItemResult::NeedsReturn)
interp_ok(EmulateItemResult::NeedsReturn)
}
}

Some files were not shown because too many files have changed in this diff.