Mirror of https://github.com/rust-lang/rust.git
Auto merge of #107921 - cjgillot:codegen-overflow-check, r=tmiasko
Make codegen choose whether to emit overflow checks

ConstProp and DataflowConstProp currently have a specific code path not to propagate constants when they overflow. This is meant to have the correct behaviour when inlining from a crate with overflow checks (like `core`) into a crate compiled without.

This PR shifts the behaviour change to the `Assert(Overflow*)` MIR terminators: if the crate is compiled without overflow checks, just skip emitting the assertions. This is already what happens with `OverflowNeg`.

This allows ConstProp and DataflowConstProp to transform `CheckedBinaryOp(Add, u8::MAX, 1)` into `const (0, true)`, and let codegen ignore the `true`. The interpreter is modified to conform to this behaviour.

Fixes #35310
commit 7aa413d592
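Before the diff, a self-contained sketch of the rule this patch repeats in cg_clif, cg_ssa and the const-eval interpreter: an overflow assertion may be dropped (treated as `goto target`) only when the crate is compiled without overflow checks and the assertion is of a "checkable" kind. The `BinOp`/`AssertKind` types and the helper name below are simplified stand-ins invented for illustration, not rustc's own; only the shape of the decision is taken from the patch.

```rust
// Simplified stand-ins for rustc_middle's BinOp and AssertKind.
#[allow(dead_code)]
#[derive(Clone, Copy)]
enum BinOp { Add, Sub, Mul, Shl, Shr, Div, Rem }

#[allow(dead_code)]
enum AssertKind { OverflowNeg, Overflow(BinOp), BoundsCheck }

impl BinOp {
    // Mirrors the "checkable" set documented in this PR: add, sub, mul, shl, shr
    // have a checked form; div and rem overflow is asserted separately.
    fn is_checkable(self) -> bool {
        matches!(self, BinOp::Add | BinOp::Sub | BinOp::Mul | BinOp::Shl | BinOp::Shr)
    }
}

// An overflow assertion can be skipped (treated as `goto target`) only when the
// crate is compiled without overflow checks and the assertion is "checkable".
fn should_skip_overflow_assert(overflow_checks: bool, msg: &AssertKind) -> bool {
    !overflow_checks
        && match msg {
            AssertKind::OverflowNeg => true,
            AssertKind::Overflow(op) => op.is_checkable(),
            _ => false,
        }
}

fn main() {
    assert!(should_skip_overflow_assert(false, &AssertKind::Overflow(BinOp::Add)));
    assert!(!should_skip_overflow_assert(false, &AssertKind::Overflow(BinOp::Div)));
    assert!(!should_skip_overflow_assert(true, &AssertKind::Overflow(BinOp::Add)));
}
```

Division and remainder stay excluded because they have no checked result pair; their overflow is reported through dedicated assertions emitted before the operation.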
@@ -347,7 +347,12 @@ fn codegen_fn_body(fx: &mut FunctionCx<'_, '_, '_>, start_block: Block) {
              }
              TerminatorKind::Assert { cond, expected, msg, target, cleanup: _ } => {
                  if !fx.tcx.sess.overflow_checks() {
-                     if let mir::AssertKind::OverflowNeg(_) = *msg {
+                     let overflow_not_to_check = match msg {
+                         AssertKind::OverflowNeg(..) => true,
+                         AssertKind::Overflow(op, ..) => op.is_checkable(),
+                         _ => false,
+                     };
+                     if overflow_not_to_check {
                          let target = fx.get_block(*target);
                          fx.bcx.ins().jump(target, &[]);
                          continue;
@@ -567,15 +572,7 @@ fn codegen_stmt<'tcx>(
                  let lhs = codegen_operand(fx, &lhs_rhs.0);
                  let rhs = codegen_operand(fx, &lhs_rhs.1);
  
-                 let res = if !fx.tcx.sess.overflow_checks() {
-                     let val =
-                         crate::num::codegen_int_binop(fx, bin_op, lhs, rhs).load_scalar(fx);
-                     let is_overflow = fx.bcx.ins().iconst(types::I8, 0);
-                     CValue::by_val_pair(val, is_overflow, lval.layout())
-                 } else {
-                     crate::num::codegen_checked_int_binop(fx, bin_op, lhs, rhs)
-                 };
- 
+                 let res = crate::num::codegen_checked_int_binop(fx, bin_op, lhs, rhs);
                  lval.write_cvalue(fx, res);
              }
              Rvalue::UnaryOp(un_op, ref operand) => {
@@ -493,20 +493,6 @@ fn codegen_regular_intrinsic_call<'tcx>(
              let res = crate::num::codegen_int_binop(fx, bin_op, x, y);
              ret.write_cvalue(fx, res);
          }
-         sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => {
-             intrinsic_args!(fx, args => (x, y); intrinsic);
- 
-             assert_eq!(x.layout().ty, y.layout().ty);
-             let bin_op = match intrinsic {
-                 sym::add_with_overflow => BinOp::Add,
-                 sym::sub_with_overflow => BinOp::Sub,
-                 sym::mul_with_overflow => BinOp::Mul,
-                 _ => unreachable!(),
-             };
- 
-             let res = crate::num::codegen_checked_int_binop(fx, bin_op, x, y);
-             ret.write_cvalue(fx, res);
-         }
          sym::saturating_add | sym::saturating_sub => {
              intrinsic_args!(fx, args => (lhs, rhs); intrinsic);
  
@@ -563,11 +563,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
          // with #[rustc_inherit_overflow_checks] and inlined from
          // another crate (mostly core::num generic/#[inline] fns),
          // while the current crate doesn't use overflow checks.
-         // NOTE: Unlike binops, negation doesn't have its own
-         // checked operation, just a comparison with the minimum
-         // value, so we have to check for the assert message.
-         if !bx.check_overflow() {
-             if let AssertKind::OverflowNeg(_) = *msg {
+         if !bx.cx().check_overflow() {
+             let overflow_not_to_check = match msg {
+                 AssertKind::OverflowNeg(..) => true,
+                 AssertKind::Overflow(op, ..) => op.is_checkable(),
+                 _ => false,
+             };
+             if overflow_not_to_check {
                  const_cond = Some(expected);
              }
          }
@@ -218,9 +218,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                  args[1].val.unaligned_volatile_store(bx, dst);
                  return;
              }
-             sym::add_with_overflow
-             | sym::sub_with_overflow
-             | sym::mul_with_overflow
              | sym::unchecked_div
              | sym::unchecked_rem
              | sym::unchecked_shl
@@ -232,28 +229,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                  let ty = arg_tys[0];
                  match int_type_width_signed(ty, bx.tcx()) {
                      Some((_width, signed)) => match name {
-                         sym::add_with_overflow
-                         | sym::sub_with_overflow
-                         | sym::mul_with_overflow => {
-                             let op = match name {
-                                 sym::add_with_overflow => OverflowOp::Add,
-                                 sym::sub_with_overflow => OverflowOp::Sub,
-                                 sym::mul_with_overflow => OverflowOp::Mul,
-                                 _ => bug!(),
-                             };
-                             let (val, overflow) =
-                                 bx.checked_binop(op, ty, args[0].immediate(), args[1].immediate());
-                             // Convert `i1` to a `bool`, and write it to the out parameter
-                             let val = bx.from_immediate(val);
-                             let overflow = bx.from_immediate(overflow);
- 
-                             let dest = result.project_field(bx, 0);
-                             bx.store(val, dest.llval, dest.align);
-                             let dest = result.project_field(bx, 1);
-                             bx.store(overflow, dest.llval, dest.align);
- 
-                             return;
-                         }
                          sym::exact_div => {
                              if signed {
                                  bx.exactsdiv(args[0].immediate(), args[1].immediate())
@@ -652,15 +652,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
          rhs: Bx::Value,
          input_ty: Ty<'tcx>,
      ) -> OperandValue<Bx::Value> {
-         // This case can currently arise only from functions marked
-         // with #[rustc_inherit_overflow_checks] and inlined from
-         // another crate (mostly core::num generic/#[inline] fns),
-         // while the current crate doesn't use overflow checks.
-         if !bx.cx().check_overflow() {
-             let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
-             return OperandValue::Pair(val, bx.cx().const_bool(false));
-         }
- 
          let (val, of) = match op {
              // These are checked using intrinsics
              mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
@@ -210,19 +210,6 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                  let out_val = numeric_intrinsic(intrinsic_name, bits, kind);
                  self.write_scalar(out_val, dest)?;
              }
-             sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => {
-                 let lhs = self.read_immediate(&args[0])?;
-                 let rhs = self.read_immediate(&args[1])?;
-                 let bin_op = match intrinsic_name {
-                     sym::add_with_overflow => BinOp::Add,
-                     sym::sub_with_overflow => BinOp::Sub,
-                     sym::mul_with_overflow => BinOp::Mul,
-                     _ => bug!(),
-                 };
-                 self.binop_with_overflow(
-                     bin_op, /*force_overflow_checks*/ true, &lhs, &rhs, dest,
-                 )?;
-             }
              sym::saturating_add | sym::saturating_sub => {
                  let l = self.read_immediate(&args[0])?;
                  let r = self.read_immediate(&args[1])?;
@@ -147,8 +147,9 @@ pub trait Machine<'mir, 'tcx>: Sized {
          true
      }
  
-     /// Whether CheckedBinOp MIR statements should actually check for overflow.
-     fn checked_binop_checks_overflow(_ecx: &InterpCx<'mir, 'tcx, Self>) -> bool;
+     /// Whether Assert(OverflowNeg) and Assert(Overflow) MIR terminators should actually
+     /// check for overflow.
+     fn ignore_checkable_overflow_assertions(_ecx: &InterpCx<'mir, 'tcx, Self>) -> bool;
  
      /// Entry point for obtaining the MIR of anything that should get evaluated.
      /// So not just functions and shims, but also const/static initializers, anonymous
@@ -466,8 +467,8 @@ pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {
      }
  
      #[inline(always)]
-     fn checked_binop_checks_overflow(_ecx: &InterpCx<$mir, $tcx, Self>) -> bool {
-         true
+     fn ignore_checkable_overflow_assertions(_ecx: &InterpCx<$mir, $tcx, Self>) -> bool {
+         false
      }
  
      #[inline(always)]
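The hook renamed above inverts the sense of the old `checked_binop_checks_overflow`: machines now state whether checkable overflow assertions may be ignored. A minimal sketch of the two policies visible in this diff (CTFE never ignores them, as in the `compile_time_machine` hunk just above; Miri, further down, ignores them exactly when the interpreted crate was built without overflow checks). The struct and trait shapes below are simplified stand-ins, not the real interpreter types.

```rust
// Simplified stand-ins for the interpreter types; only the policy is from the patch.
struct Session { overflow_checks: bool }
struct InterpCx { sess: Session }

trait Machine {
    // Whether Assert(OverflowNeg) / Assert(Overflow) terminators for "checkable"
    // operations may be skipped instead of evaluated.
    fn ignore_checkable_overflow_assertions(ecx: &InterpCx) -> bool;
}

struct CompileTimeMachine; // CTFE: never ignore, always evaluate the assertion.
impl Machine for CompileTimeMachine {
    fn ignore_checkable_overflow_assertions(_ecx: &InterpCx) -> bool {
        false
    }
}

struct RuntimeMachine; // Miri-like: follow the crate's -C overflow-checks flag.
impl Machine for RuntimeMachine {
    fn ignore_checkable_overflow_assertions(ecx: &InterpCx) -> bool {
        !ecx.sess.overflow_checks
    }
}

fn main() {
    let ecx = InterpCx { sess: Session { overflow_checks: false } };
    assert!(RuntimeMachine::ignore_checkable_overflow_assertions(&ecx));
    assert!(!CompileTimeMachine::ignore_checkable_overflow_assertions(&ecx));
}
```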
@@ -10,13 +10,9 @@ use super::{ImmTy, Immediate, InterpCx, Machine, PlaceTy};
  impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
      /// Applies the binary operation `op` to the two operands and writes a tuple of the result
      /// and a boolean signifying the potential overflow to the destination.
-     ///
-     /// `force_overflow_checks` indicates whether overflow checks should be done even when
-     /// `tcx.sess.overflow_checks()` is `false`.
      pub fn binop_with_overflow(
          &mut self,
          op: mir::BinOp,
-         force_overflow_checks: bool,
          left: &ImmTy<'tcx, M::Provenance>,
          right: &ImmTy<'tcx, M::Provenance>,
          dest: &PlaceTy<'tcx, M::Provenance>,
@@ -28,10 +24,6 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
              "type mismatch for result of {:?}",
              op,
          );
-         // As per https://github.com/rust-lang/rust/pull/98738, we always return `false` in the 2nd
-         // component when overflow checking is disabled.
-         let overflowed =
-             overflowed && (force_overflow_checks || M::checked_binop_checks_overflow(self));
          // Write the result to `dest`.
          if let Abi::ScalarPair(..) = dest.layout.abi {
              // We can use the optimized path and avoid `place_field` (which might do
@@ -185,9 +185,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                  let left = self.read_immediate(&self.eval_operand(left, None)?)?;
                  let layout = binop_right_homogeneous(bin_op).then_some(left.layout);
                  let right = self.read_immediate(&self.eval_operand(right, layout)?)?;
-                 self.binop_with_overflow(
-                     bin_op, /*force_overflow_checks*/ false, &left, &right, &dest,
-                 )?;
+                 self.binop_with_overflow(bin_op, &left, &right, &dest)?;
              }
  
              UnaryOp(un_op, ref operand) => {
@@ -137,8 +137,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
              }
  
              Assert { ref cond, expected, ref msg, target, cleanup } => {
+                 let ignored = M::ignore_checkable_overflow_assertions(self)
+                     && match msg {
+                         mir::AssertKind::OverflowNeg(..) => true,
+                         mir::AssertKind::Overflow(op, ..) => op.is_checkable(),
+                         _ => false,
+                     };
                  let cond_val = self.read_scalar(&self.eval_operand(cond, None)?)?.to_bool()?;
-                 if expected == cond_val {
+                 if ignored || expected == cond_val {
                      self.go_to_block(target);
                  } else {
                      M::assert_panic(self, msg, cleanup)?;
@@ -671,6 +671,12 @@ pub enum TerminatorKind<'tcx> {
      /// as parameters, and `None` for the destination. Keep in mind that the `cleanup` path is not
      /// necessarily executed even in the case of a panic, for example in `-C panic=abort`. If the
      /// assertion does not fail, execution continues at the specified basic block.
+     ///
+     /// When overflow checking is disabled and this is run-time MIR (as opposed to compile-time MIR
+     /// that is used for CTFE), the following variants of this terminator behave as `goto target`:
+     /// - `OverflowNeg(..)`,
+     /// - `Overflow(op, ..)` if op is a "checkable" operation (add, sub, mul, shl, shr, but NOT
+     ///   div or rem).
      Assert {
          cond: Operand<'tcx>,
          expected: bool,
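The documentation added above describes behaviour that the new `tests/codegen/inherit_overflow.rs` test (at the end of this diff) pins down. As a hedged illustration only (the function name is invented, and the behaviour shown assumes the test's setup: MIR inlining enabled and `-C overflow-checks` toggled):

```rust
// Built as part of a crate with `-C overflow-checks=off` (the codegen test also
// passes `-Zmir-enable-passes=+Inline,+ConstProp`), the Assert(Overflow(Add, ..))
// inherited from core's `Add::add` behaves as a plain goto, so this returns 0.
// With `-C overflow-checks=on` the assertion is kept and it panics instead.
pub fn wraps_when_checks_off() -> u8 {
    <u8 as std::ops::Add>::add(u8::MAX, 1)
}
```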
@@ -1103,10 +1109,6 @@ pub enum Rvalue<'tcx> {
  
      /// Same as `BinaryOp`, but yields `(T, bool)` with a `bool` indicating an error condition.
      ///
-     /// When overflow checking is disabled and we are generating run-time code, the error condition
-     /// is false. Otherwise, and always during CTFE, the error condition is determined as described
-     /// below.
-     ///
      /// For addition, subtraction, and multiplication on integers the error condition is set when
      /// the infinite precision result would be unequal to the actual result.
      ///
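The error condition that remains documented above matches the stable `overflowing_*` surface: the `bool` is set exactly when the infinite-precision result differs from the wrapped result. The `CheckedBinaryOp(Add, u8::MAX, 1)` example from the commit message, expressed with the standard library:

```rust
fn main() {
    // CheckedBinaryOp(Add, u8::MAX, 1): infinite-precision 255 + 1 = 256 differs
    // from the wrapped result 0, so the error condition is true.
    assert_eq!(u8::MAX.overflowing_add(1), (0, true));
    // No overflow: the error condition is false.
    assert_eq!(254u8.overflowing_add(1), (255, false));
}
```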
@@ -15,7 +15,7 @@ use rustc_middle::mir::visit::{
  };
  use rustc_middle::mir::{
      BasicBlock, BinOp, Body, Constant, ConstantKind, Local, LocalDecl, LocalKind, Location,
-     Operand, Place, Rvalue, SourceInfo, Statement, StatementKind, Terminator, TerminatorKind, UnOp,
+     Operand, Place, Rvalue, SourceInfo, Statement, StatementKind, Terminator, TerminatorKind,
      RETURN_PLACE,
  };
  use rustc_middle::ty::layout::{LayoutError, LayoutOf, LayoutOfHelpers, TyAndLayout};
@@ -503,55 +503,6 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
          }
      }
  
-     fn check_unary_op(&mut self, op: UnOp, arg: &Operand<'tcx>) -> Option<()> {
-         if self.use_ecx(|this| {
-             let val = this.ecx.read_immediate(&this.ecx.eval_operand(arg, None)?)?;
-             let (_res, overflow, _ty) = this.ecx.overflowing_unary_op(op, &val)?;
-             Ok(overflow)
-         })? {
-             // `AssertKind` only has an `OverflowNeg` variant, so make sure that is
-             // appropriate to use.
-             assert_eq!(op, UnOp::Neg, "Neg is the only UnOp that can overflow");
-             return None;
-         }
- 
-         Some(())
-     }
- 
-     fn check_binary_op(
-         &mut self,
-         op: BinOp,
-         left: &Operand<'tcx>,
-         right: &Operand<'tcx>,
-     ) -> Option<()> {
-         let r = self.use_ecx(|this| this.ecx.read_immediate(&this.ecx.eval_operand(right, None)?));
-         let l = self.use_ecx(|this| this.ecx.read_immediate(&this.ecx.eval_operand(left, None)?));
-         // Check for exceeding shifts *even if* we cannot evaluate the LHS.
-         if matches!(op, BinOp::Shr | BinOp::Shl) {
-             let r = r.clone()?;
-             // We need the type of the LHS. We cannot use `place_layout` as that is the type
-             // of the result, which for checked binops is not the same!
-             let left_ty = left.ty(self.local_decls, self.tcx);
-             let left_size = self.ecx.layout_of(left_ty).ok()?.size;
-             let right_size = r.layout.size;
-             let r_bits = r.to_scalar().to_bits(right_size).ok();
-             if r_bits.map_or(false, |b| b >= left_size.bits() as u128) {
-                 return None;
-             }
-         }
- 
-         if let (Some(l), Some(r)) = (&l, &r) {
-             // The remaining operators are handled through `overflowing_binary_op`.
-             if self.use_ecx(|this| {
-                 let (_res, overflow, _ty) = this.ecx.overflowing_binary_op(op, l, r)?;
-                 Ok(overflow)
-             })? {
-                 return None;
-             }
-         }
-         Some(())
-     }
- 
      fn propagate_operand(&mut self, operand: &mut Operand<'tcx>) {
          match *operand {
              Operand::Copy(l) | Operand::Move(l) => {
@@ -587,28 +538,6 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
          // 2. Working around bugs in other parts of the compiler
          //    - In this case, we'll return `None` from this function to stop evaluation.
          match rvalue {
-             // Additional checking: give lints to the user if an overflow would occur.
-             // We do this here and not in the `Assert` terminator as that terminator is
-             // only sometimes emitted (overflow checks can be disabled), but we want to always
-             // lint.
-             Rvalue::UnaryOp(op, arg) => {
-                 trace!("checking UnaryOp(op = {:?}, arg = {:?})", op, arg);
-                 self.check_unary_op(*op, arg)?;
-             }
-             Rvalue::BinaryOp(op, box (left, right)) => {
-                 trace!("checking BinaryOp(op = {:?}, left = {:?}, right = {:?})", op, left, right);
-                 self.check_binary_op(*op, left, right)?;
-             }
-             Rvalue::CheckedBinaryOp(op, box (left, right)) => {
-                 trace!(
-                     "checking CheckedBinaryOp(op = {:?}, left = {:?}, right = {:?})",
-                     op,
-                     left,
-                     right
-                 );
-                 self.check_binary_op(*op, left, right)?;
-             }
- 
              // Do not try creating references (#67862)
              Rvalue::AddressOf(_, place) | Rvalue::Ref(_, _, place) => {
                  trace!("skipping AddressOf | Ref for {:?}", place);
@@ -638,7 +567,10 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
              | Rvalue::Cast(..)
              | Rvalue::ShallowInitBox(..)
              | Rvalue::Discriminant(..)
-             | Rvalue::NullaryOp(..) => {}
+             | Rvalue::NullaryOp(..)
+             | Rvalue::UnaryOp(..)
+             | Rvalue::BinaryOp(..)
+             | Rvalue::CheckedBinaryOp(..) => {}
          }
  
          // FIXME we need to revisit this for #67176
@@ -1079,31 +1011,18 @@ impl<'tcx> MutVisitor<'tcx> for ConstPropagator<'_, 'tcx> {
          // Do NOT early return in this function, it does some crucial fixup of the state at the end!
          match &mut terminator.kind {
              TerminatorKind::Assert { expected, ref mut cond, .. } => {
-                 if let Some(ref value) = self.eval_operand(&cond) {
-                     trace!("assertion on {:?} should be {:?}", value, expected);
-                     let expected = Scalar::from_bool(*expected);
+                 if let Some(ref value) = self.eval_operand(&cond)
                      // FIXME should be used use_ecx rather than a local match... but we have
                      // quite a few of these read_scalar/read_immediate that need fixing.
-                     if let Ok(value_const) = self.ecx.read_scalar(&value) {
-                         if expected != value_const {
-                             // Poison all places this operand references so that further code
-                             // doesn't use the invalid value
-                             match cond {
-                                 Operand::Move(ref place) | Operand::Copy(ref place) => {
-                                     Self::remove_const(&mut self.ecx, place.local);
-                                 }
-                                 Operand::Constant(_) => {}
-                             }
-                         } else {
-                             if self.should_const_prop(value) {
-                                 *cond = self.operand_from_scalar(
-                                     value_const,
-                                     self.tcx.types.bool,
-                                     source_info.span,
-                                 );
-                             }
-                         }
-                     }
+                     && let Ok(value_const) = self.ecx.read_scalar(&value)
+                     && self.should_const_prop(value)
+                 {
+                     trace!("assertion on {:?} should be {:?}", value, expected);
+                     *cond = self.operand_from_scalar(
+                         value_const,
+                         self.tcx.types.bool,
+                         source_info.span,
+                     );
                  }
              }
              TerminatorKind::SwitchInt { ref mut discr, .. } => {
@@ -180,12 +180,7 @@ impl<'tcx> ValueAnalysis<'tcx> for ConstAnalysis<'_, 'tcx> {
          let overflow = match overflow {
              FlatSet::Top => FlatSet::Top,
              FlatSet::Elem(overflow) => {
-                 if overflow {
-                     // Overflow cannot be reliably propagated. See: https://github.com/rust-lang/rust/pull/101168#issuecomment-1288091446
-                     FlatSet::Top
-                 } else {
-                     self.wrap_scalar(Scalar::from_bool(false), self.tcx.types.bool)
-                 }
+                 self.wrap_scalar(Scalar::from_bool(overflow), self.tcx.types.bool)
              }
              FlatSet::Bottom => FlatSet::Bottom,
          };
@@ -107,9 +107,29 @@ impl<'tcx> MirPass<'tcx> for LowerIntrinsics {
                      }
                  }
                  sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => {
-                     // The checked binary operations are not suitable target for lowering here,
-                     // since their semantics depend on the value of overflow-checks flag used
-                     // during codegen. Issue #35310.
+                     if let Some(target) = *target {
+                         let lhs;
+                         let rhs;
+                         {
+                             let mut args = args.drain(..);
+                             lhs = args.next().unwrap();
+                             rhs = args.next().unwrap();
+                         }
+                         let bin_op = match intrinsic_name {
+                             sym::add_with_overflow => BinOp::Add,
+                             sym::sub_with_overflow => BinOp::Sub,
+                             sym::mul_with_overflow => BinOp::Mul,
+                             _ => bug!("unexpected intrinsic"),
+                         };
+                         block.statements.push(Statement {
+                             source_info: terminator.source_info,
+                             kind: StatementKind::Assign(Box::new((
+                                 *destination,
+                                 Rvalue::CheckedBinaryOp(bin_op, Box::new((lhs, rhs))),
+                             ))),
+                         });
+                         terminator.kind = TerminatorKind::Goto { target };
+                     }
                  }
                  sym::size_of | sym::min_align_of => {
                      if let Some(target) = *target {
@@ -815,8 +815,8 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> {
      }
  
      #[inline(always)]
-     fn checked_binop_checks_overflow(ecx: &MiriInterpCx<'mir, 'tcx>) -> bool {
-         ecx.tcx.sess.overflow_checks()
+     fn ignore_checkable_overflow_assertions(ecx: &MiriInterpCx<'mir, 'tcx>) -> bool {
+         !ecx.tcx.sess.overflow_checks()
      }
  
      #[inline(always)]
tests/codegen/inherit_overflow.rs (new file, +14)
@@ -0,0 +1,14 @@
+ // compile-flags: -Zmir-enable-passes=+Inline,+ConstProp --crate-type lib
+ // revisions: ASSERT NOASSERT
+ //[ASSERT] compile-flags: -Coverflow-checks=on
+ //[NOASSERT] compile-flags: -Coverflow-checks=off
+ 
+ // CHECK-LABEL: define{{.*}} @assertion
+ // ASSERT: call void @_ZN4core9panicking5panic17h
+ // NOASSERT: ret i8 0
+ #[no_mangle]
+ pub fn assertion() -> u8 {
+     // Optimized MIR will replace this `CheckedBinaryOp` by `const (0, true)`.
+     // Verify that codegen does or does not emit the panic.
+     <u8 as std::ops::Add>::add(255, 1)
+ }
@@ -24,9 +24,10 @@
        StorageLive(_3); // scope 1 at $DIR/bad_op_div_by_zero.rs:+2:18: +2:19
  -     _3 = _1; // scope 1 at $DIR/bad_op_div_by_zero.rs:+2:18: +2:19
  -     _4 = Eq(_3, const 0_i32); // scope 1 at $DIR/bad_op_div_by_zero.rs:+2:14: +2:19
+ -     assert(!move _4, "attempt to divide `{}` by zero", const 1_i32) -> bb1; // scope 1 at $DIR/bad_op_div_by_zero.rs:+2:14: +2:19
  +     _3 = const 0_i32; // scope 1 at $DIR/bad_op_div_by_zero.rs:+2:18: +2:19
  +     _4 = const true; // scope 1 at $DIR/bad_op_div_by_zero.rs:+2:14: +2:19
-       assert(!move _4, "attempt to divide `{}` by zero", const 1_i32) -> bb1; // scope 1 at $DIR/bad_op_div_by_zero.rs:+2:14: +2:19
+ +     assert(!const true, "attempt to divide `{}` by zero", const 1_i32) -> bb1; // scope 1 at $DIR/bad_op_div_by_zero.rs:+2:14: +2:19
        }
  
        bb1: {
@@ -0,0 +1,39 @@
+ - // MIR for `main` before ConstProp
+ + // MIR for `main` after ConstProp
+ 
+   fn main() -> () {
+       let mut _0: (); // return place in scope 0 at $DIR/inherit_overflow.rs:+0:11: +0:11
+       let mut _1: u8; // in scope 0 at $DIR/inherit_overflow.rs:+3:13: +3:47
+       let mut _2: u8; // in scope 0 at $DIR/inherit_overflow.rs:+3:13: +3:47
+       let mut _3: u8; // in scope 0 at $DIR/inherit_overflow.rs:+3:13: +3:47
+       scope 1 {
+       }
+       scope 2 (inlined <u8 as Add>::add) { // at $DIR/inherit_overflow.rs:8:13: 8:47
+           debug self => _2; // in scope 2 at $SRC_DIR/core/src/ops/arith.rs:LL:COL
+           debug other => _3; // in scope 2 at $SRC_DIR/core/src/ops/arith.rs:LL:COL
+           let mut _4: (u8, bool); // in scope 2 at $SRC_DIR/core/src/ops/arith.rs:LL:COL
+       }
+ 
+       bb0: {
+           StorageLive(_1); // scope 0 at $DIR/inherit_overflow.rs:+3:13: +3:47
+           StorageLive(_2); // scope 0 at $DIR/inherit_overflow.rs:+3:13: +3:47
+           _2 = const u8::MAX; // scope 0 at $DIR/inherit_overflow.rs:+3:13: +3:47
+           StorageLive(_3); // scope 0 at $DIR/inherit_overflow.rs:+3:13: +3:47
+           _3 = const 1_u8; // scope 0 at $DIR/inherit_overflow.rs:+3:13: +3:47
+ -         _4 = CheckedAdd(_2, _3); // scope 2 at $SRC_DIR/core/src/ops/arith.rs:LL:COL
+ -         assert(!move (_4.1: bool), "attempt to compute `{} + {}`, which would overflow", _2, _3) -> bb1; // scope 2 at $SRC_DIR/core/src/ops/arith.rs:LL:COL
+ +         _4 = const (0_u8, true); // scope 2 at $SRC_DIR/core/src/ops/arith.rs:LL:COL
+ +         assert(!const true, "attempt to compute `{} + {}`, which would overflow", _2, _3) -> bb1; // scope 2 at $SRC_DIR/core/src/ops/arith.rs:LL:COL
+       }
+ 
+       bb1: {
+ -         _1 = move (_4.0: u8); // scope 2 at $SRC_DIR/core/src/ops/arith.rs:LL:COL
+ +         _1 = const 0_u8; // scope 2 at $SRC_DIR/core/src/ops/arith.rs:LL:COL
+           StorageDead(_3); // scope 0 at $DIR/inherit_overflow.rs:+3:13: +3:47
+           StorageDead(_2); // scope 0 at $DIR/inherit_overflow.rs:+3:13: +3:47
+           StorageDead(_1); // scope 0 at $DIR/inherit_overflow.rs:+3:47: +3:48
+           _0 = const (); // scope 0 at $DIR/inherit_overflow.rs:+0:11: +4:2
+           return; // scope 0 at $DIR/inherit_overflow.rs:+4:2: +4:2
+       }
+   }
+ 
tests/mir-opt/const_prop/inherit_overflow.rs (new file, +9)
@@ -0,0 +1,9 @@
+ // unit-test: ConstProp
+ // compile-flags: -Zmir-enable-passes=+Inline
+ 
+ // EMIT_MIR inherit_overflow.main.ConstProp.diff
+ fn main() {
+     // After inlining, this will contain a `CheckedBinaryOp`.
+     // Propagating the overflow is ok as codegen will just skip emitting the panic.
+     let _ = <u8 as std::ops::Add>::add(255, 1);
+ }
@@ -61,7 +61,7 @@
  -     assert(!move (_10.1: bool), "attempt to compute `{} + {}`, which would overflow", move _9, const 1_i32) -> bb2; // scope 4 at $DIR/checked.rs:+6:13: +6:18
  +     _9 = const i32::MAX; // scope 4 at $DIR/checked.rs:+6:13: +6:14
  +     _10 = CheckedAdd(const i32::MAX, const 1_i32); // scope 4 at $DIR/checked.rs:+6:13: +6:18
- +     assert(!move (_10.1: bool), "attempt to compute `{} + {}`, which would overflow", const i32::MAX, const 1_i32) -> bb2; // scope 4 at $DIR/checked.rs:+6:13: +6:18
+ +     assert(!const true, "attempt to compute `{} + {}`, which would overflow", const i32::MAX, const 1_i32) -> bb2; // scope 4 at $DIR/checked.rs:+6:13: +6:18
        }
  
        bb2: {
@@ -5,26 +5,34 @@
        let mut _0: (); // return place in scope 0 at $DIR/inherit_overflow.rs:+0:11: +0:11
        let mut _1: u8; // in scope 0 at $DIR/inherit_overflow.rs:+3:13: +3:47
        let mut _2: u8; // in scope 0 at $DIR/inherit_overflow.rs:+3:13: +3:47
+       let mut _3: u8; // in scope 0 at $DIR/inherit_overflow.rs:+3:13: +3:47
        scope 1 {
        }
-       scope 2 (inlined <u8 as Add>::add) { // at $DIR/inherit_overflow.rs:7:13: 7:47
-           debug self => _1; // in scope 2 at $SRC_DIR/core/src/ops/arith.rs:LL:COL
-           debug other => _2; // in scope 2 at $SRC_DIR/core/src/ops/arith.rs:LL:COL
-           let mut _3: (u8, bool); // in scope 2 at $SRC_DIR/core/src/ops/arith.rs:LL:COL
+       scope 2 (inlined <u8 as Add>::add) { // at $DIR/inherit_overflow.rs:8:13: 8:47
+           debug self => _2; // in scope 2 at $SRC_DIR/core/src/ops/arith.rs:LL:COL
+           debug other => _3; // in scope 2 at $SRC_DIR/core/src/ops/arith.rs:LL:COL
+           let mut _4: (u8, bool); // in scope 2 at $SRC_DIR/core/src/ops/arith.rs:LL:COL
        }
  
        bb0: {
            StorageLive(_1); // scope 0 at $DIR/inherit_overflow.rs:+3:13: +3:47
-           _1 = const u8::MAX; // scope 0 at $DIR/inherit_overflow.rs:+3:13: +3:47
            StorageLive(_2); // scope 0 at $DIR/inherit_overflow.rs:+3:13: +3:47
-           _2 = const 1_u8; // scope 0 at $DIR/inherit_overflow.rs:+3:13: +3:47
-           _3 = CheckedAdd(const u8::MAX, const 1_u8); // scope 2 at $SRC_DIR/core/src/ops/arith.rs:LL:COL
-           assert(!move (_3.1: bool), "attempt to compute `{} + {}`, which would overflow", const u8::MAX, const 1_u8) -> bb1; // scope 2 at $SRC_DIR/core/src/ops/arith.rs:LL:COL
+           _2 = const u8::MAX; // scope 0 at $DIR/inherit_overflow.rs:+3:13: +3:47
+           StorageLive(_3); // scope 0 at $DIR/inherit_overflow.rs:+3:13: +3:47
+           _3 = const 1_u8; // scope 0 at $DIR/inherit_overflow.rs:+3:13: +3:47
+ -         _4 = CheckedAdd(_2, _3); // scope 2 at $SRC_DIR/core/src/ops/arith.rs:LL:COL
+ -         assert(!move (_4.1: bool), "attempt to compute `{} + {}`, which would overflow", _2, _3) -> bb1; // scope 2 at $SRC_DIR/core/src/ops/arith.rs:LL:COL
+ +         _4 = CheckedAdd(const u8::MAX, const 1_u8); // scope 2 at $SRC_DIR/core/src/ops/arith.rs:LL:COL
+ +         assert(!const true, "attempt to compute `{} + {}`, which would overflow", const u8::MAX, const 1_u8) -> bb1; // scope 2 at $SRC_DIR/core/src/ops/arith.rs:LL:COL
        }
  
        bb1: {
+ -         _1 = move (_4.0: u8); // scope 2 at $SRC_DIR/core/src/ops/arith.rs:LL:COL
+ +         _1 = const 0_u8; // scope 2 at $SRC_DIR/core/src/ops/arith.rs:LL:COL
+           StorageDead(_3); // scope 0 at $DIR/inherit_overflow.rs:+3:13: +3:47
            StorageDead(_2); // scope 0 at $DIR/inherit_overflow.rs:+3:13: +3:47
-           StorageDead(_1); // scope 0 at $DIR/inherit_overflow.rs:+3:13: +3:47
+           StorageDead(_1); // scope 0 at $DIR/inherit_overflow.rs:+3:47: +3:48
+           _0 = const (); // scope 0 at $DIR/inherit_overflow.rs:+0:11: +4:2
            return; // scope 0 at $DIR/inherit_overflow.rs:+4:2: +4:2
        }
    }
@@ -1,8 +1,9 @@
- // compile-flags: -Zunsound-mir-opts
+ // unit-test: DataflowConstProp
+ // compile-flags: -Zmir-enable-passes=+Inline
  
  // EMIT_MIR inherit_overflow.main.DataflowConstProp.diff
  fn main() {
-     // After inlining, this will contain a `CheckedBinaryOp`. The overflow
-     // must be ignored by the constant propagation to avoid triggering a panic.
+     // After inlining, this will contain a `CheckedBinaryOp`.
+     // Propagating the overflow is ok as codegen will just skip emitting the panic.
      let _ = <u8 as std::ops::Add>::add(255, 1);
  }
@@ -72,3 +72,10 @@ pub fn assume() {
          std::intrinsics::assume(true);
      }
  }
+ 
+ // EMIT_MIR lower_intrinsics.with_overflow.LowerIntrinsics.diff
+ pub fn with_overflow(a: i32, b: i32) {
+     let _x = core::intrinsics::add_with_overflow(a, b);
+     let _y = core::intrinsics::sub_with_overflow(a, b);
+     let _z = core::intrinsics::mul_with_overflow(a, b);
+ }
@@ -0,0 +1,83 @@
+ - // MIR for `with_overflow` before LowerIntrinsics
+ + // MIR for `with_overflow` after LowerIntrinsics
+ 
+   fn with_overflow(_1: i32, _2: i32) -> () {
+       debug a => _1; // in scope 0 at $DIR/lower_intrinsics.rs:+0:22: +0:23
+       debug b => _2; // in scope 0 at $DIR/lower_intrinsics.rs:+0:30: +0:31
+       let mut _0: (); // return place in scope 0 at $DIR/lower_intrinsics.rs:+0:38: +0:38
+       let _3: (i32, bool); // in scope 0 at $DIR/lower_intrinsics.rs:+1:9: +1:11
+       let mut _4: i32; // in scope 0 at $DIR/lower_intrinsics.rs:+1:50: +1:51
+       let mut _5: i32; // in scope 0 at $DIR/lower_intrinsics.rs:+1:53: +1:54
+       let mut _7: i32; // in scope 0 at $DIR/lower_intrinsics.rs:+2:50: +2:51
+       let mut _8: i32; // in scope 0 at $DIR/lower_intrinsics.rs:+2:53: +2:54
+       let mut _10: i32; // in scope 0 at $DIR/lower_intrinsics.rs:+3:50: +3:51
+       let mut _11: i32; // in scope 0 at $DIR/lower_intrinsics.rs:+3:53: +3:54
+       scope 1 {
+           debug _x => _3; // in scope 1 at $DIR/lower_intrinsics.rs:+1:9: +1:11
+           let _6: (i32, bool); // in scope 1 at $DIR/lower_intrinsics.rs:+2:9: +2:11
+           scope 2 {
+               debug _y => _6; // in scope 2 at $DIR/lower_intrinsics.rs:+2:9: +2:11
+               let _9: (i32, bool); // in scope 2 at $DIR/lower_intrinsics.rs:+3:9: +3:11
+               scope 3 {
+                   debug _z => _9; // in scope 3 at $DIR/lower_intrinsics.rs:+3:9: +3:11
+               }
+           }
+       }
+ 
+       bb0: {
+           StorageLive(_3); // scope 0 at $DIR/lower_intrinsics.rs:+1:9: +1:11
+           StorageLive(_4); // scope 0 at $DIR/lower_intrinsics.rs:+1:50: +1:51
+           _4 = _1; // scope 0 at $DIR/lower_intrinsics.rs:+1:50: +1:51
+           StorageLive(_5); // scope 0 at $DIR/lower_intrinsics.rs:+1:53: +1:54
+           _5 = _2; // scope 0 at $DIR/lower_intrinsics.rs:+1:53: +1:54
+ -         _3 = add_with_overflow::<i32>(move _4, move _5) -> bb1; // scope 0 at $DIR/lower_intrinsics.rs:+1:14: +1:55
+ -         // mir::Constant
+ -         // + span: $DIR/lower_intrinsics.rs:78:14: 78:49
+ -         // + literal: Const { ty: extern "rust-intrinsic" fn(i32, i32) -> (i32, bool) {add_with_overflow::<i32>}, val: Value(<ZST>) }
+ +         _3 = CheckedAdd(move _4, move _5); // scope 0 at $DIR/lower_intrinsics.rs:+1:14: +1:55
+ +         goto -> bb1; // scope 0 at $DIR/lower_intrinsics.rs:+1:14: +1:55
+       }
+ 
+       bb1: {
+           StorageDead(_5); // scope 0 at $DIR/lower_intrinsics.rs:+1:54: +1:55
+           StorageDead(_4); // scope 0 at $DIR/lower_intrinsics.rs:+1:54: +1:55
+           StorageLive(_6); // scope 1 at $DIR/lower_intrinsics.rs:+2:9: +2:11
+           StorageLive(_7); // scope 1 at $DIR/lower_intrinsics.rs:+2:50: +2:51
+           _7 = _1; // scope 1 at $DIR/lower_intrinsics.rs:+2:50: +2:51
+           StorageLive(_8); // scope 1 at $DIR/lower_intrinsics.rs:+2:53: +2:54
+           _8 = _2; // scope 1 at $DIR/lower_intrinsics.rs:+2:53: +2:54
+ -         _6 = sub_with_overflow::<i32>(move _7, move _8) -> bb2; // scope 1 at $DIR/lower_intrinsics.rs:+2:14: +2:55
+ -         // mir::Constant
+ -         // + span: $DIR/lower_intrinsics.rs:79:14: 79:49
+ -         // + literal: Const { ty: extern "rust-intrinsic" fn(i32, i32) -> (i32, bool) {sub_with_overflow::<i32>}, val: Value(<ZST>) }
+ +         _6 = CheckedSub(move _7, move _8); // scope 1 at $DIR/lower_intrinsics.rs:+2:14: +2:55
+ +         goto -> bb2; // scope 1 at $DIR/lower_intrinsics.rs:+2:14: +2:55
+       }
+ 
+       bb2: {
+           StorageDead(_8); // scope 1 at $DIR/lower_intrinsics.rs:+2:54: +2:55
+           StorageDead(_7); // scope 1 at $DIR/lower_intrinsics.rs:+2:54: +2:55
+           StorageLive(_9); // scope 2 at $DIR/lower_intrinsics.rs:+3:9: +3:11
+           StorageLive(_10); // scope 2 at $DIR/lower_intrinsics.rs:+3:50: +3:51
+           _10 = _1; // scope 2 at $DIR/lower_intrinsics.rs:+3:50: +3:51
+           StorageLive(_11); // scope 2 at $DIR/lower_intrinsics.rs:+3:53: +3:54
+           _11 = _2; // scope 2 at $DIR/lower_intrinsics.rs:+3:53: +3:54
+ -         _9 = mul_with_overflow::<i32>(move _10, move _11) -> bb3; // scope 2 at $DIR/lower_intrinsics.rs:+3:14: +3:55
+ -         // mir::Constant
+ -         // + span: $DIR/lower_intrinsics.rs:80:14: 80:49
+ -         // + literal: Const { ty: extern "rust-intrinsic" fn(i32, i32) -> (i32, bool) {mul_with_overflow::<i32>}, val: Value(<ZST>) }
+ +         _9 = CheckedMul(move _10, move _11); // scope 2 at $DIR/lower_intrinsics.rs:+3:14: +3:55
+ +         goto -> bb3; // scope 2 at $DIR/lower_intrinsics.rs:+3:14: +3:55
+       }
+ 
+       bb3: {
+           StorageDead(_11); // scope 2 at $DIR/lower_intrinsics.rs:+3:54: +3:55
+           StorageDead(_10); // scope 2 at $DIR/lower_intrinsics.rs:+3:54: +3:55
+           _0 = const (); // scope 0 at $DIR/lower_intrinsics.rs:+0:38: +4:2
+           StorageDead(_9); // scope 2 at $DIR/lower_intrinsics.rs:+4:1: +4:2
+           StorageDead(_6); // scope 1 at $DIR/lower_intrinsics.rs:+4:1: +4:2
+           StorageDead(_3); // scope 0 at $DIR/lower_intrinsics.rs:+4:1: +4:2
+           return; // scope 0 at $DIR/lower_intrinsics.rs:+4:2: +4:2
+       }
+   }
+ 