Auto merge of #107449 - saethlin:enable-copyprop, r=oli-obk

Enable CopyProp

r? `@tmiasko`

`@rustbot` label +A-mir-opt
bors 2023-02-16 03:44:37 +00:00
commit 639377ed73
18 changed files with 342 additions and 267 deletions
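
For orientation: the functional change is the one-line switch in `CopyProp::is_enabled` from `mir_opt_level() >= 4` to `>= 1`, plus a small compile-time optimization in `SsaLocals` (the `SmallDominators` fast path below); most of the remaining files are codegen and mir-opt tests whose expected output shifts because intermediate locals are removed and the surviving locals renumbered. As a rough illustration of what the pass does — hypothetical Rust source, modeled on the new `custom_move_arg` test further down, not itself part of this commit:

struct NotCopy(bool);

#[inline(never)]
fn opaque<T>(_t: T) {}

fn f(a: NotCopy) {
    // In unoptimized MIR, `b` becomes a separate local holding a plain move of `a`.
    // CopyProp can rewrite uses of that temporary to use `a` (`_1`) directly and
    // drop the intermediate assignment, which is the before/after shown in
    // custom_move_arg.f.CopyProp.diff below.
    let b = a;
    opaque(b);
}

fn main() {
    f(NotCopy(true));
}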

View File

@ -406,6 +406,7 @@ impl<'tcx> AdtDef<'tcx> {
}
/// Return the index of `VariantDef` given a variant id.
#[inline]
pub fn variant_index_with_id(self, vid: DefId) -> VariantIdx {
self.variants()
.iter_enumerated()

View File

@ -22,7 +22,7 @@ pub struct CopyProp;
impl<'tcx> MirPass<'tcx> for CopyProp {
fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
sess.mir_opt_level() >= 4
sess.mir_opt_level() >= 1
}
#[instrument(level = "trace", skip(self, tcx, body))]
@ -96,7 +96,7 @@ fn fully_moved_locals(ssa: &SsaLocals, body: &Body<'_>) -> BitSet<Local> {
fully_moved
}
/// Utility to help performing subtitution of `*pattern` by `target`.
/// Utility to help performing substitution of `*pattern` by `target`.
struct Replacer<'a, 'tcx> {
tcx: TyCtxt<'tcx>,
fully_moved: BitSet<Local>,

View File

@ -19,6 +19,33 @@ pub struct SsaLocals {
copy_classes: IndexVec<Local, Local>,
}
/// We often encounter MIR bodies with 1 or 2 basic blocks. In those cases, it's unnecessary to
/// actually compute dominators; we can just compare block indices, because bb0 is always the first
/// block, and in any body all other blocks are always dominated by bb0.
struct SmallDominators {
inner: Option<Dominators<BasicBlock>>,
}
trait DomExt {
fn dominates(self, _other: Self, dominators: &SmallDominators) -> bool;
}
impl DomExt for Location {
fn dominates(self, other: Location, dominators: &SmallDominators) -> bool {
if self.block == other.block {
self.statement_index <= other.statement_index
} else {
dominators.dominates(self.block, other.block)
}
}
}
impl SmallDominators {
fn dominates(&self, dom: BasicBlock, node: BasicBlock) -> bool {
if let Some(inner) = &self.inner { inner.dominates(dom, node) } else { dom < node }
}
}
impl SsaLocals {
pub fn new<'tcx>(
tcx: TyCtxt<'tcx>,
@ -29,7 +56,9 @@ impl SsaLocals {
let assignment_order = Vec::new();
let assignments = IndexVec::from_elem(Set1::Empty, &body.local_decls);
let dominators = body.basic_blocks.dominators();
let dominators =
if body.basic_blocks.len() > 2 { Some(body.basic_blocks.dominators()) } else { None };
let dominators = SmallDominators { inner: dominators };
let mut visitor = SsaVisitor { assignments, assignment_order, dominators };
for (local, decl) in body.local_decls.iter_enumerated() {
@ -41,8 +70,14 @@ impl SsaLocals {
}
}
for (bb, data) in traversal::reverse_postorder(body) {
visitor.visit_basic_block_data(bb, data);
if body.basic_blocks.len() > 2 {
for (bb, data) in traversal::reverse_postorder(body) {
visitor.visit_basic_block_data(bb, data);
}
} else {
for (bb, data) in body.basic_blocks.iter_enumerated() {
visitor.visit_basic_block_data(bb, data);
}
}
for var_debug_info in &body.var_debug_info {
@ -139,7 +174,7 @@ enum LocationExtended {
}
struct SsaVisitor {
dominators: Dominators<BasicBlock>,
dominators: SmallDominators,
assignments: IndexVec<Local, Set1<LocationExtended>>,
assignment_order: Vec<Local>,
}
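
An aside on the `SmallDominators` fast path added above: for bodies with at most two basic blocks it never builds a dominator tree and falls back to a plain index comparison. A standalone sketch of that comparison, simplified to bare integers and not part of the patch:

fn trivially_dominates(dom: usize, node: usize) -> bool {
    // With at most two blocks, bb0 is the entry block and dominates every other
    // block, and no other block dominates a block with a smaller index. A block
    // is not reported as dominating itself here; in the patch the same-block
    // case is resolved earlier, in `Location::dominates`, by comparing
    // statement indices.
    dom < node
}

fn main() {
    assert!(trivially_dominates(0, 1));   // bb0 dominates bb1
    assert!(!trivially_dominates(1, 0));  // bb1 never dominates bb0
    assert!(!trivially_dominates(0, 0));  // same block: handled via statement index
}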

View File

@ -1,4 +1,4 @@
// compile-flags: -C no-prepopulate-passes -Zmir-enable-passes=+DestinationPropagation
// compile-flags: -C no-prepopulate-passes -Zmir-enable-passes=+DestinationPropagation,-CopyProp
#![crate_type = "lib"]

View File

@ -116,150 +116,150 @@ extern "platform-intrinsic" {
fn simd_saturating_sub<T>(x: T, y: T) -> T;
}
// NOTE(eddyb) `%{{x|_3}}` is used because on some targets (e.g. WASM)
// NOTE(eddyb) `%{{x|1}}` is used because on some targets (e.g. WASM)
// SIMD vectors are passed directly, resulting in `%x` being a vector,
// while on others they're passed indirectly, resulting in `%x` being
// a pointer to a vector, and `%_3` a vector loaded from that pointer.
// a pointer to a vector, and `%1` a vector loaded from that pointer.
// This is controlled by the target spec option `simd_types_indirect`.
// The same applies to `%{{y|_4}}` as well.
// The same applies to `%{{y|2}}` as well.
// CHECK-LABEL: @sadd_i8x2
#[no_mangle]
pub unsafe fn sadd_i8x2(x: i8x2, y: i8x2) -> i8x2 {
// CHECK: %{{[0-9]+}} = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %{{x|_3}}, <2 x i8> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %{{x|1}}, <2 x i8> %{{y|2}})
simd_saturating_add(x, y)
}
// CHECK-LABEL: @sadd_i8x4
#[no_mangle]
pub unsafe fn sadd_i8x4(x: i8x4, y: i8x4) -> i8x4 {
// CHECK: %{{[0-9]+}} = call <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8> %{{x|_3}}, <4 x i8> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8> %{{x|1}}, <4 x i8> %{{y|2}})
simd_saturating_add(x, y)
}
// CHECK-LABEL: @sadd_i8x8
#[no_mangle]
pub unsafe fn sadd_i8x8(x: i8x8, y: i8x8) -> i8x8 {
// CHECK: %{{[0-9]+}} = call <8 x i8> @llvm.sadd.sat.v8i8(<8 x i8> %{{x|_3}}, <8 x i8> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <8 x i8> @llvm.sadd.sat.v8i8(<8 x i8> %{{x|1}}, <8 x i8> %{{y|2}})
simd_saturating_add(x, y)
}
// CHECK-LABEL: @sadd_i8x16
#[no_mangle]
pub unsafe fn sadd_i8x16(x: i8x16, y: i8x16) -> i8x16 {
// CHECK: %{{[0-9]+}} = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %{{x|_3}}, <16 x i8> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %{{x|1}}, <16 x i8> %{{y|2}})
simd_saturating_add(x, y)
}
// CHECK-LABEL: @sadd_i8x32
#[no_mangle]
pub unsafe fn sadd_i8x32(x: i8x32, y: i8x32) -> i8x32 {
// CHECK: %{{[0-9]+}} = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> %{{x|_3}}, <32 x i8> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> %{{x|1}}, <32 x i8> %{{y|2}})
simd_saturating_add(x, y)
}
// CHECK-LABEL: @sadd_i8x64
#[no_mangle]
pub unsafe fn sadd_i8x64(x: i8x64, y: i8x64) -> i8x64 {
// CHECK: %{{[0-9]+}} = call <64 x i8> @llvm.sadd.sat.v64i8(<64 x i8> %{{x|_3}}, <64 x i8> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <64 x i8> @llvm.sadd.sat.v64i8(<64 x i8> %{{x|1}}, <64 x i8> %{{y|2}})
simd_saturating_add(x, y)
}
// CHECK-LABEL: @sadd_i16x2
#[no_mangle]
pub unsafe fn sadd_i16x2(x: i16x2, y: i16x2) -> i16x2 {
// CHECK: %{{[0-9]+}} = call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> %{{x|_3}}, <2 x i16> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> %{{x|1}}, <2 x i16> %{{y|2}})
simd_saturating_add(x, y)
}
// CHECK-LABEL: @sadd_i16x4
#[no_mangle]
pub unsafe fn sadd_i16x4(x: i16x4, y: i16x4) -> i16x4 {
// CHECK: %{{[0-9]+}} = call <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16> %{{x|_3}}, <4 x i16> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16> %{{x|1}}, <4 x i16> %{{y|2}})
simd_saturating_add(x, y)
}
// CHECK-LABEL: @sadd_i16x8
#[no_mangle]
pub unsafe fn sadd_i16x8(x: i16x8, y: i16x8) -> i16x8 {
// CHECK: %{{[0-9]+}} = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %{{x|_3}}, <8 x i16> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %{{x|1}}, <8 x i16> %{{y|2}})
simd_saturating_add(x, y)
}
// CHECK-LABEL: @sadd_i16x16
#[no_mangle]
pub unsafe fn sadd_i16x16(x: i16x16, y: i16x16) -> i16x16 {
// CHECK: %{{[0-9]+}} = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> %{{x|_3}}, <16 x i16> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> %{{x|1}}, <16 x i16> %{{y|2}})
simd_saturating_add(x, y)
}
// CHECK-LABEL: @sadd_i16x32
#[no_mangle]
pub unsafe fn sadd_i16x32(x: i16x32, y: i16x32) -> i16x32 {
// CHECK: %{{[0-9]+}} = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> %{{x|_3}}, <32 x i16> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> %{{x|1}}, <32 x i16> %{{y|2}})
simd_saturating_add(x, y)
}
// CHECK-LABEL: @sadd_i32x2
#[no_mangle]
pub unsafe fn sadd_i32x2(x: i32x2, y: i32x2) -> i32x2 {
// CHECK: %{{[0-9]+}} = call <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32> %{{x|_3}}, <2 x i32> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32> %{{x|1}}, <2 x i32> %{{y|2}})
simd_saturating_add(x, y)
}
// CHECK-LABEL: @sadd_i32x4
#[no_mangle]
pub unsafe fn sadd_i32x4(x: i32x4, y: i32x4) -> i32x4 {
// CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %{{x|_3}}, <4 x i32> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %{{x|1}}, <4 x i32> %{{y|2}})
simd_saturating_add(x, y)
}
// CHECK-LABEL: @sadd_i32x8
#[no_mangle]
pub unsafe fn sadd_i32x8(x: i32x8, y: i32x8) -> i32x8 {
// CHECK: %{{[0-9]+}} = call <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32> %{{x|_3}}, <8 x i32> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32> %{{x|1}}, <8 x i32> %{{y|2}})
simd_saturating_add(x, y)
}
// CHECK-LABEL: @sadd_i32x16
#[no_mangle]
pub unsafe fn sadd_i32x16(x: i32x16, y: i32x16) -> i32x16 {
// CHECK: %{{[0-9]+}} = call <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32> %{{x|_3}}, <16 x i32> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32> %{{x|1}}, <16 x i32> %{{y|2}})
simd_saturating_add(x, y)
}
// CHECK-LABEL: @sadd_i64x2
#[no_mangle]
pub unsafe fn sadd_i64x2(x: i64x2, y: i64x2) -> i64x2 {
// CHECK: %{{[0-9]+}} = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> %{{x|_3}}, <2 x i64> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> %{{x|1}}, <2 x i64> %{{y|2}})
simd_saturating_add(x, y)
}
// CHECK-LABEL: @sadd_i64x4
#[no_mangle]
pub unsafe fn sadd_i64x4(x: i64x4, y: i64x4) -> i64x4 {
// CHECK: %{{[0-9]+}} = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> %{{x|_3}}, <4 x i64> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> %{{x|1}}, <4 x i64> %{{y|2}})
simd_saturating_add(x, y)
}
// CHECK-LABEL: @sadd_i64x8
#[no_mangle]
pub unsafe fn sadd_i64x8(x: i64x8, y: i64x8) -> i64x8 {
// CHECK: %{{[0-9]+}} = call <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64> %{{x|_3}}, <8 x i64> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64> %{{x|1}}, <8 x i64> %{{y|2}})
simd_saturating_add(x, y)
}
// CHECK-LABEL: @sadd_i128x2
#[no_mangle]
pub unsafe fn sadd_i128x2(x: i128x2, y: i128x2) -> i128x2 {
// CHECK: %{{[0-9]+}} = call <2 x i128> @llvm.sadd.sat.v2i128(<2 x i128> %{{x|_3}}, <2 x i128> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <2 x i128> @llvm.sadd.sat.v2i128(<2 x i128> %{{x|1}}, <2 x i128> %{{y|2}})
simd_saturating_add(x, y)
}
// CHECK-LABEL: @sadd_i128x4
#[no_mangle]
pub unsafe fn sadd_i128x4(x: i128x4, y: i128x4) -> i128x4 {
// CHECK: %{{[0-9]+}} = call <4 x i128> @llvm.sadd.sat.v4i128(<4 x i128> %{{x|_3}}, <4 x i128> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <4 x i128> @llvm.sadd.sat.v4i128(<4 x i128> %{{x|1}}, <4 x i128> %{{y|2}})
simd_saturating_add(x, y)
}
@ -268,140 +268,140 @@ pub unsafe fn sadd_i128x4(x: i128x4, y: i128x4) -> i128x4 {
// CHECK-LABEL: @uadd_u8x2
#[no_mangle]
pub unsafe fn uadd_u8x2(x: u8x2, y: u8x2) -> u8x2 {
// CHECK: %{{[0-9]+}} = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %{{x|_3}}, <2 x i8> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %{{x|1}}, <2 x i8> %{{y|2}})
simd_saturating_add(x, y)
}
// CHECK-LABEL: @uadd_u8x4
#[no_mangle]
pub unsafe fn uadd_u8x4(x: u8x4, y: u8x4) -> u8x4 {
// CHECK: %{{[0-9]+}} = call <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8> %{{x|_3}}, <4 x i8> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8> %{{x|1}}, <4 x i8> %{{y|2}})
simd_saturating_add(x, y)
}
// CHECK-LABEL: @uadd_u8x8
#[no_mangle]
pub unsafe fn uadd_u8x8(x: u8x8, y: u8x8) -> u8x8 {
// CHECK: %{{[0-9]+}} = call <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8> %{{x|_3}}, <8 x i8> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8> %{{x|1}}, <8 x i8> %{{y|2}})
simd_saturating_add(x, y)
}
// CHECK-LABEL: @uadd_u8x16
#[no_mangle]
pub unsafe fn uadd_u8x16(x: u8x16, y: u8x16) -> u8x16 {
// CHECK: %{{[0-9]+}} = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %{{x|_3}}, <16 x i8> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %{{x|1}}, <16 x i8> %{{y|2}})
simd_saturating_add(x, y)
}
// CHECK-LABEL: @uadd_u8x32
#[no_mangle]
pub unsafe fn uadd_u8x32(x: u8x32, y: u8x32) -> u8x32 {
// CHECK: %{{[0-9]+}} = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> %{{x|_3}}, <32 x i8> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> %{{x|1}}, <32 x i8> %{{y|2}})
simd_saturating_add(x, y)
}
// CHECK-LABEL: @uadd_u8x64
#[no_mangle]
pub unsafe fn uadd_u8x64(x: u8x64, y: u8x64) -> u8x64 {
// CHECK: %{{[0-9]+}} = call <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8> %{{x|_3}}, <64 x i8> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8> %{{x|1}}, <64 x i8> %{{y|2}})
simd_saturating_add(x, y)
}
// CHECK-LABEL: @uadd_u16x2
#[no_mangle]
pub unsafe fn uadd_u16x2(x: u16x2, y: u16x2) -> u16x2 {
// CHECK: %{{[0-9]+}} = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> %{{x|_3}}, <2 x i16> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> %{{x|1}}, <2 x i16> %{{y|2}})
simd_saturating_add(x, y)
}
// CHECK-LABEL: @uadd_u16x4
#[no_mangle]
pub unsafe fn uadd_u16x4(x: u16x4, y: u16x4) -> u16x4 {
// CHECK: %{{[0-9]+}} = call <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16> %{{x|_3}}, <4 x i16> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16> %{{x|1}}, <4 x i16> %{{y|2}})
simd_saturating_add(x, y)
}
// CHECK-LABEL: @uadd_u16x8
#[no_mangle]
pub unsafe fn uadd_u16x8(x: u16x8, y: u16x8) -> u16x8 {
// CHECK: %{{[0-9]+}} = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %{{x|_3}}, <8 x i16> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %{{x|1}}, <8 x i16> %{{y|2}})
simd_saturating_add(x, y)
}
// CHECK-LABEL: @uadd_u16x16
#[no_mangle]
pub unsafe fn uadd_u16x16(x: u16x16, y: u16x16) -> u16x16 {
// CHECK: %{{[0-9]+}} = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> %{{x|_3}}, <16 x i16> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> %{{x|1}}, <16 x i16> %{{y|2}})
simd_saturating_add(x, y)
}
// CHECK-LABEL: @uadd_u16x32
#[no_mangle]
pub unsafe fn uadd_u16x32(x: u16x32, y: u16x32) -> u16x32 {
// CHECK: %{{[0-9]+}} = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> %{{x|_3}}, <32 x i16> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> %{{x|1}}, <32 x i16> %{{y|2}})
simd_saturating_add(x, y)
}
// CHECK-LABEL: @uadd_u32x2
#[no_mangle]
pub unsafe fn uadd_u32x2(x: u32x2, y: u32x2) -> u32x2 {
// CHECK: %{{[0-9]+}} = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> %{{x|_3}}, <2 x i32> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> %{{x|1}}, <2 x i32> %{{y|2}})
simd_saturating_add(x, y)
}
// CHECK-LABEL: @uadd_u32x4
#[no_mangle]
pub unsafe fn uadd_u32x4(x: u32x4, y: u32x4) -> u32x4 {
// CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %{{x|_3}}, <4 x i32> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %{{x|1}}, <4 x i32> %{{y|2}})
simd_saturating_add(x, y)
}
// CHECK-LABEL: @uadd_u32x8
#[no_mangle]
pub unsafe fn uadd_u32x8(x: u32x8, y: u32x8) -> u32x8 {
// CHECK: %{{[0-9]+}} = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> %{{x|_3}}, <8 x i32> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> %{{x|1}}, <8 x i32> %{{y|2}})
simd_saturating_add(x, y)
}
// CHECK-LABEL: @uadd_u32x16
#[no_mangle]
pub unsafe fn uadd_u32x16(x: u32x16, y: u32x16) -> u32x16 {
// CHECK: %{{[0-9]+}} = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> %{{x|_3}}, <16 x i32> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> %{{x|1}}, <16 x i32> %{{y|2}})
simd_saturating_add(x, y)
}
// CHECK-LABEL: @uadd_u64x2
#[no_mangle]
pub unsafe fn uadd_u64x2(x: u64x2, y: u64x2) -> u64x2 {
// CHECK: %{{[0-9]+}} = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> %{{x|_3}}, <2 x i64> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> %{{x|1}}, <2 x i64> %{{y|2}})
simd_saturating_add(x, y)
}
// CHECK-LABEL: @uadd_u64x4
#[no_mangle]
pub unsafe fn uadd_u64x4(x: u64x4, y: u64x4) -> u64x4 {
// CHECK: %{{[0-9]+}} = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> %{{x|_3}}, <4 x i64> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> %{{x|1}}, <4 x i64> %{{y|2}})
simd_saturating_add(x, y)
}
// CHECK-LABEL: @uadd_u64x8
#[no_mangle]
pub unsafe fn uadd_u64x8(x: u64x8, y: u64x8) -> u64x8 {
// CHECK: %{{[0-9]+}} = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> %{{x|_3}}, <8 x i64> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> %{{x|1}}, <8 x i64> %{{y|2}})
simd_saturating_add(x, y)
}
// CHECK-LABEL: @uadd_u128x2
#[no_mangle]
pub unsafe fn uadd_u128x2(x: u128x2, y: u128x2) -> u128x2 {
// CHECK: %{{[0-9]+}} = call <2 x i128> @llvm.uadd.sat.v2i128(<2 x i128> %{{x|_3}}, <2 x i128> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <2 x i128> @llvm.uadd.sat.v2i128(<2 x i128> %{{x|1}}, <2 x i128> %{{y|2}})
simd_saturating_add(x, y)
}
// CHECK-LABEL: @uadd_u128x4
#[no_mangle]
pub unsafe fn uadd_u128x4(x: u128x4, y: u128x4) -> u128x4 {
// CHECK: %{{[0-9]+}} = call <4 x i128> @llvm.uadd.sat.v4i128(<4 x i128> %{{x|_3}}, <4 x i128> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <4 x i128> @llvm.uadd.sat.v4i128(<4 x i128> %{{x|1}}, <4 x i128> %{{y|2}})
simd_saturating_add(x, y)
}
@ -412,140 +412,140 @@ pub unsafe fn uadd_u128x4(x: u128x4, y: u128x4) -> u128x4 {
// CHECK-LABEL: @ssub_i8x2
#[no_mangle]
pub unsafe fn ssub_i8x2(x: i8x2, y: i8x2) -> i8x2 {
// CHECK: %{{[0-9]+}} = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %{{x|_3}}, <2 x i8> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %{{x|1}}, <2 x i8> %{{y|2}})
simd_saturating_sub(x, y)
}
// CHECK-LABEL: @ssub_i8x4
#[no_mangle]
pub unsafe fn ssub_i8x4(x: i8x4, y: i8x4) -> i8x4 {
// CHECK: %{{[0-9]+}} = call <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8> %{{x|_3}}, <4 x i8> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8> %{{x|1}}, <4 x i8> %{{y|2}})
simd_saturating_sub(x, y)
}
// CHECK-LABEL: @ssub_i8x8
#[no_mangle]
pub unsafe fn ssub_i8x8(x: i8x8, y: i8x8) -> i8x8 {
// CHECK: %{{[0-9]+}} = call <8 x i8> @llvm.ssub.sat.v8i8(<8 x i8> %{{x|_3}}, <8 x i8> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <8 x i8> @llvm.ssub.sat.v8i8(<8 x i8> %{{x|1}}, <8 x i8> %{{y|2}})
simd_saturating_sub(x, y)
}
// CHECK-LABEL: @ssub_i8x16
#[no_mangle]
pub unsafe fn ssub_i8x16(x: i8x16, y: i8x16) -> i8x16 {
// CHECK: %{{[0-9]+}} = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %{{x|_3}}, <16 x i8> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %{{x|1}}, <16 x i8> %{{y|2}})
simd_saturating_sub(x, y)
}
// CHECK-LABEL: @ssub_i8x32
#[no_mangle]
pub unsafe fn ssub_i8x32(x: i8x32, y: i8x32) -> i8x32 {
// CHECK: %{{[0-9]+}} = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> %{{x|_3}}, <32 x i8> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> %{{x|1}}, <32 x i8> %{{y|2}})
simd_saturating_sub(x, y)
}
// CHECK-LABEL: @ssub_i8x64
#[no_mangle]
pub unsafe fn ssub_i8x64(x: i8x64, y: i8x64) -> i8x64 {
// CHECK: %{{[0-9]+}} = call <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8> %{{x|_3}}, <64 x i8> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8> %{{x|1}}, <64 x i8> %{{y|2}})
simd_saturating_sub(x, y)
}
// CHECK-LABEL: @ssub_i16x2
#[no_mangle]
pub unsafe fn ssub_i16x2(x: i16x2, y: i16x2) -> i16x2 {
// CHECK: %{{[0-9]+}} = call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> %{{x|_3}}, <2 x i16> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> %{{x|1}}, <2 x i16> %{{y|2}})
simd_saturating_sub(x, y)
}
// CHECK-LABEL: @ssub_i16x4
#[no_mangle]
pub unsafe fn ssub_i16x4(x: i16x4, y: i16x4) -> i16x4 {
// CHECK: %{{[0-9]+}} = call <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16> %{{x|_3}}, <4 x i16> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16> %{{x|1}}, <4 x i16> %{{y|2}})
simd_saturating_sub(x, y)
}
// CHECK-LABEL: @ssub_i16x8
#[no_mangle]
pub unsafe fn ssub_i16x8(x: i16x8, y: i16x8) -> i16x8 {
// CHECK: %{{[0-9]+}} = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %{{x|_3}}, <8 x i16> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %{{x|1}}, <8 x i16> %{{y|2}})
simd_saturating_sub(x, y)
}
// CHECK-LABEL: @ssub_i16x16
#[no_mangle]
pub unsafe fn ssub_i16x16(x: i16x16, y: i16x16) -> i16x16 {
// CHECK: %{{[0-9]+}} = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> %{{x|_3}}, <16 x i16> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> %{{x|1}}, <16 x i16> %{{y|2}})
simd_saturating_sub(x, y)
}
// CHECK-LABEL: @ssub_i16x32
#[no_mangle]
pub unsafe fn ssub_i16x32(x: i16x32, y: i16x32) -> i16x32 {
// CHECK: %{{[0-9]+}} = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> %{{x|_3}}, <32 x i16> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> %{{x|1}}, <32 x i16> %{{y|2}})
simd_saturating_sub(x, y)
}
// CHECK-LABEL: @ssub_i32x2
#[no_mangle]
pub unsafe fn ssub_i32x2(x: i32x2, y: i32x2) -> i32x2 {
// CHECK: %{{[0-9]+}} = call <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32> %{{x|_3}}, <2 x i32> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32> %{{x|1}}, <2 x i32> %{{y|2}})
simd_saturating_sub(x, y)
}
// CHECK-LABEL: @ssub_i32x4
#[no_mangle]
pub unsafe fn ssub_i32x4(x: i32x4, y: i32x4) -> i32x4 {
// CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %{{x|_3}}, <4 x i32> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %{{x|1}}, <4 x i32> %{{y|2}})
simd_saturating_sub(x, y)
}
// CHECK-LABEL: @ssub_i32x8
#[no_mangle]
pub unsafe fn ssub_i32x8(x: i32x8, y: i32x8) -> i32x8 {
// CHECK: %{{[0-9]+}} = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> %{{x|_3}}, <8 x i32> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> %{{x|1}}, <8 x i32> %{{y|2}})
simd_saturating_sub(x, y)
}
// CHECK-LABEL: @ssub_i32x16
#[no_mangle]
pub unsafe fn ssub_i32x16(x: i32x16, y: i32x16) -> i32x16 {
// CHECK: %{{[0-9]+}} = call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> %{{x|_3}}, <16 x i32> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> %{{x|1}}, <16 x i32> %{{y|2}})
simd_saturating_sub(x, y)
}
// CHECK-LABEL: @ssub_i64x2
#[no_mangle]
pub unsafe fn ssub_i64x2(x: i64x2, y: i64x2) -> i64x2 {
// CHECK: %{{[0-9]+}} = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> %{{x|_3}}, <2 x i64> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> %{{x|1}}, <2 x i64> %{{y|2}})
simd_saturating_sub(x, y)
}
// CHECK-LABEL: @ssub_i64x4
#[no_mangle]
pub unsafe fn ssub_i64x4(x: i64x4, y: i64x4) -> i64x4 {
// CHECK: %{{[0-9]+}} = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> %{{x|_3}}, <4 x i64> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> %{{x|1}}, <4 x i64> %{{y|2}})
simd_saturating_sub(x, y)
}
// CHECK-LABEL: @ssub_i64x8
#[no_mangle]
pub unsafe fn ssub_i64x8(x: i64x8, y: i64x8) -> i64x8 {
// CHECK: %{{[0-9]+}} = call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> %{{x|_3}}, <8 x i64> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> %{{x|1}}, <8 x i64> %{{y|2}})
simd_saturating_sub(x, y)
}
// CHECK-LABEL: @ssub_i128x2
#[no_mangle]
pub unsafe fn ssub_i128x2(x: i128x2, y: i128x2) -> i128x2 {
// CHECK: %{{[0-9]+}} = call <2 x i128> @llvm.ssub.sat.v2i128(<2 x i128> %{{x|_3}}, <2 x i128> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <2 x i128> @llvm.ssub.sat.v2i128(<2 x i128> %{{x|1}}, <2 x i128> %{{y|2}})
simd_saturating_sub(x, y)
}
// CHECK-LABEL: @ssub_i128x4
#[no_mangle]
pub unsafe fn ssub_i128x4(x: i128x4, y: i128x4) -> i128x4 {
// CHECK: %{{[0-9]+}} = call <4 x i128> @llvm.ssub.sat.v4i128(<4 x i128> %{{x|_3}}, <4 x i128> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <4 x i128> @llvm.ssub.sat.v4i128(<4 x i128> %{{x|1}}, <4 x i128> %{{y|2}})
simd_saturating_sub(x, y)
}
@ -554,139 +554,139 @@ pub unsafe fn ssub_i128x4(x: i128x4, y: i128x4) -> i128x4 {
// CHECK-LABEL: @usub_u8x2
#[no_mangle]
pub unsafe fn usub_u8x2(x: u8x2, y: u8x2) -> u8x2 {
// CHECK: %{{[0-9]+}} = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %{{x|_3}}, <2 x i8> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %{{x|1}}, <2 x i8> %{{y|2}})
simd_saturating_sub(x, y)
}
// CHECK-LABEL: @usub_u8x4
#[no_mangle]
pub unsafe fn usub_u8x4(x: u8x4, y: u8x4) -> u8x4 {
// CHECK: %{{[0-9]+}} = call <4 x i8> @llvm.usub.sat.v4i8(<4 x i8> %{{x|_3}}, <4 x i8> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <4 x i8> @llvm.usub.sat.v4i8(<4 x i8> %{{x|1}}, <4 x i8> %{{y|2}})
simd_saturating_sub(x, y)
}
// CHECK-LABEL: @usub_u8x8
#[no_mangle]
pub unsafe fn usub_u8x8(x: u8x8, y: u8x8) -> u8x8 {
// CHECK: %{{[0-9]+}} = call <8 x i8> @llvm.usub.sat.v8i8(<8 x i8> %{{x|_3}}, <8 x i8> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <8 x i8> @llvm.usub.sat.v8i8(<8 x i8> %{{x|1}}, <8 x i8> %{{y|2}})
simd_saturating_sub(x, y)
}
// CHECK-LABEL: @usub_u8x16
#[no_mangle]
pub unsafe fn usub_u8x16(x: u8x16, y: u8x16) -> u8x16 {
// CHECK: %{{[0-9]+}} = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %{{x|_3}}, <16 x i8> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %{{x|1}}, <16 x i8> %{{y|2}})
simd_saturating_sub(x, y)
}
// CHECK-LABEL: @usub_u8x32
#[no_mangle]
pub unsafe fn usub_u8x32(x: u8x32, y: u8x32) -> u8x32 {
// CHECK: %{{[0-9]+}} = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> %{{x|_3}}, <32 x i8> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> %{{x|1}}, <32 x i8> %{{y|2}})
simd_saturating_sub(x, y)
}
// CHECK-LABEL: @usub_u8x64
#[no_mangle]
pub unsafe fn usub_u8x64(x: u8x64, y: u8x64) -> u8x64 {
// CHECK: %{{[0-9]+}} = call <64 x i8> @llvm.usub.sat.v64i8(<64 x i8> %{{x|_3}}, <64 x i8> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <64 x i8> @llvm.usub.sat.v64i8(<64 x i8> %{{x|1}}, <64 x i8> %{{y|2}})
simd_saturating_sub(x, y)
}
// CHECK-LABEL: @usub_u16x2
#[no_mangle]
pub unsafe fn usub_u16x2(x: u16x2, y: u16x2) -> u16x2 {
// CHECK: %{{[0-9]+}} = call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> %{{x|_3}}, <2 x i16> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> %{{x|1}}, <2 x i16> %{{y|2}})
simd_saturating_sub(x, y)
}
// CHECK-LABEL: @usub_u16x4
#[no_mangle]
pub unsafe fn usub_u16x4(x: u16x4, y: u16x4) -> u16x4 {
// CHECK: %{{[0-9]+}} = call <4 x i16> @llvm.usub.sat.v4i16(<4 x i16> %{{x|_3}}, <4 x i16> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <4 x i16> @llvm.usub.sat.v4i16(<4 x i16> %{{x|1}}, <4 x i16> %{{y|2}})
simd_saturating_sub(x, y)
}
// CHECK-LABEL: @usub_u16x8
#[no_mangle]
pub unsafe fn usub_u16x8(x: u16x8, y: u16x8) -> u16x8 {
// CHECK: %{{[0-9]+}} = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %{{x|_3}}, <8 x i16> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %{{x|1}}, <8 x i16> %{{y|2}})
simd_saturating_sub(x, y)
}
// CHECK-LABEL: @usub_u16x16
#[no_mangle]
pub unsafe fn usub_u16x16(x: u16x16, y: u16x16) -> u16x16 {
// CHECK: %{{[0-9]+}} = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> %{{x|_3}}, <16 x i16> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> %{{x|1}}, <16 x i16> %{{y|2}})
simd_saturating_sub(x, y)
}
// CHECK-LABEL: @usub_u16x32
#[no_mangle]
pub unsafe fn usub_u16x32(x: u16x32, y: u16x32) -> u16x32 {
// CHECK: %{{[0-9]+}} = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> %{{x|_3}}, <32 x i16> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> %{{x|1}}, <32 x i16> %{{y|2}})
simd_saturating_sub(x, y)
}
// CHECK-LABEL: @usub_u32x2
#[no_mangle]
pub unsafe fn usub_u32x2(x: u32x2, y: u32x2) -> u32x2 {
// CHECK: %{{[0-9]+}} = call <2 x i32> @llvm.usub.sat.v2i32(<2 x i32> %{{x|_3}}, <2 x i32> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <2 x i32> @llvm.usub.sat.v2i32(<2 x i32> %{{x|1}}, <2 x i32> %{{y|2}})
simd_saturating_sub(x, y)
}
// CHECK-LABEL: @usub_u32x4
#[no_mangle]
pub unsafe fn usub_u32x4(x: u32x4, y: u32x4) -> u32x4 {
// CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %{{x|_3}}, <4 x i32> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %{{x|1}}, <4 x i32> %{{y|2}})
simd_saturating_sub(x, y)
}
// CHECK-LABEL: @usub_u32x8
#[no_mangle]
pub unsafe fn usub_u32x8(x: u32x8, y: u32x8) -> u32x8 {
// CHECK: %{{[0-9]+}} = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> %{{x|_3}}, <8 x i32> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> %{{x|1}}, <8 x i32> %{{y|2}})
simd_saturating_sub(x, y)
}
// CHECK-LABEL: @usub_u32x16
#[no_mangle]
pub unsafe fn usub_u32x16(x: u32x16, y: u32x16) -> u32x16 {
// CHECK: %{{[0-9]+}} = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> %{{x|_3}}, <16 x i32> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> %{{x|1}}, <16 x i32> %{{y|2}})
simd_saturating_sub(x, y)
}
// CHECK-LABEL: @usub_u64x2
#[no_mangle]
pub unsafe fn usub_u64x2(x: u64x2, y: u64x2) -> u64x2 {
// CHECK: %{{[0-9]+}} = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> %{{x|_3}}, <2 x i64> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> %{{x|1}}, <2 x i64> %{{y|2}})
simd_saturating_sub(x, y)
}
// CHECK-LABEL: @usub_u64x4
#[no_mangle]
pub unsafe fn usub_u64x4(x: u64x4, y: u64x4) -> u64x4 {
// CHECK: %{{[0-9]+}} = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> %{{x|_3}}, <4 x i64> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> %{{x|1}}, <4 x i64> %{{y|2}})
simd_saturating_sub(x, y)
}
// CHECK-LABEL: @usub_u64x8
#[no_mangle]
pub unsafe fn usub_u64x8(x: u64x8, y: u64x8) -> u64x8 {
// CHECK: %{{[0-9]+}} = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> %{{x|_3}}, <8 x i64> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> %{{x|1}}, <8 x i64> %{{y|2}})
simd_saturating_sub(x, y)
}
// CHECK-LABEL: @usub_u128x2
#[no_mangle]
pub unsafe fn usub_u128x2(x: u128x2, y: u128x2) -> u128x2 {
// CHECK: %{{[0-9]+}} = call <2 x i128> @llvm.usub.sat.v2i128(<2 x i128> %{{x|_3}}, <2 x i128> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <2 x i128> @llvm.usub.sat.v2i128(<2 x i128> %{{x|1}}, <2 x i128> %{{y|2}})
simd_saturating_sub(x, y)
}
// CHECK-LABEL: @usub_u128x4
#[no_mangle]
pub unsafe fn usub_u128x4(x: u128x4, y: u128x4) -> u128x4 {
// CHECK: %{{[0-9]+}} = call <4 x i128> @llvm.usub.sat.v4i128(<4 x i128> %{{x|_3}}, <4 x i128> %{{y|_4}})
// CHECK: %{{[0-9]+}} = call <4 x i128> @llvm.usub.sat.v4i128(<4 x i128> %{{x|1}}, <4 x i128> %{{y|2}})
simd_saturating_sub(x, y)
}

View File

@ -26,16 +26,16 @@ extern "platform-intrinsic" {
fn simd_bitmask<T, U>(x: T) -> U;
}
// NOTE(eddyb) `%{{x|_2}}` is used because on some targets (e.g. WASM)
// NOTE(eddyb) `%{{x|1}}` is used because on some targets (e.g. WASM)
// SIMD vectors are passed directly, resulting in `%x` being a vector,
// while on others they're passed indirectly, resulting in `%x` being
// a pointer to a vector, and `%_2` a vector loaded from that pointer.
// a pointer to a vector, and `%1` a vector loaded from that pointer.
// This is controlled by the target spec option `simd_types_indirect`.
// CHECK-LABEL: @bitmask_int
#[no_mangle]
pub unsafe fn bitmask_int(x: i32x2) -> u8 {
// CHECK: [[A:%[0-9]+]] = lshr <2 x i32> %{{x|_2}}, <i32 31, i32 31>
// CHECK: [[A:%[0-9]+]] = lshr <2 x i32> %{{x|1}}, <i32 31, i32 31>
// CHECK: [[B:%[0-9]+]] = trunc <2 x i32> [[A]] to <2 x i1>
// CHECK: [[C:%[0-9]+]] = bitcast <2 x i1> [[B]] to i2
// CHECK: %{{[0-9]+}} = zext i2 [[C]] to i8
@ -45,7 +45,7 @@ pub unsafe fn bitmask_int(x: i32x2) -> u8 {
// CHECK-LABEL: @bitmask_uint
#[no_mangle]
pub unsafe fn bitmask_uint(x: u32x2) -> u8 {
// CHECK: [[A:%[0-9]+]] = lshr <2 x i32> %{{x|_2}}, <i32 31, i32 31>
// CHECK: [[A:%[0-9]+]] = lshr <2 x i32> %{{x|1}}, <i32 31, i32 31>
// CHECK: [[B:%[0-9]+]] = trunc <2 x i32> [[A]] to <2 x i1>
// CHECK: [[C:%[0-9]+]] = bitcast <2 x i1> [[B]] to i2
// CHECK: %{{[0-9]+}} = zext i2 [[C]] to i8
@ -55,7 +55,7 @@ pub unsafe fn bitmask_uint(x: u32x2) -> u8 {
// CHECK-LABEL: @bitmask_int16
#[no_mangle]
pub unsafe fn bitmask_int16(x: i8x16) -> u16 {
// CHECK: [[A:%[0-9]+]] = lshr <16 x i8> %{{x|_2}}, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
// CHECK: [[A:%[0-9]+]] = lshr <16 x i8> %{{x|1|2}}, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
// CHECK: [[B:%[0-9]+]] = trunc <16 x i8> [[A]] to <16 x i1>
// CHECK: %{{[0-9]+}} = bitcast <16 x i1> [[B]] to i16
// CHECK-NOT: zext

View File

@ -21,27 +21,27 @@ extern "platform-intrinsic" {
// CHECK-LABEL: @extract_m
#[no_mangle]
pub unsafe fn extract_m(v: M, i: u32) -> f32 {
// CHECK: extractelement <4 x float> %{{v|_3}}, i32 %i
// CHECK: extractelement <4 x float> %{{v|1|2}}, i32 %i
simd_extract(v, i)
}
// CHECK-LABEL: @extract_s
#[no_mangle]
pub unsafe fn extract_s(v: S<4>, i: u32) -> f32 {
// CHECK: extractelement <4 x float> %{{v|_3}}, i32 %i
// CHECK: extractelement <4 x float> %{{v|1|2}}, i32 %i
simd_extract(v, i)
}
// CHECK-LABEL: @insert_m
#[no_mangle]
pub unsafe fn insert_m(v: M, i: u32, j: f32) -> M {
// CHECK: insertelement <4 x float> %{{v|_4}}, float %j, i32 %i
// CHECK: insertelement <4 x float> %{{v|1|2}}, float %j, i32 %i
simd_insert(v, i, j)
}
// CHECK-LABEL: @insert_s
#[no_mangle]
pub unsafe fn insert_s(v: S<4>, i: u32, j: f32) -> S<4> {
// CHECK: insertelement <4 x float> %{{v|_4}}, float %j, i32 %i
// CHECK: insertelement <4 x float> %{{v|1|2}}, float %j, i32 %i
simd_insert(v, i, j)
}

View File

@ -21,7 +21,6 @@ pub struct U(f32, f32, f32, f32);
// CHECK-LABEL: @build_array_s
#[no_mangle]
pub fn build_array_s(x: [f32; 4]) -> S<4> {
// CHECK: call void @llvm.memcpy.{{.+}}({{.*}}, i{{[0-9]+}} 16, i1 false)
// CHECK: call void @llvm.memcpy.{{.+}}({{.*}}, i{{[0-9]+}} 16, i1 false)
S::<4>(x)
}
@ -29,7 +28,6 @@ pub fn build_array_s(x: [f32; 4]) -> S<4> {
// CHECK-LABEL: @build_array_t
#[no_mangle]
pub fn build_array_t(x: [f32; 4]) -> T {
// CHECK: call void @llvm.memcpy.{{.+}}({{.*}}, i{{[0-9]+}} 16, i1 false)
// CHECK: call void @llvm.memcpy.{{.+}}({{.*}}, i{{[0-9]+}} 16, i1 false)
T(x)
}
@ -37,7 +35,6 @@ pub fn build_array_t(x: [f32; 4]) -> T {
// CHECK-LABEL: @build_array_u
#[no_mangle]
pub fn build_array_u(x: [f32; 4]) -> U {
// CHECK: call void @llvm.memcpy.{{.+}}({{.*}}, i{{[0-9]+}} 16, i1 false)
// CHECK: call void @llvm.memcpy.{{.+}}({{.*}}, i{{[0-9]+}} 16, i1 false)
unsafe { std::mem::transmute(x) }
}

View File

@ -21,6 +21,6 @@ pub struct Simd<T, const LANES: usize>([T; LANES]);
// CHECK-LABEL: smoke
#[no_mangle]
pub fn smoke(ptrs: SimdConstPtr<u8, 8>, offsets: Simd<usize, 8>) -> SimdConstPtr<u8, 8> {
// CHECK: getelementptr i8, <8 x {{i8\*|ptr}}> %_3, <8 x i64> %_4
// CHECK: getelementptr i8, <8 x {{i8\*|ptr}}> %1, <8 x i64> %2
unsafe { simd_arith_offset(ptrs, offsets) }
}

View File

@ -0,0 +1,31 @@
- // MIR for `f` before CopyProp
+ // MIR for `f` after CopyProp
fn f(_1: NotCopy) -> () {
let mut _0: (); // return place in scope 0 at $DIR/custom_move_arg.rs:+0:19: +0:19
let mut _2: NotCopy; // in scope 0 at $SRC_DIR/core/src/intrinsics/mir.rs:LL:COL
let mut _3: NotCopy; // in scope 0 at $SRC_DIR/core/src/intrinsics/mir.rs:LL:COL
bb0: {
- _2 = _1; // scope 0 at $SRC_DIR/core/src/intrinsics/mir.rs:LL:COL
- _0 = opaque::<NotCopy>(move _1) -> bb1; // scope 0 at $DIR/custom_move_arg.rs:+3:9: +3:41
+ _0 = opaque::<NotCopy>(_1) -> bb1; // scope 0 at $DIR/custom_move_arg.rs:+3:9: +3:41
// mir::Constant
// + span: $DIR/custom_move_arg.rs:15:24: 15:30
// + literal: Const { ty: fn(NotCopy) {opaque::<NotCopy>}, val: Value(<ZST>) }
}
bb1: {
- _3 = move _2; // scope 0 at $SRC_DIR/core/src/intrinsics/mir.rs:LL:COL
- _0 = opaque::<NotCopy>(_3) -> bb2; // scope 0 at $DIR/custom_move_arg.rs:+7:9: +7:35
+ _0 = opaque::<NotCopy>(_1) -> bb2; // scope 0 at $DIR/custom_move_arg.rs:+7:9: +7:35
// mir::Constant
// + span: $DIR/custom_move_arg.rs:19:24: 19:30
// + literal: Const { ty: fn(NotCopy) {opaque::<NotCopy>}, val: Value(<ZST>) }
}
bb2: {
return; // scope 0 at $DIR/custom_move_arg.rs:+10:9: +10:17
}
}

View File

@ -0,0 +1,32 @@
// unit-test: CopyProp
#![feature(custom_mir, core_intrinsics)]
#![allow(unused_assignments)]
extern crate core;
use core::intrinsics::mir::*;
struct NotCopy(bool);
// EMIT_MIR custom_move_arg.f.CopyProp.diff
#[custom_mir(dialect = "analysis", phase = "post-cleanup")]
fn f(_1: NotCopy) {
mir!({
let _2 = _1;
Call(RET, bb1, opaque(Move(_1)))
}
bb1 = {
let _3 = Move(_2);
Call(RET, bb2, opaque(_3))
}
bb2 = {
Return()
})
}
#[inline(never)]
fn opaque<T>(_t: T) {}
fn main() {
f(NotCopy(true));
println!("hi");
}

View File

@ -10,19 +10,21 @@
scope 2 (inlined <u8 as Add>::add) { // at $DIR/inherit_overflow.rs:7:13: 7:47
debug self => _1; // in scope 2 at $SRC_DIR/core/src/ops/arith.rs:LL:COL
debug other => _2; // in scope 2 at $SRC_DIR/core/src/ops/arith.rs:LL:COL
let mut _3: u8; // in scope 2 at $SRC_DIR/core/src/ops/arith.rs:LL:COL
let mut _4: u8; // in scope 2 at $SRC_DIR/core/src/ops/arith.rs:LL:COL
let mut _5: (u8, bool); // in scope 2 at $SRC_DIR/core/src/ops/arith.rs:LL:COL
let mut _3: (u8, bool); // in scope 2 at $SRC_DIR/core/src/ops/arith.rs:LL:COL
}
bb0: {
StorageLive(_1); // scope 0 at $DIR/inherit_overflow.rs:+3:13: +3:47
_1 = const u8::MAX; // scope 0 at $DIR/inherit_overflow.rs:+3:13: +3:47
StorageLive(_2); // scope 0 at $DIR/inherit_overflow.rs:+3:13: +3:47
_2 = const 1_u8; // scope 0 at $DIR/inherit_overflow.rs:+3:13: +3:47
_5 = CheckedAdd(const u8::MAX, const 1_u8); // scope 2 at $SRC_DIR/core/src/ops/arith.rs:LL:COL
assert(!move (_5.1: bool), "attempt to compute `{} + {}`, which would overflow", const u8::MAX, const 1_u8) -> bb1; // scope 2 at $SRC_DIR/core/src/ops/arith.rs:LL:COL
_3 = CheckedAdd(const u8::MAX, const 1_u8); // scope 2 at $SRC_DIR/core/src/ops/arith.rs:LL:COL
assert(!move (_3.1: bool), "attempt to compute `{} + {}`, which would overflow", const u8::MAX, const 1_u8) -> bb1; // scope 2 at $SRC_DIR/core/src/ops/arith.rs:LL:COL
}
bb1: {
StorageDead(_2); // scope 0 at $DIR/inherit_overflow.rs:+3:13: +3:47
StorageDead(_1); // scope 0 at $DIR/inherit_overflow.rs:+3:13: +3:47
return; // scope 0 at $DIR/inherit_overflow.rs:+4:2: +4:2
}
}

View File

@ -7,7 +7,7 @@
let mut _2: std::pin::Pin<&mut [generator@$DIR/inline_generator.rs:15:5: 15:8]>; // in scope 0 at $DIR/inline_generator.rs:+1:14: +1:32
let mut _3: &mut [generator@$DIR/inline_generator.rs:15:5: 15:8]; // in scope 0 at $DIR/inline_generator.rs:+1:23: +1:31
let mut _4: [generator@$DIR/inline_generator.rs:15:5: 15:8]; // in scope 0 at $DIR/inline_generator.rs:+1:28: +1:31
+ let mut _7: bool; // in scope 0 at $DIR/inline_generator.rs:+1:33: +1:46
+ let mut _5: bool; // in scope 0 at $DIR/inline_generator.rs:+1:33: +1:46
scope 1 {
debug _r => _1; // in scope 1 at $DIR/inline_generator.rs:+1:9: +1:11
}
@ -15,21 +15,19 @@
+ }
+ scope 3 (inlined Pin::<&mut [generator@$DIR/inline_generator.rs:15:5: 15:8]>::new) { // at $DIR/inline_generator.rs:9:14: 9:32
+ debug pointer => _3; // in scope 3 at $SRC_DIR/core/src/pin.rs:LL:COL
+ let mut _5: &mut [generator@$DIR/inline_generator.rs:15:5: 15:8]; // in scope 3 at $SRC_DIR/core/src/pin.rs:LL:COL
+ scope 4 {
+ scope 5 (inlined Pin::<&mut [generator@$DIR/inline_generator.rs:15:5: 15:8]>::new_unchecked) { // at $SRC_DIR/core/src/pin.rs:LL:COL
+ debug pointer => _5; // in scope 5 at $SRC_DIR/core/src/pin.rs:LL:COL
+ let mut _6: &mut [generator@$DIR/inline_generator.rs:15:5: 15:8]; // in scope 5 at $SRC_DIR/core/src/pin.rs:LL:COL
+ debug pointer => _3; // in scope 5 at $SRC_DIR/core/src/pin.rs:LL:COL
+ }
+ }
+ }
+ scope 6 (inlined g::{closure#0}) { // at $DIR/inline_generator.rs:9:33: 9:46
+ debug a => _7; // in scope 6 at $DIR/inline_generator.rs:15:6: 15:7
+ let mut _8: i32; // in scope 6 at $DIR/inline_generator.rs:15:17: 15:39
+ let mut _9: u32; // in scope 6 at $DIR/inline_generator.rs:15:5: 15:41
+ debug a => _5; // in scope 6 at $DIR/inline_generator.rs:15:6: 15:7
+ let mut _6: i32; // in scope 6 at $DIR/inline_generator.rs:15:17: 15:39
+ let mut _7: u32; // in scope 6 at $DIR/inline_generator.rs:15:5: 15:41
+ let mut _8: &mut [generator@$DIR/inline_generator.rs:15:5: 15:8]; // in scope 6 at $DIR/inline_generator.rs:15:5: 15:41
+ let mut _9: &mut [generator@$DIR/inline_generator.rs:15:5: 15:8]; // in scope 6 at $DIR/inline_generator.rs:15:5: 15:41
+ let mut _10: &mut [generator@$DIR/inline_generator.rs:15:5: 15:8]; // in scope 6 at $DIR/inline_generator.rs:15:5: 15:41
+ let mut _11: &mut [generator@$DIR/inline_generator.rs:15:5: 15:8]; // in scope 6 at $DIR/inline_generator.rs:15:5: 15:41
+ let mut _12: &mut [generator@$DIR/inline_generator.rs:15:5: 15:8]; // in scope 6 at $DIR/inline_generator.rs:15:5: 15:41
+ }
bb0: {
@ -64,28 +62,22 @@
- }
-
- bb2: {
+ StorageLive(_5); // scope 4 at $SRC_DIR/core/src/pin.rs:LL:COL
+ _5 = move _3; // scope 4 at $SRC_DIR/core/src/pin.rs:LL:COL
+ StorageLive(_6); // scope 5 at $SRC_DIR/core/src/pin.rs:LL:COL
+ _6 = move _5; // scope 5 at $SRC_DIR/core/src/pin.rs:LL:COL
+ _2 = Pin::<&mut [generator@$DIR/inline_generator.rs:15:5: 15:8]> { pointer: move _6 }; // scope 5 at $SRC_DIR/core/src/pin.rs:LL:COL
+ StorageDead(_6); // scope 5 at $SRC_DIR/core/src/pin.rs:LL:COL
+ StorageDead(_5); // scope 4 at $SRC_DIR/core/src/pin.rs:LL:COL
+ _2 = Pin::<&mut [generator@$DIR/inline_generator.rs:15:5: 15:8]> { pointer: move _3 }; // scope 5 at $SRC_DIR/core/src/pin.rs:LL:COL
StorageDead(_3); // scope 0 at $DIR/inline_generator.rs:+1:31: +1:32
- _1 = <[generator@$DIR/inline_generator.rs:15:5: 15:8] as Generator<bool>>::resume(move _2, const false) -> [return: bb3, unwind: bb4]; // scope 0 at $DIR/inline_generator.rs:+1:14: +1:46
- // mir::Constant
- // + span: $DIR/inline_generator.rs:9:33: 9:39
- // + literal: Const { ty: for<'a> fn(Pin<&'a mut [generator@$DIR/inline_generator.rs:15:5: 15:8]>, bool) -> GeneratorState<<[generator@$DIR/inline_generator.rs:15:5: 15:8] as Generator<bool>>::Yield, <[generator@$DIR/inline_generator.rs:15:5: 15:8] as Generator<bool>>::Return> {<[generator@$DIR/inline_generator.rs:15:5: 15:8] as Generator<bool>>::resume}, val: Value(<ZST>) }
+ StorageLive(_7); // scope 0 at $DIR/inline_generator.rs:+1:33: +1:46
+ _7 = const false; // scope 0 at $DIR/inline_generator.rs:+1:33: +1:46
+ _10 = deref_copy (_2.0: &mut [generator@$DIR/inline_generator.rs:15:5: 15:8]); // scope 6 at $DIR/inline_generator.rs:15:5: 15:41
+ _9 = discriminant((*_10)); // scope 6 at $DIR/inline_generator.rs:15:5: 15:41
+ switchInt(move _9) -> [0: bb3, 1: bb8, 3: bb7, otherwise: bb9]; // scope 6 at $DIR/inline_generator.rs:15:5: 15:41
+ StorageLive(_5); // scope 0 at $DIR/inline_generator.rs:+1:33: +1:46
+ _5 = const false; // scope 0 at $DIR/inline_generator.rs:+1:33: +1:46
+ _8 = deref_copy (_2.0: &mut [generator@$DIR/inline_generator.rs:15:5: 15:8]); // scope 6 at $DIR/inline_generator.rs:15:5: 15:41
+ _7 = discriminant((*_8)); // scope 6 at $DIR/inline_generator.rs:15:5: 15:41
+ switchInt(move _7) -> [0: bb3, 1: bb8, 3: bb7, otherwise: bb9]; // scope 6 at $DIR/inline_generator.rs:15:5: 15:41
}
- bb3: {
+ bb1: {
+ StorageDead(_7); // scope 0 at $DIR/inline_generator.rs:+1:33: +1:46
+ StorageDead(_5); // scope 0 at $DIR/inline_generator.rs:+1:33: +1:46
StorageDead(_2); // scope 0 at $DIR/inline_generator.rs:+1:45: +1:46
StorageDead(_4); // scope 0 at $DIR/inline_generator.rs:+1:46: +1:47
_0 = const (); // scope 0 at $DIR/inline_generator.rs:+0:11: +2:2
@ -99,33 +91,33 @@
+ }
+
+ bb3: {
+ StorageLive(_8); // scope 6 at $DIR/inline_generator.rs:15:17: 15:39
+ switchInt(_7) -> [0: bb5, otherwise: bb4]; // scope 6 at $DIR/inline_generator.rs:15:20: 15:21
+ StorageLive(_6); // scope 6 at $DIR/inline_generator.rs:15:17: 15:39
+ switchInt(_5) -> [0: bb5, otherwise: bb4]; // scope 6 at $DIR/inline_generator.rs:15:20: 15:21
+ }
+
+ bb4: {
+ _8 = const 7_i32; // scope 6 at $DIR/inline_generator.rs:15:24: 15:25
+ _6 = const 7_i32; // scope 6 at $DIR/inline_generator.rs:15:24: 15:25
+ goto -> bb6; // scope 6 at $DIR/inline_generator.rs:15:17: 15:39
+ }
+
+ bb5: {
+ _8 = const 13_i32; // scope 6 at $DIR/inline_generator.rs:15:35: 15:37
+ _6 = const 13_i32; // scope 6 at $DIR/inline_generator.rs:15:35: 15:37
+ goto -> bb6; // scope 6 at $DIR/inline_generator.rs:15:17: 15:39
+ }
+
+ bb6: {
+ _1 = GeneratorState::<i32, bool>::Yielded(move _8); // scope 6 at $DIR/inline_generator.rs:15:11: 15:39
+ _11 = deref_copy (_2.0: &mut [generator@$DIR/inline_generator.rs:15:5: 15:8]); // scope 6 at $DIR/inline_generator.rs:15:11: 15:39
+ discriminant((*_11)) = 3; // scope 6 at $DIR/inline_generator.rs:15:11: 15:39
+ _1 = GeneratorState::<i32, bool>::Yielded(move _6); // scope 6 at $DIR/inline_generator.rs:15:11: 15:39
+ _9 = deref_copy (_2.0: &mut [generator@$DIR/inline_generator.rs:15:5: 15:8]); // scope 6 at $DIR/inline_generator.rs:15:11: 15:39
+ discriminant((*_9)) = 3; // scope 6 at $DIR/inline_generator.rs:15:11: 15:39
+ goto -> bb1; // scope 0 at $DIR/inline_generator.rs:15:11: 15:39
+ }
+
+ bb7: {
+ StorageLive(_8); // scope 6 at $DIR/inline_generator.rs:15:5: 15:41
+ StorageDead(_8); // scope 6 at $DIR/inline_generator.rs:15:38: 15:39
+ _1 = GeneratorState::<i32, bool>::Complete(_7); // scope 6 at $DIR/inline_generator.rs:15:41: 15:41
+ _12 = deref_copy (_2.0: &mut [generator@$DIR/inline_generator.rs:15:5: 15:8]); // scope 6 at $DIR/inline_generator.rs:15:41: 15:41
+ discriminant((*_12)) = 1; // scope 6 at $DIR/inline_generator.rs:15:41: 15:41
+ StorageLive(_6); // scope 6 at $DIR/inline_generator.rs:15:5: 15:41
+ StorageDead(_6); // scope 6 at $DIR/inline_generator.rs:15:38: 15:39
+ _1 = GeneratorState::<i32, bool>::Complete(_5); // scope 6 at $DIR/inline_generator.rs:15:41: 15:41
+ _10 = deref_copy (_2.0: &mut [generator@$DIR/inline_generator.rs:15:5: 15:8]); // scope 6 at $DIR/inline_generator.rs:15:41: 15:41
+ discriminant((*_10)) = 1; // scope 6 at $DIR/inline_generator.rs:15:41: 15:41
+ goto -> bb1; // scope 0 at $DIR/inline_generator.rs:15:41: 15:41
+ }
+

View File

@ -26,13 +26,12 @@
scope 3 (inlined core::num::<impl u32>::rotate_right) { // at $DIR/issue_101973.rs:14:18: 14:58
debug self => _4; // in scope 3 at $SRC_DIR/core/src/num/uint_macros.rs:LL:COL
debug n => _6; // in scope 3 at $SRC_DIR/core/src/num/uint_macros.rs:LL:COL
let mut _15: u32; // in scope 3 at $SRC_DIR/core/src/num/uint_macros.rs:LL:COL
let mut _16: u32; // in scope 3 at $SRC_DIR/core/src/num/uint_macros.rs:LL:COL
}
bb0: {
StorageLive(_2); // scope 0 at $DIR/issue_101973.rs:+1:5: +1:65
StorageLive(_3); // scope 0 at $DIR/issue_101973.rs:+1:5: +1:58
StorageLive(_4); // scope 0 at $DIR/issue_101973.rs:+1:5: +1:17
StorageLive(_12); // scope 2 at $DIR/issue_101973.rs:7:12: 7:27
StorageLive(_13); // scope 2 at $DIR/issue_101973.rs:7:12: 7:20
_14 = CheckedShr(_1, const 0_i32); // scope 2 at $DIR/issue_101973.rs:7:12: 7:20
@ -62,6 +61,7 @@
StorageDead(_13); // scope 2 at $DIR/issue_101973.rs:7:26: 7:27
_4 = BitOr(const 0_u32, move _12); // scope 2 at $DIR/issue_101973.rs:7:5: 7:27
StorageDead(_12); // scope 2 at $DIR/issue_101973.rs:7:26: 7:27
StorageLive(_6); // scope 0 at $DIR/issue_101973.rs:+1:31: +1:57
StorageLive(_7); // scope 0 at $DIR/issue_101973.rs:+1:31: +1:52
StorageLive(_8); // scope 0 at $DIR/issue_101973.rs:+1:32: +1:45
_10 = CheckedShr(_1, const 8_i32); // scope 0 at $DIR/issue_101973.rs:+1:32: +1:45
@ -69,6 +69,8 @@
}
bb4: {
StorageDead(_6); // scope 0 at $DIR/issue_101973.rs:+1:57: +1:58
StorageDead(_4); // scope 0 at $DIR/issue_101973.rs:+1:57: +1:58
_2 = move _3 as i32 (IntToInt); // scope 0 at $DIR/issue_101973.rs:+1:5: +1:65
StorageDead(_3); // scope 0 at $DIR/issue_101973.rs:+1:64: +1:65
_0 = move _2 as i64 (IntToInt); // scope 0 at $DIR/issue_101973.rs:+1:5: +1:72

View File

@ -5,15 +5,14 @@ fn num_to_digit(_1: char) -> u32 {
let mut _0: u32; // return place in scope 0 at $DIR/issue_59352.rs:+0:35: +0:38
let mut _2: std::option::Option<u32>; // in scope 0 at $DIR/issue_59352.rs:+2:26: +2:41
let mut _3: u32; // in scope 0 at $DIR/issue_59352.rs:+2:12: +2:23
let mut _9: isize; // in scope 0 at $SRC_DIR/core/src/macros/mod.rs:LL:COL
scope 1 (inlined char::methods::<impl char>::is_digit) { // at $DIR/issue_59352.rs:14:12: 14:23
debug self => _1; // in scope 1 at $SRC_DIR/core/src/char/methods.rs:LL:COL
debug radix => _3; // in scope 1 at $SRC_DIR/core/src/char/methods.rs:LL:COL
let mut _4: &std::option::Option<u32>; // in scope 1 at $SRC_DIR/core/src/char/methods.rs:LL:COL
let _5: std::option::Option<u32>; // in scope 1 at $SRC_DIR/core/src/char/methods.rs:LL:COL
let mut _6: char; // in scope 1 at $SRC_DIR/core/src/char/methods.rs:LL:COL
scope 2 (inlined Option::<u32>::is_some) { // at $SRC_DIR/core/src/char/methods.rs:LL:COL
debug self => _4; // in scope 2 at $SRC_DIR/core/src/option.rs:LL:COL
let mut _6: isize; // in scope 2 at $SRC_DIR/core/src/option.rs:LL:COL
}
}
scope 3 (inlined #[track_caller] Option::<u32>::unwrap) { // at $DIR/issue_59352.rs:14:42: 14:50
@ -29,9 +28,7 @@ fn num_to_digit(_1: char) -> u32 {
StorageLive(_3); // scope 0 at $DIR/issue_59352.rs:+2:12: +2:23
StorageLive(_4); // scope 1 at $SRC_DIR/core/src/char/methods.rs:LL:COL
StorageLive(_5); // scope 1 at $SRC_DIR/core/src/char/methods.rs:LL:COL
StorageLive(_6); // scope 1 at $SRC_DIR/core/src/char/methods.rs:LL:COL
_6 = _1; // scope 1 at $SRC_DIR/core/src/char/methods.rs:LL:COL
_5 = char::methods::<impl char>::to_digit(move _6, const 8_u32) -> bb5; // scope 1 at $SRC_DIR/core/src/char/methods.rs:LL:COL
_5 = char::methods::<impl char>::to_digit(_1, const 8_u32) -> bb5; // scope 1 at $SRC_DIR/core/src/char/methods.rs:LL:COL
// mir::Constant
// + span: $SRC_DIR/core/src/char/methods.rs:LL:COL
// + literal: Const { ty: fn(char, u32) -> Option<u32> {char::methods::<impl char>::to_digit}, val: Value(<ZST>) }
@ -39,7 +36,7 @@ fn num_to_digit(_1: char) -> u32 {
bb1: {
StorageLive(_2); // scope 0 at $DIR/issue_59352.rs:+2:26: +2:41
_2 = char::methods::<impl char>::to_digit(move _1, const 8_u32) -> bb2; // scope 0 at $DIR/issue_59352.rs:+2:26: +2:41
_2 = char::methods::<impl char>::to_digit(_1, const 8_u32) -> bb2; // scope 0 at $DIR/issue_59352.rs:+2:26: +2:41
// mir::Constant
// + span: $DIR/issue_59352.rs:14:30: 14:38
// + literal: Const { ty: fn(char, u32) -> Option<u32> {char::methods::<impl char>::to_digit}, val: Value(<ZST>) }
@ -61,12 +58,11 @@ fn num_to_digit(_1: char) -> u32 {
bb5: {
_4 = &_5; // scope 1 at $SRC_DIR/core/src/char/methods.rs:LL:COL
StorageDead(_6); // scope 1 at $SRC_DIR/core/src/char/methods.rs:LL:COL
_9 = discriminant((*_4)); // scope 2 at $SRC_DIR/core/src/option.rs:LL:COL
_6 = discriminant((*_4)); // scope 2 at $SRC_DIR/core/src/option.rs:LL:COL
StorageDead(_4); // scope 1 at $SRC_DIR/core/src/char/methods.rs:LL:COL
StorageDead(_5); // scope 1 at $SRC_DIR/core/src/char/methods.rs:LL:COL
StorageDead(_3); // scope 0 at $DIR/issue_59352.rs:+2:12: +2:23
switchInt(move _9) -> [1: bb1, otherwise: bb3]; // scope 0 at $DIR/issue_59352.rs:+2:8: +2:23
switchInt(move _6) -> [1: bb1, otherwise: bb3]; // scope 0 at $DIR/issue_59352.rs:+2:8: +2:23
}
bb6: {

View File

@ -12,18 +12,18 @@
let mut _7: !; // in scope 0 at $DIR/separate_const_switch.rs:+1:9: +1:10
let mut _8: std::result::Result<std::convert::Infallible, i32>; // in scope 0 at $DIR/separate_const_switch.rs:+1:9: +1:10
let _9: i32; // in scope 0 at $DIR/separate_const_switch.rs:+1:8: +1:10
let mut _16: i32; // in scope 0 at $SRC_DIR/core/src/result.rs:LL:COL
scope 1 {
debug residual => _6; // in scope 1 at $DIR/separate_const_switch.rs:+1:9: +1:10
scope 2 {
scope 8 (inlined #[track_caller] <Result<i32, i32> as FromResidual<Result<Infallible, i32>>>::from_residual) { // at $DIR/separate_const_switch.rs:25:8: 25:10
debug residual => _8; // in scope 8 at $SRC_DIR/core/src/result.rs:LL:COL
let _16: i32; // in scope 8 at $SRC_DIR/core/src/result.rs:LL:COL
let mut _17: i32; // in scope 8 at $SRC_DIR/core/src/result.rs:LL:COL
let mut _18: i32; // in scope 8 at $SRC_DIR/core/src/result.rs:LL:COL
let _14: i32; // in scope 8 at $SRC_DIR/core/src/result.rs:LL:COL
let mut _15: i32; // in scope 8 at $SRC_DIR/core/src/result.rs:LL:COL
scope 9 {
debug e => _16; // in scope 9 at $SRC_DIR/core/src/result.rs:LL:COL
debug e => _14; // in scope 9 at $SRC_DIR/core/src/result.rs:LL:COL
scope 10 (inlined <i32 as From<i32>>::from) { // at $SRC_DIR/core/src/result.rs:LL:COL
debug t => _18; // in scope 10 at $SRC_DIR/core/src/convert/mod.rs:LL:COL
debug t => _16; // in scope 10 at $SRC_DIR/core/src/convert/mod.rs:LL:COL
}
}
}
@ -38,15 +38,13 @@
debug self => _4; // in scope 5 at $SRC_DIR/core/src/result.rs:LL:COL
let mut _10: isize; // in scope 5 at $SRC_DIR/core/src/result.rs:LL:COL
let _11: i32; // in scope 5 at $SRC_DIR/core/src/result.rs:LL:COL
let mut _12: i32; // in scope 5 at $SRC_DIR/core/src/result.rs:LL:COL
let _13: i32; // in scope 5 at $SRC_DIR/core/src/result.rs:LL:COL
let mut _14: std::result::Result<std::convert::Infallible, i32>; // in scope 5 at $SRC_DIR/core/src/result.rs:LL:COL
let mut _15: i32; // in scope 5 at $SRC_DIR/core/src/result.rs:LL:COL
let _12: i32; // in scope 5 at $SRC_DIR/core/src/result.rs:LL:COL
let mut _13: std::result::Result<std::convert::Infallible, i32>; // in scope 5 at $SRC_DIR/core/src/result.rs:LL:COL
scope 6 {
debug v => _11; // in scope 6 at $SRC_DIR/core/src/result.rs:LL:COL
}
scope 7 {
debug e => _13; // in scope 7 at $SRC_DIR/core/src/result.rs:LL:COL
debug e => _12; // in scope 7 at $SRC_DIR/core/src/result.rs:LL:COL
}
}
@ -55,11 +53,15 @@
StorageLive(_3); // scope 0 at $DIR/separate_const_switch.rs:+1:8: +1:10
StorageLive(_4); // scope 0 at $DIR/separate_const_switch.rs:+1:8: +1:9
_4 = _1; // scope 0 at $DIR/separate_const_switch.rs:+1:8: +1:9
StorageLive(_11); // scope 0 at $DIR/separate_const_switch.rs:+1:8: +1:10
StorageLive(_12); // scope 0 at $DIR/separate_const_switch.rs:+1:8: +1:10
_10 = discriminant(_4); // scope 5 at $SRC_DIR/core/src/result.rs:LL:COL
switchInt(move _10) -> [0: bb7, 1: bb5, otherwise: bb6]; // scope 5 at $SRC_DIR/core/src/result.rs:LL:COL
}
bb1: {
StorageDead(_12); // scope 0 at $DIR/separate_const_switch.rs:+1:8: +1:10
StorageDead(_11); // scope 0 at $DIR/separate_const_switch.rs:+1:8: +1:10
StorageDead(_4); // scope 0 at $DIR/separate_const_switch.rs:+1:9: +1:10
_5 = discriminant(_3); // scope 0 at $DIR/separate_const_switch.rs:+1:8: +1:10
switchInt(move _5) -> [0: bb2, 1: bb4, otherwise: bb3]; // scope 0 at $DIR/separate_const_switch.rs:+1:8: +1:10
@ -85,16 +87,16 @@
_6 = ((_3 as Break).0: std::result::Result<std::convert::Infallible, i32>); // scope 0 at $DIR/separate_const_switch.rs:+1:9: +1:10
StorageLive(_8); // scope 2 at $DIR/separate_const_switch.rs:+1:9: +1:10
_8 = _6; // scope 2 at $DIR/separate_const_switch.rs:+1:9: +1:10
StorageLive(_16); // scope 8 at $SRC_DIR/core/src/result.rs:LL:COL
_16 = move ((_8 as Err).0: i32); // scope 8 at $SRC_DIR/core/src/result.rs:LL:COL
StorageLive(_17); // scope 9 at $SRC_DIR/core/src/result.rs:LL:COL
StorageLive(_18); // scope 9 at $SRC_DIR/core/src/result.rs:LL:COL
_18 = move _16; // scope 9 at $SRC_DIR/core/src/result.rs:LL:COL
_17 = move _18; // scope 10 at $SRC_DIR/core/src/convert/mod.rs:LL:COL
StorageDead(_18); // scope 9 at $SRC_DIR/core/src/result.rs:LL:COL
_0 = Result::<i32, i32>::Err(move _17); // scope 9 at $SRC_DIR/core/src/result.rs:LL:COL
StorageDead(_17); // scope 9 at $SRC_DIR/core/src/result.rs:LL:COL
StorageDead(_16); // scope 8 at $SRC_DIR/core/src/result.rs:LL:COL
StorageLive(_14); // scope 2 at $DIR/separate_const_switch.rs:+1:8: +1:10
_14 = move ((_8 as Err).0: i32); // scope 8 at $SRC_DIR/core/src/result.rs:LL:COL
StorageLive(_15); // scope 9 at $SRC_DIR/core/src/result.rs:LL:COL
StorageLive(_16); // scope 9 at $SRC_DIR/core/src/result.rs:LL:COL
_16 = move _14; // scope 9 at $SRC_DIR/core/src/result.rs:LL:COL
_15 = move _16; // scope 10 at $SRC_DIR/core/src/convert/mod.rs:LL:COL
StorageDead(_16); // scope 9 at $SRC_DIR/core/src/result.rs:LL:COL
_0 = Result::<i32, i32>::Err(move _15); // scope 9 at $SRC_DIR/core/src/result.rs:LL:COL
StorageDead(_15); // scope 9 at $SRC_DIR/core/src/result.rs:LL:COL
StorageDead(_14); // scope 2 at $DIR/separate_const_switch.rs:+1:8: +1:10
StorageDead(_8); // scope 2 at $DIR/separate_const_switch.rs:+1:9: +1:10
StorageDead(_6); // scope 0 at $DIR/separate_const_switch.rs:+1:9: +1:10
StorageDead(_2); // scope 0 at $DIR/separate_const_switch.rs:+1:10: +1:11
@ -103,16 +105,11 @@
}
bb5: {
StorageLive(_13); // scope 5 at $SRC_DIR/core/src/result.rs:LL:COL
_13 = move ((_4 as Err).0: i32); // scope 5 at $SRC_DIR/core/src/result.rs:LL:COL
StorageLive(_14); // scope 7 at $SRC_DIR/core/src/result.rs:LL:COL
StorageLive(_15); // scope 7 at $SRC_DIR/core/src/result.rs:LL:COL
_15 = move _13; // scope 7 at $SRC_DIR/core/src/result.rs:LL:COL
_14 = Result::<Infallible, i32>::Err(move _15); // scope 7 at $SRC_DIR/core/src/result.rs:LL:COL
StorageDead(_15); // scope 7 at $SRC_DIR/core/src/result.rs:LL:COL
_3 = ControlFlow::<Result<Infallible, i32>, i32>::Break(move _14); // scope 7 at $SRC_DIR/core/src/result.rs:LL:COL
StorageDead(_14); // scope 7 at $SRC_DIR/core/src/result.rs:LL:COL
StorageDead(_13); // scope 5 at $SRC_DIR/core/src/result.rs:LL:COL
_12 = move ((_4 as Err).0: i32); // scope 5 at $SRC_DIR/core/src/result.rs:LL:COL
StorageLive(_13); // scope 7 at $SRC_DIR/core/src/result.rs:LL:COL
_13 = Result::<Infallible, i32>::Err(move _12); // scope 7 at $SRC_DIR/core/src/result.rs:LL:COL
_3 = ControlFlow::<Result<Infallible, i32>, i32>::Break(move _13); // scope 7 at $SRC_DIR/core/src/result.rs:LL:COL
StorageDead(_13); // scope 7 at $SRC_DIR/core/src/result.rs:LL:COL
goto -> bb1; // scope 5 at $SRC_DIR/core/src/result.rs:LL:COL
}
@@ -121,13 +118,8 @@
}
bb7: {
StorageLive(_11); // scope 5 at $SRC_DIR/core/src/result.rs:LL:COL
_11 = move ((_4 as Ok).0: i32); // scope 5 at $SRC_DIR/core/src/result.rs:LL:COL
StorageLive(_12); // scope 6 at $SRC_DIR/core/src/result.rs:LL:COL
_12 = move _11; // scope 6 at $SRC_DIR/core/src/result.rs:LL:COL
_3 = ControlFlow::<Result<Infallible, i32>, i32>::Continue(move _12); // scope 6 at $SRC_DIR/core/src/result.rs:LL:COL
StorageDead(_12); // scope 6 at $SRC_DIR/core/src/result.rs:LL:COL
StorageDead(_11); // scope 5 at $SRC_DIR/core/src/result.rs:LL:COL
_3 = ControlFlow::<Result<Infallible, i32>, i32>::Continue(move _11); // scope 6 at $SRC_DIR/core/src/result.rs:LL:COL
goto -> bb1; // scope 5 at $SRC_DIR/core/src/result.rs:LL:COL
}
}

View File

@@ -34,15 +34,10 @@
}
bb2: {
StorageLive(_6); // scope 1 at $DIR/simplify_locals_fixedpoint.rs:+1:18: +1:19
_6 = (((_1.0: std::option::Option<u8>) as Some).0: u8); // scope 1 at $DIR/simplify_locals_fixedpoint.rs:+1:18: +1:19
- StorageLive(_7); // scope 1 at $DIR/simplify_locals_fixedpoint.rs:+2:12: +2:20
- StorageLive(_8); // scope 1 at $DIR/simplify_locals_fixedpoint.rs:+2:12: +2:13
- _8 = _6; // scope 1 at $DIR/simplify_locals_fixedpoint.rs:+2:12: +2:13
- _7 = Gt(move _8, const 42_u8); // scope 1 at $DIR/simplify_locals_fixedpoint.rs:+2:12: +2:20
- StorageDead(_8); // scope 1 at $DIR/simplify_locals_fixedpoint.rs:+2:19: +2:20
- _7 = Gt(_6, const 42_u8); // scope 1 at $DIR/simplify_locals_fixedpoint.rs:+2:12: +2:20
- StorageDead(_7); // scope 1 at $DIR/simplify_locals_fixedpoint.rs:+4:9: +4:10
StorageDead(_6); // scope 0 at $DIR/simplify_locals_fixedpoint.rs:+5:5: +5:6
goto -> bb3; // scope 0 at $DIR/simplify_locals_fixedpoint.rs:+1:5: +5:6
}

View File

@@ -29,6 +29,14 @@
let mut _26: &(usize, usize, usize, usize); // in scope 0 at $DIR/slice_filter.rs:+0:26: +0:38
let mut _27: &(usize, usize, usize, usize); // in scope 0 at $DIR/slice_filter.rs:+0:26: +0:38
let mut _28: &(usize, usize, usize, usize); // in scope 0 at $DIR/slice_filter.rs:+0:26: +0:38
let mut _31: &usize; // in scope 0 at $SRC_DIR/core/src/cmp.rs:LL:COL
let mut _32: &usize; // in scope 0 at $SRC_DIR/core/src/cmp.rs:LL:COL
let mut _37: &usize; // in scope 0 at $SRC_DIR/core/src/cmp.rs:LL:COL
let mut _38: &usize; // in scope 0 at $SRC_DIR/core/src/cmp.rs:LL:COL
let mut _43: &usize; // in scope 0 at $SRC_DIR/core/src/cmp.rs:LL:COL
let mut _44: &usize; // in scope 0 at $SRC_DIR/core/src/cmp.rs:LL:COL
let mut _49: &usize; // in scope 0 at $SRC_DIR/core/src/cmp.rs:LL:COL
let mut _50: &usize; // in scope 0 at $SRC_DIR/core/src/cmp.rs:LL:COL
scope 1 {
debug a => _3; // in scope 1 at $DIR/slice_filter.rs:+0:27: +0:28
debug b => _4; // in scope 1 at $DIR/slice_filter.rs:+0:30: +0:31
@@ -39,13 +47,11 @@
debug other => _10; // in scope 2 at $SRC_DIR/core/src/cmp.rs:LL:COL
let mut _29: &usize; // in scope 2 at $SRC_DIR/core/src/cmp.rs:LL:COL
let mut _30: &usize; // in scope 2 at $SRC_DIR/core/src/cmp.rs:LL:COL
let mut _31: &usize; // in scope 2 at $SRC_DIR/core/src/cmp.rs:LL:COL
let mut _32: &usize; // in scope 2 at $SRC_DIR/core/src/cmp.rs:LL:COL
scope 3 (inlined cmp::impls::<impl PartialOrd for usize>::le) { // at $SRC_DIR/core/src/cmp.rs:LL:COL
- debug self => _29; // in scope 3 at $SRC_DIR/core/src/cmp.rs:LL:COL
- debug other => _30; // in scope 3 at $SRC_DIR/core/src/cmp.rs:LL:COL
+ debug self => _31; // in scope 3 at $SRC_DIR/core/src/cmp.rs:LL:COL
+ debug other => _32; // in scope 3 at $SRC_DIR/core/src/cmp.rs:LL:COL
- debug self => _31; // in scope 3 at $SRC_DIR/core/src/cmp.rs:LL:COL
- debug other => _32; // in scope 3 at $SRC_DIR/core/src/cmp.rs:LL:COL
+ debug self => _29; // in scope 3 at $SRC_DIR/core/src/cmp.rs:LL:COL
+ debug other => _30; // in scope 3 at $SRC_DIR/core/src/cmp.rs:LL:COL
let mut _33: usize; // in scope 3 at $SRC_DIR/core/src/cmp.rs:LL:COL
let mut _34: usize; // in scope 3 at $SRC_DIR/core/src/cmp.rs:LL:COL
}
@@ -55,13 +61,11 @@
debug other => _19; // in scope 4 at $SRC_DIR/core/src/cmp.rs:LL:COL
let mut _35: &usize; // in scope 4 at $SRC_DIR/core/src/cmp.rs:LL:COL
let mut _36: &usize; // in scope 4 at $SRC_DIR/core/src/cmp.rs:LL:COL
let mut _37: &usize; // in scope 4 at $SRC_DIR/core/src/cmp.rs:LL:COL
let mut _38: &usize; // in scope 4 at $SRC_DIR/core/src/cmp.rs:LL:COL
scope 5 (inlined cmp::impls::<impl PartialOrd for usize>::le) { // at $SRC_DIR/core/src/cmp.rs:LL:COL
- debug self => _35; // in scope 5 at $SRC_DIR/core/src/cmp.rs:LL:COL
- debug other => _36; // in scope 5 at $SRC_DIR/core/src/cmp.rs:LL:COL
+ debug self => _37; // in scope 5 at $SRC_DIR/core/src/cmp.rs:LL:COL
+ debug other => _38; // in scope 5 at $SRC_DIR/core/src/cmp.rs:LL:COL
- debug self => _37; // in scope 5 at $SRC_DIR/core/src/cmp.rs:LL:COL
- debug other => _38; // in scope 5 at $SRC_DIR/core/src/cmp.rs:LL:COL
+ debug self => _35; // in scope 5 at $SRC_DIR/core/src/cmp.rs:LL:COL
+ debug other => _36; // in scope 5 at $SRC_DIR/core/src/cmp.rs:LL:COL
let mut _39: usize; // in scope 5 at $SRC_DIR/core/src/cmp.rs:LL:COL
let mut _40: usize; // in scope 5 at $SRC_DIR/core/src/cmp.rs:LL:COL
}
@@ -71,13 +75,11 @@
debug other => _14; // in scope 6 at $SRC_DIR/core/src/cmp.rs:LL:COL
let mut _41: &usize; // in scope 6 at $SRC_DIR/core/src/cmp.rs:LL:COL
let mut _42: &usize; // in scope 6 at $SRC_DIR/core/src/cmp.rs:LL:COL
let mut _43: &usize; // in scope 6 at $SRC_DIR/core/src/cmp.rs:LL:COL
let mut _44: &usize; // in scope 6 at $SRC_DIR/core/src/cmp.rs:LL:COL
scope 7 (inlined cmp::impls::<impl PartialOrd for usize>::le) { // at $SRC_DIR/core/src/cmp.rs:LL:COL
- debug self => _41; // in scope 7 at $SRC_DIR/core/src/cmp.rs:LL:COL
- debug other => _42; // in scope 7 at $SRC_DIR/core/src/cmp.rs:LL:COL
+ debug self => _43; // in scope 7 at $SRC_DIR/core/src/cmp.rs:LL:COL
+ debug other => _44; // in scope 7 at $SRC_DIR/core/src/cmp.rs:LL:COL
- debug self => _43; // in scope 7 at $SRC_DIR/core/src/cmp.rs:LL:COL
- debug other => _44; // in scope 7 at $SRC_DIR/core/src/cmp.rs:LL:COL
+ debug self => _41; // in scope 7 at $SRC_DIR/core/src/cmp.rs:LL:COL
+ debug other => _42; // in scope 7 at $SRC_DIR/core/src/cmp.rs:LL:COL
let mut _45: usize; // in scope 7 at $SRC_DIR/core/src/cmp.rs:LL:COL
let mut _46: usize; // in scope 7 at $SRC_DIR/core/src/cmp.rs:LL:COL
}
@@ -87,13 +89,11 @@
debug other => _23; // in scope 8 at $SRC_DIR/core/src/cmp.rs:LL:COL
let mut _47: &usize; // in scope 8 at $SRC_DIR/core/src/cmp.rs:LL:COL
let mut _48: &usize; // in scope 8 at $SRC_DIR/core/src/cmp.rs:LL:COL
let mut _49: &usize; // in scope 8 at $SRC_DIR/core/src/cmp.rs:LL:COL
let mut _50: &usize; // in scope 8 at $SRC_DIR/core/src/cmp.rs:LL:COL
scope 9 (inlined cmp::impls::<impl PartialOrd for usize>::le) { // at $SRC_DIR/core/src/cmp.rs:LL:COL
- debug self => _47; // in scope 9 at $SRC_DIR/core/src/cmp.rs:LL:COL
- debug other => _48; // in scope 9 at $SRC_DIR/core/src/cmp.rs:LL:COL
+ debug self => _49; // in scope 9 at $SRC_DIR/core/src/cmp.rs:LL:COL
+ debug other => _50; // in scope 9 at $SRC_DIR/core/src/cmp.rs:LL:COL
- debug self => _49; // in scope 9 at $SRC_DIR/core/src/cmp.rs:LL:COL
- debug other => _50; // in scope 9 at $SRC_DIR/core/src/cmp.rs:LL:COL
+ debug self => _47; // in scope 9 at $SRC_DIR/core/src/cmp.rs:LL:COL
+ debug other => _48; // in scope 9 at $SRC_DIR/core/src/cmp.rs:LL:COL
let mut _51: usize; // in scope 9 at $SRC_DIR/core/src/cmp.rs:LL:COL
let mut _52: usize; // in scope 9 at $SRC_DIR/core/src/cmp.rs:LL:COL
}
@@ -121,23 +121,23 @@
StorageLive(_11); // scope 1 at $DIR/slice_filter.rs:+0:45: +0:46
_11 = _5; // scope 1 at $DIR/slice_filter.rs:+0:45: +0:46
_10 = &_11; // scope 1 at $DIR/slice_filter.rs:+0:45: +0:46
- StorageLive(_29); // scope 2 at $SRC_DIR/core/src/cmp.rs:LL:COL
_31 = deref_copy (*_9); // scope 2 at $SRC_DIR/core/src/cmp.rs:LL:COL
- _29 = _31; // scope 2 at $SRC_DIR/core/src/cmp.rs:LL:COL
- StorageLive(_30); // scope 2 at $SRC_DIR/core/src/cmp.rs:LL:COL
_32 = deref_copy (*_10); // scope 2 at $SRC_DIR/core/src/cmp.rs:LL:COL
- _30 = _32; // scope 2 at $SRC_DIR/core/src/cmp.rs:LL:COL
_29 = deref_copy (*_9); // scope 2 at $SRC_DIR/core/src/cmp.rs:LL:COL
_30 = deref_copy (*_10); // scope 2 at $SRC_DIR/core/src/cmp.rs:LL:COL
- StorageLive(_31); // scope 2 at $SRC_DIR/core/src/cmp.rs:LL:COL
- _31 = _29; // scope 2 at $SRC_DIR/core/src/cmp.rs:LL:COL
- StorageLive(_32); // scope 2 at $SRC_DIR/core/src/cmp.rs:LL:COL
- _32 = _30; // scope 2 at $SRC_DIR/core/src/cmp.rs:LL:COL
StorageLive(_33); // scope 3 at $SRC_DIR/core/src/cmp.rs:LL:COL
- _33 = (*_29); // scope 3 at $SRC_DIR/core/src/cmp.rs:LL:COL
+ _33 = (*_31); // scope 3 at $SRC_DIR/core/src/cmp.rs:LL:COL
- _33 = (*_31); // scope 3 at $SRC_DIR/core/src/cmp.rs:LL:COL
+ _33 = (*_29); // scope 3 at $SRC_DIR/core/src/cmp.rs:LL:COL
StorageLive(_34); // scope 3 at $SRC_DIR/core/src/cmp.rs:LL:COL
- _34 = (*_30); // scope 3 at $SRC_DIR/core/src/cmp.rs:LL:COL
+ _34 = (*_32); // scope 3 at $SRC_DIR/core/src/cmp.rs:LL:COL
- _34 = (*_32); // scope 3 at $SRC_DIR/core/src/cmp.rs:LL:COL
+ _34 = (*_30); // scope 3 at $SRC_DIR/core/src/cmp.rs:LL:COL
_8 = Le(move _33, move _34); // scope 3 at $SRC_DIR/core/src/cmp.rs:LL:COL
StorageDead(_34); // scope 3 at $SRC_DIR/core/src/cmp.rs:LL:COL
StorageDead(_33); // scope 3 at $SRC_DIR/core/src/cmp.rs:LL:COL
- StorageDead(_30); // scope 2 at $SRC_DIR/core/src/cmp.rs:LL:COL
- StorageDead(_29); // scope 2 at $SRC_DIR/core/src/cmp.rs:LL:COL
- StorageDead(_32); // scope 2 at $SRC_DIR/core/src/cmp.rs:LL:COL
- StorageDead(_31); // scope 2 at $SRC_DIR/core/src/cmp.rs:LL:COL
StorageDead(_11); // scope 1 at $DIR/slice_filter.rs:+0:45: +0:46
StorageDead(_10); // scope 1 at $DIR/slice_filter.rs:+0:45: +0:46
StorageDead(_9); // scope 1 at $DIR/slice_filter.rs:+0:45: +0:46
@@ -158,23 +158,23 @@
StorageLive(_20); // scope 1 at $DIR/slice_filter.rs:+0:65: +0:66
_20 = _3; // scope 1 at $DIR/slice_filter.rs:+0:65: +0:66
_19 = &_20; // scope 1 at $DIR/slice_filter.rs:+0:65: +0:66
- StorageLive(_35); // scope 4 at $SRC_DIR/core/src/cmp.rs:LL:COL
_37 = deref_copy (*_18); // scope 4 at $SRC_DIR/core/src/cmp.rs:LL:COL
- _35 = _37; // scope 4 at $SRC_DIR/core/src/cmp.rs:LL:COL
- StorageLive(_36); // scope 4 at $SRC_DIR/core/src/cmp.rs:LL:COL
_38 = deref_copy (*_19); // scope 4 at $SRC_DIR/core/src/cmp.rs:LL:COL
- _36 = _38; // scope 4 at $SRC_DIR/core/src/cmp.rs:LL:COL
_35 = deref_copy (*_18); // scope 4 at $SRC_DIR/core/src/cmp.rs:LL:COL
_36 = deref_copy (*_19); // scope 4 at $SRC_DIR/core/src/cmp.rs:LL:COL
- StorageLive(_37); // scope 4 at $SRC_DIR/core/src/cmp.rs:LL:COL
- _37 = _35; // scope 4 at $SRC_DIR/core/src/cmp.rs:LL:COL
- StorageLive(_38); // scope 4 at $SRC_DIR/core/src/cmp.rs:LL:COL
- _38 = _36; // scope 4 at $SRC_DIR/core/src/cmp.rs:LL:COL
StorageLive(_39); // scope 5 at $SRC_DIR/core/src/cmp.rs:LL:COL
- _39 = (*_35); // scope 5 at $SRC_DIR/core/src/cmp.rs:LL:COL
+ _39 = (*_37); // scope 5 at $SRC_DIR/core/src/cmp.rs:LL:COL
- _39 = (*_37); // scope 5 at $SRC_DIR/core/src/cmp.rs:LL:COL
+ _39 = (*_35); // scope 5 at $SRC_DIR/core/src/cmp.rs:LL:COL
StorageLive(_40); // scope 5 at $SRC_DIR/core/src/cmp.rs:LL:COL
- _40 = (*_36); // scope 5 at $SRC_DIR/core/src/cmp.rs:LL:COL
+ _40 = (*_38); // scope 5 at $SRC_DIR/core/src/cmp.rs:LL:COL
- _40 = (*_38); // scope 5 at $SRC_DIR/core/src/cmp.rs:LL:COL
+ _40 = (*_36); // scope 5 at $SRC_DIR/core/src/cmp.rs:LL:COL
_17 = Le(move _39, move _40); // scope 5 at $SRC_DIR/core/src/cmp.rs:LL:COL
StorageDead(_40); // scope 5 at $SRC_DIR/core/src/cmp.rs:LL:COL
StorageDead(_39); // scope 5 at $SRC_DIR/core/src/cmp.rs:LL:COL
- StorageDead(_36); // scope 4 at $SRC_DIR/core/src/cmp.rs:LL:COL
- StorageDead(_35); // scope 4 at $SRC_DIR/core/src/cmp.rs:LL:COL
- StorageDead(_38); // scope 4 at $SRC_DIR/core/src/cmp.rs:LL:COL
- StorageDead(_37); // scope 4 at $SRC_DIR/core/src/cmp.rs:LL:COL
StorageDead(_20); // scope 1 at $DIR/slice_filter.rs:+0:65: +0:66
StorageDead(_19); // scope 1 at $DIR/slice_filter.rs:+0:65: +0:66
StorageDead(_18); // scope 1 at $DIR/slice_filter.rs:+0:65: +0:66
@@ -206,23 +206,23 @@
StorageLive(_15); // scope 1 at $DIR/slice_filter.rs:+0:55: +0:56
_15 = _4; // scope 1 at $DIR/slice_filter.rs:+0:55: +0:56
_14 = &_15; // scope 1 at $DIR/slice_filter.rs:+0:55: +0:56
- StorageLive(_41); // scope 6 at $SRC_DIR/core/src/cmp.rs:LL:COL
_43 = deref_copy (*_13); // scope 6 at $SRC_DIR/core/src/cmp.rs:LL:COL
- _41 = _43; // scope 6 at $SRC_DIR/core/src/cmp.rs:LL:COL
- StorageLive(_42); // scope 6 at $SRC_DIR/core/src/cmp.rs:LL:COL
_44 = deref_copy (*_14); // scope 6 at $SRC_DIR/core/src/cmp.rs:LL:COL
- _42 = _44; // scope 6 at $SRC_DIR/core/src/cmp.rs:LL:COL
_41 = deref_copy (*_13); // scope 6 at $SRC_DIR/core/src/cmp.rs:LL:COL
_42 = deref_copy (*_14); // scope 6 at $SRC_DIR/core/src/cmp.rs:LL:COL
- StorageLive(_43); // scope 6 at $SRC_DIR/core/src/cmp.rs:LL:COL
- _43 = _41; // scope 6 at $SRC_DIR/core/src/cmp.rs:LL:COL
- StorageLive(_44); // scope 6 at $SRC_DIR/core/src/cmp.rs:LL:COL
- _44 = _42; // scope 6 at $SRC_DIR/core/src/cmp.rs:LL:COL
StorageLive(_45); // scope 7 at $SRC_DIR/core/src/cmp.rs:LL:COL
- _45 = (*_41); // scope 7 at $SRC_DIR/core/src/cmp.rs:LL:COL
+ _45 = (*_43); // scope 7 at $SRC_DIR/core/src/cmp.rs:LL:COL
- _45 = (*_43); // scope 7 at $SRC_DIR/core/src/cmp.rs:LL:COL
+ _45 = (*_41); // scope 7 at $SRC_DIR/core/src/cmp.rs:LL:COL
StorageLive(_46); // scope 7 at $SRC_DIR/core/src/cmp.rs:LL:COL
- _46 = (*_42); // scope 7 at $SRC_DIR/core/src/cmp.rs:LL:COL
+ _46 = (*_44); // scope 7 at $SRC_DIR/core/src/cmp.rs:LL:COL
- _46 = (*_44); // scope 7 at $SRC_DIR/core/src/cmp.rs:LL:COL
+ _46 = (*_42); // scope 7 at $SRC_DIR/core/src/cmp.rs:LL:COL
_12 = Le(move _45, move _46); // scope 7 at $SRC_DIR/core/src/cmp.rs:LL:COL
StorageDead(_46); // scope 7 at $SRC_DIR/core/src/cmp.rs:LL:COL
StorageDead(_45); // scope 7 at $SRC_DIR/core/src/cmp.rs:LL:COL
- StorageDead(_42); // scope 6 at $SRC_DIR/core/src/cmp.rs:LL:COL
- StorageDead(_41); // scope 6 at $SRC_DIR/core/src/cmp.rs:LL:COL
- StorageDead(_44); // scope 6 at $SRC_DIR/core/src/cmp.rs:LL:COL
- StorageDead(_43); // scope 6 at $SRC_DIR/core/src/cmp.rs:LL:COL
StorageDead(_15); // scope 1 at $DIR/slice_filter.rs:+0:55: +0:56
StorageDead(_14); // scope 1 at $DIR/slice_filter.rs:+0:55: +0:56
StorageDead(_13); // scope 1 at $DIR/slice_filter.rs:+0:55: +0:56
@@ -245,23 +245,23 @@
StorageLive(_24); // scope 1 at $DIR/slice_filter.rs:+0:75: +0:76
_24 = _6; // scope 1 at $DIR/slice_filter.rs:+0:75: +0:76
_23 = &_24; // scope 1 at $DIR/slice_filter.rs:+0:75: +0:76
- StorageLive(_47); // scope 8 at $SRC_DIR/core/src/cmp.rs:LL:COL
_49 = deref_copy (*_22); // scope 8 at $SRC_DIR/core/src/cmp.rs:LL:COL
- _47 = _49; // scope 8 at $SRC_DIR/core/src/cmp.rs:LL:COL
- StorageLive(_48); // scope 8 at $SRC_DIR/core/src/cmp.rs:LL:COL
_50 = deref_copy (*_23); // scope 8 at $SRC_DIR/core/src/cmp.rs:LL:COL
- _48 = _50; // scope 8 at $SRC_DIR/core/src/cmp.rs:LL:COL
_47 = deref_copy (*_22); // scope 8 at $SRC_DIR/core/src/cmp.rs:LL:COL
_48 = deref_copy (*_23); // scope 8 at $SRC_DIR/core/src/cmp.rs:LL:COL
- StorageLive(_49); // scope 8 at $SRC_DIR/core/src/cmp.rs:LL:COL
- _49 = _47; // scope 8 at $SRC_DIR/core/src/cmp.rs:LL:COL
- StorageLive(_50); // scope 8 at $SRC_DIR/core/src/cmp.rs:LL:COL
- _50 = _48; // scope 8 at $SRC_DIR/core/src/cmp.rs:LL:COL
StorageLive(_51); // scope 9 at $SRC_DIR/core/src/cmp.rs:LL:COL
- _51 = (*_47); // scope 9 at $SRC_DIR/core/src/cmp.rs:LL:COL
+ _51 = (*_49); // scope 9 at $SRC_DIR/core/src/cmp.rs:LL:COL
- _51 = (*_49); // scope 9 at $SRC_DIR/core/src/cmp.rs:LL:COL
+ _51 = (*_47); // scope 9 at $SRC_DIR/core/src/cmp.rs:LL:COL
StorageLive(_52); // scope 9 at $SRC_DIR/core/src/cmp.rs:LL:COL
- _52 = (*_48); // scope 9 at $SRC_DIR/core/src/cmp.rs:LL:COL
+ _52 = (*_50); // scope 9 at $SRC_DIR/core/src/cmp.rs:LL:COL
- _52 = (*_50); // scope 9 at $SRC_DIR/core/src/cmp.rs:LL:COL
+ _52 = (*_48); // scope 9 at $SRC_DIR/core/src/cmp.rs:LL:COL
_21 = Le(move _51, move _52); // scope 9 at $SRC_DIR/core/src/cmp.rs:LL:COL
StorageDead(_52); // scope 9 at $SRC_DIR/core/src/cmp.rs:LL:COL
StorageDead(_51); // scope 9 at $SRC_DIR/core/src/cmp.rs:LL:COL
- StorageDead(_48); // scope 8 at $SRC_DIR/core/src/cmp.rs:LL:COL
- StorageDead(_47); // scope 8 at $SRC_DIR/core/src/cmp.rs:LL:COL
- StorageDead(_50); // scope 8 at $SRC_DIR/core/src/cmp.rs:LL:COL
- StorageDead(_49); // scope 8 at $SRC_DIR/core/src/cmp.rs:LL:COL
StorageDead(_24); // scope 1 at $DIR/slice_filter.rs:+0:75: +0:76
StorageDead(_23); // scope 1 at $DIR/slice_filter.rs:+0:75: +0:76
StorageDead(_22); // scope 1 at $DIR/slice_filter.rs:+0:75: +0:76