More GVN for PtrMetadata

`PtrMetadata` doesn't care about `*const`/`*mut`/`&`/`&mut`, so GVN can look through (and remove) those casts in its argument.

This includes updating MIR to allow calling `PtrMetadata` on references too, not just raw pointers. That means that `[T]::len` can be just `_0 = PtrMetadata(_1)`, for example.

# Conflicts:
#	tests/mir-opt/pre-codegen/slice_index.slice_get_unchecked_mut_range.PreCodegen.after.panic-abort.mir
#	tests/mir-opt/pre-codegen/slice_index.slice_get_unchecked_mut_range.PreCodegen.after.panic-unwind.mir
This commit is contained in:
Scott McMurray 2024-06-19 22:14:31 -07:00
parent 31d8696ac9
commit 4a7b6c0e6c
14 changed files with 319 additions and 78 deletions

View File

@ -639,7 +639,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
(OperandValue::Immediate(llval), operand.layout) (OperandValue::Immediate(llval), operand.layout)
} }
mir::UnOp::PtrMetadata => { mir::UnOp::PtrMetadata => {
debug_assert!(operand.layout.ty.is_unsafe_ptr()); debug_assert!(
operand.layout.ty.is_unsafe_ptr() || operand.layout.ty.is_ref(),
);
let (_, meta) = operand.val.pointer_parts(); let (_, meta) = operand.val.pointer_parts();
assert_eq!(operand.layout.fields.count() > 1, meta.is_some()); assert_eq!(operand.layout.fields.count() > 1, meta.is_some());
if let Some(meta) = meta { if let Some(meta) = meta {

View File

@ -460,7 +460,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
let res = ScalarInt::truncate_from_uint(res, layout.size).0; let res = ScalarInt::truncate_from_uint(res, layout.size).0;
Ok(ImmTy::from_scalar(res.into(), layout)) Ok(ImmTy::from_scalar(res.into(), layout))
} }
ty::RawPtr(..) => { ty::RawPtr(..) | ty::Ref(..) => {
assert_eq!(un_op, PtrMetadata); assert_eq!(un_op, PtrMetadata);
let (_, meta) = val.to_scalar_and_meta(); let (_, meta) = val.to_scalar_and_meta();
Ok(match meta { Ok(match meta {

View File

@ -1446,10 +1446,12 @@ pub enum UnOp {
Not, Not,
/// The `-` operator for negation /// The `-` operator for negation
Neg, Neg,
/// Get the metadata `M` from a `*const/mut impl Pointee<Metadata = M>`. /// Gets the metadata `M` from a `*const`/`*mut`/`&`/`&mut` to
/// `impl Pointee<Metadata = M>`.
/// ///
/// For example, this will give a `()` from `*const i32`, a `usize` from /// For example, this will give a `()` from `*const i32`, a `usize` from
/// `*mut [u8]`, or a pointer to a vtable from a `*const dyn Foo`. /// `&mut [u8]`, or a `ptr::DynMetadata<dyn Foo>` (internally a pointer)
/// from a `*mut dyn Foo`.
/// ///
/// Allowed only in [`MirPhase::Runtime`]; earlier it's an intrinsic. /// Allowed only in [`MirPhase::Runtime`]; earlier it's an intrinsic.
PtrMetadata, PtrMetadata,

View File

@ -836,12 +836,8 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
} }
Value::BinaryOp(op, lhs, rhs) Value::BinaryOp(op, lhs, rhs)
} }
Rvalue::UnaryOp(op, ref mut arg) => { Rvalue::UnaryOp(op, ref mut arg_op) => {
let arg = self.simplify_operand(arg, location)?; return self.simplify_unary(op, arg_op, location);
if let Some(value) = self.simplify_unary(op, arg) {
return Some(value);
}
Value::UnaryOp(op, arg)
} }
Rvalue::Discriminant(ref mut place) => { Rvalue::Discriminant(ref mut place) => {
let place = self.simplify_place_value(place, location)?; let place = self.simplify_place_value(place, location)?;
@ -971,8 +967,71 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
} }
#[instrument(level = "trace", skip(self), ret)] #[instrument(level = "trace", skip(self), ret)]
fn simplify_unary(&mut self, op: UnOp, value: VnIndex) -> Option<VnIndex> { fn simplify_unary(
let value = match (op, self.get(value)) { &mut self,
op: UnOp,
arg_op: &mut Operand<'tcx>,
location: Location,
) -> Option<VnIndex> {
let mut arg_index = self.simplify_operand(arg_op, location)?;
// PtrMetadata doesn't care about *const vs *mut vs & vs &mut,
// so start by removing those distinctions so we can update the `Operand`
if op == UnOp::PtrMetadata {
let mut was_updated = false;
loop {
match self.get(arg_index) {
// Pointer casts that preserve metadata, such as
// `*const [i32]` <-> `*mut [i32]` <-> `*mut [f32]`.
// It's critical that this not eliminate cases like
// `*const [T]` -> `*const T` which remove metadata.
// We run on potentially-generic MIR, though, so unlike codegen
// we can't always know exactly what the metadata are.
// Thankfully, equality on `ptr_metadata_ty_or_tail` gives us
// what we need: `Ok(meta_ty)` if the metadata is known, or
// `Err(tail_ty)` if not. Matching metadata is ok, but if
// that's not known, then matching tail types is also ok,
// allowing things like `*mut (?A, ?T)` <-> `*mut (?B, ?T)`.
// FIXME: Would it be worth trying to normalize, rather than
// passing the identity closure? Or are the types in the
// Cast realistically about as normalized as we can get anyway?
Value::Cast { kind: CastKind::PtrToPtr, value: inner, from, to }
if from
.builtin_deref(true)
.unwrap()
.ptr_metadata_ty_or_tail(self.tcx, |t| t)
== to
.builtin_deref(true)
.unwrap()
.ptr_metadata_ty_or_tail(self.tcx, |t| t) =>
{
arg_index = *inner;
was_updated = true;
continue;
}
// `&mut *p`, `&raw *p`, etc don't change metadata.
Value::Address { place, kind: _, provenance: _ }
if let PlaceRef { local, projection: [PlaceElem::Deref] } =
place.as_ref()
&& let Some(local_index) = self.locals[local] =>
{
arg_index = local_index;
was_updated = true;
continue;
}
_ => {
if was_updated && let Some(op) = self.try_as_operand(arg_index, location) {
*arg_op = op;
}
break;
}
}
}
}
let value = match (op, self.get(arg_index)) {
(UnOp::Not, Value::UnaryOp(UnOp::Not, inner)) => return Some(*inner), (UnOp::Not, Value::UnaryOp(UnOp::Not, inner)) => return Some(*inner),
(UnOp::Neg, Value::UnaryOp(UnOp::Neg, inner)) => return Some(*inner), (UnOp::Neg, Value::UnaryOp(UnOp::Neg, inner)) => return Some(*inner),
(UnOp::Not, Value::BinaryOp(BinOp::Eq, lhs, rhs)) => { (UnOp::Not, Value::BinaryOp(BinOp::Eq, lhs, rhs)) => {
@ -984,9 +1043,26 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
(UnOp::PtrMetadata, Value::Aggregate(AggregateTy::RawPtr { .. }, _, fields)) => { (UnOp::PtrMetadata, Value::Aggregate(AggregateTy::RawPtr { .. }, _, fields)) => {
return Some(fields[1]); return Some(fields[1]);
} }
_ => return None, // We have an unsizing cast, which assigns the length to fat pointer metadata.
(
UnOp::PtrMetadata,
Value::Cast {
kind: CastKind::PointerCoercion(ty::adjustment::PointerCoercion::Unsize),
from,
to,
..
},
) if let ty::Slice(..) = to.builtin_deref(true).unwrap().kind()
&& let ty::Array(_, len) = from.builtin_deref(true).unwrap().kind() =>
{
return self.insert_constant(Const::from_ty_const(
*len,
self.tcx.types.usize,
self.tcx,
));
}
_ => Value::UnaryOp(op, arg_index),
}; };
Some(self.insert(value)) Some(self.insert(value))
} }

View File

@ -1116,12 +1116,17 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
UnOp::PtrMetadata => { UnOp::PtrMetadata => {
if !matches!(self.mir_phase, MirPhase::Runtime(_)) { if !matches!(self.mir_phase, MirPhase::Runtime(_)) {
// It would probably be fine to support this in earlier phases, // It would probably be fine to support this in earlier phases,
// but at the time of writing it's only ever introduced from intrinsic lowering, // but at the time of writing it's only ever introduced from intrinsic lowering
// or other runtime-phase optimization passes,
// so earlier things can just `bug!` on it. // so earlier things can just `bug!` on it.
self.fail(location, "PtrMetadata should be in runtime MIR only"); self.fail(location, "PtrMetadata should be in runtime MIR only");
} }
check_kinds!(a, "Cannot PtrMetadata non-pointer type {:?}", ty::RawPtr(..)); check_kinds!(
a,
"Cannot PtrMetadata non-pointer non-reference type {:?}",
ty::RawPtr(..) | ty::Ref(..)
);
} }
} }
} }

View File

@ -0,0 +1,31 @@
- // MIR for `array_len` before GVN
+ // MIR for `array_len` after GVN
fn array_len(_1: &mut [i32; 42]) -> usize {
debug x => _1;
let mut _0: usize;
let _2: &[i32];
let mut _3: &[i32; 42];
let mut _4: *const [i32];
scope 1 {
debug x => _2;
}
bb0: {
- StorageLive(_2);
+ nop;
StorageLive(_3);
_3 = &(*_1);
_2 = move _3 as &[i32] (PointerCoercion(Unsize));
StorageDead(_3);
StorageLive(_4);
_4 = &raw const (*_2);
- _0 = PtrMetadata(move _4);
+ _0 = const 42_usize;
StorageDead(_4);
- StorageDead(_2);
+ nop;
return;
}
}

View File

@ -0,0 +1,31 @@
- // MIR for `array_len` before GVN
+ // MIR for `array_len` after GVN
fn array_len(_1: &mut [i32; 42]) -> usize {
debug x => _1;
let mut _0: usize;
let _2: &[i32];
let mut _3: &[i32; 42];
let mut _4: *const [i32];
scope 1 {
debug x => _2;
}
bb0: {
- StorageLive(_2);
+ nop;
StorageLive(_3);
_3 = &(*_1);
_2 = move _3 as &[i32] (PointerCoercion(Unsize));
StorageDead(_3);
StorageLive(_4);
_4 = &raw const (*_2);
- _0 = PtrMetadata(move _4);
+ _0 = const 42_usize;
StorageDead(_4);
- StorageDead(_2);
+ nop;
return;
}
}

View File

@ -8,10 +8,10 @@
let mut _3: fn(u8) -> u8; let mut _3: fn(u8) -> u8;
let _5: (); let _5: ();
let mut _6: fn(u8) -> u8; let mut _6: fn(u8) -> u8;
let mut _9: {closure@$DIR/gvn.rs:612:19: 612:21}; let mut _9: {closure@$DIR/gvn.rs:614:19: 614:21};
let _10: (); let _10: ();
let mut _11: fn(); let mut _11: fn();
let mut _13: {closure@$DIR/gvn.rs:612:19: 612:21}; let mut _13: {closure@$DIR/gvn.rs:614:19: 614:21};
let _14: (); let _14: ();
let mut _15: fn(); let mut _15: fn();
scope 1 { scope 1 {
@ -19,7 +19,7 @@
let _4: fn(u8) -> u8; let _4: fn(u8) -> u8;
scope 2 { scope 2 {
debug g => _4; debug g => _4;
let _7: {closure@$DIR/gvn.rs:612:19: 612:21}; let _7: {closure@$DIR/gvn.rs:614:19: 614:21};
scope 3 { scope 3 {
debug closure => _7; debug closure => _7;
let _8: fn(); let _8: fn();
@ -62,16 +62,16 @@
StorageDead(_6); StorageDead(_6);
StorageDead(_5); StorageDead(_5);
- StorageLive(_7); - StorageLive(_7);
- _7 = {closure@$DIR/gvn.rs:612:19: 612:21}; - _7 = {closure@$DIR/gvn.rs:614:19: 614:21};
- StorageLive(_8); - StorageLive(_8);
+ nop; + nop;
+ _7 = const ZeroSized: {closure@$DIR/gvn.rs:612:19: 612:21}; + _7 = const ZeroSized: {closure@$DIR/gvn.rs:614:19: 614:21};
+ nop; + nop;
StorageLive(_9); StorageLive(_9);
- _9 = _7; - _9 = _7;
- _8 = move _9 as fn() (PointerCoercion(ClosureFnPointer(Safe))); - _8 = move _9 as fn() (PointerCoercion(ClosureFnPointer(Safe)));
+ _9 = const ZeroSized: {closure@$DIR/gvn.rs:612:19: 612:21}; + _9 = const ZeroSized: {closure@$DIR/gvn.rs:614:19: 614:21};
+ _8 = const ZeroSized: {closure@$DIR/gvn.rs:612:19: 612:21} as fn() (PointerCoercion(ClosureFnPointer(Safe))); + _8 = const ZeroSized: {closure@$DIR/gvn.rs:614:19: 614:21} as fn() (PointerCoercion(ClosureFnPointer(Safe)));
StorageDead(_9); StorageDead(_9);
StorageLive(_10); StorageLive(_10);
StorageLive(_11); StorageLive(_11);
@ -88,8 +88,8 @@
StorageLive(_13); StorageLive(_13);
- _13 = _7; - _13 = _7;
- _12 = move _13 as fn() (PointerCoercion(ClosureFnPointer(Safe))); - _12 = move _13 as fn() (PointerCoercion(ClosureFnPointer(Safe)));
+ _13 = const ZeroSized: {closure@$DIR/gvn.rs:612:19: 612:21}; + _13 = const ZeroSized: {closure@$DIR/gvn.rs:614:19: 614:21};
+ _12 = const ZeroSized: {closure@$DIR/gvn.rs:612:19: 612:21} as fn() (PointerCoercion(ClosureFnPointer(Safe))); + _12 = const ZeroSized: {closure@$DIR/gvn.rs:614:19: 614:21} as fn() (PointerCoercion(ClosureFnPointer(Safe)));
StorageDead(_13); StorageDead(_13);
StorageLive(_14); StorageLive(_14);
StorageLive(_15); StorageLive(_15);

View File

@ -8,10 +8,10 @@
let mut _3: fn(u8) -> u8; let mut _3: fn(u8) -> u8;
let _5: (); let _5: ();
let mut _6: fn(u8) -> u8; let mut _6: fn(u8) -> u8;
let mut _9: {closure@$DIR/gvn.rs:612:19: 612:21}; let mut _9: {closure@$DIR/gvn.rs:614:19: 614:21};
let _10: (); let _10: ();
let mut _11: fn(); let mut _11: fn();
let mut _13: {closure@$DIR/gvn.rs:612:19: 612:21}; let mut _13: {closure@$DIR/gvn.rs:614:19: 614:21};
let _14: (); let _14: ();
let mut _15: fn(); let mut _15: fn();
scope 1 { scope 1 {
@ -19,7 +19,7 @@
let _4: fn(u8) -> u8; let _4: fn(u8) -> u8;
scope 2 { scope 2 {
debug g => _4; debug g => _4;
let _7: {closure@$DIR/gvn.rs:612:19: 612:21}; let _7: {closure@$DIR/gvn.rs:614:19: 614:21};
scope 3 { scope 3 {
debug closure => _7; debug closure => _7;
let _8: fn(); let _8: fn();
@ -62,16 +62,16 @@
StorageDead(_6); StorageDead(_6);
StorageDead(_5); StorageDead(_5);
- StorageLive(_7); - StorageLive(_7);
- _7 = {closure@$DIR/gvn.rs:612:19: 612:21}; - _7 = {closure@$DIR/gvn.rs:614:19: 614:21};
- StorageLive(_8); - StorageLive(_8);
+ nop; + nop;
+ _7 = const ZeroSized: {closure@$DIR/gvn.rs:612:19: 612:21}; + _7 = const ZeroSized: {closure@$DIR/gvn.rs:614:19: 614:21};
+ nop; + nop;
StorageLive(_9); StorageLive(_9);
- _9 = _7; - _9 = _7;
- _8 = move _9 as fn() (PointerCoercion(ClosureFnPointer(Safe))); - _8 = move _9 as fn() (PointerCoercion(ClosureFnPointer(Safe)));
+ _9 = const ZeroSized: {closure@$DIR/gvn.rs:612:19: 612:21}; + _9 = const ZeroSized: {closure@$DIR/gvn.rs:614:19: 614:21};
+ _8 = const ZeroSized: {closure@$DIR/gvn.rs:612:19: 612:21} as fn() (PointerCoercion(ClosureFnPointer(Safe))); + _8 = const ZeroSized: {closure@$DIR/gvn.rs:614:19: 614:21} as fn() (PointerCoercion(ClosureFnPointer(Safe)));
StorageDead(_9); StorageDead(_9);
StorageLive(_10); StorageLive(_10);
StorageLive(_11); StorageLive(_11);
@ -88,8 +88,8 @@
StorageLive(_13); StorageLive(_13);
- _13 = _7; - _13 = _7;
- _12 = move _13 as fn() (PointerCoercion(ClosureFnPointer(Safe))); - _12 = move _13 as fn() (PointerCoercion(ClosureFnPointer(Safe)));
+ _13 = const ZeroSized: {closure@$DIR/gvn.rs:612:19: 612:21}; + _13 = const ZeroSized: {closure@$DIR/gvn.rs:614:19: 614:21};
+ _12 = const ZeroSized: {closure@$DIR/gvn.rs:612:19: 612:21} as fn() (PointerCoercion(ClosureFnPointer(Safe))); + _12 = const ZeroSized: {closure@$DIR/gvn.rs:614:19: 614:21} as fn() (PointerCoercion(ClosureFnPointer(Safe)));
StorageDead(_13); StorageDead(_13);
StorageLive(_14); StorageLive(_14);
StorageLive(_15); StorageLive(_15);

View File

@ -0,0 +1,41 @@
- // MIR for `manual_slice_mut_len` before GVN
+ // MIR for `manual_slice_mut_len` after GVN
fn manual_slice_mut_len(_1: &mut [i32]) -> usize {
debug x => _1;
let mut _0: usize;
let _2: *mut [i32];
let mut _4: *mut [i32];
let mut _5: *const [i32];
scope 1 {
debug x => _2;
let _3: *const [i32];
scope 2 {
debug x => _3;
}
}
bb0: {
- StorageLive(_2);
+ nop;
_2 = &raw mut (*_1);
- StorageLive(_3);
+ nop;
StorageLive(_4);
_4 = _2;
- _3 = move _4 as *const [i32] (PtrToPtr);
+ _3 = _2 as *const [i32] (PtrToPtr);
StorageDead(_4);
StorageLive(_5);
_5 = _3;
- _0 = PtrMetadata(move _5);
+ _0 = PtrMetadata(_1);
StorageDead(_5);
- StorageDead(_3);
- StorageDead(_2);
+ nop;
+ nop;
return;
}
}

View File

@ -0,0 +1,41 @@
- // MIR for `manual_slice_mut_len` before GVN
+ // MIR for `manual_slice_mut_len` after GVN
fn manual_slice_mut_len(_1: &mut [i32]) -> usize {
debug x => _1;
let mut _0: usize;
let _2: *mut [i32];
let mut _4: *mut [i32];
let mut _5: *const [i32];
scope 1 {
debug x => _2;
let _3: *const [i32];
scope 2 {
debug x => _3;
}
}
bb0: {
- StorageLive(_2);
+ nop;
_2 = &raw mut (*_1);
- StorageLive(_3);
+ nop;
StorageLive(_4);
_4 = _2;
- _3 = move _4 as *const [i32] (PtrToPtr);
+ _3 = _2 as *const [i32] (PtrToPtr);
StorageDead(_4);
StorageLive(_5);
_5 = _3;
- _0 = PtrMetadata(move _5);
+ _0 = PtrMetadata(_1);
StorageDead(_5);
- StorageDead(_3);
- StorageDead(_2);
+ nop;
+ nop;
return;
}
}

View File

@ -7,7 +7,9 @@
#![feature(custom_mir)] #![feature(custom_mir)]
#![feature(core_intrinsics)] #![feature(core_intrinsics)]
#![feature(freeze)] #![feature(freeze)]
#![allow(ambiguous_wide_pointer_comparisons)]
#![allow(unconditional_panic)] #![allow(unconditional_panic)]
#![allow(unused)]
use std::intrinsics::mir::*; use std::intrinsics::mir::*;
use std::marker::Freeze; use std::marker::Freeze;
@ -816,6 +818,22 @@ fn casts_before_aggregate_raw_ptr(x: *const u32) -> *const [u8] {
std::intrinsics::aggregate_raw_ptr(x, 4) std::intrinsics::aggregate_raw_ptr(x, 4)
} }
fn manual_slice_mut_len(x: &mut [i32]) -> usize {
// CHECK-LABEL: fn manual_slice_mut_len
// CHECK: _0 = PtrMetadata(_1);
let x: *mut [i32] = x;
let x: *const [i32] = x;
std::intrinsics::ptr_metadata(x)
}
// `.len()` on arrays ends up being something like this
fn array_len(x: &mut [i32; 42]) -> usize {
// CHECK-LABEL: fn array_len
// CHECK: _0 = const 42_usize;
let x: &[i32] = x;
std::intrinsics::ptr_metadata(x)
}
fn main() { fn main() {
subexpression_elimination(2, 4, 5); subexpression_elimination(2, 4, 5);
wrap_unwrap(5); wrap_unwrap(5);
@ -880,3 +898,5 @@ fn identity<T>(x: T) -> T {
// EMIT_MIR gvn.meta_of_ref_to_slice.GVN.diff // EMIT_MIR gvn.meta_of_ref_to_slice.GVN.diff
// EMIT_MIR gvn.slice_from_raw_parts_as_ptr.GVN.diff // EMIT_MIR gvn.slice_from_raw_parts_as_ptr.GVN.diff
// EMIT_MIR gvn.casts_before_aggregate_raw_ptr.GVN.diff // EMIT_MIR gvn.casts_before_aggregate_raw_ptr.GVN.diff
// EMIT_MIR gvn.manual_slice_mut_len.GVN.diff
// EMIT_MIR gvn.array_len.GVN.diff

View File

@ -8,25 +8,24 @@ fn slice_get_unchecked_mut_range(_1: &mut [u32], _2: std::ops::Range<usize>) ->
let mut _4: usize; let mut _4: usize;
scope 1 (inlined core::slice::<impl [u32]>::get_unchecked_mut::<std::ops::Range<usize>>) { scope 1 (inlined core::slice::<impl [u32]>::get_unchecked_mut::<std::ops::Range<usize>>) {
let mut _5: *mut [u32]; let mut _5: *mut [u32];
let mut _12: *mut [u32]; let mut _11: *mut [u32];
scope 2 (inlined <std::ops::Range<usize> as SliceIndex<[u32]>>::get_unchecked_mut) { scope 2 (inlined <std::ops::Range<usize> as SliceIndex<[u32]>>::get_unchecked_mut) {
let mut _7: usize; let mut _6: usize;
let _8: (); let _7: ();
let _9: usize; let _8: usize;
scope 3 { scope 3 {
scope 6 (inlined core::slice::index::get_offset_len_mut_noubcheck::<u32>) { scope 6 (inlined core::slice::index::get_offset_len_mut_noubcheck::<u32>) {
let _11: *mut u32; let _10: *mut u32;
scope 7 { scope 7 {
} }
scope 8 (inlined core::slice::index::get_mut_noubcheck::<u32>) { scope 8 (inlined core::slice::index::get_mut_noubcheck::<u32>) {
let _10: *mut u32; let _9: *mut u32;
scope 9 { scope 9 {
} }
} }
} }
} }
scope 4 (inlined std::ptr::mut_ptr::<impl *mut [u32]>::len) { scope 4 (inlined std::ptr::mut_ptr::<impl *mut [u32]>::len) {
let mut _6: *const [u32];
scope 5 (inlined std::ptr::metadata::<[u32]>) { scope 5 (inlined std::ptr::metadata::<[u32]>) {
} }
} }
@ -38,28 +37,25 @@ fn slice_get_unchecked_mut_range(_1: &mut [u32], _2: std::ops::Range<usize>) ->
_4 = move (_2.1: usize); _4 = move (_2.1: usize);
StorageLive(_5); StorageLive(_5);
_5 = &raw mut (*_1); _5 = &raw mut (*_1);
StorageLive(_9); StorageLive(_8);
StorageLive(_7);
StorageLive(_6); StorageLive(_6);
_6 = _5 as *const [u32] (PtrToPtr); _6 = PtrMetadata(_1);
_7 = PtrMetadata(_6); _7 = <std::ops::Range<usize> as SliceIndex<[T]>>::get_unchecked_mut::precondition_check(_3, _4, move _6) -> [return: bb1, unwind unreachable];
StorageDead(_6);
_8 = <std::ops::Range<usize> as SliceIndex<[T]>>::get_unchecked_mut::precondition_check(_3, _4, move _7) -> [return: bb1, unwind unreachable];
} }
bb1: { bb1: {
StorageDead(_7); StorageDead(_6);
_9 = SubUnchecked(_4, _3); _8 = SubUnchecked(_4, _3);
StorageLive(_11);
StorageLive(_10); StorageLive(_10);
_10 = _5 as *mut u32 (PtrToPtr); StorageLive(_9);
_11 = Offset(_10, _3); _9 = _5 as *mut u32 (PtrToPtr);
StorageDead(_10); _10 = Offset(_9, _3);
_12 = *mut [u32] from (_11, _9);
StorageDead(_11);
StorageDead(_9); StorageDead(_9);
_11 = *mut [u32] from (_10, _8);
StorageDead(_10);
StorageDead(_8);
StorageDead(_5); StorageDead(_5);
_0 = &mut (*_12); _0 = &mut (*_11);
return; return;
} }
} }

View File

@ -8,25 +8,24 @@ fn slice_get_unchecked_mut_range(_1: &mut [u32], _2: std::ops::Range<usize>) ->
let mut _4: usize; let mut _4: usize;
scope 1 (inlined core::slice::<impl [u32]>::get_unchecked_mut::<std::ops::Range<usize>>) { scope 1 (inlined core::slice::<impl [u32]>::get_unchecked_mut::<std::ops::Range<usize>>) {
let mut _5: *mut [u32]; let mut _5: *mut [u32];
let mut _12: *mut [u32]; let mut _11: *mut [u32];
scope 2 (inlined <std::ops::Range<usize> as SliceIndex<[u32]>>::get_unchecked_mut) { scope 2 (inlined <std::ops::Range<usize> as SliceIndex<[u32]>>::get_unchecked_mut) {
let mut _7: usize; let mut _6: usize;
let _8: (); let _7: ();
let _9: usize; let _8: usize;
scope 3 { scope 3 {
scope 6 (inlined core::slice::index::get_offset_len_mut_noubcheck::<u32>) { scope 6 (inlined core::slice::index::get_offset_len_mut_noubcheck::<u32>) {
let _11: *mut u32; let _10: *mut u32;
scope 7 { scope 7 {
} }
scope 8 (inlined core::slice::index::get_mut_noubcheck::<u32>) { scope 8 (inlined core::slice::index::get_mut_noubcheck::<u32>) {
let _10: *mut u32; let _9: *mut u32;
scope 9 { scope 9 {
} }
} }
} }
} }
scope 4 (inlined std::ptr::mut_ptr::<impl *mut [u32]>::len) { scope 4 (inlined std::ptr::mut_ptr::<impl *mut [u32]>::len) {
let mut _6: *const [u32];
scope 5 (inlined std::ptr::metadata::<[u32]>) { scope 5 (inlined std::ptr::metadata::<[u32]>) {
} }
} }
@ -38,28 +37,25 @@ fn slice_get_unchecked_mut_range(_1: &mut [u32], _2: std::ops::Range<usize>) ->
_4 = move (_2.1: usize); _4 = move (_2.1: usize);
StorageLive(_5); StorageLive(_5);
_5 = &raw mut (*_1); _5 = &raw mut (*_1);
StorageLive(_9); StorageLive(_8);
StorageLive(_7);
StorageLive(_6); StorageLive(_6);
_6 = _5 as *const [u32] (PtrToPtr); _6 = PtrMetadata(_1);
_7 = PtrMetadata(_6); _7 = <std::ops::Range<usize> as SliceIndex<[T]>>::get_unchecked_mut::precondition_check(_3, _4, move _6) -> [return: bb1, unwind unreachable];
StorageDead(_6);
_8 = <std::ops::Range<usize> as SliceIndex<[T]>>::get_unchecked_mut::precondition_check(_3, _4, move _7) -> [return: bb1, unwind unreachable];
} }
bb1: { bb1: {
StorageDead(_7); StorageDead(_6);
_9 = SubUnchecked(_4, _3); _8 = SubUnchecked(_4, _3);
StorageLive(_11);
StorageLive(_10); StorageLive(_10);
_10 = _5 as *mut u32 (PtrToPtr); StorageLive(_9);
_11 = Offset(_10, _3); _9 = _5 as *mut u32 (PtrToPtr);
StorageDead(_10); _10 = Offset(_9, _3);
_12 = *mut [u32] from (_11, _9);
StorageDead(_11);
StorageDead(_9); StorageDead(_9);
_11 = *mut [u32] from (_10, _8);
StorageDead(_10);
StorageDead(_8);
StorageDead(_5); StorageDead(_5);
_0 = &mut (*_12); _0 = &mut (*_11);
return; return;
} }
} }