Rollup merge of #110930 - b-naber:normalize-elaborate-drops, r=cjgillot

Don't expect normalization to succeed in elaborate_drops

Fixes https://github.com/rust-lang/rust/issues/110682

This was exposed through the changes in https://github.com/rust-lang/rust/pull/109247, which cause more things to be inlined. Inlining can happen before monomorphization, so we can't expect normalization to succeed. In the elaborate_drops analysis we currently have [this call](033aa092ab/compiler/rustc_mir_dataflow/src/elaborate_drops.rs (L278)) to `normalize_erasing_regions`, which ICEs when normalization fails. The types are used to infer [whether the type needs a drop](033aa092ab/compiler/rustc_mir_dataflow/src/elaborate_drops.rs (L374)), where `needs_drop` itself [uses `try_normalize_erasing_regions`](033aa092ab/compiler/rustc_middle/src/ty/util.rs (L1121)).

~[`instance_mir`](https://doc.rust-lang.org/stable/nightly-rustc/rustc_middle/ty/context/struct.TyCtxt.html#method.instance_mir) isn't explicit about whether it expects the instances corresponding to the `InstanceDef`s to be monomorphized (though I think in all other contexts the function is used post-monomorphization), so the use of `instance_mir` in inlining doesn't necessarily seem wrong to me.~
This commit is contained in:
Dylan DPC 2023-05-17 19:11:53 +05:30 committed by GitHub
commit 828caa80a9
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 119 additions and 1 deletion

View File

@ -276,6 +276,7 @@ where
assert_eq!(self.elaborator.param_env().reveal(), Reveal::All);
let field_ty =
tcx.normalize_erasing_regions(self.elaborator.param_env(), f.ty(tcx, substs));
(tcx.mk_place_field(base_place, field, field_ty), subpath)
})
.collect()

View File

@ -7,6 +7,7 @@ use rustc_index::Idx;
use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
use rustc_middle::mir::visit::*;
use rustc_middle::mir::*;
use rustc_middle::ty::TypeVisitableExt;
use rustc_middle::ty::{self, Instance, InstanceDef, ParamEnv, Ty, TyCtxt};
use rustc_session::config::OptLevel;
use rustc_span::{hygiene::ExpnKind, ExpnData, LocalExpnId, Span};
@ -168,7 +169,7 @@ impl<'tcx> Inliner<'tcx> {
let callee_attrs = self.tcx.codegen_fn_attrs(callsite.callee.def_id());
self.check_codegen_attributes(callsite, callee_attrs)?;
self.check_mir_is_available(caller_body, &callsite.callee)?;
let callee_body = self.tcx.instance_mir(callsite.callee.def);
let callee_body = try_instance_mir(self.tcx, callsite.callee.def)?;
self.check_mir_body(callsite, callee_body, callee_attrs)?;
if !self.tcx.consider_optimizing(|| {
@ -1128,3 +1129,27 @@ impl<'tcx> MutVisitor<'tcx> for Integrator<'_, 'tcx> {
}
}
}
#[instrument(skip(tcx), level = "debug")]
/// Fetches the MIR for `instance`, refusing drop-glue instances whose ADT
/// fields are still polymorphic.
///
/// Inlining runs before monomorphization, so drop elaboration on an inlined
/// drop shim may later be asked to normalize field types that cannot yet be
/// normalized (e.g. unresolved associated-type projections), which would ICE.
/// Returning `Err` here makes the inliner skip such callees instead.
fn try_instance_mir<'tcx>(
    tcx: TyCtxt<'tcx>,
    instance: InstanceDef<'tcx>,
) -> Result<&'tcx Body<'tcx>, &'static str> {
    // Only drop glue for a concrete ADT needs the field scan; every other
    // instance kind is handed out unchanged.
    if let ty::InstanceDef::DropGlue(_, Some(ty)) = instance {
        if let ty::Adt(def, substs) = ty.kind() {
            let has_polymorphic_field = def.all_fields().any(|field| {
                let field_ty = field.ty(tcx, substs);
                // A field that still mentions a type parameter *and* a
                // projection is one that normalization could fail on.
                field_ty.has_param() && field_ty.has_projections()
            });
            if has_polymorphic_field {
                return Err("cannot build drop shim for polymorphic type");
            }
        }
    }
    Ok(tcx.instance_mir(instance))
}

View File

@ -0,0 +1,92 @@
// build-pass
// compile-flags: -Zmir-opt-level=3
use std::fmt::Debug;
use std::mem::ManuallyDrop;
use std::ptr;
/// Marker trait for the primitive types usable as a `BitStore` register.
pub trait BitRegister {}
// Implements `BitRegister` for each listed primitive type.
macro_rules! register {
($($t:ty),+ $(,)?) => { $(
impl BitRegister for $t {
}
)* };
}
register!(u8, u16, u32);
/// A type storable in the bit vector; `Mem` names the register type that
/// backs it (the associated-type projection `T::Mem` below is central to
/// the regression this file reproduces).
pub trait BitStore: Sized + Debug {
/// The register type that the implementor describes.
type Mem: BitRegister + Into<Self>;
}
// Implements `BitStore` for each listed type, using the type itself as its
// own register representation (`Mem = Self`).
macro_rules! store {
($($t:ty),+ $(,)?) => { $(
impl BitStore for $t {
type Mem = Self;
}
)+ };
}
store!(u8, u16, u32,);
/// Minimal stand-in for a bit-vector type, reduced to reproduce the ICE in
/// issue #110682 (drop elaboration after pre-monomorphization inlining).
#[repr(C)]
pub struct BitVec<T>
where
T: BitStore,
{
/// Region pointer describing the live portion of the owned buffer.
pointer: ptr::NonNull<T>,
/// Allocated capacity, in elements `T`, of the owned buffer.
capacity: usize,
}
impl<T> BitVec<T>
where
T: BitStore,
{
// NOTE(review): `NonNull::new(null_mut()).unwrap()` would panic at
// runtime, but this is a `build-pass` test — it is compiled, never run.
pub fn new() -> Self {
let pointer = ptr::NonNull::<T>::new(ptr::null_mut()).unwrap();
BitVec { pointer, capacity: 10 }
}
// Logically truncates the vector; `set_len` below is a no-op stub.
pub fn clear(&mut self) {
unsafe {
self.set_len(0);
}
}
#[inline]
pub unsafe fn set_len(&mut self, new_len: usize) {}
// Hands the caller a `ManuallyDrop<Vec<T::Mem>>` view of the buffer.
// NOTE(review): the `T::Mem` projection in this drop path is presumably
// what drop elaboration previously failed to normalize — keep this shape
// intact so the regression test stays meaningful.
fn with_vec<F, R>(&mut self, func: F) -> R
where
F: FnOnce(&mut ManuallyDrop<Vec<T::Mem>>) -> R,
{
let cap = self.capacity;
let elts = 10;
let mut vec = ManuallyDrop::new(unsafe { Vec::from_raw_parts(ptr::null_mut(), elts, cap) });
let out = func(&mut vec);
out
}
}
impl<T> Drop for BitVec<T>
where
T: BitStore,
{
// The drop glue generated for this impl (inlined at -Zmir-opt-level=3)
// is the code path that used to ICE in drop elaboration.
#[inline]
fn drop(&mut self) {
// The buffer elements do not have destructors.
self.clear();
// Run the `Vec` destructor to deallocate the buffer.
self.with_vec(|vec| unsafe { ManuallyDrop::drop(vec) });
}
}
fn main() {
// Instantiating `BitVec<u32>` forces its drop glue to be built; under
// -Zmir-opt-level=3 this previously triggered the elaborate-drops ICE.
let bitvec = BitVec::<u32>::new();
}