Rollup merge of #104357 - RalfJung:is-sized, r=cjgillot

add is_sized method on Abi and Layout, and use it

This avoids the double negation of `!is_unsized()`, which shows up quite a lot.
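
For illustration, a minimal standalone sketch of the readability win (the `Layout` here is a mock stand-in, not rustc's real type):

```rust
// Mock stand-in for rustc's layout type, for illustration only.
struct Layout {
    sized: bool,
}

impl Layout {
    fn is_unsized(&self) -> bool {
        !self.sized
    }

    // The helper this PR adds: the negation lives in one place,
    // so callers no longer negate twice.
    fn is_sized(&self) -> bool {
        !self.is_unsized()
    }
}

fn main() {
    let layout = Layout { sized: true };
    assert!(!layout.is_unsized()); // before: double negation
    assert!(layout.is_sized()); // after: reads directly
}
```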
commit 8076b5903a by Matthias Krüger, 2022-11-13 17:37:38 +01:00 (committed by GitHub)
21 changed files with 39 additions and 29 deletions

@@ -128,7 +128,7 @@ pub(crate) fn codegen_const_value<'tcx>(
     ty: Ty<'tcx>,
 ) -> CValue<'tcx> {
     let layout = fx.layout_of(ty);
-    assert!(!layout.is_unsized(), "sized const value");
+    assert!(layout.is_sized(), "unsized const value");
     if layout.is_zst() {
         return CValue::by_ref(crate::Pointer::dangling(layout.align.pref), layout);

@@ -19,7 +19,7 @@ fn codegen_field<'tcx>(
     };
     if let Some(extra) = extra {
-        if !field_layout.is_unsized() {
+        if field_layout.is_sized() {
             return simple(fx);
         }
         match field_layout.ty.kind() {
@@ -364,7 +364,7 @@ impl<'tcx> CPlace<'tcx> {
         fx: &mut FunctionCx<'_, '_, 'tcx>,
         layout: TyAndLayout<'tcx>,
     ) -> CPlace<'tcx> {
-        assert!(!layout.is_unsized());
+        assert!(layout.is_sized());
         if layout.size.bytes() == 0 {
             return CPlace {
                 inner: CPlaceInner::Addr(Pointer::dangling(layout.align.pref), None),
@@ -825,7 +825,7 @@ impl<'tcx> CPlace<'tcx> {
         fx: &FunctionCx<'_, '_, 'tcx>,
         variant: VariantIdx,
     ) -> Self {
-        assert!(!self.layout().is_unsized());
+        assert!(self.layout().is_sized());
         let layout = self.layout().for_variant(fx, variant);
         CPlace { inner: self.inner, layout }
     }

@@ -277,7 +277,7 @@ pub fn struct_fields<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout
         offset = target_offset + field.size;
         prev_effective_align = effective_field_align;
     }
-    if !layout.is_unsized() && field_count > 0 {
+    if layout.is_sized() && field_count > 0 {
         if offset > layout.size {
             bug!("layout: {:#?} stride: {:?} offset: {:?}", layout, layout.size, offset);
         }

@@ -72,7 +72,7 @@ pub(crate) fn fat_pointer_kind<'ll, 'tcx>(
         layout.is_unsized()
     );
-    if !layout.is_unsized() {
+    if layout.is_sized() {
         return None;
     }

@@ -140,7 +140,7 @@ fn struct_llfields<'a, 'tcx>(
         prev_effective_align = effective_field_align;
     }
     let padding_used = result.len() > field_count;
-    if !layout.is_unsized() && field_count > 0 {
+    if layout.is_sized() && field_count > 0 {
         if offset > layout.size {
             bug!("layout: {:#?} stride: {:?} offset: {:?}", layout, layout.size, offset);
         }

@@ -15,7 +15,7 @@ pub fn size_and_align_of_dst<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
 ) -> (Bx::Value, Bx::Value) {
     let layout = bx.layout_of(t);
     debug!("size_and_align_of_dst(ty={}, info={:?}): layout: {:?}", t, info, layout);
-    if !layout.is_unsized() {
+    if layout.is_sized() {
         let size = bx.const_usize(layout.size.bytes());
         let align = bx.const_usize(layout.align.abi.bytes());
         return (size, align);

@@ -29,7 +29,7 @@ pub struct PlaceRef<'tcx, V> {
 impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
     pub fn new_sized(llval: V, layout: TyAndLayout<'tcx>) -> PlaceRef<'tcx, V> {
-        assert!(!layout.is_unsized());
+        assert!(layout.is_sized());
         PlaceRef { llval, llextra: None, layout, align: layout.align.abi }
     }
@@ -38,7 +38,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
         layout: TyAndLayout<'tcx>,
         align: Align,
     ) -> PlaceRef<'tcx, V> {
-        assert!(!layout.is_unsized());
+        assert!(layout.is_sized());
         PlaceRef { llval, llextra: None, layout, align }
     }
@@ -48,7 +48,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
         bx: &mut Bx,
         layout: TyAndLayout<'tcx>,
     ) -> Self {
-        assert!(!layout.is_unsized(), "tried to statically allocate unsized place");
+        assert!(layout.is_sized(), "tried to statically allocate unsized place");
         let tmp = bx.alloca(bx.cx().backend_type(layout), layout.align.abi);
         Self::new_sized(tmp, layout)
     }
@@ -145,7 +145,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
                 );
                 return simple();
             }
-            _ if !field.is_unsized() => return simple(),
+            _ if field.is_sized() => return simple(),
             ty::Slice(..) | ty::Str | ty::Foreign(..) => return simple(),
             ty::Adt(def, _) => {
                 if def.repr().packed() {

@@ -46,7 +46,7 @@ fn eval_body_using_ecx<'mir, 'tcx>(
         ecx.tcx.def_kind(cid.instance.def_id())
     );
     let layout = ecx.layout_of(body.bound_return_ty().subst(tcx, cid.instance.substs))?;
-    assert!(!layout.is_unsized());
+    assert!(layout.is_sized());
     let ret = ecx.allocate(layout, MemoryKind::Stack)?;
     trace!(

@@ -572,7 +572,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         metadata: &MemPlaceMeta<M::Provenance>,
         layout: &TyAndLayout<'tcx>,
     ) -> InterpResult<'tcx, Option<(Size, Align)>> {
-        if !layout.is_unsized() {
+        if layout.is_sized() {
             return Ok(Some((layout.size, layout.align.abi)));
         }
         match layout.ty.kind() {

@@ -713,7 +713,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         rhs: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
     ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
         let layout = self.layout_of(lhs.layout.ty.builtin_deref(true).unwrap().ty)?;
-        assert!(!layout.is_unsized());
+        assert!(layout.is_sized());
         let get_bytes = |this: &InterpCx<'mir, 'tcx, M>,
                          op: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,

@@ -683,7 +683,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 // Use size and align of the type.
                 let ty = self.tcx.type_of(def_id);
                 let layout = self.tcx.layout_of(ParamEnv::empty().and(ty)).unwrap();
-                assert!(!layout.is_unsized());
+                assert!(layout.is_sized());
                 (layout.size, layout.align.abi, AllocKind::LiveData)
             }
             Some(GlobalAlloc::Memory(alloc)) => {

@@ -280,7 +280,7 @@ impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
         layout: TyAndLayout<'tcx>,
         cx: &impl HasDataLayout,
     ) -> InterpResult<'tcx, Self> {
-        assert!(!layout.is_unsized());
+        assert!(layout.is_sized());
         self.offset_with_meta(offset, MemPlaceMeta::None, layout, cx)
     }
 }

@@ -201,7 +201,7 @@ impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> {
         layout: TyAndLayout<'tcx>,
         cx: &impl HasDataLayout,
     ) -> InterpResult<'tcx, Self> {
-        assert!(!layout.is_unsized());
+        assert!(layout.is_sized());
         self.offset_with_meta(offset, MemPlaceMeta::None, layout, cx)
     }
@@ -340,7 +340,7 @@ where
         &self,
         place: &MPlaceTy<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx, Option<AllocRef<'_, 'tcx, M::Provenance, M::AllocExtra>>> {
-        assert!(!place.layout.is_unsized());
+        assert!(place.layout.is_sized());
         assert!(!place.meta.has_meta());
         let size = place.layout.size;
         self.get_ptr_alloc(place.ptr, size, place.align)
@@ -351,7 +351,7 @@ where
         &mut self,
         place: &MPlaceTy<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx, Option<AllocRefMut<'_, 'tcx, M::Provenance, M::AllocExtra>>> {
-        assert!(!place.layout.is_unsized());
+        assert!(place.layout.is_sized());
         assert!(!place.meta.has_meta());
         let size = place.layout.size;
         self.get_ptr_alloc_mut(place.ptr, size, place.align)
@@ -485,7 +485,7 @@ where
         src: Immediate<M::Provenance>,
         dest: &PlaceTy<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx> {
-        assert!(!dest.layout.is_unsized(), "Cannot write unsized data");
+        assert!(dest.layout.is_sized(), "Cannot write unsized data");
         trace!("write_immediate: {:?} <- {:?}: {}", *dest, src, dest.layout.ty);
         // See if we can avoid an allocation. This is the counterpart to `read_immediate_raw`,
@@ -746,7 +746,7 @@ where
         layout: TyAndLayout<'tcx>,
         kind: MemoryKind<M::MemoryKind>,
     ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
-        assert!(!layout.is_unsized());
+        assert!(layout.is_sized());
         let ptr = self.allocate_ptr(layout.size, layout.align.abi, kind)?;
         Ok(MPlaceTy::from_aligned_ptr(ptr.into(), layout))
     }

@@ -209,7 +209,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             Repeat(ref operand, _) => {
                 let src = self.eval_operand(operand, None)?;
-                assert!(!src.layout.is_unsized());
+                assert!(src.layout.is_sized());
                 let dest = self.force_allocation(&dest)?;
                 let length = dest.len(self)?;

@@ -53,7 +53,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     ) -> InterpResult<'tcx, (Size, Align)> {
         let (ty, _trait_ref) = self.get_ptr_vtable(vtable)?;
         let layout = self.layout_of(ty)?;
-        assert!(!layout.is_unsized(), "there are no vtables for unsized types");
+        assert!(layout.is_sized(), "there are no vtables for unsized types");
         Ok((layout.size, layout.align.abi))
     }
 }

@@ -66,7 +66,7 @@ pub(super) fn vtable_allocation_provider<'tcx>(
     let layout = tcx
         .layout_of(ty::ParamEnv::reveal_all().and(ty))
         .expect("failed to build vtable representation");
-    assert!(!layout.is_unsized(), "can't create a vtable for an unsized type");
+    assert!(layout.is_sized(), "can't create a vtable for an unsized type");
     let size = layout.size.bytes();
     let align = layout.align.abi.bytes();

@@ -385,7 +385,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
             // I don't know how return types can seem to be unsized but this happens in the
             // `type/type-unsatisfiable.rs` test.
             .filter(|ret_layout| {
-                !ret_layout.is_unsized() && ret_layout.size < Size::from_bytes(MAX_ALLOC_LIMIT)
+                ret_layout.is_sized() && ret_layout.size < Size::from_bytes(MAX_ALLOC_LIMIT)
             })
             .unwrap_or_else(|| ecx.layout_of(tcx.types.unit).unwrap());

@@ -199,7 +199,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
            // I don't know how return types can seem to be unsized but this happens in the
            // `type/type-unsatisfiable.rs` test.
            .filter(|ret_layout| {
-                !ret_layout.is_unsized() && ret_layout.size < Size::from_bytes(MAX_ALLOC_LIMIT)
+                ret_layout.is_sized() && ret_layout.size < Size::from_bytes(MAX_ALLOC_LIMIT)
            })
            .unwrap_or_else(|| ecx.layout_of(tcx.types.unit).unwrap());

@@ -1083,6 +1083,11 @@ impl Abi {
         }
     }
+
+    #[inline]
+    pub fn is_sized(&self) -> bool {
+        !self.is_unsized()
+    }
 
     /// Returns `true` if this is a single signed integer scalar
     #[inline]
     pub fn is_signed(&self) -> bool {
@@ -1490,6 +1495,11 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
         self.abi.is_unsized()
     }
+
+    #[inline]
+    pub fn is_sized(&self) -> bool {
+        self.abi.is_sized()
+    }
 
     /// Returns `true` if the type is a ZST and not unsized.
     pub fn is_zst(&self) -> bool {
         match self.abi {
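
Taken together, the two hunks above introduce a small delegation chain: `TyAndLayout::is_sized` defers to `Abi::is_sized`, which owns the single negation. A simplified standalone sketch of that shape (mock types for illustration; the real `Abi` is a much richer enum in `rustc_target::abi`):

```rust
// Mock of rustc's Abi enum, trimmed down to the sizedness question.
#[allow(dead_code)]
enum Abi {
    Scalar,
    Aggregate { sized: bool },
}

impl Abi {
    fn is_unsized(&self) -> bool {
        matches!(self, Abi::Aggregate { sized: false })
    }

    // New in this PR: the positive form, defined once next to is_unsized.
    fn is_sized(&self) -> bool {
        !self.is_unsized()
    }
}

// Mock of TyAndLayout: a layout's sizedness is decided by its ABI.
struct TyAndLayout {
    abi: Abi,
}

impl TyAndLayout {
    fn is_sized(&self) -> bool {
        self.abi.is_sized()
    }
}

fn main() {
    let layout = TyAndLayout { abi: Abi::Aggregate { sized: false } };
    assert!(!layout.is_sized()); // e.g. a struct with a slice tail
}
```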

@@ -668,7 +668,7 @@ fn layout_of_uncached<'tcx>(
                 let mut abi = Abi::Aggregate { sized: true };
                 let index = VariantIdx::new(0);
                 for field in &variants[index] {
-                    assert!(!field.is_unsized());
+                    assert!(field.is_sized());
                     align = align.max(field.align);
                     // If all non-ZST fields have the same ABI, forward this ABI

@@ -1053,7 +1053,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
         // pointers we need to retag, so we can stop recursion early.
         // This optimization is crucial for ZSTs, because they can contain way more fields
         // than we can ever visit.
-        if !place.layout.is_unsized() && place.layout.size < self.ecx.pointer_size() {
+        if place.layout.is_sized() && place.layout.size < self.ecx.pointer_size() {
             return Ok(());
         }