tighten sanity checks around Scalar and ScalarPair

Ralf Jung 2022-04-19 14:12:21 -04:00
parent d53f1e8fbf
commit 719655658a
3 changed files with 45 additions and 45 deletions
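Context for the hunks below: in rustc layout terms, `Abi::Scalar` marks a value represented as a single primitive (integer, float, or pointer), while `Abi::ScalarPair` marks a value represented as exactly two primitives, such as a slice reference's (pointer, length) pair. The commit adds assertions that the primitive sizes recorded in these ABIs agree with the layout's total size and field offsets. The pair representation is observable from plain Rust, no compiler internals needed:

```rust
use std::mem::size_of;

fn main() {
    // `&u8` is a single `Scalar`: one pointer.
    assert_eq!(size_of::<&u8>(), size_of::<usize>());
    // `&[u8]` is lowered with `ScalarPair` ABI: data pointer plus length,
    // so it occupies exactly two pointer-sized scalars.
    assert_eq!(size_of::<&[u8]>(), 2 * size_of::<usize>());
}
```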

compiler/rustc_const_eval/src/interpret/operand.rs

@@ -284,8 +284,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             Abi::Scalar(s) if force => Some(s.primitive()),
             _ => None,
         };
-        if let Some(_) = scalar_layout {
-            let scalar = alloc.read_scalar(alloc_range(Size::ZERO, mplace.layout.size))?;
+        if let Some(s) = scalar_layout {
+            let size = s.size(self);
+            assert_eq!(size, mplace.layout.size, "abi::Scalar size does not match layout size");
+            let scalar = alloc.read_scalar(alloc_range(Size::ZERO, size))?;
             return Ok(Some(ImmTy { imm: scalar.into(), layout: mplace.layout }));
         }
         let scalar_pair_layout = match mplace.layout.abi {
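The first hunk turns an implicit assumption into a checked one: when the ABI is `Abi::Scalar(s)`, the primitive's size must equal the layout's full size, and the read then uses the primitive size explicitly instead of `mplace.layout.size`. A minimal sketch of the same shape of check, with hand-rolled stand-ins for the compiler's types (nothing here is rustc's real API):

```rust
// Illustrative stand-ins; rustc's `abi::Scalar` and layouts are far richer.
#[derive(Clone, Copy)]
struct ScalarAbi {
    size: usize, // size of the primitive in bytes
}

#[derive(Clone, Copy)]
#[allow(dead_code)]
enum Abi {
    Scalar(ScalarAbi),
    Other,
}

struct Layout {
    size: usize, // total size of the type in bytes
    abi: Abi,
}

/// Read a scalar-ABI value out of raw bytes. The tightened check insists
/// that the scalar's own size matches the layout size *before* reading
/// exactly that many bytes.
fn read_scalar_checked(layout: &Layout, bytes: &[u8]) -> Option<u64> {
    let Abi::Scalar(s) = layout.abi else { return None };
    assert_eq!(s.size, layout.size, "abi::Scalar size does not match layout size");
    let mut buf = [0u8; 8];
    buf[..s.size].copy_from_slice(&bytes[..s.size]);
    Some(u64::from_le_bytes(buf))
}

fn main() {
    let layout = Layout { size: 4, abi: Abi::Scalar(ScalarAbi { size: 4 }) };
    assert_eq!(read_scalar_checked(&layout, &42u32.to_le_bytes()), Some(42));
}
```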
@@ -302,7 +304,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             // which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
             let (a_size, b_size) = (a.size(self), b.size(self));
             let b_offset = a_size.align_to(b.align(self).abi);
-            assert!(b_offset.bytes() > 0); // we later use the offset to tell apart the fields
+            assert!(b_offset.bytes() > 0); // in `operand_field` we use the offset to tell apart the fields
             let a_val = alloc.read_scalar(alloc_range(Size::ZERO, a_size))?;
             let b_val = alloc.read_scalar(alloc_range(b_offset, b_size))?;
             return Ok(Some(ImmTy {
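`b_offset` is the first scalar's size rounded up to the second scalar's alignment, and the assertion holds because the first half of a `ScalarPair` is never a ZST, so the rounded-up offset is at least one byte. A standalone check of that arithmetic, with a hypothetical `align_to` helper mirroring what `Size::align_to` computes:

```rust
/// Round `size` up to the next multiple of `align` (a power of two),
/// mirroring `Size::align_to` in rustc_target.
fn align_to(size: u64, align: u64) -> u64 {
    assert!(align.is_power_of_two());
    (size + align - 1) & !(align - 1)
}

fn main() {
    // A pair laid out like (u8, u32): the second field starts at 4, not 1.
    assert_eq!(align_to(1, 4), 4);
    // A pair laid out like (u32, u32): the second field directly follows.
    assert_eq!(align_to(4, 4), 4);
    // Because the first scalar is at least one byte, `b_offset` is nonzero,
    // which is what lets `operand_field` use it to tell the fields apart.
    assert!(align_to(1, 1) > 0);
}
```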
@@ -394,28 +396,41 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             Err(value) => value,
         };
-        let field_layout = op.layout.field(self, field);
+        let field_layout = base.layout.field(self, field);
         if field_layout.is_zst() {
             let immediate = Scalar::ZST.into();
             return Ok(OpTy { op: Operand::Immediate(immediate), layout: field_layout });
         }
-        let offset = op.layout.fields.offset(field);
-        let immediate = match *base {
+        let offset = base.layout.fields.offset(field);
+        // This makes several assumptions about what layouts we will encounter; we match what
+        // codegen does as well as we can (see `extract_field` in `rustc_codegen_ssa/src/mir/operand.rs`).
+        let field_val = match (*base, base.layout.abi) {
             // the field covers the entire type
-            _ if offset.bytes() == 0 && field_layout.size == op.layout.size => *base,
-            // extract fields from types with `ScalarPair` ABI
-            Immediate::ScalarPair(a, b) => {
-                let val = if offset.bytes() == 0 { a } else { b };
-                Immediate::from(val)
+            _ if field_layout.size == base.layout.size => {
+                assert!(offset.bytes() == 0);
+                *base
             }
-            Immediate::Scalar(val) => span_bug!(
+            // extract fields from types with `ScalarPair` ABI
+            (Immediate::ScalarPair(a_val, b_val), Abi::ScalarPair(a, b)) => {
+                Immediate::from(if offset.bytes() == 0 {
+                    assert_eq!(field_layout.size, a.size(self));
+                    a_val
+                } else {
+                    assert_eq!(offset, a.size(self).align_to(b.align(self).abi));
+                    assert_eq!(field_layout.size, b.size(self));
+                    b_val
+                })
+            }
+            _ => span_bug!(
                 self.cur_span(),
-                "field access on non aggregate {:#?}, {:#?}",
-                val,
-                op.layout
+                "invalid field access on immediate {}, layout {:#?}",
+                base,
+                base.layout
             ),
         };
-        Ok(OpTy { op: Operand::Immediate(immediate), layout: field_layout })
+        Ok(OpTy { op: Operand::Immediate(field_val), layout: field_layout })
     }

     pub fn operand_index(
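The rewritten `operand_field` first handles the field-covers-the-whole-value case, then picks a half of a `ScalarPair` purely by whether the field offset is zero, cross-checking sizes against the ABI. A simplified model of that dispatch (the `Immediate` enum and the size parameters here are invented for illustration):

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
enum Immediate {
    Scalar(u128),
    ScalarPair(u128, u128),
}

/// Extract a field from an immediate the way the interpreter now does:
/// a field as large as the whole value is the value itself; otherwise
/// offset 0 selects the first scalar and any nonzero offset the second.
fn extract_field(imm: Immediate, offset: u64, field_size: u64, total_size: u64) -> Immediate {
    match imm {
        // The field covers the entire type (e.g. a newtype wrapper).
        _ if field_size == total_size => {
            assert_eq!(offset, 0);
            imm
        }
        Immediate::ScalarPair(a, b) => Immediate::Scalar(if offset == 0 { a } else { b }),
        _ => panic!("invalid field access on immediate {imm:?}"),
    }
}

fn main() {
    let pair = Immediate::ScalarPair(7, 9);
    assert_eq!(extract_field(pair, 0, 4, 8), Immediate::Scalar(7));
    assert_eq!(extract_field(pair, 4, 4, 8), Immediate::Scalar(9));
    // A "field" spanning the whole value comes back unchanged.
    assert_eq!(extract_field(pair, 0, 8, 8), pair);
}
```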

compiler/rustc_const_eval/src/interpret/place.rs

@@ -16,7 +16,7 @@ use rustc_target::abi::{HasDataLayout, Size, VariantIdx, Variants};
 use super::{
     alloc_range, mir_assign_valid_types, AllocId, AllocRef, AllocRefMut, CheckInAllocMsg,
     ConstAlloc, ImmTy, Immediate, InterpCx, InterpResult, LocalValue, Machine, MemoryKind, OpTy,
-    Operand, Pointer, PointerArithmetic, Provenance, Scalar, ScalarMaybeUninit,
+    Operand, Pointer, Provenance, Scalar, ScalarMaybeUninit,
 };

 #[derive(Copy, Clone, Hash, PartialEq, Eq, HashStable, Debug)]
@@ -700,24 +700,7 @@ where
         src: Immediate<M::PointerTag>,
         dest: &PlaceTy<'tcx, M::PointerTag>,
     ) -> InterpResult<'tcx> {
-        if cfg!(debug_assertions) {
-            // This is a very common path, avoid some checks in release mode
-            assert!(!dest.layout.is_unsized(), "Cannot write unsized data");
-            match src {
-                Immediate::Scalar(ScalarMaybeUninit::Scalar(Scalar::Ptr(..))) => assert_eq!(
-                    self.pointer_size(),
-                    dest.layout.size,
-                    "Size mismatch when writing pointer"
-                ),
-                Immediate::Scalar(ScalarMaybeUninit::Scalar(Scalar::Int(int))) => {
-                    assert_eq!(int.size(), dest.layout.size, "Size mismatch when writing bits")
-                }
-                Immediate::Scalar(ScalarMaybeUninit::Uninit) => {} // uninit can have any size
-                Immediate::ScalarPair(_, _) => {
-                    // FIXME: Can we check anything here?
-                }
-            }
-        }
+        assert!(!dest.layout.is_unsized(), "Cannot write unsized data");
         trace!("write_immediate: {:?} <- {:?}: {}", *dest, src, dest.layout.ty);

         // See if we can avoid an allocation. This is the counterpart to `read_immediate_raw`,
@@ -769,15 +752,15 @@
         // cover all the bytes!
         match value {
             Immediate::Scalar(scalar) => {
-                match dest.layout.abi {
-                    Abi::Scalar(_) => {} // fine
-                    _ => span_bug!(
+                let Abi::Scalar(s) = dest.layout.abi else { span_bug!(
                         self.cur_span(),
                         "write_immediate_to_mplace: invalid Scalar layout: {:#?}",
                         dest.layout
-                    ),
-                }
-                alloc.write_scalar(alloc_range(Size::ZERO, dest.layout.size), scalar)
+                    )
+                };
+                let size = s.size(&tcx);
+                assert_eq!(dest.layout.size, size, "abi::Scalar size does not match layout size");
+                alloc.write_scalar(alloc_range(Size::ZERO, size), scalar)
             }
             Immediate::ScalarPair(a_val, b_val) => {
                 // We checked `ptr_align` above, so all fields will have the alignment they need.
@@ -791,6 +774,7 @@
                 };
                 let (a_size, b_size) = (a.size(&tcx), b.size(&tcx));
                 let b_offset = a_size.align_to(b.align(&tcx).abi);
+                assert!(b_offset.bytes() > 0); // in `operand_field` we use the offset to tell apart the fields
                 // It is tempting to verify `b_offset` against `layout.fields.offset(1)`,
                 // but that does not work: We could be a newtype around a pair, then the
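The comment cut off above is making a subtle point: a newtype around a pair has a single outer field at offset 0, while the `ScalarPair` split still happens at the inner pair's second-field offset, so comparing `b_offset` with `layout.fields.offset(1)` would be wrong (the outer type does not even have a field 1). The offset mismatch is visible in plain Rust; `repr(C)` is used so the offsets below are guaranteed, and `offset_of!` needs Rust 1.77+:

```rust
use std::mem::offset_of;

// `repr(C)` pins the field order, so `b` is guaranteed to sit after `a`,
// rounded up to u32 alignment (4 on common targets).
#[repr(C)]
#[allow(dead_code)]
struct Pair {
    a: u8,
    b: u32,
}

// A newtype around the pair: exactly one field, at offset 0.
#[repr(C)]
#[allow(dead_code)]
struct Newtype {
    inner: Pair,
}

fn main() {
    // The newtype's *field map* knows only offset 0 ...
    assert_eq!(offset_of!(Newtype, inner), 0);
    // ... while the pair's second scalar lives at offset 4, which is what
    // `b_offset` computes; the two notions simply don't line up here.
    assert_eq!(offset_of!(Pair, b), 4);
}
```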

compiler/rustc_const_eval/src/interpret/validity.rs

@@ -645,17 +645,18 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, 'tcx, M> {
         // i.e. that we go over the `check_init` below.
         let size = scalar_layout.size(self.ecx);
         let is_full_range = match scalar_layout {
-            ScalarAbi::Initialized { valid_range, .. } => {
+            ScalarAbi::Initialized { .. } => {
                 if M::enforce_number_validity(self.ecx) {
                     false // not "full" since uninit is not accepted
                 } else {
-                    valid_range.is_full_for(size)
+                    scalar_layout.is_always_valid(self.ecx)
                 }
             }
             ScalarAbi::Union { .. } => true,
         };
         if is_full_range {
-            // Nothing to check
+            // Nothing to check. Crucially, we don't even `read_scalar` before this point,
+            // since that would fail for `Union` scalars!
             return Ok(());
         }
         // We have something to check: it must at least be initialized.
@@ -688,7 +689,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, 'tcx, M> {
             } else {
                 return Ok(());
             }
-        } else if scalar_layout.valid_range(self.ecx).is_full_for(size) {
+        } else if scalar_layout.is_always_valid(self.ecx) {
             // Easy. (This is reachable if `enforce_number_validity` is set.)
             return Ok(());
         } else {
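`is_always_valid` packages up the question the old code asked with `valid_range.is_full_for(size)`: does the valid range admit every bit pattern of the scalar's size, leaving nothing for the validity check to do? A toy version of that predicate, with a `WrappingRange` modeled loosely after rustc's type of the same name:

```rust
/// An inclusive range of valid bit patterns that may wrap around zero,
/// loosely modeled after rustc's `WrappingRange`.
#[derive(Clone, Copy)]
struct WrappingRange {
    start: u128,
    end: u128,
}

impl WrappingRange {
    /// True if every bit pattern of a `size_bytes`-sized value is valid,
    /// i.e. the range covers the whole value space and there is nothing
    /// left to range-check during validation.
    fn is_full_for(self, size_bytes: u32) -> bool {
        let max = if size_bytes >= 16 { u128::MAX } else { (1u128 << (size_bytes * 8)) - 1 };
        // Full exactly when `end + 1` wraps around to `start`.
        self.start == (self.end.wrapping_add(1) & max)
    }
}

fn main() {
    // u8: all 256 patterns valid -> always valid, nothing to check.
    assert!(WrappingRange { start: 0, end: 255 }.is_full_for(1));
    // bool: only 0..=1 valid -> must be range-checked.
    assert!(!WrappingRange { start: 0, end: 1 }.is_full_for(1));
    // A NonZeroU8-style niche: 1..=255 excludes 0 -> not full either.
    assert!(!WrappingRange { start: 1, end: 255 }.is_full_for(1));
}
```

Note the separate `enforce_number_validity` branch in the hunk above: even a full range is not "always valid" when uninitialized bytes must be rejected, which is why the rewritten code only consults `is_always_valid` in the `else` arm.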