Mark scalar layout unions so that backends that do not support partially initialized scalars can special-case them.

Oli Scherer 2022-03-03 12:02:12 +00:00
parent 2ed6786404
commit d32ce37a17
37 changed files with 356 additions and 288 deletions
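
For orientation before the per-file diffs: a minimal, self-contained sketch of the two-variant `Scalar` this commit introduces (the full definition is in the `rustc_target::abi` hunks below). The `Primitive` and `WrappingRange` types here are simplified stand-ins and the `HasDataLayout` context parameter is dropped, so this illustrates the shape of the new API, not the real rustc types.

// Simplified stand-ins (assumptions, not the real rustc types).
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Primitive {
    bits: u32, // the real type distinguishes Int/F32/F64/Pointer
}

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct WrappingRange {
    start: u128,
    end: u128,
}

impl WrappingRange {
    fn full(bits: u32) -> Self {
        let end = if bits == 128 { u128::MAX } else { (1u128 << bits) - 1 };
        WrappingRange { start: 0, end }
    }
}

#[derive(Clone, Copy, Debug)]
enum Scalar {
    // Always initialized; the restricted range is what niches come from.
    Initialized { value: Primitive, valid_range: WrappingRange },
    // Inside a union: correct register class, but bytes may be undef,
    // so there is no valid range and never a niche.
    Union { value: Primitive },
}

impl Scalar {
    // Primitive representation, ignoring validity and initializedness.
    fn primitive(self) -> Primitive {
        match self {
            Scalar::Initialized { value, .. } | Scalar::Union { value } => value,
        }
    }

    fn to_union(self) -> Scalar {
        Scalar::Union { value: self.primitive() }
    }

    // A union accepts every bit pattern, so its range is always full.
    fn valid_range(self) -> WrappingRange {
        match self {
            Scalar::Initialized { valid_range, .. } => valid_range,
            Scalar::Union { value } => WrappingRange::full(value.bits),
        }
    }
}

fn main() {
    let bool_scalar = Scalar::Initialized {
        value: Primitive { bits: 8 },
        valid_range: WrappingRange { start: 0, end: 1 },
    };
    // Putting a field into a union keeps its register kind, drops the niche.
    let in_union = bool_scalar.to_union();
    assert_eq!(in_union.primitive(), bool_scalar.primitive());
    assert_eq!(in_union.valid_range(), WrappingRange::full(8));
}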

View File

@ -21,7 +21,7 @@ pub(crate) fn pointer_ty(tcx: TyCtxt<'_>) -> types::Type {
}
pub(crate) fn scalar_to_clif_type(tcx: TyCtxt<'_>, scalar: Scalar) -> Type {
match scalar.value {
match scalar.primitive() {
Primitive::Int(int, _sign) => match int {
Integer::I8 => types::I8,
Integer::I16 => types::I16,

View File

@ -105,7 +105,7 @@ pub(crate) fn codegen_get_discriminant<'tcx>(
// Decode the discriminant (specifically if it's niche-encoded).
match *tag_encoding {
TagEncoding::Direct => {
let signed = match tag_scalar.value {
let signed = match tag_scalar.primitive() {
Int(_, signed) => signed,
_ => false,
};

View File

@ -50,7 +50,7 @@ fn codegen_field<'tcx>(
}
fn scalar_pair_calculate_b_offset(tcx: TyCtxt<'_>, a_scalar: Scalar, b_scalar: Scalar) -> Offset32 {
let b_offset = a_scalar.value.size(&tcx).align_to(b_scalar.value.align(&tcx).abi);
let b_offset = a_scalar.size(&tcx).align_to(b_scalar.align(&tcx).abi);
Offset32::new(b_offset.bytes().try_into().unwrap())
}
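
The rule this helper applies (and which recurs in each backend below) is easy to check by hand; a sketch with plain byte counts in place of `Size`/`Align`, assuming power-of-two alignments:

// The second scalar of a ScalarPair starts at the first scalar's size
// rounded up to the second scalar's ABI alignment.
fn align_to(size: u64, align: u64) -> u64 {
    // Round `size` up to the next multiple of `align` (a power of two).
    (size + align - 1) & !(align - 1)
}

fn main() {
    // A 1-byte scalar followed by a 4-byte, 4-aligned scalar:
    assert_eq!(align_to(1, 4), 4);
    // Two 8-byte scalars (e.g. a wide pointer on x86_64) pack back to back:
    assert_eq!(align_to(8, 8), 8);
}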

View File

@ -694,11 +694,11 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
}
fn scalar_load_metadata<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, load: RValue<'gcc>, scalar: &abi::Scalar) {
let vr = scalar.valid_range.clone();
match scalar.value {
let vr = scalar.valid_range(bx);
match scalar.primitive() {
abi::Int(..) => {
if !scalar.is_always_valid(bx) {
bx.range_metadata(load, scalar.valid_range);
bx.range_metadata(load, vr);
}
}
abi::Pointer if vr.start < vr.end && !vr.contains(0) => {
@ -720,7 +720,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
OperandValue::Immediate(self.to_immediate(load, place.layout))
}
else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi {
let b_offset = a.value.size(self).align_to(b.value.align(self).abi);
let b_offset = a.size(self).align_to(b.align(self).abi);
let pair_type = place.layout.gcc_type(self, false);
let mut load = |i, scalar: &abi::Scalar, align| {

View File

@ -158,14 +158,14 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
}
fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, ty: Type<'gcc>) -> RValue<'gcc> {
let bitsize = if layout.is_bool() { 1 } else { layout.value.size(self).bits() };
let bitsize = if layout.is_bool() { 1 } else { layout.size(self).bits() };
match cv {
Scalar::Int(ScalarInt::ZST) => {
assert_eq!(0, layout.value.size(self).bytes());
assert_eq!(0, layout.size(self).bytes());
self.const_undef(self.type_ix(0))
}
Scalar::Int(int) => {
let data = int.assert_bits(layout.value.size(self));
let data = int.assert_bits(layout.size(self));
// FIXME(antoyo): there are some issues with using the u128 code that follows, so hard-code
// the paths for floating-point values.
@ -209,7 +209,7 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
let base_addr = self.const_bitcast(base_addr, self.usize_type);
let offset = self.context.new_rvalue_from_long(self.usize_type, offset.bytes() as i64);
let ptr = self.const_bitcast(base_addr + offset, ptr_type);
if layout.value != Pointer {
if layout.primitive() != Pointer {
self.const_bitcast(ptr.dereference(None).to_rvalue(), ty)
}
else {

View File

@ -328,7 +328,7 @@ pub fn const_alloc_to_gcc<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, alloc: ConstAl
interpret::Pointer::new(alloc_id, Size::from_bytes(ptr_offset)),
&cx.tcx,
),
abi::Scalar { value: Primitive::Pointer, valid_range: WrappingRange { start: 0, end: !0 } },
abi::Scalar::Initialized { value: Primitive::Pointer, valid_range: WrappingRange { start: 0, end: !0 } },
cx.type_i8p(),
));
next_offset = offset + pointer_size;

View File

@ -224,7 +224,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
}
fn scalar_gcc_type_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, scalar: &abi::Scalar, offset: Size) -> Type<'gcc> {
match scalar.value {
match scalar.primitive() {
Int(i, true) => cx.type_from_integer(i),
Int(i, false) => cx.type_from_unsigned_integer(i),
F32 => cx.type_f32(),
@ -282,7 +282,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
Size::ZERO
}
else {
a.value.size(cx).align_to(b.value.align(cx).abi)
a.size(cx).align_to(b.align(cx).abi)
};
self.scalar_gcc_type_at(cx, scalar, offset)
}

View File

@ -510,9 +510,9 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
// If the value is a boolean, the range is 0..2 and that ultimately
// becomes 0..0 when the type becomes i1, which would be rejected
// by the LLVM verifier.
if let Int(..) = scalar.value {
if let Int(..) = scalar.primitive() {
if !scalar.is_bool() && !scalar.is_always_valid(bx) {
bx.range_metadata(callsite, scalar.valid_range);
bx.range_metadata(callsite, scalar.valid_range(bx));
}
}
}

View File

@ -753,7 +753,7 @@ fn dummy_output_type<'ll>(cx: &CodegenCx<'ll, '_>, reg: InlineAsmRegClass) -> &'
/// Helper function to get the LLVM type for a Scalar. Pointers are returned as
/// the equivalent integer type.
fn llvm_asm_scalar_type<'ll>(cx: &CodegenCx<'ll, '_>, scalar: Scalar) -> &'ll Type {
match scalar.value {
match scalar.primitive() {
Primitive::Int(Integer::I8, _) => cx.type_i8(),
Primitive::Int(Integer::I16, _) => cx.type_i16(),
Primitive::Int(Integer::I32, _) => cx.type_i32(),
@ -774,7 +774,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
) -> &'ll Value {
match (reg, layout.abi) {
(InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
if let Primitive::Int(Integer::I8, _) = s.value {
if let Primitive::Int(Integer::I8, _) = s.primitive() {
let vec_ty = bx.cx.type_vector(bx.cx.type_i8(), 8);
bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
} else {
@ -785,7 +785,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
let elem_ty = llvm_asm_scalar_type(bx.cx, s);
let count = 16 / layout.size.bytes();
let vec_ty = bx.cx.type_vector(elem_ty, count);
if let Primitive::Pointer = s.value {
if let Primitive::Pointer = s.primitive() {
value = bx.ptrtoint(value, bx.cx.type_isize());
}
bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
@ -800,7 +800,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
}
(InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
if s.value == Primitive::F64 =>
if s.primitive() == Primitive::F64 =>
{
bx.bitcast(value, bx.cx.type_i64())
}
@ -812,7 +812,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
Abi::Scalar(s),
) => {
if let Primitive::Int(Integer::I32, _) = s.value {
if let Primitive::Int(Integer::I32, _) = s.primitive() {
bx.bitcast(value, bx.cx.type_f32())
} else {
value
@ -826,19 +826,21 @@ fn llvm_fixup_input<'ll, 'tcx>(
),
Abi::Scalar(s),
) => {
if let Primitive::Int(Integer::I64, _) = s.value {
if let Primitive::Int(Integer::I64, _) = s.primitive() {
bx.bitcast(value, bx.cx.type_f64())
} else {
value
}
}
(InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => match s.value {
// MIPS only supports register-length arithmetic.
Primitive::Int(Integer::I8 | Integer::I16, _) => bx.zext(value, bx.cx.type_i32()),
Primitive::F32 => bx.bitcast(value, bx.cx.type_i32()),
Primitive::F64 => bx.bitcast(value, bx.cx.type_i64()),
_ => value,
},
(InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
match s.primitive() {
// MIPS only supports register-length arithmetic.
Primitive::Int(Integer::I8 | Integer::I16, _) => bx.zext(value, bx.cx.type_i32()),
Primitive::F32 => bx.bitcast(value, bx.cx.type_i32()),
Primitive::F64 => bx.bitcast(value, bx.cx.type_i64()),
_ => value,
}
}
_ => value,
}
}
@ -852,7 +854,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
) -> &'ll Value {
match (reg, layout.abi) {
(InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
if let Primitive::Int(Integer::I8, _) = s.value {
if let Primitive::Int(Integer::I8, _) = s.primitive() {
bx.extract_element(value, bx.const_i32(0))
} else {
value
@ -860,7 +862,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
}
(InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => {
value = bx.extract_element(value, bx.const_i32(0));
if let Primitive::Pointer = s.value {
if let Primitive::Pointer = s.primitive() {
value = bx.inttoptr(value, layout.llvm_type(bx.cx));
}
value
@ -875,7 +877,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
}
(InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
if s.value == Primitive::F64 =>
if s.primitive() == Primitive::F64 =>
{
bx.bitcast(value, bx.cx.type_f64())
}
@ -887,7 +889,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
Abi::Scalar(s),
) => {
if let Primitive::Int(Integer::I32, _) = s.value {
if let Primitive::Int(Integer::I32, _) = s.primitive() {
bx.bitcast(value, bx.cx.type_i32())
} else {
value
@ -901,20 +903,22 @@ fn llvm_fixup_output<'ll, 'tcx>(
),
Abi::Scalar(s),
) => {
if let Primitive::Int(Integer::I64, _) = s.value {
if let Primitive::Int(Integer::I64, _) = s.primitive() {
bx.bitcast(value, bx.cx.type_i64())
} else {
value
}
}
(InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => match s.value {
// MIPS only supports register-length arithmetic.
Primitive::Int(Integer::I8, _) => bx.trunc(value, bx.cx.type_i8()),
Primitive::Int(Integer::I16, _) => bx.trunc(value, bx.cx.type_i16()),
Primitive::F32 => bx.bitcast(value, bx.cx.type_f32()),
Primitive::F64 => bx.bitcast(value, bx.cx.type_f64()),
_ => value,
},
(InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
match s.primitive() {
// MIPS only supports register-length arithmetic.
Primitive::Int(Integer::I8, _) => bx.trunc(value, bx.cx.type_i8()),
Primitive::Int(Integer::I16, _) => bx.trunc(value, bx.cx.type_i16()),
Primitive::F32 => bx.bitcast(value, bx.cx.type_f32()),
Primitive::F64 => bx.bitcast(value, bx.cx.type_f64()),
_ => value,
}
}
_ => value,
}
}
@ -927,7 +931,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
) -> &'ll Type {
match (reg, layout.abi) {
(InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
if let Primitive::Int(Integer::I8, _) = s.value {
if let Primitive::Int(Integer::I8, _) = s.primitive() {
cx.type_vector(cx.type_i8(), 8)
} else {
layout.llvm_type(cx)
@ -946,7 +950,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
cx.type_vector(elem_ty, count * 2)
}
(InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
if s.value == Primitive::F64 =>
if s.primitive() == Primitive::F64 =>
{
cx.type_i64()
}
@ -958,7 +962,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
Abi::Scalar(s),
) => {
if let Primitive::Int(Integer::I32, _) = s.value {
if let Primitive::Int(Integer::I32, _) = s.primitive() {
cx.type_f32()
} else {
layout.llvm_type(cx)
@ -972,19 +976,21 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
),
Abi::Scalar(s),
) => {
if let Primitive::Int(Integer::I64, _) = s.value {
if let Primitive::Int(Integer::I64, _) = s.primitive() {
cx.type_f64()
} else {
layout.llvm_type(cx)
}
}
(InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => match s.value {
// MIPS only supports register-length arithmetic.
Primitive::Int(Integer::I8 | Integer::I16, _) => cx.type_i32(),
Primitive::F32 => cx.type_i32(),
Primitive::F64 => cx.type_i64(),
_ => layout.llvm_type(cx),
},
(InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
match s.primitive() {
// MIPS only supports register-length arithmetic.
Primitive::Int(Integer::I8 | Integer::I16, _) => cx.type_i32(),
Primitive::F32 => cx.type_i32(),
Primitive::F64 => cx.type_i64(),
_ => layout.llvm_type(cx),
}
}
_ => layout.llvm_type(cx),
}
}

View File

@ -484,14 +484,14 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
bx.noundef_metadata(load);
}
match scalar.value {
match scalar.primitive() {
abi::Int(..) => {
if !scalar.is_always_valid(bx) {
bx.range_metadata(load, scalar.valid_range);
bx.range_metadata(load, scalar.valid_range(bx));
}
}
abi::Pointer => {
if !scalar.valid_range.contains(0) {
if !scalar.valid_range(bx).contains(0) {
bx.nonnull_metadata(load);
}
@ -525,7 +525,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
});
OperandValue::Immediate(self.to_immediate(llval, place.layout))
} else if let abi::Abi::ScalarPair(a, b) = place.layout.abi {
let b_offset = a.value.size(self).align_to(b.value.align(self).abi);
let b_offset = a.size(self).align_to(b.align(self).abi);
let pair_ty = place.layout.llvm_type(self);
let mut load = |i, scalar: abi::Scalar, layout, align, offset| {

View File

@ -221,16 +221,16 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
}
fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, llty: &'ll Type) -> &'ll Value {
let bitsize = if layout.is_bool() { 1 } else { layout.value.size(self).bits() };
let bitsize = if layout.is_bool() { 1 } else { layout.size(self).bits() };
match cv {
Scalar::Int(ScalarInt::ZST) => {
assert_eq!(0, layout.value.size(self).bytes());
assert_eq!(0, layout.size(self).bytes());
self.const_undef(self.type_ix(0))
}
Scalar::Int(int) => {
let data = int.assert_bits(layout.value.size(self));
let data = int.assert_bits(layout.size(self));
let llval = self.const_uint_big(self.type_ix(bitsize), data);
if layout.value == Pointer {
if layout.primitive() == Pointer {
unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
} else {
self.const_bitcast(llval, llty)
@ -269,7 +269,7 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
1,
)
};
if layout.value != Pointer {
if layout.primitive() != Pointer {
unsafe { llvm::LLVMConstPtrToInt(llval, llty) }
} else {
self.const_bitcast(llval, llty)

View File

@ -109,7 +109,10 @@ pub fn const_alloc_to_llvm<'ll>(cx: &CodegenCx<'ll, '_>, alloc: ConstAllocation<
Pointer::new(alloc_id, Size::from_bytes(ptr_offset)),
&cx.tcx,
),
Scalar { value: Primitive::Pointer, valid_range: WrappingRange { start: 0, end: !0 } },
Scalar::Initialized {
value: Primitive::Pointer,
valid_range: WrappingRange { start: 0, end: !0 },
},
cx.type_i8p_ext(address_space),
));
next_offset = offset + pointer_size;

View File

@ -118,7 +118,7 @@ fn tag_base_type<'ll, 'tcx>(
Variants::Multiple { tag_encoding: TagEncoding::Niche { .. }, tag, .. } => {
// Niche tags are always normalized to unsigned integers of the correct size.
match tag.value {
match tag.primitive() {
Primitive::Int(t, _) => t,
Primitive::F32 => Integer::I32,
Primitive::F64 => Integer::I64,
@ -136,7 +136,7 @@ fn tag_base_type<'ll, 'tcx>(
Variants::Multiple { tag_encoding: TagEncoding::Direct, tag, .. } => {
// Direct tags preserve the sign.
tag.value.to_ty(cx.tcx)
tag.primitive().to_ty(cx.tcx)
}
}
}
@ -425,7 +425,7 @@ fn compute_discriminant_value<'ll, 'tcx>(
let value = (variant_index.as_u32() as u128)
.wrapping_sub(niche_variants.start().as_u32() as u128)
.wrapping_add(niche_start);
let value = tag.value.size(cx).truncate(value);
let value = tag.size(cx).truncate(value);
// NOTE(eddyb) do *NOT* remove this assert, until
// we pass the full 128-bit value to LLVM, otherwise
// truncation will be silent and remain undetected.
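
The niche formula in `compute_discriminant_value` above can be worked through concretely; a sketch with the tag truncation inlined (`tag_bits` standing in for `tag.size(cx)`):

// tag = (variant_index - niche_variants.start) + niche_start, mod 2^tag_bits.
fn niche_tag(variant_index: u128, niche_variants_start: u128, niche_start: u128, tag_bits: u32) -> u128 {
    let mask = if tag_bits == 128 { u128::MAX } else { (1u128 << tag_bits) - 1 };
    variant_index.wrapping_sub(niche_variants_start).wrapping_add(niche_start) & mask
}

fn main() {
    // Option<bool>: the niche variant None (index 0) is encoded as the
    // bool's first spare value, 2, in an 8-bit tag.
    assert_eq!(niche_tag(0, 0, 2, 8), 2);
}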

View File

@ -134,7 +134,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
sym::va_arg => {
match fn_abi.ret.layout.abi {
abi::Abi::Scalar(scalar) => {
match scalar.value {
match scalar.primitive() {
Primitive::Int(..) => {
if self.cx().size_of(ret_ty).bytes() < 4 {
// `va_arg` should not be called on an integer type

View File

@ -309,7 +309,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
scalar: Scalar,
offset: Size,
) -> &'a Type {
match scalar.value {
match scalar.primitive() {
Int(i, _) => cx.type_from_integer(i),
F32 => cx.type_f32(),
F64 => cx.type_f64(),
@ -362,8 +362,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
return cx.type_i1();
}
let offset =
if index == 0 { Size::ZERO } else { a.value.size(cx).align_to(b.value.align(cx).abi) };
let offset = if index == 0 { Size::ZERO } else { a.size(cx).align_to(b.align(cx).abi) };
self.scalar_llvm_type_at(cx, scalar, offset)
}

View File

@ -464,13 +464,13 @@ fn push_debuginfo_type_name<'tcx>(
// calculate the range of values for the dataful variant
let dataful_discriminant_range =
dataful_variant_layout.largest_niche().unwrap().scalar.valid_range;
dataful_variant_layout.largest_niche().unwrap().valid_range;
let min = dataful_discriminant_range.start;
let min = tag.value.size(&tcx).truncate(min);
let min = tag.size(&tcx).truncate(min);
let max = dataful_discriminant_range.end;
let max = tag.value.size(&tcx).truncate(max);
let max = tag.size(&tcx).truncate(max);
let dataful_variant_name = variant_name(*dataful_variant);
write!(output, ", {}, {}, {}", min, max, dataful_variant_name).unwrap();

View File

@ -1572,7 +1572,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
match (src.layout.abi, dst.layout.abi) {
(abi::Abi::Scalar(src_scalar), abi::Abi::Scalar(dst_scalar)) => {
// HACK(eddyb) LLVM doesn't like `bitcast`s between pointers and non-pointers.
if (src_scalar.value == abi::Pointer) == (dst_scalar.value == abi::Pointer) {
if (src_scalar.primitive() == abi::Pointer)
== (dst_scalar.primitive() == abi::Pointer)
{
assert_eq!(src.layout.size, dst.layout.size);
// NOTE(eddyb) the `from_immediate` and `to_immediate_scalar`

View File

@ -207,11 +207,11 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
// Extract a scalar component from a pair.
(OperandValue::Pair(a_llval, b_llval), Abi::ScalarPair(a, b)) => {
if offset.bytes() == 0 {
assert_eq!(field.size, a.value.size(bx.cx()));
assert_eq!(field.size, a.size(bx.cx()));
OperandValue::Immediate(a_llval)
} else {
assert_eq!(offset, a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi));
assert_eq!(field.size, b.value.size(bx.cx()));
assert_eq!(offset, a.size(bx.cx()).align_to(b.align(bx.cx()).abi));
assert_eq!(field.size, b.size(bx.cx()));
OperandValue::Immediate(b_llval)
}
}
@ -316,7 +316,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
bug!("store_with_flags: invalid ScalarPair layout: {:#?}", dest.layout);
};
let ty = bx.backend_type(dest.layout);
let b_offset = a_scalar.value.size(bx).align_to(b_scalar.value.align(bx).abi);
let b_offset = a_scalar.size(bx).align_to(b_scalar.align(bx).abi);
let llptr = bx.struct_gep(ty, dest.llval, 0);
let val = bx.from_immediate(a);

View File

@ -100,7 +100,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
self.llval
}
Abi::ScalarPair(a, b)
if offset == a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi) =>
if offset == a.size(bx.cx()).align_to(b.align(bx.cx()).abi) =>
{
// Offset matches second field.
let ty = bx.backend_type(self.layout);
@ -234,7 +234,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
// Decode the discriminant (specifically if it's niche-encoded).
match *tag_encoding {
TagEncoding::Direct => {
let signed = match tag_scalar.value {
let signed = match tag_scalar.primitive() {
// We use `i1` for bytes that are always `0` or `1`,
// e.g., `#[repr(i8)] enum E { A, B }`, but we can't
// let LLVM interpret the `i1` as signed, because

View File

@ -299,7 +299,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let mut signed = false;
if let Abi::Scalar(scalar) = operand.layout.abi {
if let Int(_, s) = scalar.value {
if let Int(_, s) = scalar.primitive() {
// We use `i1` for bytes that are always `0` or `1`,
// e.g., `#[repr(i8)] enum E { A, B }`, but we can't
// let LLVM interpret the `i1` as signed, because
@ -307,15 +307,17 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
signed = !scalar.is_bool() && s;
if !scalar.is_always_valid(bx.cx())
&& scalar.valid_range.end >= scalar.valid_range.start
&& scalar.valid_range(bx.cx()).end
>= scalar.valid_range(bx.cx()).start
{
// We want `table[e as usize ± k]` to not
// have bound checks, and this is the most
// convenient place to put the `assume`s.
if scalar.valid_range.start > 0 {
let enum_value_lower_bound = bx
.cx()
.const_uint_big(ll_t_in, scalar.valid_range.start);
if scalar.valid_range(bx.cx()).start > 0 {
let enum_value_lower_bound = bx.cx().const_uint_big(
ll_t_in,
scalar.valid_range(bx.cx()).start,
);
let cmp_start = bx.icmp(
IntPredicate::IntUGE,
llval,
@ -324,8 +326,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
bx.assume(cmp_start);
}
let enum_value_upper_bound =
bx.cx().const_uint_big(ll_t_in, scalar.valid_range.end);
let enum_value_upper_bound = bx
.cx()
.const_uint_big(ll_t_in, scalar.valid_range(bx.cx()).end);
let cmp_end = bx.icmp(
IntPredicate::IntULE,
llval,

View File

@ -188,7 +188,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let val = self.read_scalar(&args[0])?.check_init()?;
let bits = val.to_bits(layout_of.size)?;
let kind = match layout_of.abi {
Abi::Scalar(scalar) => scalar.value,
Abi::Scalar(scalar) => scalar.primitive(),
_ => span_bug!(
self.cur_span(),
"{} called on invalid type {:?}",

View File

@ -265,6 +265,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}));
};
// It may seem like all types with `Scalar` or `ScalarPair` ABI are fair game at this point.
// However, `MaybeUninit<u64>` is considered a `Scalar` as far as its layout is concerned --
// and yet cannot be represented by an interpreter `Scalar`, since we have to handle the
// case where some of the bytes are initialized and others are not. So, we only permit
// reads from `Scalar`s and `ScalarPair`s that cannot be uninitialized.
match mplace.layout.abi {
Abi::Scalar(..) => {
let scalar = alloc.read_scalar(alloc_range(Size::ZERO, mplace.layout.size))?;
@ -274,7 +279,6 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// We checked `ptr_align` above, so all fields will have the alignment they need.
// We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
// which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
let (a, b) = (a.value, b.value);
let (a_size, b_size) = (a.size(self), b.size(self));
let b_offset = a_size.align_to(b.align(self).abi);
assert!(b_offset.bytes() > 0); // we later use the offset to tell apart the fields
@ -676,7 +680,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// may be a pointer. This is `tag_val.layout`; we just use it for sanity checks.
// Get layout for tag.
let tag_layout = self.layout_of(tag_scalar_layout.value.to_int_ty(*self.tcx))?;
let tag_layout = self.layout_of(tag_scalar_layout.primitive().to_int_ty(*self.tcx))?;
// Read tag and sanity-check `tag_layout`.
let tag_val = self.read_immediate(&self.operand_field(op, tag_field)?)?;
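
The new comment in this hunk is easy to reproduce in a few lines; a sketch of the partially initialized state it describes (nothing here is interpreter API, just plain `std`):

use std::mem::MaybeUninit;

fn main() {
    // `MaybeUninit<u64>` has `Scalar` ABI, yet here only one of its eight
    // bytes is initialized -- a state a single interpreter `Scalar` value
    // (one integer or one pointer) has no way to represent.
    let mut x = MaybeUninit::<u64>::uninit();
    unsafe { x.as_mut_ptr().cast::<u8>().write(0xFF) };
    // Calling `x.assume_init()` now would be UB: seven bytes are still undef.
    let _ = x;
}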

View File

@ -772,13 +772,11 @@ where
// We checked `ptr_align` above, so all fields will have the alignment they need.
// We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
// which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
let (a, b) = match dest.layout.abi {
Abi::ScalarPair(a, b) => (a.value, b.value),
_ => span_bug!(
let Abi::ScalarPair(a, b) = dest.layout.abi else { span_bug!(
self.cur_span(),
"write_immediate_to_mplace: invalid ScalarPair layout: {:#?}",
dest.layout
),
)
};
let (a_size, b_size) = (a.size(&tcx), b.size(&tcx));
let b_offset = a_size.align_to(b.align(&tcx).abi);
@ -1047,7 +1045,7 @@ where
// raw discriminants for enums are isize or bigger during
// their computation, but the in-memory tag is the smallest possible
// representation
let size = tag_layout.value.size(self);
let size = tag_layout.size(self);
let tag_val = size.truncate(discr_val);
let tag_dest = self.place_field(dest, tag_field)?;
@ -1071,7 +1069,7 @@ where
.expect("overflow computing relative variant idx");
// We need to use machine arithmetic when taking into account `niche_start`:
// tag_val = variant_index_relative + niche_start_val
let tag_layout = self.layout_of(tag_layout.value.to_int_ty(*self.tcx))?;
let tag_layout = self.layout_of(tag_layout.primitive().to_int_ty(*self.tcx))?;
let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
let variant_index_relative_val =
ImmTy::from_uint(variant_index_relative, tag_layout);

View File

@ -189,12 +189,15 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// that will take care to make it UB to leave the range, just
// like for transmute).
(abi::Abi::Scalar(caller), abi::Abi::Scalar(callee)) => {
caller.value == callee.value
caller.primitive() == callee.primitive()
}
(
abi::Abi::ScalarPair(caller1, caller2),
abi::Abi::ScalarPair(callee1, callee2),
) => caller1.value == callee1.value && caller2.value == callee2.value,
) => {
caller1.primitive() == callee1.primitive()
&& caller2.primitive() == callee2.primitive()
}
// Be conservative
_ => false,
}

View File

@ -629,12 +629,12 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
op: &OpTy<'tcx, M::PointerTag>,
scalar_layout: ScalarAbi,
) -> InterpResult<'tcx> {
if scalar_layout.valid_range.is_full_for(op.layout.size) {
if scalar_layout.valid_range(self.ecx).is_full_for(op.layout.size) {
// Nothing to check
return Ok(());
}
// At least one value is excluded.
let valid_range = scalar_layout.valid_range;
let valid_range = scalar_layout.valid_range(self.ecx);
let WrappingRange { start, end } = valid_range;
let max_value = op.layout.size.unsigned_int_max();
assert!(end <= max_value);

View File

@ -12,7 +12,7 @@ use rustc_middle::ty::{self, AdtKind, DefIdTree, Ty, TyCtxt, TypeFoldable};
use rustc_span::source_map;
use rustc_span::symbol::sym;
use rustc_span::{Span, Symbol, DUMMY_SP};
use rustc_target::abi::Abi;
use rustc_target::abi::{Abi, WrappingRange};
use rustc_target::abi::{Integer, TagEncoding, Variants};
use rustc_target::spec::abi::Abi as SpecAbi;
@ -796,14 +796,18 @@ crate fn repr_nullable_ptr<'tcx>(
// Return the nullable type this Option-like enum can be safely represented with.
let field_ty_abi = &cx.layout_of(field_ty).unwrap().abi;
if let Abi::Scalar(field_ty_scalar) = field_ty_abi {
match (field_ty_scalar.valid_range.start, field_ty_scalar.valid_range.end) {
(0, x) if x == field_ty_scalar.value.size(&cx.tcx).unsigned_int_max() - 1 => {
match field_ty_scalar.valid_range(cx) {
WrappingRange { start: 0, end }
if end == field_ty_scalar.size(&cx.tcx).unsigned_int_max() - 1 =>
{
return Some(get_nullable_type(cx, field_ty).unwrap());
}
(1, _) => {
WrappingRange { start: 1, .. } => {
return Some(get_nullable_type(cx, field_ty).unwrap());
}
(start, end) => unreachable!("Unhandled start and end range: ({}, {})", start, end),
WrappingRange { start, end } => {
unreachable!("Unhandled start and end range: ({}, {})", start, end)
}
};
}
}
@ -1342,7 +1346,7 @@ impl<'tcx> LateLintPass<'tcx> for VariantSizeDifferences {
return
};
let tag_size = tag.value.size(&cx.tcx).bytes();
let tag_size = tag.size(&cx.tcx).bytes();
debug!(
"enum `{}` is {} bytes large with layout:\n{:#?}",

View File

@ -305,10 +305,10 @@ fn invert_mapping(map: &[u32]) -> Vec<u32> {
impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
fn scalar_pair(&self, a: Scalar, b: Scalar) -> LayoutS<'tcx> {
let dl = self.data_layout();
let b_align = b.value.align(dl);
let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
let b_offset = a.value.size(dl).align_to(b_align.abi);
let size = (b_offset + b.value.size(dl)).align_to(align.abi);
let b_align = b.align(dl);
let align = a.align(dl).max(b_align).max(dl.aggregate_align);
let b_offset = a.size(dl).align_to(b_align.abi);
let size = (b_offset + b.size(dl)).align_to(align.abi);
// HACK(nox): We iter on `b` and then `a` because `max_by_key`
// returns the last maximum.
@ -567,7 +567,10 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
let scalar_unit = |value: Primitive| {
let size = value.size(dl);
assert!(size.bits() <= 128);
Scalar { value, valid_range: WrappingRange { start: 0, end: size.unsigned_int_max() } }
Scalar::Initialized {
value,
valid_range: WrappingRange { start: 0, end: size.unsigned_int_max() },
}
};
let scalar =
|value: Primitive| tcx.intern_layout(LayoutS::scalar(self, scalar_unit(value)));
@ -581,11 +584,14 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
// Basic scalars.
ty::Bool => tcx.intern_layout(LayoutS::scalar(
self,
Scalar { value: Int(I8, false), valid_range: WrappingRange { start: 0, end: 1 } },
Scalar::Initialized {
value: Int(I8, false),
valid_range: WrappingRange { start: 0, end: 1 },
},
)),
ty::Char => tcx.intern_layout(LayoutS::scalar(
self,
Scalar {
Scalar::Initialized {
value: Int(I32, false),
valid_range: WrappingRange { start: 0, end: 0x10FFFF },
},
@ -598,7 +604,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
}),
ty::FnPtr(_) => {
let mut ptr = scalar_unit(Pointer);
ptr.valid_range = ptr.valid_range.with_start(1);
ptr.valid_range_mut().start = 1;
tcx.intern_layout(LayoutS::scalar(self, ptr))
}
@ -616,7 +622,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
let mut data_ptr = scalar_unit(Pointer);
if !ty.is_unsafe_ptr() {
data_ptr.valid_range = data_ptr.valid_range.with_start(1);
data_ptr.valid_range_mut().start = 1;
}
let pointee = tcx.normalize_erasing_regions(param_env, pointee);
@ -632,7 +638,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
ty::Dynamic(..) => {
let mut vtable = scalar_unit(Pointer);
vtable.valid_range = vtable.valid_range.with_start(1);
vtable.valid_range_mut().start = 1;
vtable
}
_ => return Err(LayoutError::Unknown(unsized_part)),
@ -889,14 +895,14 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
// If all non-ZST fields have the same ABI, forward this ABI
if optimize && !field.is_zst() {
// Normalize scalar_unit to the maximal valid range
// Discard valid range information and allow undef
let field_abi = match field.abi {
Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
Abi::Scalar(x) => Abi::Scalar(x.to_union()),
Abi::ScalarPair(x, y) => {
Abi::ScalarPair(scalar_unit(x.value), scalar_unit(y.value))
Abi::ScalarPair(x.to_union(), y.to_union())
}
Abi::Vector { element: x, count } => {
Abi::Vector { element: scalar_unit(x.value), count }
Abi::Vector { element: x.to_union(), count }
}
Abi::Uninhabited | Abi::Aggregate { .. } => {
Abi::Aggregate { sized: true }
@ -1000,14 +1006,16 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
if let Bound::Included(start) = start {
// FIXME(eddyb) this might be incorrect - it doesn't
// account for wrap-around (end < start) ranges.
assert!(scalar.valid_range.start <= start);
scalar.valid_range.start = start;
let valid_range = scalar.valid_range_mut();
assert!(valid_range.start <= start);
valid_range.start = start;
}
if let Bound::Included(end) = end {
// FIXME(eddyb) this might be incorrect - it doesn't
// account for wrap-around (end < start) ranges.
assert!(scalar.valid_range.end >= end);
scalar.valid_range.end = end;
let valid_range = scalar.valid_range_mut();
assert!(valid_range.end >= end);
valid_range.end = end;
}
// Update `largest_niche` if we have introduced a larger niche.
@ -1133,9 +1141,15 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
// guaranteed to be initialised, not the
// other primitive.
if offset.bytes() == 0 {
Abi::ScalarPair(niche_scalar, scalar_unit(second.value))
Abi::ScalarPair(
niche_scalar,
scalar_unit(second.primitive()),
)
} else {
Abi::ScalarPair(scalar_unit(first.value), niche_scalar)
Abi::ScalarPair(
scalar_unit(first.primitive()),
niche_scalar,
)
}
}
_ => Abi::Aggregate { sized: true },
@ -1314,7 +1328,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
}
let tag_mask = ity.size().unsigned_int_max();
let tag = Scalar {
let tag = Scalar::Initialized {
value: Int(ity, signed),
valid_range: WrappingRange {
start: (min as u128 & tag_mask),
@ -1325,7 +1339,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
// Without the latter check, aligned enums with custom discriminant values
// would result in an ICE; see issue #92464 for more info
if tag.value.size(dl) == size || variants.iter().all(|layout| layout.is_empty()) {
if tag.size(dl) == size || variants.iter().all(|layout| layout.is_empty()) {
abi = Abi::Scalar(tag);
} else {
// Try to use a ScalarPair for all tagged enums.
@ -1345,7 +1359,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
}
};
let prim = match field.abi {
Abi::Scalar(scalar) => scalar.value,
Abi::Scalar(scalar) => scalar.primitive(),
_ => {
common_prim = None;
break;
@ -1599,7 +1613,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
let max_discr = (info.variant_fields.len() - 1) as u128;
let discr_int = Integer::fit_unsigned(max_discr);
let discr_int_ty = discr_int.to_ty(tcx, false);
let tag = Scalar {
let tag = Scalar::Initialized {
value: Primitive::Int(discr_int, false),
valid_range: WrappingRange { start: 0, end: max_discr },
};
@ -1898,7 +1912,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
adt_kind.into(),
adt_packed,
match tag_encoding {
TagEncoding::Direct => Some(tag.value.size(self)),
TagEncoding::Direct => Some(tag.size(self)),
_ => None,
},
variant_infos,
@ -2304,7 +2318,7 @@ where
let tag_layout = |tag: Scalar| -> TyAndLayout<'tcx> {
TyAndLayout {
layout: tcx.intern_layout(LayoutS::scalar(cx, tag)),
ty: tag.value.to_ty(tcx),
ty: tag.primitive().to_ty(tcx),
}
};
@ -3079,11 +3093,9 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
}
// Only pointer types handled below.
if scalar.value != Pointer {
return;
}
let Scalar::Initialized { value: Pointer, valid_range} = scalar else { return };
if !scalar.valid_range.contains(0) {
if !valid_range.contains(0) {
attrs.set(ArgAttribute::NonNull);
}

View File

@ -6,7 +6,7 @@ use crate::abi::{self, HasDataLayout, Size, TyAbiInterface};
fn extend_integer_width_mips<Ty>(arg: &mut ArgAbi<'_, Ty>, bits: u64) {
// Always sign extend u32 values on 64-bit mips
if let abi::Abi::Scalar(scalar) = arg.layout.abi {
if let abi::Int(i, signed) = scalar.value {
if let abi::Int(i, signed) = scalar.primitive() {
if !signed && i.size().bits() == 32 {
if let PassMode::Direct(ref mut attrs) = arg.mode {
attrs.ext(ArgExtension::Sext);
@ -25,7 +25,7 @@ where
C: HasDataLayout,
{
match ret.layout.field(cx, i).abi {
abi::Abi::Scalar(scalar) => match scalar.value {
abi::Abi::Scalar(scalar) => match scalar.primitive() {
abi::F32 => Some(Reg::f32()),
abi::F64 => Some(Reg::f64()),
_ => None,
@ -110,7 +110,7 @@ where
// We only care about aligned doubles
if let abi::Abi::Scalar(scalar) = field.abi {
if let abi::F64 = scalar.value {
if let abi::F64 = scalar.primitive() {
if offset.is_aligned(dl.f64_align.abi) {
// Insert enough integers to cover [last_offset, offset)
assert!(last_offset.is_aligned(dl.f64_align.abi));

View File

@ -348,7 +348,7 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
// The primitive for this algorithm.
Abi::Scalar(scalar) => {
let kind = match scalar.value {
let kind = match scalar.primitive() {
abi::Int(..) | abi::Pointer => RegKind::Integer,
abi::F32 | abi::F64 => RegKind::Float,
};
@ -482,7 +482,7 @@ impl<'a, Ty> ArgAbi<'a, Ty> {
Abi::Scalar(scalar) => PassMode::Direct(scalar_attrs(&layout, scalar, Size::ZERO)),
Abi::ScalarPair(a, b) => PassMode::Pair(
scalar_attrs(&layout, a, Size::ZERO),
scalar_attrs(&layout, b, a.value.size(cx).align_to(b.value.align(cx).abi)),
scalar_attrs(&layout, b, a.size(cx).align_to(b.align(cx).abi)),
),
Abi::Vector { .. } => PassMode::Direct(ArgAttributes::new()),
Abi::Aggregate { .. } => PassMode::Direct(ArgAttributes::new()),
@ -534,7 +534,7 @@ impl<'a, Ty> ArgAbi<'a, Ty> {
pub fn extend_integer_width_to(&mut self, bits: u64) {
// Only integers have signedness
if let Abi::Scalar(scalar) = self.layout.abi {
if let abi::Int(i, signed) = scalar.value {
if let abi::Int(i, signed) = scalar.primitive() {
if i.size().bits() < bits {
if let PassMode::Direct(ref mut attrs) = self.mode {
if signed {

View File

@ -44,7 +44,7 @@ where
Ty: TyAbiInterface<'a, C> + Copy,
{
match arg_layout.abi {
Abi::Scalar(scalar) => match scalar.value {
Abi::Scalar(scalar) => match scalar.primitive() {
abi::Int(..) | abi::Pointer => {
if arg_layout.size.bits() > xlen {
return Err(CannotUseFpConv);
@ -298,7 +298,7 @@ fn classify_arg<'a, Ty, C>(
fn extend_integer_width<'a, Ty>(arg: &mut ArgAbi<'a, Ty>, xlen: u64) {
if let Abi::Scalar(scalar) = arg.layout.abi {
if let abi::Int(i, _) = scalar.value {
if let abi::Int(i, _) = scalar.primitive() {
// 32-bit integers are always sign-extended
if i.size().bits() == 32 && xlen > 32 {
if let PassMode::Direct(ref mut attrs) = arg.mode {

View File

@ -20,7 +20,7 @@ where
{
let dl = cx.data_layout();
if scalar.value != abi::F32 && scalar.value != abi::F64 {
if !scalar.primitive().is_float() {
return data;
}
@ -56,7 +56,7 @@ where
return data;
}
if scalar.value == abi::F32 {
if scalar.primitive() == abi::F32 {
data.arg_attribute = ArgAttribute::InReg;
data.prefix[data.prefix_index] = Some(Reg::f32());
data.last_offset = offset + Reg::f32().size;
@ -79,17 +79,15 @@ where
C: HasDataLayout,
{
data = arg_scalar(cx, &scalar1, offset, data);
if scalar1.value == abi::F32 {
offset += Reg::f32().size;
} else if scalar2.value == abi::F64 {
offset += Reg::f64().size;
} else if let abi::Int(i, _signed) = scalar1.value {
offset += i.size();
} else if scalar1.value == abi::Pointer {
offset = offset + Reg::i64().size;
match (scalar1.primitive(), scalar2.primitive()) {
(abi::F32, _) => offset += Reg::f32().size,
(_, abi::F64) => offset += Reg::f64().size,
(abi::Int(i, _signed), _) => offset += i.size(),
(abi::Pointer, _) => offset += Reg::i64().size,
_ => {}
}
if (offset.raw % 4) != 0 && (scalar2.value == abi::F32 || scalar2.value == abi::F64) {
if (offset.raw % 4) != 0 && scalar2.primitive().is_float() {
offset.raw += 4 - (offset.raw % 4);
}
data = arg_scalar(cx, &scalar2, offset, data);

View File

@ -49,7 +49,7 @@ where
let mut c = match layout.abi {
Abi::Uninhabited => return Ok(()),
Abi::Scalar(scalar) => match scalar.value {
Abi::Scalar(scalar) => match scalar.primitive() {
abi::Int(..) | abi::Pointer => Class::Int,
abi::F32 | abi::F64 => Class::Sse,
},

View File

@ -752,6 +752,10 @@ pub struct WrappingRange {
}
impl WrappingRange {
pub fn full(size: Size) -> Self {
Self { start: 0, end: size.unsigned_int_max() }
}
/// Returns `true` if `v` is contained in the range.
#[inline(always)]
pub fn contains(&self, v: u128) -> bool {
@ -799,13 +803,23 @@ impl fmt::Debug for WrappingRange {
/// Information about one scalar component of a Rust type.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
#[derive(HashStable_Generic)]
pub struct Scalar {
pub value: Primitive,
pub enum Scalar {
Initialized {
value: Primitive,
// FIXME(eddyb) always use the shortest range, e.g., by finding
// the largest space between two consecutive valid values and
// taking everything else as the (shortest) valid range.
pub valid_range: WrappingRange,
// FIXME(eddyb) always use the shortest range, e.g., by finding
// the largest space between two consecutive valid values and
// taking everything else as the (shortest) valid range.
valid_range: WrappingRange,
},
Union {
/// Even for unions, we need to use the correct registers for the kind of
/// values inside the union, so we keep the `Primitive` type around. We
/// also use it to compute the size of the scalar.
/// However, unions never have niches and even allow undef,
/// so there is no `valid_range`.
value: Primitive,
},
}
impl Scalar {
@ -813,14 +827,58 @@ impl Scalar {
pub fn is_bool(&self) -> bool {
matches!(
self,
Scalar { value: Int(I8, false), valid_range: WrappingRange { start: 0, end: 1 } }
Scalar::Initialized {
value: Int(I8, false),
valid_range: WrappingRange { start: 0, end: 1 }
}
)
}
/// Get the primitive representation of this type, ignoring the valid range and whether the
/// value is allowed to be undefined (due to being a union).
pub fn primitive(&self) -> Primitive {
match *self {
Scalar::Initialized { value, .. } | Scalar::Union { value } => value,
}
}
pub fn align(self, cx: &impl HasDataLayout) -> AbiAndPrefAlign {
self.primitive().align(cx)
}
pub fn size(self, cx: &impl HasDataLayout) -> Size {
self.primitive().size(cx)
}
#[inline]
pub fn to_union(&self) -> Self {
Self::Union { value: self.primitive() }
}
#[inline]
pub fn valid_range(&self, cx: &impl HasDataLayout) -> WrappingRange {
match *self {
Scalar::Initialized { valid_range, .. } => valid_range,
Scalar::Union { value } => WrappingRange::full(value.size(cx)),
}
}
/// Allows the caller to mutate the valid range. This operation will panic if attempted on a union.
#[inline]
pub fn valid_range_mut(&mut self) -> &mut WrappingRange {
match self {
Scalar::Initialized { valid_range, .. } => valid_range,
Scalar::Union { .. } => panic!("cannot change the valid range of a union"),
}
}
/// Returns `true` if all possible numbers are valid, i.e., `valid_range` covers the whole layout.
#[inline]
pub fn is_always_valid<C: HasDataLayout>(&self, cx: &C) -> bool {
self.valid_range.is_full_for(self.value.size(cx))
match *self {
Scalar::Initialized { valid_range, .. } => valid_range.is_full_for(self.size(cx)),
Scalar::Union { .. } => true,
}
}
}
@ -988,7 +1046,7 @@ impl Abi {
#[inline]
pub fn is_signed(&self) -> bool {
match self {
Abi::Scalar(scal) => match scal.value {
Abi::Scalar(scal) => match scal.primitive() {
Primitive::Int(_, signed) => signed,
_ => false,
},
@ -1060,17 +1118,19 @@ pub enum TagEncoding {
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub struct Niche {
pub offset: Size,
pub scalar: Scalar,
pub value: Primitive,
pub valid_range: WrappingRange,
}
impl Niche {
pub fn from_scalar<C: HasDataLayout>(cx: &C, offset: Size, scalar: Scalar) -> Option<Self> {
let niche = Niche { offset, scalar };
let Scalar::Initialized { value, valid_range } = scalar else { return None };
let niche = Niche { offset, value, valid_range };
if niche.available(cx) > 0 { Some(niche) } else { None }
}
pub fn available<C: HasDataLayout>(&self, cx: &C) -> u128 {
let Scalar { value, valid_range: v } = self.scalar;
let Self { value, valid_range: v, .. } = *self;
let size = value.size(cx);
assert!(size.bits() <= 128);
let max_value = size.unsigned_int_max();
@ -1083,7 +1143,7 @@ impl Niche {
pub fn reserve<C: HasDataLayout>(&self, cx: &C, count: u128) -> Option<(u128, Scalar)> {
assert!(count > 0);
let Scalar { value, valid_range: v } = self.scalar;
let Self { value, valid_range: v, .. } = *self;
let size = value.size(cx);
assert!(size.bits() <= 128);
let max_value = size.unsigned_int_max();
@ -1107,12 +1167,12 @@ impl Niche {
// If niche zero is already reserved, the selection of bounds is of little interest.
let move_start = |v: WrappingRange| {
let start = v.start.wrapping_sub(count) & max_value;
Some((start, Scalar { value, valid_range: v.with_start(start) }))
Some((start, Scalar::Initialized { value, valid_range: v.with_start(start) }))
};
let move_end = |v: WrappingRange| {
let start = v.end.wrapping_add(1) & max_value;
let end = v.end.wrapping_add(count) & max_value;
Some((start, Scalar { value, valid_range: v.with_end(end) }))
Some((start, Scalar::Initialized { value, valid_range: v.with_end(end) }))
};
let distance_end_zero = max_value - v.end;
if v.start > v.end {
@ -1172,8 +1232,8 @@ pub struct LayoutS<'a> {
impl<'a> LayoutS<'a> {
pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self {
let largest_niche = Niche::from_scalar(cx, Size::ZERO, scalar);
let size = scalar.value.size(cx);
let align = scalar.value.align(cx);
let size = scalar.size(cx);
let align = scalar.align(cx);
LayoutS {
variants: Variants::Single { index: VariantIdx::new(0) },
fields: FieldsShape::Primitive,
@ -1325,7 +1385,7 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
C: HasDataLayout,
{
match self.abi {
Abi::Scalar(scalar) => scalar.value.is_float(),
Abi::Scalar(scalar) => scalar.primitive().is_float(),
Abi::Aggregate { .. } => {
if self.fields.count() == 1 && self.fields.offset(0).bytes() == 0 {
self.field(cx, 0).is_single_fp_element(cx)
@ -1371,7 +1431,7 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
let scalar_allows_raw_init = move |s: Scalar| -> bool {
if zero {
// The range must contain 0.
s.valid_range.contains(0)
s.valid_range(cx).contains(0)
} else {
// The range must include all values.
s.is_always_valid(cx)
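
Since `Niche::from_scalar` above now bails out for `Scalar::Union`, niches only ever come from `Initialized` ranges. The wrapping count behind `Niche::available` can be sketched as follows (a simplified rendering of the arithmetic shown in the hunk above, with the size taken as a plain bit count):

// Spare values lie outside the wrapping valid range: they run from
// `end + 1` through `start - 1`, modulo 2^bits.
fn available(start: u128, end: u128, bits: u32) -> u128 {
    let max_value = if bits == 128 { u128::MAX } else { (1u128 << bits) - 1 };
    start.wrapping_sub(end.wrapping_add(1)) & max_value
}

fn main() {
    // bool (I8, valid range 0..=1) leaves 254 spare values for niches.
    assert_eq!(available(0, 1, 8), 254);
    // A non-null 64-bit pointer (1..=u64::MAX) leaves exactly one: zero.
    assert_eq!(available(1, u64::MAX as u128, 64), 1);
}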

View File

@ -1769,7 +1769,7 @@ fn document_type_layout(w: &mut Buffer, cx: &Context<'_>, ty_def_id: DefId) {
let tag_size = if let TagEncoding::Niche { .. } = tag_encoding {
0
} else if let Primitive::Int(i, _) = tag.value {
} else if let Primitive::Int(i, _) = tag.primitive() {
i.size().bytes()
} else {
span_bug!(tcx.def_span(ty_def_id), "tag is neither niche nor int")

View File

@ -10,7 +10,7 @@ error: layout_of(E) = Layout {
],
},
variants: Multiple {
tag: Scalar {
tag: Initialized {
value: Int(
I32,
false,
@ -86,13 +86,11 @@ error: layout_of(E) = Layout {
offset: Size {
raw: 0,
},
scalar: Scalar {
value: Int(
I32,
false,
),
valid_range: 0..=0,
},
value: Int(
I32,
false,
),
valid_range: 0..=0,
},
),
align: AbiAndPrefAlign {
@ -133,14 +131,14 @@ error: layout_of(S) = Layout {
index: 0,
},
abi: ScalarPair(
Scalar {
Initialized {
value: Int(
I32,
true,
),
valid_range: 0..=4294967295,
},
Scalar {
Initialized {
value: Int(
I32,
true,
@ -202,7 +200,7 @@ error: layout_of(std::result::Result<i32, i32>) = Layout {
],
},
variants: Multiple {
tag: Scalar {
tag: Initialized {
value: Int(
I32,
false,
@ -271,14 +269,14 @@ error: layout_of(std::result::Result<i32, i32>) = Layout {
],
},
abi: ScalarPair(
Scalar {
Initialized {
value: Int(
I32,
false,
),
valid_range: 0..=1,
},
Scalar {
Initialized {
value: Int(
I32,
true,
@ -291,13 +289,11 @@ error: layout_of(std::result::Result<i32, i32>) = Layout {
offset: Size {
raw: 0,
},
scalar: Scalar {
value: Int(
I32,
false,
),
valid_range: 0..=1,
},
value: Int(
I32,
false,
),
valid_range: 0..=1,
},
),
align: AbiAndPrefAlign {
@ -321,7 +317,7 @@ error: layout_of(i32) = Layout {
index: 0,
},
abi: Scalar(
Scalar {
Initialized {
value: Int(
I32,
true,

View File

@ -10,7 +10,7 @@ error: layout_of(A) = Layout {
],
},
variants: Multiple {
tag: Scalar {
tag: Initialized {
value: Int(
I8,
false,
@ -47,7 +47,7 @@ error: layout_of(A) = Layout {
],
},
abi: Scalar(
Scalar {
Initialized {
value: Int(
I8,
false,
@ -60,13 +60,11 @@ error: layout_of(A) = Layout {
offset: Size {
raw: 0,
},
scalar: Scalar {
value: Int(
I8,
false,
),
valid_range: 0..=0,
},
value: Int(
I8,
false,
),
valid_range: 0..=0,
},
),
align: AbiAndPrefAlign {
@ -98,7 +96,7 @@ error: layout_of(B) = Layout {
],
},
variants: Multiple {
tag: Scalar {
tag: Initialized {
value: Int(
I8,
false,
@ -135,7 +133,7 @@ error: layout_of(B) = Layout {
],
},
abi: Scalar(
Scalar {
Initialized {
value: Int(
I8,
false,
@ -148,13 +146,11 @@ error: layout_of(B) = Layout {
offset: Size {
raw: 0,
},
scalar: Scalar {
value: Int(
I8,
false,
),
valid_range: 255..=255,
},
value: Int(
I8,
false,
),
valid_range: 255..=255,
},
),
align: AbiAndPrefAlign {
@ -186,7 +182,7 @@ error: layout_of(C) = Layout {
],
},
variants: Multiple {
tag: Scalar {
tag: Initialized {
value: Int(
I16,
false,
@ -223,7 +219,7 @@ error: layout_of(C) = Layout {
],
},
abi: Scalar(
Scalar {
Initialized {
value: Int(
I16,
false,
@ -236,13 +232,11 @@ error: layout_of(C) = Layout {
offset: Size {
raw: 0,
},
scalar: Scalar {
value: Int(
I16,
false,
),
valid_range: 256..=256,
},
value: Int(
I16,
false,
),
valid_range: 256..=256,
},
),
align: AbiAndPrefAlign {
@ -274,7 +268,7 @@ error: layout_of(P) = Layout {
],
},
variants: Multiple {
tag: Scalar {
tag: Initialized {
value: Int(
I32,
false,
@ -311,7 +305,7 @@ error: layout_of(P) = Layout {
],
},
abi: Scalar(
Scalar {
Initialized {
value: Int(
I32,
false,
@ -324,13 +318,11 @@ error: layout_of(P) = Layout {
offset: Size {
raw: 0,
},
scalar: Scalar {
value: Int(
I32,
false,
),
valid_range: 268435456..=268435456,
},
value: Int(
I32,
false,
),
valid_range: 268435456..=268435456,
},
),
align: AbiAndPrefAlign {
@ -362,7 +354,7 @@ error: layout_of(T) = Layout {
],
},
variants: Multiple {
tag: Scalar {
tag: Initialized {
value: Int(
I32,
true,
@ -399,7 +391,7 @@ error: layout_of(T) = Layout {
],
},
abi: Scalar(
Scalar {
Initialized {
value: Int(
I32,
true,
@ -412,13 +404,11 @@ error: layout_of(T) = Layout {
offset: Size {
raw: 0,
},
scalar: Scalar {
value: Int(
I32,
true,
),
valid_range: 2164260864..=2164260864,
},
value: Int(
I32,
true,
),
valid_range: 2164260864..=2164260864,
},
),
align: AbiAndPrefAlign {

View File

@ -10,7 +10,7 @@ error: layout_of(A) = Layout {
],
},
variants: Multiple {
tag: Scalar {
tag: Initialized {
value: Int(
I8,
false,
@ -47,7 +47,7 @@ error: layout_of(A) = Layout {
],
},
abi: Scalar(
Scalar {
Initialized {
value: Int(
I8,
false,
@ -60,13 +60,11 @@ error: layout_of(A) = Layout {
offset: Size {
raw: 0,
},
scalar: Scalar {
value: Int(
I8,
false,
),
valid_range: 0..=0,
},
value: Int(
I8,
false,
),
valid_range: 0..=0,
},
),
align: AbiAndPrefAlign {
@ -98,7 +96,7 @@ error: layout_of(B) = Layout {
],
},
variants: Multiple {
tag: Scalar {
tag: Initialized {
value: Int(
I8,
false,
@ -135,7 +133,7 @@ error: layout_of(B) = Layout {
],
},
abi: Scalar(
Scalar {
Initialized {
value: Int(
I8,
false,
@ -148,13 +146,11 @@ error: layout_of(B) = Layout {
offset: Size {
raw: 0,
},
scalar: Scalar {
value: Int(
I8,
false,
),
valid_range: 255..=255,
},
value: Int(
I8,
false,
),
valid_range: 255..=255,
},
),
align: AbiAndPrefAlign {
@ -186,7 +182,7 @@ error: layout_of(C) = Layout {
],
},
variants: Multiple {
tag: Scalar {
tag: Initialized {
value: Int(
I16,
false,
@ -223,7 +219,7 @@ error: layout_of(C) = Layout {
],
},
abi: Scalar(
Scalar {
Initialized {
value: Int(
I16,
false,
@ -236,13 +232,11 @@ error: layout_of(C) = Layout {
offset: Size {
raw: 0,
},
scalar: Scalar {
value: Int(
I16,
false,
),
valid_range: 256..=256,
},
value: Int(
I16,
false,
),
valid_range: 256..=256,
},
),
align: AbiAndPrefAlign {
@ -274,7 +268,7 @@ error: layout_of(P) = Layout {
],
},
variants: Multiple {
tag: Scalar {
tag: Initialized {
value: Int(
I32,
false,
@ -311,7 +305,7 @@ error: layout_of(P) = Layout {
],
},
abi: Scalar(
Scalar {
Initialized {
value: Int(
I32,
false,
@ -324,13 +318,11 @@ error: layout_of(P) = Layout {
offset: Size {
raw: 0,
},
scalar: Scalar {
value: Int(
I32,
false,
),
valid_range: 268435456..=268435456,
},
value: Int(
I32,
false,
),
valid_range: 268435456..=268435456,
},
),
align: AbiAndPrefAlign {
@ -362,7 +354,7 @@ error: layout_of(T) = Layout {
],
},
variants: Multiple {
tag: Scalar {
tag: Initialized {
value: Int(
I32,
true,
@ -399,7 +391,7 @@ error: layout_of(T) = Layout {
],
},
abi: Scalar(
Scalar {
Initialized {
value: Int(
I32,
true,
@ -412,13 +404,11 @@ error: layout_of(T) = Layout {
offset: Size {
raw: 0,
},
scalar: Scalar {
value: Int(
I32,
true,
),
valid_range: 2164260864..=2164260864,
},
value: Int(
I32,
true,
),
valid_range: 2164260864..=2164260864,
},
),
align: AbiAndPrefAlign {