Rollup merge of #126159 - RalfJung:scalarint-size-mismatch, r=oli-obk
ScalarInt: size mismatches are a bug, do not delay the panic

Cc [Zulip](https://rust-lang.zulipchat.com/#narrow/stream/146212-t-compiler.2Fconst-eval/topic/Why.20are.20ScalarInt.20to.20iN.2FuN.20methods.20fallible.3F)

r? ``@oli-obk``

This commit is contained in: commit 2d7f7ffba5
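The diff below is mostly a mechanical rename: the fallible `try_to_*`/`assert_*` accessors on `ScalarInt` become infallible `to_*` accessors that panic (via `bug!`) when the stored size does not match the requested one, so call sites can drop their `.unwrap()`/`unwrap_or_else` boilerplate. A minimal, self-contained sketch of that pattern follows; it is not rustc code, and the `MiniScalarInt` type, its field layout, and its method names are illustrative assumptions that only loosely mirror the patch.

```rust
// Sketch of the accessor pattern this commit moves to: the size check still
// exists, but a mismatch panics inside the accessor instead of returning a
// Result that every caller has to unwrap.

#[derive(Clone, Copy, Debug)]
struct MiniScalarInt {
    data: u128,
    size_bytes: u8,
}

impl MiniScalarInt {
    /// Fallible form: kept for callers that genuinely want to recover from a
    /// size mismatch (in the real patch, that is mainly the interpreter/Miri).
    fn try_to_bits(self, target_size_bytes: u8) -> Result<u128, u8> {
        if self.size_bytes == target_size_bytes { Ok(self.data) } else { Err(self.size_bytes) }
    }

    /// Infallible form: a mismatch is a caller bug, so panic right away.
    fn to_bits(self, target_size_bytes: u8) -> u128 {
        self.try_to_bits(target_size_bytes).unwrap_or_else(|actual| {
            panic!("expected int of size {target_size_bytes}, but got size {actual}")
        })
    }

    /// Typed convenience accessor in the style of the new `to_u32`/`to_u64` helpers.
    fn to_u32(self) -> u32 {
        self.to_bits(4).try_into().unwrap()
    }
}

fn main() {
    let x = MiniScalarInt { data: 42, size_bytes: 4 };
    // Before the change, call sites looked like `x.try_to_u32().unwrap()`;
    // afterwards the panic lives inside the accessor itself.
    assert_eq!(x.to_u32(), 42);
    println!("ok");
}
```

The fallible `try_to_bits` form stays available for code such as Miri that wants to be resilient to size mismatches, as the new doc comment in the diff spells out.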
@@ -110,7 +110,7 @@ pub(crate) fn codegen_const_value<'tcx>(
 if fx.clif_type(layout.ty).is_some() {
 return CValue::const_val(fx, layout, int);
 } else {
-let raw_val = int.size().truncate(int.assert_bits(int.size()));
+let raw_val = int.size().truncate(int.to_bits(int.size()));
 let val = match int.size().bytes() {
 1 => fx.bcx.ins().iconst(types::I8, raw_val as i64),
 2 => fx.bcx.ins().iconst(types::I16, raw_val as i64),
@@ -501,12 +501,12 @@ pub(crate) fn mir_operand_get_const_val<'tcx>(
 Ordering::Equal => scalar_int,
 Ordering::Less => match ty.kind() {
 ty::Uint(_) => ScalarInt::try_from_uint(
-scalar_int.assert_uint(scalar_int.size()),
+scalar_int.to_uint(scalar_int.size()),
 fx.layout_of(*ty).size,
 )
 .unwrap(),
 ty::Int(_) => ScalarInt::try_from_int(
-scalar_int.assert_int(scalar_int.size()),
+scalar_int.to_int(scalar_int.size()),
 fx.layout_of(*ty).size,
 )
 .unwrap(),
@@ -902,7 +902,7 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
 .span_fatal(span, "Index argument for `_mm_cmpestri` is not a constant");
 };

-let imm8 = imm8.try_to_u8().unwrap_or_else(|_| panic!("kind not scalar: {:?}", imm8));
+let imm8 = imm8.to_u8();

 codegen_inline_asm_inner(
 fx,
@@ -955,7 +955,7 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
 .span_fatal(span, "Index argument for `_mm_cmpestrm` is not a constant");
 };

-let imm8 = imm8.try_to_u8().unwrap_or_else(|_| panic!("kind not scalar: {:?}", imm8));
+let imm8 = imm8.to_u8();

 codegen_inline_asm_inner(
 fx,
@@ -1003,7 +1003,7 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
 );
 };

-let imm8 = imm8.try_to_u8().unwrap_or_else(|_| panic!("kind not scalar: {:?}", imm8));
+let imm8 = imm8.to_u8();

 codegen_inline_asm_inner(
 fx,
@@ -1040,7 +1040,7 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
 );
 };

-let imm8 = imm8.try_to_u8().unwrap_or_else(|_| panic!("kind not scalar: {:?}", imm8));
+let imm8 = imm8.to_u8();

 codegen_inline_asm_inner(
 fx,
@@ -1195,7 +1195,7 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
 .span_fatal(span, "Func argument for `_mm_sha1rnds4_epu32` is not a constant");
 };

-let func = func.try_to_u8().unwrap_or_else(|_| panic!("kind not scalar: {:?}", func));
+let func = func.to_u8();

 codegen_inline_asm_inner(
 fx,
@@ -147,8 +147,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(

 let total_len = lane_count * 2;

-let indexes =
-idx.iter().map(|idx| idx.unwrap_leaf().try_to_u32().unwrap()).collect::<Vec<u32>>();
+let indexes = idx.iter().map(|idx| idx.unwrap_leaf().to_u32()).collect::<Vec<u32>>();

 for &idx in &indexes {
 assert!(u64::from(idx) < total_len, "idx {} out of range 0..{}", idx, total_len);
@@ -282,9 +281,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
 fx.tcx.dcx().span_fatal(span, "Index argument for `simd_insert` is not a constant");
 };

-let idx: u32 = idx_const
-.try_to_u32()
-.unwrap_or_else(|_| panic!("kind not scalar: {:?}", idx_const));
+let idx: u32 = idx_const.to_u32();
 let (lane_count, _lane_ty) = base.layout().ty.simd_size_and_type(fx.tcx);
 if u64::from(idx) >= lane_count {
 fx.tcx.dcx().span_fatal(
@@ -330,9 +327,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
 return;
 };

-let idx = idx_const
-.try_to_u32()
-.unwrap_or_else(|_| panic!("kind not scalar: {:?}", idx_const));
+let idx = idx_const.to_u32();
 let (lane_count, _lane_ty) = v.layout().ty.simd_size_and_type(fx.tcx);
 if u64::from(idx) >= lane_count {
 fx.tcx.dcx().span_fatal(
@@ -327,7 +327,7 @@ impl<'tcx> CValue<'tcx> {

 let val = match layout.ty.kind() {
 ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
-let const_val = const_val.assert_bits(layout.size);
+let const_val = const_val.to_bits(layout.size);
 let lsb = fx.bcx.ins().iconst(types::I64, const_val as u64 as i64);
 let msb = fx.bcx.ins().iconst(types::I64, (const_val >> 64) as u64 as i64);
 fx.bcx.ins().iconcat(lsb, msb)
@@ -339,7 +339,7 @@ impl<'tcx> CValue<'tcx> {
 | ty::Ref(..)
 | ty::RawPtr(..)
 | ty::FnPtr(..) => {
-let raw_val = const_val.size().truncate(const_val.assert_bits(layout.size));
+let raw_val = const_val.size().truncate(const_val.to_bits(layout.size));
 fx.bcx.ins().iconst(clif_ty, raw_val as i64)
 }
 ty::Float(FloatTy::F32) => {
@@ -166,7 +166,7 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
 let bitsize = if layout.is_bool() { 1 } else { layout.size(self).bits() };
 match cv {
 Scalar::Int(int) => {
-let data = int.assert_bits(layout.size(self));
+let data = int.to_bits(layout.size(self));

 // FIXME(antoyo): there's some issues with using the u128 code that follows, so hard-code
 // the paths for floating-point values.
@@ -244,7 +244,7 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
 let bitsize = if layout.is_bool() { 1 } else { layout.size(self).bits() };
 match cv {
 Scalar::Int(int) => {
-let data = int.assert_bits(layout.size(self));
+let data = int.to_bits(layout.size(self));
 let llval = self.const_uint_big(self.type_ix(bitsize), data);
 if matches!(layout.primitive(), Pointer(_)) {
 unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
@@ -1223,7 +1223,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
 .iter()
 .enumerate()
 .map(|(arg_idx, val)| {
-let idx = val.unwrap_leaf().try_to_i32().unwrap();
+let idx = val.unwrap_leaf().to_i32();
 if idx >= i32::try_from(total_len).unwrap() {
 bx.sess().dcx().emit_err(InvalidMonomorphization::SimdIndexOutOfBounds {
 span,
@@ -163,7 +163,7 @@ pub fn asm_const_to_str<'tcx>(
 let mir::ConstValue::Scalar(scalar) = const_value else {
 span_bug!(sp, "expected Scalar for promoted asm const, but got {:#?}", const_value)
 };
-let value = scalar.assert_bits(ty_and_layout.size);
+let value = scalar.assert_scalar_int().to_bits(ty_and_layout.size);
 match ty_and_layout.ty.kind() {
 ty::Uint(_) => value.to_string(),
 ty::Int(int_ty) => match int_ty.normalize(tcx.sess.target.pointer_width) {
@@ -95,10 +95,10 @@ fn const_to_valtree_inner<'tcx>(
 }
 ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => {
 let val = ecx.read_immediate(place)?;
-let val = val.to_scalar();
+let val = val.to_scalar_int().unwrap();
 *num_nodes += 1;

-Ok(ty::ValTree::Leaf(val.assert_int()))
+Ok(ty::ValTree::Leaf(val))
 }

 ty::Pat(base, ..) => {
@@ -125,7 +125,7 @@ fn const_to_valtree_inner<'tcx>(
 let val = val.to_scalar();
 // We are in the CTFE machine, so ptr-to-int casts will fail.
 // This can only be `Ok` if `val` already is an integer.
-let Ok(val) = val.try_to_int() else {
+let Ok(val) = val.try_to_scalar_int() else {
 return Err(ValTreeCreationError::NonSupportedType);
 };
 // It's just a ScalarInt!
@@ -411,7 +411,7 @@ fn valtree_into_mplace<'tcx>(
 ty::Adt(def, _) if def.is_enum() => {
 // First element of valtree corresponds to variant
 let scalar_int = branches[0].unwrap_leaf();
-let variant_idx = VariantIdx::from_u32(scalar_int.try_to_u32().unwrap());
+let variant_idx = VariantIdx::from_u32(scalar_int.to_u32());
 let variant = def.variant(variant_idx);
 debug!(?variant);

@@ -123,14 +123,14 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
 // (`tag_bits` itself is only used for error messages below.)
 let tag_bits = tag_val
 .to_scalar()
-.try_to_int()
+.try_to_scalar_int()
 .map_err(|dbg_val| err_ub!(InvalidTag(dbg_val)))?
-.assert_bits(tag_layout.size);
+.to_bits(tag_layout.size);
 // Cast bits from tag layout to discriminant layout.
 // After the checks we did above, this cannot fail, as
 // discriminants are int-like.
 let discr_val = self.int_to_int_or_float(&tag_val, discr_layout).unwrap();
-let discr_bits = discr_val.to_scalar().assert_bits(discr_layout.size);
+let discr_bits = discr_val.to_scalar().to_bits(discr_layout.size)?;
 // Convert discriminant to variant index, and catch invalid discriminants.
 let index = match *ty.kind() {
 ty::Adt(adt, _) => {
@@ -152,7 +152,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
 // discriminant (encoded in niche/tag) and variant index are the same.
 let variants_start = niche_variants.start().as_u32();
 let variants_end = niche_variants.end().as_u32();
-let variant = match tag_val.try_to_int() {
+let variant = match tag_val.try_to_scalar_int() {
 Err(dbg_val) => {
 // So this is a pointer then, and casting to an int failed.
 // Can only happen during CTFE.
@@ -167,7 +167,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
 untagged_variant
 }
 Ok(tag_bits) => {
-let tag_bits = tag_bits.assert_bits(tag_layout.size);
+let tag_bits = tag_bits.to_bits(tag_layout.size);
 // We need to use machine arithmetic to get the relative variant idx:
 // variant_index_relative = tag_val - niche_start_val
 let tag_val = ImmTy::from_uint(tag_bits, tag_layout);
@@ -175,7 +175,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
 let variant_index_relative_val =
 self.binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?;
 let variant_index_relative =
-variant_index_relative_val.to_scalar().assert_bits(tag_val.layout.size);
+variant_index_relative_val.to_scalar().to_bits(tag_val.layout.size)?;
 // Check if this is in the range that indicates an actual discriminant.
 if variant_index_relative <= u128::from(variants_end - variants_start) {
 let variant_index_relative = u32::try_from(variant_index_relative)
@@ -294,8 +294,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
 ImmTy::from_uint(variant_index_relative, tag_layout);
 let tag = self
 .binary_op(mir::BinOp::Add, &variant_index_relative_val, &niche_start_val)?
-.to_scalar()
-.assert_int();
+.to_scalar_int()?;
 Ok(Some((tag, tag_field)))
 }
 }
@@ -519,7 +519,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
 // `x % y != 0` or `y == 0` or `x == T::MIN && y == -1`.
 // First, check x % y != 0 (or if that computation overflows).
 let rem = self.binary_op(BinOp::Rem, a, b)?;
-if rem.to_scalar().assert_bits(a.layout.size) != 0 {
+if rem.to_scalar().to_bits(a.layout.size)? != 0 {
 throw_ub_custom!(
 fluent::const_eval_exact_div_has_remainder,
 a = format!("{a}"),
@@ -1344,7 +1344,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
 /// Test if this value might be null.
 /// If the machine does not support ptr-to-int casts, this is conservative.
 pub fn scalar_may_be_null(&self, scalar: Scalar<M::Provenance>) -> InterpResult<'tcx, bool> {
-Ok(match scalar.try_to_int() {
+Ok(match scalar.try_to_scalar_int() {
 Ok(int) => int.is_null(),
 Err(_) => {
 // Can only happen during CTFE.
@@ -87,6 +87,12 @@ impl<Prov: Provenance> Immediate<Prov> {
 }
 }

+#[inline]
+#[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
+pub fn to_scalar_int(self) -> ScalarInt {
+self.to_scalar().try_to_scalar_int().unwrap()
+}
+
 #[inline]
 #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
 pub fn to_scalar_pair(self) -> (Scalar<Prov>, Scalar<Prov>) {
@@ -219,19 +225,11 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
 Self::from_scalar(Scalar::from(s), layout)
 }

-#[inline]
-pub fn try_from_uint(i: impl Into<u128>, layout: TyAndLayout<'tcx>) -> Option<Self> {
-Some(Self::from_scalar(Scalar::try_from_uint(i, layout.size)?, layout))
-}
 #[inline]
 pub fn from_uint(i: impl Into<u128>, layout: TyAndLayout<'tcx>) -> Self {
 Self::from_scalar(Scalar::from_uint(i, layout.size), layout)
 }

-#[inline]
-pub fn try_from_int(i: impl Into<i128>, layout: TyAndLayout<'tcx>) -> Option<Self> {
-Some(Self::from_scalar(Scalar::try_from_int(i, layout.size)?, layout))
-}
 #[inline]
 pub fn from_int(i: impl Into<i128>, layout: TyAndLayout<'tcx>) -> Self {
 Self::from_scalar(Scalar::from_int(i, layout.size), layout)
@@ -276,7 +274,8 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
 #[inline]
 pub fn to_const_int(self) -> ConstInt {
 assert!(self.layout.ty.is_integral());
-let int = self.to_scalar().assert_int();
+let int = self.imm.to_scalar_int();
+assert_eq!(int.size(), self.layout.size);
 ConstInt::new(int, self.layout.ty.is_signed(), self.layout.ty.is_ptr_sized_integral())
 }

@@ -95,10 +95,10 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
 let l = left.to_scalar_int()?;
 let r = right.to_scalar_int()?;
 // Prepare to convert the values to signed or unsigned form.
-let l_signed = || l.assert_int(left.layout.size);
-let l_unsigned = || l.assert_uint(left.layout.size);
-let r_signed = || r.assert_int(right.layout.size);
-let r_unsigned = || r.assert_uint(right.layout.size);
+let l_signed = || l.to_int(left.layout.size);
+let l_unsigned = || l.to_uint(left.layout.size);
+let r_signed = || r.to_int(right.layout.size);
+let r_unsigned = || r.to_uint(right.layout.size);

 let throw_ub_on_overflow = match bin_op {
 AddUnchecked => Some(sym::unchecked_add),
@@ -653,8 +653,8 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
 let WrappingRange { start, end } = valid_range;
 let max_value = size.unsigned_int_max();
 assert!(end <= max_value);
-let bits = match scalar.try_to_int() {
-Ok(int) => int.assert_bits(size),
+let bits = match scalar.try_to_scalar_int() {
+Ok(int) => int.to_bits(size),
 Err(_) => {
 // So this is a pointer then, and casting to an int failed.
 // Can only happen during CTFE.
@@ -2385,11 +2385,10 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
 min_len: u64,
 ) -> (Option<Ty<'tcx>>, Ty<'tcx>) {
 let len = match len.eval(self.tcx, self.param_env, span) {
-// FIXME(BoxyUwU): Assert the `Ty` is a `usize`?
 Ok((_, val)) => val
 .try_to_scalar()
-.and_then(|scalar| scalar.try_to_int().ok())
-.and_then(|int| int.try_to_target_usize(self.tcx).ok()),
+.and_then(|scalar| scalar.try_to_scalar_int().ok())
+.map(|int| int.to_target_usize(self.tcx)),
 Err(ErrorHandled::Reported(..)) => {
 let guar = self.error_scrutinee_unfixed_length(span);
 return (Some(Ty::new_error(self.tcx, guar)), arr_ty);
@@ -84,11 +84,11 @@ impl<'tcx> ConstValue<'tcx> {
 }

 pub fn try_to_scalar_int(&self) -> Option<ScalarInt> {
-self.try_to_scalar()?.try_to_int().ok()
+self.try_to_scalar()?.try_to_scalar_int().ok()
 }

 pub fn try_to_bits(&self, size: Size) -> Option<u128> {
-self.try_to_scalar_int()?.try_to_bits(size).ok()
+Some(self.try_to_scalar_int()?.to_bits(size))
 }

 pub fn try_to_bool(&self) -> Option<bool> {
@@ -96,7 +96,7 @@ impl<'tcx> ConstValue<'tcx> {
 }

 pub fn try_to_target_usize(&self, tcx: TyCtxt<'tcx>) -> Option<u64> {
-self.try_to_scalar_int()?.try_to_target_usize(tcx).ok()
+Some(self.try_to_scalar_int()?.to_target_usize(tcx))
 }

 pub fn try_to_bits_for_ty(
@@ -300,7 +300,7 @@ impl<'tcx> Const<'tcx> {

 #[inline]
 pub fn try_to_bits(self, size: Size) -> Option<u128> {
-self.try_to_scalar_int()?.try_to_bits(size).ok()
+Some(self.try_to_scalar_int()?.to_bits(size))
 }

 #[inline]
@@ -367,7 +367,7 @@ impl<'tcx> Const<'tcx> {
 tcx: TyCtxt<'tcx>,
 param_env: ty::ParamEnv<'tcx>,
 ) -> Option<ScalarInt> {
-self.try_eval_scalar(tcx, param_env)?.try_to_int().ok()
+self.try_eval_scalar(tcx, param_env)?.try_to_scalar_int().ok()
 }

 #[inline]
@@ -375,7 +375,7 @@ impl<'tcx> Const<'tcx> {
 let int = self.try_eval_scalar_int(tcx, param_env)?;
 let size =
 tcx.layout_of(param_env.with_reveal_all_normalized(tcx).and(self.ty())).ok()?.size;
-int.try_to_bits(size).ok()
+Some(int.to_bits(size))
 }

 /// Panics if the value cannot be evaluated or doesn't contain a valid integer of the given type.
@@ -391,7 +391,7 @@ impl<'tcx> Const<'tcx> {
 tcx: TyCtxt<'tcx>,
 param_env: ty::ParamEnv<'tcx>,
 ) -> Option<u64> {
-self.try_eval_scalar_int(tcx, param_env)?.try_to_target_usize(tcx).ok()
+Some(self.try_eval_scalar_int(tcx, param_env)?.to_target_usize(tcx))
 }

 #[inline]
@@ -122,16 +122,12 @@ impl<Prov> Scalar<Prov> {
 Scalar::Int(c.into())
 }

-#[inline]
-pub fn try_from_uint(i: impl Into<u128>, size: Size) -> Option<Self> {
-ScalarInt::try_from_uint(i, size).map(Scalar::Int)
-}
-
 #[inline]
 pub fn from_uint(i: impl Into<u128>, size: Size) -> Self {
 let i = i.into();
-Self::try_from_uint(i, size)
+ScalarInt::try_from_uint(i, size)
 .unwrap_or_else(|| bug!("Unsigned value {:#x} does not fit in {} bits", i, size.bits()))
+.into()
 }

 #[inline]
@@ -164,16 +160,12 @@ impl<Prov> Scalar<Prov> {
 Self::from_uint(i, cx.data_layout().pointer_size)
 }

-#[inline]
-pub fn try_from_int(i: impl Into<i128>, size: Size) -> Option<Self> {
-ScalarInt::try_from_int(i, size).map(Scalar::Int)
-}
-
 #[inline]
 pub fn from_int(i: impl Into<i128>, size: Size) -> Self {
 let i = i.into();
-Self::try_from_int(i, size)
+ScalarInt::try_from_int(i, size)
 .unwrap_or_else(|| bug!("Signed value {:#x} does not fit in {} bits", i, size.bits()))
+.into()
 }

 #[inline]
@@ -227,7 +219,7 @@ impl<Prov> Scalar<Prov> {
 }

 /// This is almost certainly not the method you want! You should dispatch on the type
-/// and use `to_{u8,u16,...}`/`scalar_to_ptr` to perform ptr-to-int / int-to-ptr casts as needed.
+/// and use `to_{u8,u16,...}`/`to_pointer` to perform ptr-to-int / int-to-ptr casts as needed.
 ///
 /// This method only exists for the benefit of low-level operations that truly need to treat the
 /// scalar in whatever form it is.
@@ -289,7 +281,7 @@ impl<'tcx, Prov: Provenance> Scalar<Prov> {
 /// The error type is `AllocId`, not `CtfeProvenance`, since `AllocId` is the "minimal"
 /// component all provenance types must have.
 #[inline]
-pub fn try_to_int(self) -> Result<ScalarInt, Scalar<AllocId>> {
+pub fn try_to_scalar_int(self) -> Result<ScalarInt, Scalar<AllocId>> {
 match self {
 Scalar::Int(int) => Ok(int),
 Scalar::Ptr(ptr, sz) => {
@@ -307,13 +299,13 @@ impl<'tcx, Prov: Provenance> Scalar<Prov> {

 #[inline(always)]
 pub fn to_scalar_int(self) -> InterpResult<'tcx, ScalarInt> {
-self.try_to_int().map_err(|_| err_unsup!(ReadPointerAsInt(None)).into())
+self.try_to_scalar_int().map_err(|_| err_unsup!(ReadPointerAsInt(None)).into())
 }

 #[inline(always)]
 #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
-pub fn assert_int(self) -> ScalarInt {
-self.try_to_int().unwrap()
+pub fn assert_scalar_int(self) -> ScalarInt {
+self.try_to_scalar_int().expect("got a pointer where a ScalarInt was expected")
 }

 /// This throws UB (instead of ICEing) on a size mismatch since size mismatches can arise in
@@ -330,13 +322,6 @@ impl<'tcx, Prov: Provenance> Scalar<Prov> {
 })
 }

-#[inline(always)]
-#[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
-pub fn assert_bits(self, target_size: Size) -> u128 {
-self.to_bits(target_size)
-.unwrap_or_else(|_| panic!("assertion failed: {self:?} fits {target_size:?}"))
-}
-
 pub fn to_bool(self) -> InterpResult<'tcx, bool> {
 let val = self.to_u8()?;
 match val {
@@ -1033,8 +1033,8 @@ impl<'tcx> PatRangeBoundary<'tcx> {
 if let (Some(a), Some(b)) = (a.try_to_scalar_int(), b.try_to_scalar_int()) {
 let sz = ty.primitive_size(tcx);
 let cmp = match ty.kind() {
-ty::Uint(_) | ty::Char => a.assert_uint(sz).cmp(&b.assert_uint(sz)),
-ty::Int(_) => a.assert_int(sz).cmp(&b.assert_int(sz)),
+ty::Uint(_) | ty::Char => a.to_uint(sz).cmp(&b.to_uint(sz)),
+ty::Int(_) => a.to_int(sz).cmp(&b.to_int(sz)),
 _ => unreachable!(),
 };
 return Some(cmp);
@@ -376,7 +376,7 @@ impl<'tcx> Const<'tcx> {
 param_env: ParamEnv<'tcx>,
 ) -> Option<(Ty<'tcx>, ScalarInt)> {
 let (ty, scalar) = self.try_eval_scalar(tcx, param_env)?;
-let val = scalar.try_to_int().ok()?;
+let val = scalar.try_to_scalar_int().ok()?;
 Some((ty, val))
 }

@@ -388,7 +388,7 @@ impl<'tcx> Const<'tcx> {
 let (ty, scalar) = self.try_eval_scalar_int(tcx, param_env)?;
 let size = tcx.layout_of(param_env.with_reveal_all_normalized(tcx).and(ty)).ok()?.size;
 // if `ty` does not depend on generic parameters, use an empty param_env
-scalar.try_to_bits(size).ok()
+Some(scalar.to_bits(size))
 }

 #[inline]
@@ -405,7 +405,7 @@ impl<'tcx> Const<'tcx> {
 param_env: ParamEnv<'tcx>,
 ) -> Option<u64> {
 let (_, scalar) = self.try_eval_scalar_int(tcx, param_env)?;
-scalar.try_to_target_usize(tcx).ok()
+Some(scalar.to_target_usize(tcx))
 }

 #[inline]
@@ -246,6 +246,10 @@ impl ScalarInt {
 Self::try_from_uint(i, tcx.data_layout.pointer_size)
 }

+/// Try to convert this ScalarInt to the raw underlying bits.
+/// Fails if the size is wrong. Generally a wrong size should lead to a panic,
+/// but Miri sometimes wants to be resilient to size mismatches,
+/// so the interpreter will generally use this `try` method.
 #[inline]
 pub fn try_to_bits(self, target_size: Size) -> Result<u128, Size> {
 assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST");
@@ -258,165 +262,149 @@ impl ScalarInt {
 }

 #[inline]
-pub fn assert_bits(self, target_size: Size) -> u128 {
+pub fn to_bits(self, target_size: Size) -> u128 {
 self.try_to_bits(target_size).unwrap_or_else(|size| {
 bug!("expected int of size {}, but got size {}", target_size.bytes(), size.bytes())
 })
 }

-/// Tries to convert the `ScalarInt` to an unsigned integer of the given size.
-/// Fails if the size of the `ScalarInt` is not equal to `size` and returns the
-/// `ScalarInt`s size in that case.
+/// Extracts the bits from the scalar without checking the size.
 #[inline]
-pub fn try_to_uint(self, size: Size) -> Result<u128, Size> {
-self.try_to_bits(size)
+pub fn to_bits_unchecked(self) -> u128 {
+self.check_data();
+self.data
+}
+
+/// Converts the `ScalarInt` to an unsigned integer of the given size.
+/// Panics if the size of the `ScalarInt` is not equal to `size`.
+#[inline]
+pub fn to_uint(self, size: Size) -> u128 {
+self.to_bits(size)
+}
+
+/// Converts the `ScalarInt` to `u8`.
+/// Panics if the `size` of the `ScalarInt`in not equal to 1 byte.
+#[inline]
+pub fn to_u8(self) -> u8 {
+self.to_uint(Size::from_bits(8)).try_into().unwrap()
+}
+
+/// Converts the `ScalarInt` to `u16`.
+/// Panics if the size of the `ScalarInt` in not equal to 2 bytes.
+#[inline]
+pub fn to_u16(self) -> u16 {
+self.to_uint(Size::from_bits(16)).try_into().unwrap()
+}
+
+/// Converts the `ScalarInt` to `u32`.
+/// Panics if the `size` of the `ScalarInt` in not equal to 4 bytes.
+#[inline]
+pub fn to_u32(self) -> u32 {
+self.to_uint(Size::from_bits(32)).try_into().unwrap()
+}
+
+/// Converts the `ScalarInt` to `u64`.
+/// Panics if the `size` of the `ScalarInt` in not equal to 8 bytes.
+#[inline]
+pub fn to_u64(self) -> u64 {
+self.to_uint(Size::from_bits(64)).try_into().unwrap()
+}
+
+/// Converts the `ScalarInt` to `u128`.
+/// Panics if the `size` of the `ScalarInt` in not equal to 16 bytes.
+#[inline]
+pub fn to_u128(self) -> u128 {
+self.to_uint(Size::from_bits(128))
 }

 #[inline]
-pub fn assert_uint(self, size: Size) -> u128 {
-self.assert_bits(size)
+pub fn to_target_usize(&self, tcx: TyCtxt<'_>) -> u64 {
+self.to_uint(tcx.data_layout.pointer_size).try_into().unwrap()
 }

-// Tries to convert the `ScalarInt` to `u8`. Fails if the `size` of the `ScalarInt`
-// in not equal to 1 byte and returns the `size` value of the `ScalarInt` in
-// that case.
+/// Converts the `ScalarInt` to `bool`.
+/// Panics if the `size` of the `ScalarInt` is not equal to 1 byte.
+/// Errors if it is not a valid `bool`.
 #[inline]
-pub fn try_to_u8(self) -> Result<u8, Size> {
-self.try_to_uint(Size::from_bits(8)).map(|v| u8::try_from(v).unwrap())
-}
-
-/// Tries to convert the `ScalarInt` to `u16`. Fails if the size of the `ScalarInt`
-/// in not equal to 2 bytes and returns the `size` value of the `ScalarInt` in
-/// that case.
-#[inline]
-pub fn try_to_u16(self) -> Result<u16, Size> {
-self.try_to_uint(Size::from_bits(16)).map(|v| u16::try_from(v).unwrap())
-}
-
-/// Tries to convert the `ScalarInt` to `u32`. Fails if the `size` of the `ScalarInt`
-/// in not equal to 4 bytes and returns the `size` value of the `ScalarInt` in
-/// that case.
-#[inline]
-pub fn try_to_u32(self) -> Result<u32, Size> {
-self.try_to_uint(Size::from_bits(32)).map(|v| u32::try_from(v).unwrap())
-}
-
-/// Tries to convert the `ScalarInt` to `u64`. Fails if the `size` of the `ScalarInt`
-/// in not equal to 8 bytes and returns the `size` value of the `ScalarInt` in
-/// that case.
-#[inline]
-pub fn try_to_u64(self) -> Result<u64, Size> {
-self.try_to_uint(Size::from_bits(64)).map(|v| u64::try_from(v).unwrap())
-}
-
-/// Tries to convert the `ScalarInt` to `u128`. Fails if the `size` of the `ScalarInt`
-/// in not equal to 16 bytes and returns the `size` value of the `ScalarInt` in
-/// that case.
-#[inline]
-pub fn try_to_u128(self) -> Result<u128, Size> {
-self.try_to_uint(Size::from_bits(128))
-}
-
-#[inline]
-pub fn try_to_target_usize(&self, tcx: TyCtxt<'_>) -> Result<u64, Size> {
-self.try_to_uint(tcx.data_layout.pointer_size).map(|v| u64::try_from(v).unwrap())
-}
-
-// Tries to convert the `ScalarInt` to `bool`. Fails if the `size` of the `ScalarInt`
-// in not equal to 1 byte or if the value is not 0 or 1 and returns the `size`
-// value of the `ScalarInt` in that case.
-#[inline]
-pub fn try_to_bool(self) -> Result<bool, Size> {
-match self.try_to_u8()? {
+pub fn try_to_bool(self) -> Result<bool, ()> {
+match self.to_u8() {
 0 => Ok(false),
 1 => Ok(true),
-_ => Err(self.size()),
+_ => Err(()),
 }
 }

-/// Tries to convert the `ScalarInt` to a signed integer of the given size.
-/// Fails if the size of the `ScalarInt` is not equal to `size` and returns the
-/// `ScalarInt`s size in that case.
+/// Converts the `ScalarInt` to a signed integer of the given size.
+/// Panics if the size of the `ScalarInt` is not equal to `size`.
 #[inline]
-pub fn try_to_int(self, size: Size) -> Result<i128, Size> {
-let b = self.try_to_bits(size)?;
-Ok(size.sign_extend(b) as i128)
-}
-
-#[inline]
-pub fn assert_int(self, size: Size) -> i128 {
-let b = self.assert_bits(size);
+pub fn to_int(self, size: Size) -> i128 {
+let b = self.to_bits(size);
 size.sign_extend(b) as i128
 }

-/// Tries to convert the `ScalarInt` to i8.
-/// Fails if the size of the `ScalarInt` is not equal to 1 byte
-/// and returns the `ScalarInt`s size in that case.
-pub fn try_to_i8(self) -> Result<i8, Size> {
-self.try_to_int(Size::from_bits(8)).map(|v| i8::try_from(v).unwrap())
+/// Converts the `ScalarInt` to i8.
+/// Panics if the size of the `ScalarInt` is not equal to 1 byte.
+pub fn to_i8(self) -> i8 {
+self.to_int(Size::from_bits(8)).try_into().unwrap()
 }

-/// Tries to convert the `ScalarInt` to i16.
-/// Fails if the size of the `ScalarInt` is not equal to 2 bytes
-/// and returns the `ScalarInt`s size in that case.
-pub fn try_to_i16(self) -> Result<i16, Size> {
-self.try_to_int(Size::from_bits(16)).map(|v| i16::try_from(v).unwrap())
+/// Converts the `ScalarInt` to i16.
+/// Panics if the size of the `ScalarInt` is not equal to 2 bytes.
+pub fn to_i16(self) -> i16 {
+self.to_int(Size::from_bits(16)).try_into().unwrap()
 }

-/// Tries to convert the `ScalarInt` to i32.
-/// Fails if the size of the `ScalarInt` is not equal to 4 bytes
-/// and returns the `ScalarInt`s size in that case.
-pub fn try_to_i32(self) -> Result<i32, Size> {
-self.try_to_int(Size::from_bits(32)).map(|v| i32::try_from(v).unwrap())
+/// Converts the `ScalarInt` to i32.
+/// Panics if the size of the `ScalarInt` is not equal to 4 bytes.
+pub fn to_i32(self) -> i32 {
+self.to_int(Size::from_bits(32)).try_into().unwrap()
 }

-/// Tries to convert the `ScalarInt` to i64.
-/// Fails if the size of the `ScalarInt` is not equal to 8 bytes
-/// and returns the `ScalarInt`s size in that case.
-pub fn try_to_i64(self) -> Result<i64, Size> {
-self.try_to_int(Size::from_bits(64)).map(|v| i64::try_from(v).unwrap())
+/// Converts the `ScalarInt` to i64.
+/// Panics if the size of the `ScalarInt` is not equal to 8 bytes.
+pub fn to_i64(self) -> i64 {
+self.to_int(Size::from_bits(64)).try_into().unwrap()
 }

-/// Tries to convert the `ScalarInt` to i128.
-/// Fails if the size of the `ScalarInt` is not equal to 16 bytes
-/// and returns the `ScalarInt`s size in that case.
-pub fn try_to_i128(self) -> Result<i128, Size> {
-self.try_to_int(Size::from_bits(128))
+/// Converts the `ScalarInt` to i128.
+/// Panics if the size of the `ScalarInt` is not equal to 16 bytes.
+pub fn to_i128(self) -> i128 {
+self.to_int(Size::from_bits(128))
 }

 #[inline]
-pub fn try_to_target_isize(&self, tcx: TyCtxt<'_>) -> Result<i64, Size> {
-self.try_to_int(tcx.data_layout.pointer_size).map(|v| i64::try_from(v).unwrap())
+pub fn to_target_isize(&self, tcx: TyCtxt<'_>) -> i64 {
+self.to_int(tcx.data_layout.pointer_size).try_into().unwrap()
 }

 #[inline]
-pub fn try_to_float<F: Float>(self) -> Result<F, Size> {
+pub fn to_float<F: Float>(self) -> F {
 // Going through `to_uint` to check size and truncation.
-Ok(F::from_bits(self.try_to_bits(Size::from_bits(F::BITS))?))
+F::from_bits(self.to_bits(Size::from_bits(F::BITS)))
 }

 #[inline]
-pub fn try_to_f16(self) -> Result<Half, Size> {
-self.try_to_float()
+pub fn to_f16(self) -> Half {
+self.to_float()
 }

 #[inline]
-pub fn try_to_f32(self) -> Result<Single, Size> {
-self.try_to_float()
+pub fn to_f32(self) -> Single {
+self.to_float()
 }

 #[inline]
-pub fn try_to_f64(self) -> Result<Double, Size> {
-self.try_to_float()
+pub fn to_f64(self) -> Double {
+self.to_float()
 }

 #[inline]
-pub fn try_to_f128(self) -> Result<Quad, Size> {
-self.try_to_float()
+pub fn to_f128(self) -> Quad {
+self.to_float()
 }
 }

-macro_rules! from {
+macro_rules! from_x_for_scalar_int {
 ($($ty:ty),*) => {
 $(
 impl From<$ty> for ScalarInt {
@@ -432,30 +420,29 @@ macro_rules! from {
 }
 }

-macro_rules! try_from {
+macro_rules! from_scalar_int_for_x {
 ($($ty:ty),*) => {
 $(
-impl TryFrom<ScalarInt> for $ty {
-type Error = Size;
+impl From<ScalarInt> for $ty {
 #[inline]
-fn try_from(int: ScalarInt) -> Result<Self, Size> {
+fn from(int: ScalarInt) -> Self {
 // The `unwrap` cannot fail because to_bits (if it succeeds)
 // is guaranteed to return a value that fits into the size.
-int.try_to_bits(Size::from_bytes(std::mem::size_of::<$ty>()))
-.map(|u| u.try_into().unwrap())
+int.to_bits(Size::from_bytes(std::mem::size_of::<$ty>()))
+.try_into().unwrap()
 }
 }
 )*
 }
 }

-from!(u8, u16, u32, u64, u128, bool);
-try_from!(u8, u16, u32, u64, u128);
+from_x_for_scalar_int!(u8, u16, u32, u64, u128, bool);
+from_scalar_int_for_x!(u8, u16, u32, u64, u128);

 impl TryFrom<ScalarInt> for bool {
-type Error = Size;
+type Error = ();
 #[inline]
-fn try_from(int: ScalarInt) -> Result<Self, Size> {
+fn try_from(int: ScalarInt) -> Result<Self, ()> {
 int.try_to_bool()
 }
 }
@@ -463,7 +450,7 @@ impl TryFrom<ScalarInt> for bool {
 impl From<char> for ScalarInt {
 #[inline]
 fn from(c: char) -> Self {
-Self { data: c as u128, size: NonZero::new(std::mem::size_of::<char>() as u8).unwrap() }
+(c as u32).into()
 }
 }

@@ -476,10 +463,7 @@ impl TryFrom<ScalarInt> for char {

 #[inline]
 fn try_from(int: ScalarInt) -> Result<Self, Self::Error> {
-let Ok(bits) = int.try_to_bits(Size::from_bytes(std::mem::size_of::<char>())) else {
-return Err(CharTryFromScalarInt);
-};
-match char::from_u32(bits.try_into().unwrap()) {
+match char::from_u32(int.to_u32()) {
 Some(c) => Ok(c),
 None => Err(CharTryFromScalarInt),
 }
@@ -494,11 +478,10 @@ impl From<Half> for ScalarInt {
 }
 }

-impl TryFrom<ScalarInt> for Half {
-type Error = Size;
+impl From<ScalarInt> for Half {
 #[inline]
-fn try_from(int: ScalarInt) -> Result<Self, Size> {
-int.try_to_bits(Size::from_bytes(2)).map(Self::from_bits)
+fn from(int: ScalarInt) -> Self {
+Self::from_bits(int.to_bits(Size::from_bytes(2)))
 }
 }

@@ -510,11 +493,10 @@ impl From<Single> for ScalarInt {
 }
 }

-impl TryFrom<ScalarInt> for Single {
-type Error = Size;
+impl From<ScalarInt> for Single {
 #[inline]
-fn try_from(int: ScalarInt) -> Result<Self, Size> {
-int.try_to_bits(Size::from_bytes(4)).map(Self::from_bits)
+fn from(int: ScalarInt) -> Self {
+Self::from_bits(int.to_bits(Size::from_bytes(4)))
 }
 }

@@ -526,11 +508,10 @@ impl From<Double> for ScalarInt {
 }
 }

-impl TryFrom<ScalarInt> for Double {
-type Error = Size;
+impl From<ScalarInt> for Double {
 #[inline]
-fn try_from(int: ScalarInt) -> Result<Self, Size> {
-int.try_to_bits(Size::from_bytes(8)).map(Self::from_bits)
+fn from(int: ScalarInt) -> Self {
+Self::from_bits(int.to_bits(Size::from_bytes(8)))
 }
 }

@@ -542,11 +523,10 @@ impl From<Quad> for ScalarInt {
 }
 }

-impl TryFrom<ScalarInt> for Quad {
-type Error = Size;
+impl From<ScalarInt> for Quad {
 #[inline]
-fn try_from(int: ScalarInt) -> Result<Self, Size> {
-int.try_to_bits(Size::from_bytes(16)).map(Self::from_bits)
+fn from(int: ScalarInt) -> Self {
+Self::from_bits(int.to_bits(Size::from_bytes(16)))
 }
 }

@@ -79,7 +79,7 @@ impl<'tcx> ValTree<'tcx> {
 }

 pub fn try_to_target_usize(self, tcx: TyCtxt<'tcx>) -> Option<u64> {
-self.try_to_scalar_int().and_then(|s| s.try_to_target_usize(tcx).ok())
+self.try_to_scalar_int().map(|s| s.to_target_usize(tcx))
 }

 /// Get the values inside the ValTree as a slice of bytes. This only works for
@@ -100,8 +100,9 @@ impl<'tcx> ValTree<'tcx> {
 _ => return None,
 }

-Some(tcx.arena.alloc_from_iter(
-self.unwrap_branch().into_iter().map(|v| v.unwrap_leaf().try_to_u8().unwrap()),
-))
+Some(
+tcx.arena
+.alloc_from_iter(self.unwrap_branch().into_iter().map(|v| v.unwrap_leaf().to_u8())),
+)
 }
 }
@@ -1652,7 +1652,7 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write {
 if let ty::ConstKind::Value(_, ty::ValTree::Leaf(int)) = len.kind() {
 match self.tcx().try_get_global_alloc(prov.alloc_id()) {
 Some(GlobalAlloc::Memory(alloc)) => {
-let len = int.assert_bits(self.tcx().data_layout.pointer_size);
+let len = int.to_bits(self.tcx().data_layout.pointer_size);
 let range =
 AllocRange { start: offset, size: Size::from_bytes(len) };
 if let Ok(byte_str) =
@@ -1730,7 +1730,7 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write {
 }
 // Pointer types
 ty::Ref(..) | ty::RawPtr(_, _) | ty::FnPtr(_) => {
-let data = int.assert_bits(self.tcx().data_layout.pointer_size);
+let data = int.to_bits(self.tcx().data_layout.pointer_size);
 self.typed_value(
 |this| {
 write!(this, "0x{data:x}")?;
@ -15,11 +15,10 @@ use rustc_index::{Idx, IndexSlice, IndexVec};
|
|||||||
use rustc_infer::infer::{InferCtxt, TyCtxtInferExt};
|
use rustc_infer::infer::{InferCtxt, TyCtxtInferExt};
|
||||||
use rustc_middle::hir::place::PlaceBase as HirPlaceBase;
|
use rustc_middle::hir::place::PlaceBase as HirPlaceBase;
|
||||||
use rustc_middle::middle::region;
|
use rustc_middle::middle::region;
|
||||||
use rustc_middle::mir::interpret::Scalar;
|
|
||||||
use rustc_middle::mir::*;
|
use rustc_middle::mir::*;
|
||||||
use rustc_middle::query::TyCtxtAt;
|
use rustc_middle::query::TyCtxtAt;
|
||||||
use rustc_middle::thir::{self, ExprId, LintLevel, LocalVarId, Param, ParamId, PatKind, Thir};
|
use rustc_middle::thir::{self, ExprId, LintLevel, LocalVarId, Param, ParamId, PatKind, Thir};
|
||||||
use rustc_middle::ty::{self, Ty, TyCtxt, TypeVisitableExt};
|
use rustc_middle::ty::{self, ScalarInt, Ty, TyCtxt, TypeVisitableExt};
|
||||||
use rustc_middle::{bug, span_bug};
|
use rustc_middle::{bug, span_bug};
|
||||||
use rustc_span::symbol::sym;
|
use rustc_span::symbol::sym;
|
||||||
use rustc_span::Span;
|
use rustc_span::Span;
|
||||||
@ -1014,14 +1013,14 @@ fn parse_float_into_constval<'tcx>(
|
|||||||
float_ty: ty::FloatTy,
|
float_ty: ty::FloatTy,
|
||||||
neg: bool,
|
neg: bool,
|
||||||
) -> Option<ConstValue<'tcx>> {
|
) -> Option<ConstValue<'tcx>> {
|
||||||
parse_float_into_scalar(num, float_ty, neg).map(ConstValue::Scalar)
|
parse_float_into_scalar(num, float_ty, neg).map(|s| ConstValue::Scalar(s.into()))
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) fn parse_float_into_scalar(
|
pub(crate) fn parse_float_into_scalar(
|
||||||
num: Symbol,
|
num: Symbol,
|
||||||
float_ty: ty::FloatTy,
|
float_ty: ty::FloatTy,
|
||||||
neg: bool,
|
neg: bool,
|
||||||
) -> Option<Scalar> {
|
) -> Option<ScalarInt> {
|
||||||
let num = num.as_str();
|
let num = num.as_str();
|
||||||
match float_ty {
|
match float_ty {
|
||||||
// FIXME(f16_f128): When available, compare to the library parser as with `f32` and `f64`
|
// FIXME(f16_f128): When available, compare to the library parser as with `f32` and `f64`
|
||||||
@ -1030,7 +1029,7 @@ pub(crate) fn parse_float_into_scalar(
|
|||||||
if neg {
|
if neg {
|
||||||
f = -f;
|
f = -f;
|
||||||
}
|
}
|
||||||
Some(Scalar::from_f16(f))
|
Some(ScalarInt::from(f))
|
||||||
}
|
}
|
||||||
ty::FloatTy::F32 => {
|
ty::FloatTy::F32 => {
|
||||||
let Ok(rust_f) = num.parse::<f32>() else { return None };
|
let Ok(rust_f) = num.parse::<f32>() else { return None };
|
||||||
@ -1053,7 +1052,7 @@ pub(crate) fn parse_float_into_scalar(
|
|||||||
f = -f;
|
f = -f;
|
||||||
}
|
}
|
||||||
|
|
||||||
Some(Scalar::from_f32(f))
|
Some(ScalarInt::from(f))
|
||||||
}
|
}
|
||||||
ty::FloatTy::F64 => {
|
ty::FloatTy::F64 => {
|
||||||
let Ok(rust_f) = num.parse::<f64>() else { return None };
|
let Ok(rust_f) = num.parse::<f64>() else { return None };
|
||||||
@ -1076,7 +1075,7 @@ pub(crate) fn parse_float_into_scalar(
|
|||||||
f = -f;
|
f = -f;
|
||||||
}
|
}
|
||||||
|
|
||||||
Some(Scalar::from_f64(f))
|
Some(ScalarInt::from(f))
|
||||||
}
|
}
|
||||||
// FIXME(f16_f128): When available, compare to the library parser as with `f32` and `f64`
|
// FIXME(f16_f128): When available, compare to the library parser as with `f32` and `f64`
|
||||||
ty::FloatTy::F128 => {
|
ty::FloatTy::F128 => {
|
||||||
@@ -1084,7 +1083,7 @@ pub(crate) fn parse_float_into_scalar(
             if neg {
                 f = -f;
             }
-            Some(Scalar::from_f128(f))
+            Some(ScalarInt::from(f))
         }
     }
 }
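For readers skimming the float arms above: each arm parses the literal, applies the negation flag, and keeps the IEEE-754 bit pattern of the result; switching the return type from Scalar to ScalarInt only records that the value is plain bits of a known width, with no pointer provenance. A small self-contained sketch of what the `f32` arm computes (a hypothetical helper, not rustc's parse_float_into_scalar, which per the FIXME comments also cross-checks against the library parser for f32 and f64):

// Hypothetical helper mirroring the shape of the `f32` arm: parse, negate if
// requested, and keep the raw IEEE-754 bits together with their width.
fn parse_f32_bits(num: &str, neg: bool) -> Option<(u32, usize)> {
    let mut f: f32 = num.parse().ok()?;
    if neg {
        f = -f;
    }
    // (bits, size in bytes) is essentially what a ScalarInt-like value stores.
    Some((f.to_bits(), std::mem::size_of::<f32>()))
}

fn main() {
    assert_eq!(parse_f32_bits("1.5", true), Some(((-1.5f32).to_bits(), 4)));
    assert_eq!(parse_f32_bits("not a float", false), None);
}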
@@ -58,11 +58,9 @@ pub(crate) fn lit_to_const<'tcx>(
         }
         (ast::LitKind::Bool(b), ty::Bool) => ty::ValTree::from_scalar_int((*b).into()),
         (ast::LitKind::Float(n, _), ty::Float(fty)) => {
-            let bits = parse_float_into_scalar(*n, *fty, neg)
-                .ok_or_else(|| {
-                    tcx.dcx().bug(format!("couldn't parse float literal: {:?}", lit_input.lit))
-                })?
-                .assert_int();
+            let bits = parse_float_into_scalar(*n, *fty, neg).ok_or_else(|| {
+                tcx.dcx().bug(format!("couldn't parse float literal: {:?}", lit_input.lit))
+            })?;
             ty::ValTree::from_scalar_int(bits)
         }
         (ast::LitKind::Char(c), ty::Char) => ty::ValTree::from_scalar_int((*c).into()),
@@ -282,8 +282,7 @@ impl<'tcx> ConstToPat<'tcx> {
             }
             ty::Adt(adt_def, args) if adt_def.is_enum() => {
                 let (&variant_index, fields) = cv.unwrap_branch().split_first().unwrap();
-                let variant_index =
-                    VariantIdx::from_u32(variant_index.unwrap_leaf().try_to_u32().ok().unwrap());
+                let variant_index = VariantIdx::from_u32(variant_index.unwrap_leaf().to_u32());
                 PatKind::Variant {
                     adt_def: *adt_def,
                     args,
@@ -371,8 +370,8 @@ impl<'tcx> ConstToPat<'tcx> {
                 let v = cv.unwrap_leaf();
                 let is_nan = match flt {
                     ty::FloatTy::F16 => unimplemented!("f16_f128"),
-                    ty::FloatTy::F32 => v.try_to_f32().unwrap().is_nan(),
-                    ty::FloatTy::F64 => v.try_to_f64().unwrap().is_nan(),
+                    ty::FloatTy::F32 => v.to_f32().is_nan(),
+                    ty::FloatTy::F64 => v.to_f64().is_nan(),
                     ty::FloatTy::F128 => unimplemented!("f16_f128"),
                 };
                 if is_nan {
@@ -326,7 +326,7 @@ impl<'tcx> ValueAnalysis<'tcx> for ConstAnalysis<'_, 'tcx> {
             // This allows the set of visited edges to grow monotonically with the lattice.
             FlatSet::Bottom => TerminatorEdges::None,
             FlatSet::Elem(scalar) => {
-                let choice = scalar.assert_bits(scalar.size());
+                let choice = scalar.assert_scalar_int().to_bits_unchecked();
                 TerminatorEdges::Single(targets.target_for_value(choice))
             }
             FlatSet::Top => TerminatorEdges::SwitchInt { discr, targets },
@@ -609,7 +609,7 @@ fn propagatable_scalar(
     map: &Map,
 ) -> Option<Scalar> {
     if let FlatSet::Elem(value) = state.get_idx(place, map)
-        && value.try_to_int().is_ok()
+        && value.try_to_scalar_int().is_ok()
     {
         // Do not attempt to propagate pointers, as we may fail to preserve their identity.
         Some(value)
@@ -670,7 +670,7 @@ fn try_write_constant<'tcx>(
             let FlatSet::Elem(Scalar::Int(discr)) = state.get_idx(discr, map) else {
                 throw_machine_stop_str!("discriminant with provenance")
             };
-            let discr_bits = discr.assert_bits(discr.size());
+            let discr_bits = discr.to_bits(discr.size());
             let Some((variant, _)) = def.discriminants(*ecx.tcx).find(|(_, var)| discr_bits == var.val) else {
                 throw_machine_stop_str!("illegal discriminant for enum")
             };
@@ -488,7 +488,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
                 let slice = self.evaluated[slice].as_ref()?;
                 let usize_layout = self.ecx.layout_of(self.tcx.types.usize).unwrap();
                 let len = slice.len(&self.ecx).ok()?;
-                let imm = ImmTy::try_from_uint(len, usize_layout)?;
+                let imm = ImmTy::from_uint(len, usize_layout);
                 imm.into()
             }
             NullaryOp(null_op, ty) => {
@@ -509,7 +509,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
                     NullOp::UbChecks => return None,
                 };
                 let usize_layout = self.ecx.layout_of(self.tcx.types.usize).unwrap();
-                let imm = ImmTy::try_from_uint(val, usize_layout)?;
+                let imm = ImmTy::from_uint(val, usize_layout);
                 imm.into()
             }
             UnaryOp(un_op, operand) => {
@@ -1255,7 +1255,7 @@ fn op_to_prop_const<'tcx>(
     // If this constant has scalar ABI, return it as a `ConstValue::Scalar`.
     if let Abi::Scalar(abi::Scalar::Initialized { .. }) = op.layout.abi
         && let Ok(scalar) = ecx.read_scalar(op)
-        && scalar.try_to_int().is_ok()
+        && scalar.try_to_scalar_int().is_ok()
     {
         return Some(ConstValue::Scalar(scalar));
     }
@@ -356,15 +356,12 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
             debug!("check_binary_op: reporting assert for {:?}", location);
             let panic = AssertKind::Overflow(
                 op,
-                match l {
-                    Some(l) => l.to_const_int(),
-                    // Invent a dummy value, the diagnostic ignores it anyway
-                    None => ConstInt::new(
-                        ScalarInt::try_from_uint(1_u8, left_size).unwrap(),
-                        left_ty.is_signed(),
-                        left_ty.is_ptr_sized_integral(),
-                    ),
-                },
+                // Invent a dummy value, the diagnostic ignores it anyway
+                ConstInt::new(
+                    ScalarInt::try_from_uint(1_u8, left_size).unwrap(),
+                    left_ty.is_signed(),
+                    left_ty.is_ptr_sized_integral(),
+                ),
                 r.to_const_int(),
             );
             self.report_assert_as_lint(location, AssertLintKind::ArithmeticOverflow, panic);
@@ -787,8 +784,7 @@ impl<'tcx> Visitor<'tcx> for ConstPropagator<'_, 'tcx> {
             TerminatorKind::SwitchInt { ref discr, ref targets } => {
                 if let Some(ref value) = self.eval_operand(discr)
                     && let Some(value_const) = self.use_ecx(|this| this.ecx.read_scalar(value))
-                    && let Ok(constant) = value_const.try_to_int()
-                    && let Ok(constant) = constant.try_to_bits(constant.size())
+                    && let Ok(constant) = value_const.to_bits(value_const.size())
                 {
                     // We managed to evaluate the discriminant, so we know we only need to visit
                     // one target.
@@ -372,7 +372,7 @@ impl<'tcx> SimplifyMatch<'tcx> for SimplifyToExp {
 }
 
 fn int_equal(l: ScalarInt, r: impl Into<u128>, size: Size) -> bool {
-    l.assert_int(l.size()) == ScalarInt::try_from_uint(r, size).unwrap().assert_int(size)
+    l.to_bits_unchecked() == ScalarInt::try_from_uint(r, size).unwrap().to_bits_unchecked()
 }
 
 // We first compare the two branches, and then the other branches need to fulfill the same conditions.
@@ -500,14 +500,14 @@ impl<'tcx> Validator<'_, 'tcx> {
                     }
                     _ => None,
                 };
-                match rhs_val.map(|x| x.assert_uint(sz)) {
+                match rhs_val.map(|x| x.to_uint(sz)) {
                     // for the zero test, int vs uint does not matter
                     Some(x) if x != 0 => {} // okay
                     _ => return Err(Unpromotable), // value not known or 0 -- not okay
                 }
                 // Furthermore, for signed divison, we also have to exclude `int::MIN / -1`.
                 if lhs_ty.is_signed() {
-                    match rhs_val.map(|x| x.assert_int(sz)) {
+                    match rhs_val.map(|x| x.to_int(sz)) {
                         Some(-1) | None => {
                             // The RHS is -1 or unknown, so we have to be careful.
                             // But is the LHS int::MIN?
@@ -518,7 +518,7 @@ impl<'tcx> Validator<'_, 'tcx> {
                                 _ => None,
                             };
                             let lhs_min = sz.signed_int_min();
-                            match lhs_val.map(|x| x.assert_int(sz)) {
+                            match lhs_val.map(|x| x.to_int(sz)) {
                                 Some(x) if x != lhs_min => {} // okay
                                 _ => return Err(Unpromotable), // value not known or int::MIN -- not okay
                             }
@@ -49,7 +49,7 @@ impl<'tcx> MirPass<'tcx> for SimplifyComparisonIntegral {
                     let layout = tcx
                         .layout_of(param_env.and(opt.branch_value_ty))
                         .expect("if we have an evaluated constant we must know the layout");
-                    int.assert_bits(layout.size)
+                    int.to_bits(layout.size)
                 }
                 Scalar::Ptr(..) => continue,
             };
@@ -5,12 +5,12 @@ use rustc_index::bit_set::BitSet;
 use rustc_index::IndexVec;
 use rustc_infer::traits::Reveal;
 use rustc_middle::mir::coverage::CoverageKind;
-use rustc_middle::mir::interpret::Scalar;
 use rustc_middle::mir::visit::{NonUseContext, PlaceContext, Visitor};
 use rustc_middle::mir::*;
 use rustc_middle::ty::adjustment::PointerCoercion;
 use rustc_middle::ty::{
-    self, CoroutineArgsExt, InstanceDef, ParamEnv, Ty, TyCtxt, TypeVisitableExt, Variance,
+    self, CoroutineArgsExt, InstanceDef, ParamEnv, ScalarInt, Ty, TyCtxt, TypeVisitableExt,
+    Variance,
 };
 use rustc_middle::{bug, span_bug};
 use rustc_target::abi::{Size, FIRST_VARIANT};
@@ -1478,7 +1478,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
                 });
 
                 for (value, _) in targets.iter() {
-                    if Scalar::<()>::try_from_uint(value, size).is_none() {
+                    if ScalarInt::try_from_uint(value, size).is_none() {
                         self.fail(
                             location,
                             format!("the value {value:#x} is not a proper {switch_ty:?}"),
|
@ -6,11 +6,12 @@ use rustc_hir::def_id::DefId;
|
|||||||
use rustc_hir::HirId;
|
use rustc_hir::HirId;
|
||||||
use rustc_index::{Idx, IndexVec};
|
use rustc_index::{Idx, IndexVec};
|
||||||
use rustc_middle::middle::stability::EvalResult;
|
use rustc_middle::middle::stability::EvalResult;
|
||||||
use rustc_middle::mir::interpret::Scalar;
|
|
||||||
use rustc_middle::mir::{self, Const};
|
use rustc_middle::mir::{self, Const};
|
||||||
use rustc_middle::thir::{self, FieldPat, Pat, PatKind, PatRange, PatRangeBoundary};
|
use rustc_middle::thir::{self, FieldPat, Pat, PatKind, PatRange, PatRangeBoundary};
|
||||||
use rustc_middle::ty::layout::IntegerExt;
|
use rustc_middle::ty::layout::IntegerExt;
|
||||||
use rustc_middle::ty::{self, FieldDef, OpaqueTypeKey, Ty, TyCtxt, TypeVisitableExt, VariantDef};
|
use rustc_middle::ty::{
|
||||||
|
self, FieldDef, OpaqueTypeKey, ScalarInt, Ty, TyCtxt, TypeVisitableExt, VariantDef,
|
||||||
|
};
|
||||||
use rustc_middle::{bug, span_bug};
|
use rustc_middle::{bug, span_bug};
|
||||||
use rustc_session::lint;
|
use rustc_session::lint;
|
||||||
use rustc_span::{ErrorGuaranteed, Span, DUMMY_SP};
|
use rustc_span::{ErrorGuaranteed, Span, DUMMY_SP};
|
||||||
@@ -701,9 +702,9 @@ impl<'p, 'tcx: 'p> RustcPatCtxt<'p, 'tcx> {
                     ty::Int(_) => miint.as_finite_int(size.bits()).unwrap(),
                     _ => miint.as_finite_uint().unwrap(),
                 };
-                match Scalar::try_from_uint(bits, size) {
+                match ScalarInt::try_from_uint(bits, size) {
                     Some(scalar) => {
-                        let value = mir::Const::from_scalar(tcx, scalar, ty.inner());
+                        let value = mir::Const::from_scalar(tcx, scalar.into(), ty.inner());
                         PatRangeBoundary::Finite(value)
                     }
                     // The value doesn't fit. Since `x >= 0` and 0 always encodes the minimum value
@@ -420,7 +420,7 @@ pub(crate) mod rustc {
         fn from_tag(tag: ScalarInt, tcx: TyCtxt<'tcx>) -> Self {
             use rustc_target::abi::Endian;
             let size = tag.size();
-            let bits = tag.assert_bits(size);
+            let bits = tag.to_bits(size);
             let bytes: [u8; 16];
             let bytes = match tcx.data_layout.endian {
                 Endian::Little => {
@@ -47,7 +47,7 @@ fn destructure_const<'tcx>(
         ty::Adt(def, args) => {
             let (variant_idx, branches) = if def.is_enum() {
                 let (head, rest) = branches.split_first().unwrap();
-                (VariantIdx::from_u32(head.unwrap_leaf().try_to_u32().unwrap()), rest)
+                (VariantIdx::from_u32(head.unwrap_leaf().to_u32()), rest)
             } else {
                 (FIRST_VARIANT, branches)
             };
@@ -431,8 +431,7 @@ fn print_const_with_custom_print_scalar<'tcx>(
         (mir::Const::Val(mir::ConstValue::Scalar(int), _), ty::Int(i)) => {
             let ty = ct.ty();
             let size = tcx.layout_of(ty::ParamEnv::empty().and(ty)).unwrap().size;
-            let data = int.assert_bits(size);
-            let sign_extended_data = size.sign_extend(data) as i128;
+            let sign_extended_data = int.assert_scalar_int().to_int(size);
             let mut output = if with_underscores {
                 format_integer_with_underscore_sep(&sign_extended_data.to_string())
             } else {
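The rustdoc hunk just above folds an explicit sign-extension step into `to_int(size)`. The underlying arithmetic is worth spelling out: the raw bits of an N-bit signed integer are zero-padded up to 128 bits, so recovering the signed value means shifting the sign bit up to bit 127 and then shifting back arithmetically. A standalone sketch of that step (my own helper for illustration, not the compiler's `Size::sign_extend`):

// Sign-extend the low `size_bits` bits of `raw` into an i128.
// Assumes 0 < size_bits <= 128.
fn sign_extend(raw: u128, size_bits: u32) -> i128 {
    let shift = 128 - size_bits;
    // Move the value's sign bit into bit 127, then shift back arithmetically
    // so that bit is replicated through all the upper bits.
    ((raw << shift) as i128) >> shift
}

fn main() {
    // 0xFF read as an 8-bit signed value is -1; as a 16-bit value it is 255.
    assert_eq!(sign_extend(0xFF, 8), -1);
    assert_eq!(sign_extend(0xFF, 16), 255);
    // The i8 minimum round-trips through its raw bit pattern.
    assert_eq!(sign_extend(i8::MIN as u8 as u128, 8), i8::MIN as i128);
}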
@@ -55,7 +55,7 @@ impl<'tcx> LateLintPass<'tcx> for LargeConstArrays {
             && let ty = cx.tcx.type_of(item.owner_id).instantiate_identity()
             && let ty::Array(element_type, cst) = ty.kind()
             && let ConstKind::Value(_, ty::ValTree::Leaf(element_count)) = cst.kind()
-            && let Ok(element_count) = element_count.try_to_target_usize(cx.tcx)
+            && let element_count = element_count.to_target_usize(cx.tcx)
             && let Ok(element_size) = cx.layout_of(*element_type).map(|l| l.size.bytes())
             && self.maximum_allowed_size < u128::from(element_count) * u128::from(element_size)
         {
@@ -65,7 +65,7 @@ impl<'tcx> LateLintPass<'tcx> for LargeStackArrays {
             && !self.is_from_vec_macro(cx, expr.span)
             && let ty::Array(element_type, cst) = cx.typeck_results().expr_ty(expr).kind()
             && let ConstKind::Value(_, ty::ValTree::Leaf(element_count)) = cst.kind()
-            && let Ok(element_count) = element_count.try_to_target_usize(cx.tcx)
+            && let element_count = element_count.to_target_usize(cx.tcx)
             && let Ok(element_size) = cx.layout_of(*element_type).map(|l| l.size.bytes())
             && !cx.tcx.hir().parent_iter(expr.hir_id).any(|(_, node)| {
                 matches!(
@@ -199,7 +199,7 @@ impl<'tcx> NonCopyConst<'tcx> {
                 .any(|field| Self::is_value_unfrozen_raw_inner(cx, *field, ty)),
             ty::Adt(def, args) if def.is_enum() => {
                 let (&variant_index, fields) = val.unwrap_branch().split_first().unwrap();
-                let variant_index = VariantIdx::from_u32(variant_index.unwrap_leaf().try_to_u32().ok().unwrap());
+                let variant_index = VariantIdx::from_u32(variant_index.unwrap_leaf().to_u32());
                 fields
                     .iter()
                     .copied()
@@ -56,8 +56,7 @@ impl LateLintPass<'_> for ZeroRepeatSideEffects {
         } else if let ExprKind::Repeat(inner_expr, _) = expr.kind
             && let ty::Array(_, cst) = cx.typeck_results().expr_ty(expr).kind()
             && let ConstKind::Value(_, ty::ValTree::Leaf(element_count)) = cst.kind()
-            && let Ok(element_count) = element_count.try_to_target_usize(cx.tcx)
-            && element_count == 0
+            && element_count.to_target_usize(cx.tcx) == 0
         {
             inner_check(cx, expr, inner_expr, false);
         }
@@ -810,14 +810,14 @@ pub fn mir_to_const<'tcx>(lcx: &LateContext<'tcx>, result: mir::Const<'tcx>) ->
         (ConstValue::Scalar(Scalar::Int(int)), _) => match result.ty().kind() {
             ty::Adt(adt_def, _) if adt_def.is_struct() => Some(Constant::Adt(result)),
             ty::Bool => Some(Constant::Bool(int == ScalarInt::TRUE)),
-            ty::Uint(_) | ty::Int(_) => Some(Constant::Int(int.assert_bits(int.size()))),
+            ty::Uint(_) | ty::Int(_) => Some(Constant::Int(int.to_bits(int.size()))),
             ty::Float(FloatTy::F32) => Some(Constant::F32(f32::from_bits(
                 int.try_into().expect("invalid f32 bit representation"),
             ))),
             ty::Float(FloatTy::F64) => Some(Constant::F64(f64::from_bits(
                 int.try_into().expect("invalid f64 bit representation"),
             ))),
-            ty::RawPtr(_, _) => Some(Constant::RawPtr(int.assert_bits(int.size()))),
+            ty::RawPtr(_, _) => Some(Constant::RawPtr(int.to_bits(int.size()))),
             _ => None,
         },
         (_, ty::Ref(_, inner_ty, _)) if matches!(inner_ty.kind(), ty::Str) => {
@@ -23,7 +23,7 @@ use rustc_middle::ty::{
 };
 use rustc_span::symbol::Ident;
 use rustc_span::{sym, Span, Symbol, DUMMY_SP};
-use rustc_target::abi::{Size, VariantIdx};
+use rustc_target::abi::VariantIdx;
 use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt as _;
 use rustc_trait_selection::traits::query::normalize::QueryNormalizeExt;
 use rustc_trait_selection::traits::{Obligation, ObligationCause};
@@ -865,22 +865,8 @@ impl core::ops::Add<u32> for EnumValue {
 pub fn read_explicit_enum_value(tcx: TyCtxt<'_>, id: DefId) -> Option<EnumValue> {
     if let Ok(ConstValue::Scalar(Scalar::Int(value))) = tcx.const_eval_poly(id) {
         match tcx.type_of(id).instantiate_identity().kind() {
-            ty::Int(_) => Some(EnumValue::Signed(match value.size().bytes() {
-                1 => i128::from(value.assert_bits(Size::from_bytes(1)) as u8 as i8),
-                2 => i128::from(value.assert_bits(Size::from_bytes(2)) as u16 as i16),
-                4 => i128::from(value.assert_bits(Size::from_bytes(4)) as u32 as i32),
-                8 => i128::from(value.assert_bits(Size::from_bytes(8)) as u64 as i64),
-                16 => value.assert_bits(Size::from_bytes(16)) as i128,
-                _ => return None,
-            })),
-            ty::Uint(_) => Some(EnumValue::Unsigned(match value.size().bytes() {
-                1 => value.assert_bits(Size::from_bytes(1)),
-                2 => value.assert_bits(Size::from_bytes(2)),
-                4 => value.assert_bits(Size::from_bytes(4)),
-                8 => value.assert_bits(Size::from_bytes(8)),
-                16 => value.assert_bits(Size::from_bytes(16)),
-                _ => return None,
-            })),
+            ty::Int(_) => Some(EnumValue::Signed(value.to_int(value.size()))),
+            ty::Uint(_) => Some(EnumValue::Unsigned(value.to_uint(value.size()))),
             _ => None,
         }
     } else {
@@ -645,8 +645,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
         for i in 0..dest_len {
             let src_index: u64 = index[usize::try_from(i).unwrap()]
                 .unwrap_leaf()
-                .try_to_u32()
-                .unwrap()
+                .to_u32()
                 .into();
             let dest = this.project_index(&dest, i)?;
 
@@ -51,9 +51,9 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
 
         let fds = &mut this.machine.fds;
         let sv0 = fds.insert_fd(FileDescriptor::new(SocketPair));
-        let sv0 = Scalar::try_from_int(sv0, sv.layout.size).unwrap();
+        let sv0 = Scalar::from_int(sv0, sv.layout.size);
         let sv1 = fds.insert_fd(FileDescriptor::new(SocketPair));
-        let sv1 = Scalar::try_from_int(sv1, sv.layout.size).unwrap();
+        let sv1 = Scalar::from_int(sv1, sv.layout.size);
 
         this.write_scalar(sv0, &sv)?;
         this.write_scalar(sv1, &sv.offset(sv.layout.size, sv.layout, this)?)?;