diff --git a/compiler/rustc_codegen_cranelift/src/constant.rs b/compiler/rustc_codegen_cranelift/src/constant.rs index ba98f2e772c..a53598018f4 100644 --- a/compiler/rustc_codegen_cranelift/src/constant.rs +++ b/compiler/rustc_codegen_cranelift/src/constant.rs @@ -110,7 +110,7 @@ pub(crate) fn codegen_const_value<'tcx>( if fx.clif_type(layout.ty).is_some() { return CValue::const_val(fx, layout, int); } else { - let raw_val = int.size().truncate(int.assert_bits(int.size())); + let raw_val = int.size().truncate(int.to_bits(int.size())); let val = match int.size().bytes() { 1 => fx.bcx.ins().iconst(types::I8, raw_val as i64), 2 => fx.bcx.ins().iconst(types::I16, raw_val as i64), @@ -501,12 +501,12 @@ pub(crate) fn mir_operand_get_const_val<'tcx>( Ordering::Equal => scalar_int, Ordering::Less => match ty.kind() { ty::Uint(_) => ScalarInt::try_from_uint( - scalar_int.assert_uint(scalar_int.size()), + scalar_int.to_uint(scalar_int.size()), fx.layout_of(*ty).size, ) .unwrap(), ty::Int(_) => ScalarInt::try_from_int( - scalar_int.assert_int(scalar_int.size()), + scalar_int.to_int(scalar_int.size()), fx.layout_of(*ty).size, ) .unwrap(), diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs index 27b55ecc72e..d454f3c1de7 100644 --- a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs +++ b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs @@ -902,7 +902,7 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>( .span_fatal(span, "Index argument for `_mm_cmpestri` is not a constant"); }; - let imm8 = imm8.try_to_u8().unwrap_or_else(|_| panic!("kind not scalar: {:?}", imm8)); + let imm8 = imm8.to_u8(); codegen_inline_asm_inner( fx, @@ -955,7 +955,7 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>( .span_fatal(span, "Index argument for `_mm_cmpestrm` is not a constant"); }; - let imm8 = imm8.try_to_u8().unwrap_or_else(|_| panic!("kind not scalar: {:?}", imm8)); + let imm8 = imm8.to_u8(); codegen_inline_asm_inner( fx, @@ -1003,7 +1003,7 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>( ); }; - let imm8 = imm8.try_to_u8().unwrap_or_else(|_| panic!("kind not scalar: {:?}", imm8)); + let imm8 = imm8.to_u8(); codegen_inline_asm_inner( fx, @@ -1040,7 +1040,7 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>( ); }; - let imm8 = imm8.try_to_u8().unwrap_or_else(|_| panic!("kind not scalar: {:?}", imm8)); + let imm8 = imm8.to_u8(); codegen_inline_asm_inner( fx, @@ -1195,7 +1195,7 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>( .span_fatal(span, "Func argument for `_mm_sha1rnds4_epu32` is not a constant"); }; - let func = func.try_to_u8().unwrap_or_else(|_| panic!("kind not scalar: {:?}", func)); + let func = func.to_u8(); codegen_inline_asm_inner( fx, diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs index 65eeaf156d8..ca910dccb0d 100644 --- a/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs +++ b/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs @@ -147,8 +147,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>( let total_len = lane_count * 2; - let indexes = - idx.iter().map(|idx| idx.unwrap_leaf().try_to_u32().unwrap()).collect::>(); + let indexes = idx.iter().map(|idx| idx.unwrap_leaf().to_u32()).collect::>(); for &idx in &indexes { assert!(u64::from(idx) < total_len, "idx {} out of range 0..{}", idx, total_len); @@ -282,9 +281,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>( 
fx.tcx.dcx().span_fatal(span, "Index argument for `simd_insert` is not a constant"); }; - let idx: u32 = idx_const - .try_to_u32() - .unwrap_or_else(|_| panic!("kind not scalar: {:?}", idx_const)); + let idx: u32 = idx_const.to_u32(); let (lane_count, _lane_ty) = base.layout().ty.simd_size_and_type(fx.tcx); if u64::from(idx) >= lane_count { fx.tcx.dcx().span_fatal( @@ -330,9 +327,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>( return; }; - let idx = idx_const - .try_to_u32() - .unwrap_or_else(|_| panic!("kind not scalar: {:?}", idx_const)); + let idx = idx_const.to_u32(); let (lane_count, _lane_ty) = v.layout().ty.simd_size_and_type(fx.tcx); if u64::from(idx) >= lane_count { fx.tcx.dcx().span_fatal( diff --git a/compiler/rustc_codegen_cranelift/src/value_and_place.rs b/compiler/rustc_codegen_cranelift/src/value_and_place.rs index 512a96450a4..1aa28daeafc 100644 --- a/compiler/rustc_codegen_cranelift/src/value_and_place.rs +++ b/compiler/rustc_codegen_cranelift/src/value_and_place.rs @@ -327,7 +327,7 @@ impl<'tcx> CValue<'tcx> { let val = match layout.ty.kind() { ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => { - let const_val = const_val.assert_bits(layout.size); + let const_val = const_val.to_bits(layout.size); let lsb = fx.bcx.ins().iconst(types::I64, const_val as u64 as i64); let msb = fx.bcx.ins().iconst(types::I64, (const_val >> 64) as u64 as i64); fx.bcx.ins().iconcat(lsb, msb) @@ -339,7 +339,7 @@ impl<'tcx> CValue<'tcx> { | ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..) => { - let raw_val = const_val.size().truncate(const_val.assert_bits(layout.size)); + let raw_val = const_val.size().truncate(const_val.to_bits(layout.size)); fx.bcx.ins().iconst(clif_ty, raw_val as i64) } ty::Float(FloatTy::F32) => { diff --git a/compiler/rustc_codegen_gcc/src/common.rs b/compiler/rustc_codegen_gcc/src/common.rs index 78d943192db..548c23cc794 100644 --- a/compiler/rustc_codegen_gcc/src/common.rs +++ b/compiler/rustc_codegen_gcc/src/common.rs @@ -166,7 +166,7 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> { let bitsize = if layout.is_bool() { 1 } else { layout.size(self).bits() }; match cv { Scalar::Int(int) => { - let data = int.assert_bits(layout.size(self)); + let data = int.to_bits(layout.size(self)); // FIXME(antoyo): there's some issues with using the u128 code that follows, so hard-code // the paths for floating-point values. 
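The call-site hunks above all follow one pattern: code that used to unwrap a `Result` from `try_to_u8()`/`assert_bits()` now calls the infallible `to_u8()`/`to_bits()`, which panic on a size mismatch instead of handing it back to the caller. A minimal standalone sketch of that convention, using a made-up `ToyScalarInt` rather than the real rustc type:

// Toy model only; the real ScalarInt lives in rustc_middle and stores up to 128 bits.
#[derive(Clone, Copy, Debug)]
struct ToyScalarInt {
    data: u128,
    size_bytes: u8,
}

impl ToyScalarInt {
    // Old style: report a size mismatch to the caller.
    fn try_to_u8(self) -> Result<u8, u8> {
        if self.size_bytes == 1 { Ok(self.data as u8) } else { Err(self.size_bytes) }
    }

    // New style: a mismatch is a bug at the call site, so panic right here.
    fn to_u8(self) -> u8 {
        assert_eq!(self.size_bytes, 1, "expected a 1-byte value, got {} bytes", self.size_bytes);
        self.data as u8
    }
}

fn main() {
    let imm8 = ToyScalarInt { data: 0x2a, size_bytes: 1 };
    // Before: imm8.try_to_u8().unwrap_or_else(|_| panic!("kind not scalar: {:?}", imm8));
    // After:
    assert_eq!(imm8.to_u8(), 42);
}

Pushing the size check into the accessor keeps call sites short and turns a mismatch into an immediately reported bug rather than a `Result` nobody inspects.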
diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs index ab8036a1410..4ffc92eb633 100644 --- a/compiler/rustc_codegen_llvm/src/common.rs +++ b/compiler/rustc_codegen_llvm/src/common.rs @@ -244,7 +244,7 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { let bitsize = if layout.is_bool() { 1 } else { layout.size(self).bits() }; match cv { Scalar::Int(int) => { - let data = int.assert_bits(layout.size(self)); + let data = int.to_bits(layout.size(self)); let llval = self.const_uint_big(self.type_ix(bitsize), data); if matches!(layout.primitive(), Pointer(_)) { unsafe { llvm::LLVMConstIntToPtr(llval, llty) } diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs index 7b1038d5617..5622c5e2420 100644 --- a/compiler/rustc_codegen_llvm/src/intrinsic.rs +++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs @@ -1221,7 +1221,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>( .iter() .enumerate() .map(|(arg_idx, val)| { - let idx = val.unwrap_leaf().try_to_i32().unwrap(); + let idx = val.unwrap_leaf().to_i32(); if idx >= i32::try_from(total_len).unwrap() { bx.sess().dcx().emit_err(InvalidMonomorphization::SimdIndexOutOfBounds { span, diff --git a/compiler/rustc_codegen_ssa/src/common.rs b/compiler/rustc_codegen_ssa/src/common.rs index e4a36b3f591..27b0f127e92 100644 --- a/compiler/rustc_codegen_ssa/src/common.rs +++ b/compiler/rustc_codegen_ssa/src/common.rs @@ -163,7 +163,7 @@ pub fn asm_const_to_str<'tcx>( let mir::ConstValue::Scalar(scalar) = const_value else { span_bug!(sp, "expected Scalar for promoted asm const, but got {:#?}", const_value) }; - let value = scalar.assert_bits(ty_and_layout.size); + let value = scalar.assert_scalar_int().to_bits(ty_and_layout.size); match ty_and_layout.ty.kind() { ty::Uint(_) => value.to_string(), ty::Int(int_ty) => match int_ty.normalize(tcx.sess.target.pointer_width) { diff --git a/compiler/rustc_const_eval/src/const_eval/valtrees.rs b/compiler/rustc_const_eval/src/const_eval/valtrees.rs index 5312f1f946f..66993476bef 100644 --- a/compiler/rustc_const_eval/src/const_eval/valtrees.rs +++ b/compiler/rustc_const_eval/src/const_eval/valtrees.rs @@ -95,10 +95,10 @@ fn const_to_valtree_inner<'tcx>( } ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => { let val = ecx.read_immediate(place)?; - let val = val.to_scalar(); + let val = val.to_scalar_int().unwrap(); *num_nodes += 1; - Ok(ty::ValTree::Leaf(val.assert_int())) + Ok(ty::ValTree::Leaf(val)) } ty::Pat(base, ..) => { @@ -125,7 +125,7 @@ fn const_to_valtree_inner<'tcx>( let val = val.to_scalar(); // We are in the CTFE machine, so ptr-to-int casts will fail. // This can only be `Ok` if `val` already is an integer. - let Ok(val) = val.try_to_int() else { + let Ok(val) = val.try_to_scalar_int() else { return Err(ValTreeCreationError::NonSupportedType); }; // It's just a ScalarInt! 
@@ -411,7 +411,7 @@ fn valtree_into_mplace<'tcx>( ty::Adt(def, _) if def.is_enum() => { // First element of valtree corresponds to variant let scalar_int = branches[0].unwrap_leaf(); - let variant_idx = VariantIdx::from_u32(scalar_int.try_to_u32().unwrap()); + let variant_idx = VariantIdx::from_u32(scalar_int.to_u32()); let variant = def.variant(variant_idx); debug!(?variant); diff --git a/compiler/rustc_const_eval/src/interpret/discriminant.rs b/compiler/rustc_const_eval/src/interpret/discriminant.rs index 67fbf9642bf..0dbee8c1d94 100644 --- a/compiler/rustc_const_eval/src/interpret/discriminant.rs +++ b/compiler/rustc_const_eval/src/interpret/discriminant.rs @@ -123,14 +123,14 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { // (`tag_bits` itself is only used for error messages below.) let tag_bits = tag_val .to_scalar() - .try_to_int() + .try_to_scalar_int() .map_err(|dbg_val| err_ub!(InvalidTag(dbg_val)))? - .assert_bits(tag_layout.size); + .to_bits(tag_layout.size); // Cast bits from tag layout to discriminant layout. // After the checks we did above, this cannot fail, as // discriminants are int-like. let discr_val = self.int_to_int_or_float(&tag_val, discr_layout).unwrap(); - let discr_bits = discr_val.to_scalar().assert_bits(discr_layout.size); + let discr_bits = discr_val.to_scalar().to_bits(discr_layout.size)?; // Convert discriminant to variant index, and catch invalid discriminants. let index = match *ty.kind() { ty::Adt(adt, _) => { @@ -152,7 +152,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { // discriminant (encoded in niche/tag) and variant index are the same. let variants_start = niche_variants.start().as_u32(); let variants_end = niche_variants.end().as_u32(); - let variant = match tag_val.try_to_int() { + let variant = match tag_val.try_to_scalar_int() { Err(dbg_val) => { // So this is a pointer then, and casting to an int failed. // Can only happen during CTFE. @@ -167,7 +167,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { untagged_variant } Ok(tag_bits) => { - let tag_bits = tag_bits.assert_bits(tag_layout.size); + let tag_bits = tag_bits.to_bits(tag_layout.size); // We need to use machine arithmetic to get the relative variant idx: // variant_index_relative = tag_val - niche_start_val let tag_val = ImmTy::from_uint(tag_bits, tag_layout); @@ -175,7 +175,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { let variant_index_relative_val = self.binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?; let variant_index_relative = - variant_index_relative_val.to_scalar().assert_bits(tag_val.layout.size); + variant_index_relative_val.to_scalar().to_bits(tag_val.layout.size)?; // Check if this is in the range that indicates an actual discriminant. if variant_index_relative <= u128::from(variants_end - variants_start) { let variant_index_relative = u32::try_from(variant_index_relative) @@ -294,8 +294,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { ImmTy::from_uint(variant_index_relative, tag_layout); let tag = self .binary_op(mir::BinOp::Add, &variant_index_relative_val, &niche_start_val)? 
- .to_scalar() - .assert_int(); + .to_scalar_int()?; Ok(Some((tag, tag_field))) } } diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs index 18b76443cd9..dac5c10addc 100644 --- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs +++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs @@ -519,7 +519,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { // `x % y != 0` or `y == 0` or `x == T::MIN && y == -1`. // First, check x % y != 0 (or if that computation overflows). let rem = self.binary_op(BinOp::Rem, a, b)?; - if rem.to_scalar().assert_bits(a.layout.size) != 0 { + if rem.to_scalar().to_bits(a.layout.size)? != 0 { throw_ub_custom!( fluent::const_eval_exact_div_has_remainder, a = format!("{a}"), diff --git a/compiler/rustc_const_eval/src/interpret/memory.rs b/compiler/rustc_const_eval/src/interpret/memory.rs index 521f28b7123..7eb73e9b52f 100644 --- a/compiler/rustc_const_eval/src/interpret/memory.rs +++ b/compiler/rustc_const_eval/src/interpret/memory.rs @@ -1344,7 +1344,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { /// Test if this value might be null. /// If the machine does not support ptr-to-int casts, this is conservative. pub fn scalar_may_be_null(&self, scalar: Scalar) -> InterpResult<'tcx, bool> { - Ok(match scalar.try_to_int() { + Ok(match scalar.try_to_scalar_int() { Ok(int) => int.is_null(), Err(_) => { // Can only happen during CTFE. diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs index bbb2c2f3938..0a7e9853763 100644 --- a/compiler/rustc_const_eval/src/interpret/operand.rs +++ b/compiler/rustc_const_eval/src/interpret/operand.rs @@ -87,6 +87,12 @@ impl Immediate { } } + #[inline] + #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980) + pub fn to_scalar_int(self) -> ScalarInt { + self.to_scalar().try_to_scalar_int().unwrap() + } + #[inline] #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980) pub fn to_scalar_pair(self) -> (Scalar, Scalar) { @@ -219,19 +225,11 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> { Self::from_scalar(Scalar::from(s), layout) } - #[inline] - pub fn try_from_uint(i: impl Into, layout: TyAndLayout<'tcx>) -> Option { - Some(Self::from_scalar(Scalar::try_from_uint(i, layout.size)?, layout)) - } #[inline] pub fn from_uint(i: impl Into, layout: TyAndLayout<'tcx>) -> Self { Self::from_scalar(Scalar::from_uint(i, layout.size), layout) } - #[inline] - pub fn try_from_int(i: impl Into, layout: TyAndLayout<'tcx>) -> Option { - Some(Self::from_scalar(Scalar::try_from_int(i, layout.size)?, layout)) - } #[inline] pub fn from_int(i: impl Into, layout: TyAndLayout<'tcx>) -> Self { Self::from_scalar(Scalar::from_int(i, layout.size), layout) @@ -276,7 +274,8 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> { #[inline] pub fn to_const_int(self) -> ConstInt { assert!(self.layout.ty.is_integral()); - let int = self.to_scalar().assert_int(); + let int = self.imm.to_scalar_int(); + assert_eq!(int.size(), self.layout.size); ConstInt::new(int, self.layout.ty.is_signed(), self.layout.ty.is_ptr_sized_integral()) } diff --git a/compiler/rustc_const_eval/src/interpret/operator.rs b/compiler/rustc_const_eval/src/interpret/operator.rs index 6d005dfcd86..c821c98073d 100644 --- a/compiler/rustc_const_eval/src/interpret/operator.rs +++ b/compiler/rustc_const_eval/src/interpret/operator.rs @@ -95,10 +95,10 @@ impl<'tcx, M: Machine<'tcx>> 
InterpCx<'tcx, M> { let l = left.to_scalar_int()?; let r = right.to_scalar_int()?; // Prepare to convert the values to signed or unsigned form. - let l_signed = || l.assert_int(left.layout.size); - let l_unsigned = || l.assert_uint(left.layout.size); - let r_signed = || r.assert_int(right.layout.size); - let r_unsigned = || r.assert_uint(right.layout.size); + let l_signed = || l.to_int(left.layout.size); + let l_unsigned = || l.to_uint(left.layout.size); + let r_signed = || r.to_int(right.layout.size); + let r_unsigned = || r.to_uint(right.layout.size); let throw_ub_on_overflow = match bin_op { AddUnchecked => Some(sym::unchecked_add), diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs index 3407c7b8c79..f532f6bbe37 100644 --- a/compiler/rustc_const_eval/src/interpret/validity.rs +++ b/compiler/rustc_const_eval/src/interpret/validity.rs @@ -653,8 +653,8 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> { let WrappingRange { start, end } = valid_range; let max_value = size.unsigned_int_max(); assert!(end <= max_value); - let bits = match scalar.try_to_int() { - Ok(int) => int.assert_bits(size), + let bits = match scalar.try_to_scalar_int() { + Ok(int) => int.to_bits(size), Err(_) => { // So this is a pointer then, and casting to an int failed. // Can only happen during CTFE. diff --git a/compiler/rustc_hir_typeck/src/pat.rs b/compiler/rustc_hir_typeck/src/pat.rs index be91e7d45b6..9476dc70483 100644 --- a/compiler/rustc_hir_typeck/src/pat.rs +++ b/compiler/rustc_hir_typeck/src/pat.rs @@ -2385,11 +2385,10 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { min_len: u64, ) -> (Option>, Ty<'tcx>) { let len = match len.eval(self.tcx, self.param_env, span) { - // FIXME(BoxyUwU): Assert the `Ty` is a `usize`? 
Ok((_, val)) => val .try_to_scalar() - .and_then(|scalar| scalar.try_to_int().ok()) - .and_then(|int| int.try_to_target_usize(self.tcx).ok()), + .and_then(|scalar| scalar.try_to_scalar_int().ok()) + .map(|int| int.to_target_usize(self.tcx)), Err(ErrorHandled::Reported(..)) => { let guar = self.error_scrutinee_unfixed_length(span); return (Some(Ty::new_error(self.tcx, guar)), arr_ty); diff --git a/compiler/rustc_middle/src/mir/consts.rs b/compiler/rustc_middle/src/mir/consts.rs index cc8979dd990..89f5acacf9d 100644 --- a/compiler/rustc_middle/src/mir/consts.rs +++ b/compiler/rustc_middle/src/mir/consts.rs @@ -84,11 +84,11 @@ impl<'tcx> ConstValue<'tcx> { } pub fn try_to_scalar_int(&self) -> Option { - self.try_to_scalar()?.try_to_int().ok() + self.try_to_scalar()?.try_to_scalar_int().ok() } pub fn try_to_bits(&self, size: Size) -> Option { - self.try_to_scalar_int()?.try_to_bits(size).ok() + Some(self.try_to_scalar_int()?.to_bits(size)) } pub fn try_to_bool(&self) -> Option { @@ -96,7 +96,7 @@ impl<'tcx> ConstValue<'tcx> { } pub fn try_to_target_usize(&self, tcx: TyCtxt<'tcx>) -> Option { - self.try_to_scalar_int()?.try_to_target_usize(tcx).ok() + Some(self.try_to_scalar_int()?.to_target_usize(tcx)) } pub fn try_to_bits_for_ty( @@ -300,7 +300,7 @@ impl<'tcx> Const<'tcx> { #[inline] pub fn try_to_bits(self, size: Size) -> Option { - self.try_to_scalar_int()?.try_to_bits(size).ok() + Some(self.try_to_scalar_int()?.to_bits(size)) } #[inline] @@ -367,7 +367,7 @@ impl<'tcx> Const<'tcx> { tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>, ) -> Option { - self.try_eval_scalar(tcx, param_env)?.try_to_int().ok() + self.try_eval_scalar(tcx, param_env)?.try_to_scalar_int().ok() } #[inline] @@ -375,7 +375,7 @@ impl<'tcx> Const<'tcx> { let int = self.try_eval_scalar_int(tcx, param_env)?; let size = tcx.layout_of(param_env.with_reveal_all_normalized(tcx).and(self.ty())).ok()?.size; - int.try_to_bits(size).ok() + Some(int.to_bits(size)) } /// Panics if the value cannot be evaluated or doesn't contain a valid integer of the given type. 
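In the `ConstValue`/`Const` helpers above, the remaining `Option` only says whether the constant is a scalar int at all; once it is, `to_bits` asserts the size. A standalone sketch of that split, with toy types standing in for the rustc ones:

// Toy stand-ins: `Other` plays the role of "not a ScalarInt" (e.g. a pointer or non-scalar const).
#[derive(Clone, Copy)]
enum ToyConst {
    ScalarInt { data: u128, size: u64 },
    Other,
}

impl ToyConst {
    fn try_to_scalar_int(self) -> Option<(u128, u64)> {
        match self {
            ToyConst::ScalarInt { data, size } => Some((data, size)),
            ToyConst::Other => None,
        }
    }

    // Mirrors the shape of `ConstValue::try_to_bits` after this patch: `None` means
    // "not a scalar int", while a wrong size is asserted away inside the accessor.
    fn try_to_bits(self, expected_size: u64) -> Option<u128> {
        let (data, size) = self.try_to_scalar_int()?;
        assert_eq!(size, expected_size, "expected int of size {expected_size}, got {size}");
        Some(data)
    }
}

fn main() {
    assert_eq!(ToyConst::ScalarInt { data: 7, size: 8 }.try_to_bits(8), Some(7));
    assert_eq!(ToyConst::Other.try_to_bits(8), None);
}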
@@ -391,7 +391,7 @@ impl<'tcx> Const<'tcx> { tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>, ) -> Option { - self.try_eval_scalar_int(tcx, param_env)?.try_to_target_usize(tcx).ok() + Some(self.try_eval_scalar_int(tcx, param_env)?.to_target_usize(tcx)) } #[inline] diff --git a/compiler/rustc_middle/src/mir/interpret/value.rs b/compiler/rustc_middle/src/mir/interpret/value.rs index 85357265687..70e5ad0635b 100644 --- a/compiler/rustc_middle/src/mir/interpret/value.rs +++ b/compiler/rustc_middle/src/mir/interpret/value.rs @@ -122,16 +122,12 @@ impl Scalar { Scalar::Int(c.into()) } - #[inline] - pub fn try_from_uint(i: impl Into, size: Size) -> Option { - ScalarInt::try_from_uint(i, size).map(Scalar::Int) - } - #[inline] pub fn from_uint(i: impl Into, size: Size) -> Self { let i = i.into(); - Self::try_from_uint(i, size) + ScalarInt::try_from_uint(i, size) .unwrap_or_else(|| bug!("Unsigned value {:#x} does not fit in {} bits", i, size.bits())) + .into() } #[inline] @@ -164,16 +160,12 @@ impl Scalar { Self::from_uint(i, cx.data_layout().pointer_size) } - #[inline] - pub fn try_from_int(i: impl Into, size: Size) -> Option { - ScalarInt::try_from_int(i, size).map(Scalar::Int) - } - #[inline] pub fn from_int(i: impl Into, size: Size) -> Self { let i = i.into(); - Self::try_from_int(i, size) + ScalarInt::try_from_int(i, size) .unwrap_or_else(|| bug!("Signed value {:#x} does not fit in {} bits", i, size.bits())) + .into() } #[inline] @@ -227,7 +219,7 @@ impl Scalar { } /// This is almost certainly not the method you want! You should dispatch on the type - /// and use `to_{u8,u16,...}`/`scalar_to_ptr` to perform ptr-to-int / int-to-ptr casts as needed. + /// and use `to_{u8,u16,...}`/`to_pointer` to perform ptr-to-int / int-to-ptr casts as needed. /// /// This method only exists for the benefit of low-level operations that truly need to treat the /// scalar in whatever form it is. @@ -289,7 +281,7 @@ impl<'tcx, Prov: Provenance> Scalar { /// The error type is `AllocId`, not `CtfeProvenance`, since `AllocId` is the "minimal" /// component all provenance types must have. 
#[inline] - pub fn try_to_int(self) -> Result> { + pub fn try_to_scalar_int(self) -> Result> { match self { Scalar::Int(int) => Ok(int), Scalar::Ptr(ptr, sz) => { @@ -307,13 +299,13 @@ impl<'tcx, Prov: Provenance> Scalar { #[inline(always)] pub fn to_scalar_int(self) -> InterpResult<'tcx, ScalarInt> { - self.try_to_int().map_err(|_| err_unsup!(ReadPointerAsInt(None)).into()) + self.try_to_scalar_int().map_err(|_| err_unsup!(ReadPointerAsInt(None)).into()) } #[inline(always)] #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980) - pub fn assert_int(self) -> ScalarInt { - self.try_to_int().unwrap() + pub fn assert_scalar_int(self) -> ScalarInt { + self.try_to_scalar_int().expect("got a pointer where a ScalarInt was expected") } /// This throws UB (instead of ICEing) on a size mismatch since size mismatches can arise in @@ -330,13 +322,6 @@ impl<'tcx, Prov: Provenance> Scalar { }) } - #[inline(always)] - #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980) - pub fn assert_bits(self, target_size: Size) -> u128 { - self.to_bits(target_size) - .unwrap_or_else(|_| panic!("assertion failed: {self:?} fits {target_size:?}")) - } - pub fn to_bool(self) -> InterpResult<'tcx, bool> { let val = self.to_u8()?; match val { diff --git a/compiler/rustc_middle/src/thir.rs b/compiler/rustc_middle/src/thir.rs index 454897aa672..7c8b0ec671a 100644 --- a/compiler/rustc_middle/src/thir.rs +++ b/compiler/rustc_middle/src/thir.rs @@ -1033,8 +1033,8 @@ impl<'tcx> PatRangeBoundary<'tcx> { if let (Some(a), Some(b)) = (a.try_to_scalar_int(), b.try_to_scalar_int()) { let sz = ty.primitive_size(tcx); let cmp = match ty.kind() { - ty::Uint(_) | ty::Char => a.assert_uint(sz).cmp(&b.assert_uint(sz)), - ty::Int(_) => a.assert_int(sz).cmp(&b.assert_int(sz)), + ty::Uint(_) | ty::Char => a.to_uint(sz).cmp(&b.to_uint(sz)), + ty::Int(_) => a.to_int(sz).cmp(&b.to_int(sz)), _ => unreachable!(), }; return Some(cmp); diff --git a/compiler/rustc_middle/src/ty/consts.rs b/compiler/rustc_middle/src/ty/consts.rs index cc1daeb6419..12f0c38b054 100644 --- a/compiler/rustc_middle/src/ty/consts.rs +++ b/compiler/rustc_middle/src/ty/consts.rs @@ -376,7 +376,7 @@ impl<'tcx> Const<'tcx> { param_env: ParamEnv<'tcx>, ) -> Option<(Ty<'tcx>, ScalarInt)> { let (ty, scalar) = self.try_eval_scalar(tcx, param_env)?; - let val = scalar.try_to_int().ok()?; + let val = scalar.try_to_scalar_int().ok()?; Some((ty, val)) } @@ -388,7 +388,7 @@ impl<'tcx> Const<'tcx> { let (ty, scalar) = self.try_eval_scalar_int(tcx, param_env)?; let size = tcx.layout_of(param_env.with_reveal_all_normalized(tcx).and(ty)).ok()?.size; // if `ty` does not depend on generic parameters, use an empty param_env - scalar.try_to_bits(size).ok() + Some(scalar.to_bits(size)) } #[inline] @@ -405,7 +405,7 @@ impl<'tcx> Const<'tcx> { param_env: ParamEnv<'tcx>, ) -> Option { let (_, scalar) = self.try_eval_scalar_int(tcx, param_env)?; - scalar.try_to_target_usize(tcx).ok() + Some(scalar.to_target_usize(tcx)) } #[inline] diff --git a/compiler/rustc_middle/src/ty/consts/int.rs b/compiler/rustc_middle/src/ty/consts/int.rs index 40ac87873a0..52320dd141b 100644 --- a/compiler/rustc_middle/src/ty/consts/int.rs +++ b/compiler/rustc_middle/src/ty/consts/int.rs @@ -246,6 +246,10 @@ impl ScalarInt { Self::try_from_uint(i, tcx.data_layout.pointer_size) } + /// Try to convert this ScalarInt to the raw underlying bits. + /// Fails if the size is wrong. 
Generally a wrong size should lead to a panic, + /// but Miri sometimes wants to be resilient to size mismatches, + /// so the interpreter will generally use this `try` method. #[inline] pub fn try_to_bits(self, target_size: Size) -> Result<u128, Size> { assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST"); @@ -258,165 +262,149 @@ impl ScalarInt { } #[inline] - pub fn assert_bits(self, target_size: Size) -> u128 { + pub fn to_bits(self, target_size: Size) -> u128 { self.try_to_bits(target_size).unwrap_or_else(|size| { bug!("expected int of size {}, but got size {}", target_size.bytes(), size.bytes()) }) } - /// Tries to convert the `ScalarInt` to an unsigned integer of the given size. - /// Fails if the size of the `ScalarInt` is not equal to `size` and returns the - /// `ScalarInt`s size in that case. + /// Extracts the bits from the scalar without checking the size. #[inline] - pub fn try_to_uint(self, size: Size) -> Result<u128, Size> { - self.try_to_bits(size) + pub fn to_bits_unchecked(self) -> u128 { + self.check_data(); + self.data + } + + /// Converts the `ScalarInt` to an unsigned integer of the given size. + /// Panics if the size of the `ScalarInt` is not equal to `size`. + #[inline] + pub fn to_uint(self, size: Size) -> u128 { + self.to_bits(size) + } + + /// Converts the `ScalarInt` to `u8`. + /// Panics if the `size` of the `ScalarInt` is not equal to 1 byte. + #[inline] + pub fn to_u8(self) -> u8 { + self.to_uint(Size::from_bits(8)).try_into().unwrap() + } + + /// Converts the `ScalarInt` to `u16`. + /// Panics if the size of the `ScalarInt` is not equal to 2 bytes. + #[inline] + pub fn to_u16(self) -> u16 { + self.to_uint(Size::from_bits(16)).try_into().unwrap() + } + + /// Converts the `ScalarInt` to `u32`. + /// Panics if the `size` of the `ScalarInt` is not equal to 4 bytes. + #[inline] + pub fn to_u32(self) -> u32 { + self.to_uint(Size::from_bits(32)).try_into().unwrap() + } + + /// Converts the `ScalarInt` to `u64`. + /// Panics if the `size` of the `ScalarInt` is not equal to 8 bytes. + #[inline] + pub fn to_u64(self) -> u64 { + self.to_uint(Size::from_bits(64)).try_into().unwrap() + } + + /// Converts the `ScalarInt` to `u128`. + /// Panics if the `size` of the `ScalarInt` is not equal to 16 bytes. + #[inline] + pub fn to_u128(self) -> u128 { + self.to_uint(Size::from_bits(128)) } #[inline] - pub fn assert_uint(self, size: Size) -> u128 { - self.assert_bits(size) + pub fn to_target_usize(&self, tcx: TyCtxt<'_>) -> u64 { + self.to_uint(tcx.data_layout.pointer_size).try_into().unwrap() } - // Tries to convert the `ScalarInt` to `u8`. Fails if the `size` of the `ScalarInt` - // in not equal to 1 byte and returns the `size` value of the `ScalarInt` in - // that case. + /// Converts the `ScalarInt` to `bool`. + /// Panics if the `size` of the `ScalarInt` is not equal to 1 byte. + /// Errors if it is not a valid `bool`. #[inline] - pub fn try_to_u8(self) -> Result<u8, Size> { - self.try_to_uint(Size::from_bits(8)).map(|v| u8::try_from(v).unwrap()) - } - - /// Tries to convert the `ScalarInt` to `u16`. Fails if the size of the `ScalarInt` - /// in not equal to 2 bytes and returns the `size` value of the `ScalarInt` in - /// that case. - #[inline] - pub fn try_to_u16(self) -> Result<u16, Size> { - self.try_to_uint(Size::from_bits(16)).map(|v| u16::try_from(v).unwrap()) - } - - /// Tries to convert the `ScalarInt` to `u32`. Fails if the `size` of the `ScalarInt` - /// in not equal to 4 bytes and returns the `size` value of the `ScalarInt` in - /// that case. 
- #[inline] - pub fn try_to_u32(self) -> Result<u32, Size> { - self.try_to_uint(Size::from_bits(32)).map(|v| u32::try_from(v).unwrap()) - } - - /// Tries to convert the `ScalarInt` to `u64`. Fails if the `size` of the `ScalarInt` - /// in not equal to 8 bytes and returns the `size` value of the `ScalarInt` in - /// that case. - #[inline] - pub fn try_to_u64(self) -> Result<u64, Size> { - self.try_to_uint(Size::from_bits(64)).map(|v| u64::try_from(v).unwrap()) - } - - /// Tries to convert the `ScalarInt` to `u128`. Fails if the `size` of the `ScalarInt` - /// in not equal to 16 bytes and returns the `size` value of the `ScalarInt` in - /// that case. - #[inline] - pub fn try_to_u128(self) -> Result<u128, Size> { - self.try_to_uint(Size::from_bits(128)) - } - - #[inline] - pub fn try_to_target_usize(&self, tcx: TyCtxt<'_>) -> Result<u64, Size> { - self.try_to_uint(tcx.data_layout.pointer_size).map(|v| u64::try_from(v).unwrap()) - } - - // Tries to convert the `ScalarInt` to `bool`. Fails if the `size` of the `ScalarInt` - // in not equal to 1 byte or if the value is not 0 or 1 and returns the `size` - // value of the `ScalarInt` in that case. - #[inline] - pub fn try_to_bool(self) -> Result<bool, Size> { - match self.try_to_u8()? { + pub fn try_to_bool(self) -> Result<bool, ()> { + match self.to_u8() { 0 => Ok(false), 1 => Ok(true), - _ => Err(self.size()), + _ => Err(()), } } - /// Tries to convert the `ScalarInt` to a signed integer of the given size. - /// Fails if the size of the `ScalarInt` is not equal to `size` and returns the - /// `ScalarInt`s size in that case. + /// Converts the `ScalarInt` to a signed integer of the given size. + /// Panics if the size of the `ScalarInt` is not equal to `size`. #[inline] - pub fn try_to_int(self, size: Size) -> Result<i128, Size> { - let b = self.try_to_bits(size)?; - Ok(size.sign_extend(b) as i128) - } - - #[inline] - pub fn assert_int(self, size: Size) -> i128 { - let b = self.assert_bits(size); + pub fn to_int(self, size: Size) -> i128 { + let b = self.to_bits(size); size.sign_extend(b) as i128 } - /// Tries to convert the `ScalarInt` to i8. - /// Fails if the size of the `ScalarInt` is not equal to 1 byte - /// and returns the `ScalarInt`s size in that case. - pub fn try_to_i8(self) -> Result<i8, Size> { - self.try_to_int(Size::from_bits(8)).map(|v| i8::try_from(v).unwrap()) + /// Converts the `ScalarInt` to i8. + /// Panics if the size of the `ScalarInt` is not equal to 1 byte. + pub fn to_i8(self) -> i8 { + self.to_int(Size::from_bits(8)).try_into().unwrap() } - /// Tries to convert the `ScalarInt` to i16. - /// Fails if the size of the `ScalarInt` is not equal to 2 bytes - /// and returns the `ScalarInt`s size in that case. - pub fn try_to_i16(self) -> Result<i16, Size> { - self.try_to_int(Size::from_bits(16)).map(|v| i16::try_from(v).unwrap()) + /// Converts the `ScalarInt` to i16. + /// Panics if the size of the `ScalarInt` is not equal to 2 bytes. + pub fn to_i16(self) -> i16 { + self.to_int(Size::from_bits(16)).try_into().unwrap() } - /// Tries to convert the `ScalarInt` to i32. - /// Fails if the size of the `ScalarInt` is not equal to 4 bytes - /// and returns the `ScalarInt`s size in that case. - pub fn try_to_i32(self) -> Result<i32, Size> { - self.try_to_int(Size::from_bits(32)).map(|v| i32::try_from(v).unwrap()) + /// Converts the `ScalarInt` to i32. + /// Panics if the size of the `ScalarInt` is not equal to 4 bytes. + pub fn to_i32(self) -> i32 { + self.to_int(Size::from_bits(32)).try_into().unwrap() } - /// Tries to convert the `ScalarInt` to i64. 
- /// Fails if the size of the `ScalarInt` is not equal to 8 bytes - /// and returns the `ScalarInt`s size in that case. - pub fn try_to_i64(self) -> Result<i64, Size> { - self.try_to_int(Size::from_bits(64)).map(|v| i64::try_from(v).unwrap()) + /// Converts the `ScalarInt` to i64. + /// Panics if the size of the `ScalarInt` is not equal to 8 bytes. + pub fn to_i64(self) -> i64 { + self.to_int(Size::from_bits(64)).try_into().unwrap() } - /// Tries to convert the `ScalarInt` to i128. - /// Fails if the size of the `ScalarInt` is not equal to 16 bytes - /// and returns the `ScalarInt`s size in that case. - pub fn try_to_i128(self) -> Result<i128, Size> { - self.try_to_int(Size::from_bits(128)) + /// Converts the `ScalarInt` to i128. + /// Panics if the size of the `ScalarInt` is not equal to 16 bytes. + pub fn to_i128(self) -> i128 { + self.to_int(Size::from_bits(128)) } #[inline] - pub fn try_to_target_isize(&self, tcx: TyCtxt<'_>) -> Result<i64, Size> { - self.try_to_int(tcx.data_layout.pointer_size).map(|v| i64::try_from(v).unwrap()) + pub fn to_target_isize(&self, tcx: TyCtxt<'_>) -> i64 { + self.to_int(tcx.data_layout.pointer_size).try_into().unwrap() } #[inline] - pub fn try_to_float<F: Float>(self) -> Result<F, Size> { + pub fn to_float<F: Float>(self) -> F { // Going through `to_uint` to check size and truncation. - Ok(F::from_bits(self.try_to_bits(Size::from_bits(F::BITS))?)) + F::from_bits(self.to_bits(Size::from_bits(F::BITS))) } #[inline] - pub fn try_to_f16(self) -> Result<Half, Size> { - self.try_to_float() + pub fn to_f16(self) -> Half { + self.to_float() } #[inline] - pub fn try_to_f32(self) -> Result<Single, Size> { - self.try_to_float() + pub fn to_f32(self) -> Single { + self.to_float() } #[inline] - pub fn try_to_f64(self) -> Result<Double, Size> { - self.try_to_float() + pub fn to_f64(self) -> Double { + self.to_float() } #[inline] - pub fn try_to_f128(self) -> Result<Quad, Size> { - self.try_to_float() + pub fn to_f128(self) -> Quad { + self.to_float() } } -macro_rules! from { +macro_rules! from_x_for_scalar_int { ($($ty:ty),*) => { $( impl From<$ty> for ScalarInt { @@ -432,30 +420,29 @@ } } -macro_rules! try_from { +macro_rules! from_scalar_int_for_x { ($($ty:ty),*) => { $( - impl TryFrom<ScalarInt> for $ty { - type Error = Size; + impl From<ScalarInt> for $ty { #[inline] - fn try_from(int: ScalarInt) -> Result<Self, Size> { + fn from(int: ScalarInt) -> Self { // The `unwrap` cannot fail because to_bits (if it succeeds) // is guaranteed to return a value that fits into the size. 
- int.try_to_bits(Size::from_bytes(std::mem::size_of::<$ty>())) - .map(|u| u.try_into().unwrap()) + int.to_bits(Size::from_bytes(std::mem::size_of::<$ty>())) + .try_into().unwrap() } } )* } } -from!(u8, u16, u32, u64, u128, bool); -try_from!(u8, u16, u32, u64, u128); +from_x_for_scalar_int!(u8, u16, u32, u64, u128, bool); +from_scalar_int_for_x!(u8, u16, u32, u64, u128); impl TryFrom<ScalarInt> for bool { - type Error = Size; + type Error = (); #[inline] - fn try_from(int: ScalarInt) -> Result<Self, Size> { + fn try_from(int: ScalarInt) -> Result<Self, ()> { int.try_to_bool() } } @@ -463,7 +450,7 @@ impl TryFrom<ScalarInt> for bool { impl From<char> for ScalarInt { #[inline] fn from(c: char) -> Self { - Self { data: c as u128, size: NonZero::new(std::mem::size_of::<char>() as u8).unwrap() } + (c as u32).into() } } @@ -476,10 +463,7 @@ impl TryFrom<ScalarInt> for char { #[inline] fn try_from(int: ScalarInt) -> Result<Self, Self::Error> { - let Ok(bits) = int.try_to_bits(Size::from_bytes(std::mem::size_of::<char>())) else { - return Err(CharTryFromScalarInt); - }; - match char::from_u32(bits.try_into().unwrap()) { + match char::from_u32(int.to_u32()) { Some(c) => Ok(c), None => Err(CharTryFromScalarInt), } @@ -494,11 +478,10 @@ impl From<Half> for ScalarInt { } } -impl TryFrom<ScalarInt> for Half { - type Error = Size; +impl From<ScalarInt> for Half { #[inline] - fn try_from(int: ScalarInt) -> Result<Self, Size> { - int.try_to_bits(Size::from_bytes(2)).map(Self::from_bits) + fn from(int: ScalarInt) -> Self { + Self::from_bits(int.to_bits(Size::from_bytes(2))) } } @@ -510,11 +493,10 @@ impl From<Single> for ScalarInt { } } -impl TryFrom<ScalarInt> for Single { - type Error = Size; +impl From<ScalarInt> for Single { #[inline] - fn try_from(int: ScalarInt) -> Result<Self, Size> { - int.try_to_bits(Size::from_bytes(4)).map(Self::from_bits) + fn from(int: ScalarInt) -> Self { + Self::from_bits(int.to_bits(Size::from_bytes(4))) } } @@ -526,11 +508,10 @@ impl From<Double> for ScalarInt { } } -impl TryFrom<ScalarInt> for Double { - type Error = Size; +impl From<ScalarInt> for Double { #[inline] - fn try_from(int: ScalarInt) -> Result<Self, Size> { - int.try_to_bits(Size::from_bytes(8)).map(Self::from_bits) + fn from(int: ScalarInt) -> Self { + Self::from_bits(int.to_bits(Size::from_bytes(8))) } } @@ -542,11 +523,10 @@ impl From<Quad> for ScalarInt { } } -impl TryFrom<ScalarInt> for Quad { - type Error = Size; +impl From<ScalarInt> for Quad { #[inline] - fn try_from(int: ScalarInt) -> Result<Self, Size> { - int.try_to_bits(Size::from_bytes(16)).map(Self::from_bits) + fn from(int: ScalarInt) -> Self { + Self::from_bits(int.to_bits(Size::from_bytes(16))) } } diff --git a/compiler/rustc_middle/src/ty/consts/valtree.rs b/compiler/rustc_middle/src/ty/consts/valtree.rs index 96bc5515a56..efc91357af8 100644 --- a/compiler/rustc_middle/src/ty/consts/valtree.rs +++ b/compiler/rustc_middle/src/ty/consts/valtree.rs @@ -79,7 +79,7 @@ impl<'tcx> ValTree<'tcx> { } pub fn try_to_target_usize(self, tcx: TyCtxt<'tcx>) -> Option<u64> { - self.try_to_scalar_int().and_then(|s| s.try_to_target_usize(tcx).ok()) + self.try_to_scalar_int().map(|s| s.to_target_usize(tcx)) } /// Get the values inside the ValTree as a slice of bytes. 
This only works for @@ -100,8 +100,9 @@ impl<'tcx> ValTree<'tcx> { _ => return None, } - Some(tcx.arena.alloc_from_iter( - self.unwrap_branch().into_iter().map(|v| v.unwrap_leaf().try_to_u8().unwrap()), - )) + Some( + tcx.arena + .alloc_from_iter(self.unwrap_branch().into_iter().map(|v| v.unwrap_leaf().to_u8())), + ) } } diff --git a/compiler/rustc_middle/src/ty/print/pretty.rs b/compiler/rustc_middle/src/ty/print/pretty.rs index 49d46eb3c4b..662eafd0ccb 100644 --- a/compiler/rustc_middle/src/ty/print/pretty.rs +++ b/compiler/rustc_middle/src/ty/print/pretty.rs @@ -1652,7 +1652,7 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write { if let ty::ConstKind::Value(_, ty::ValTree::Leaf(int)) = len.kind() { match self.tcx().try_get_global_alloc(prov.alloc_id()) { Some(GlobalAlloc::Memory(alloc)) => { - let len = int.assert_bits(self.tcx().data_layout.pointer_size); + let len = int.to_bits(self.tcx().data_layout.pointer_size); let range = AllocRange { start: offset, size: Size::from_bytes(len) }; if let Ok(byte_str) = @@ -1730,7 +1730,7 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write { } // Pointer types ty::Ref(..) | ty::RawPtr(_, _) | ty::FnPtr(_) => { - let data = int.assert_bits(self.tcx().data_layout.pointer_size); + let data = int.to_bits(self.tcx().data_layout.pointer_size); self.typed_value( |this| { write!(this, "0x{data:x}")?; diff --git a/compiler/rustc_mir_build/src/build/mod.rs b/compiler/rustc_mir_build/src/build/mod.rs index 193f0d124bb..601e5d4d3dc 100644 --- a/compiler/rustc_mir_build/src/build/mod.rs +++ b/compiler/rustc_mir_build/src/build/mod.rs @@ -15,11 +15,10 @@ use rustc_index::{Idx, IndexSlice, IndexVec}; use rustc_infer::infer::{InferCtxt, TyCtxtInferExt}; use rustc_middle::hir::place::PlaceBase as HirPlaceBase; use rustc_middle::middle::region; -use rustc_middle::mir::interpret::Scalar; use rustc_middle::mir::*; use rustc_middle::query::TyCtxtAt; use rustc_middle::thir::{self, ExprId, LintLevel, LocalVarId, Param, ParamId, PatKind, Thir}; -use rustc_middle::ty::{self, Ty, TyCtxt, TypeVisitableExt}; +use rustc_middle::ty::{self, ScalarInt, Ty, TyCtxt, TypeVisitableExt}; use rustc_middle::{bug, span_bug}; use rustc_span::symbol::sym; use rustc_span::Span; @@ -1014,14 +1013,14 @@ fn parse_float_into_constval<'tcx>( float_ty: ty::FloatTy, neg: bool, ) -> Option> { - parse_float_into_scalar(num, float_ty, neg).map(ConstValue::Scalar) + parse_float_into_scalar(num, float_ty, neg).map(|s| ConstValue::Scalar(s.into())) } pub(crate) fn parse_float_into_scalar( num: Symbol, float_ty: ty::FloatTy, neg: bool, -) -> Option { +) -> Option { let num = num.as_str(); match float_ty { // FIXME(f16_f128): When available, compare to the library parser as with `f32` and `f64` @@ -1030,7 +1029,7 @@ pub(crate) fn parse_float_into_scalar( if neg { f = -f; } - Some(Scalar::from_f16(f)) + Some(ScalarInt::from(f)) } ty::FloatTy::F32 => { let Ok(rust_f) = num.parse::() else { return None }; @@ -1053,7 +1052,7 @@ pub(crate) fn parse_float_into_scalar( f = -f; } - Some(Scalar::from_f32(f)) + Some(ScalarInt::from(f)) } ty::FloatTy::F64 => { let Ok(rust_f) = num.parse::() else { return None }; @@ -1076,7 +1075,7 @@ pub(crate) fn parse_float_into_scalar( f = -f; } - Some(Scalar::from_f64(f)) + Some(ScalarInt::from(f)) } // FIXME(f16_f128): When available, compare to the library parser as with `f32` and `f64` ty::FloatTy::F128 => { @@ -1084,7 +1083,7 @@ pub(crate) fn parse_float_into_scalar( if neg { f = -f; } - Some(Scalar::from_f128(f)) + Some(ScalarInt::from(f)) } } } diff --git 
a/compiler/rustc_mir_build/src/thir/constant.rs b/compiler/rustc_mir_build/src/thir/constant.rs index 31bc72184ca..a98e046d4dc 100644 --- a/compiler/rustc_mir_build/src/thir/constant.rs +++ b/compiler/rustc_mir_build/src/thir/constant.rs @@ -58,11 +58,9 @@ pub(crate) fn lit_to_const<'tcx>( } (ast::LitKind::Bool(b), ty::Bool) => ty::ValTree::from_scalar_int((*b).into()), (ast::LitKind::Float(n, _), ty::Float(fty)) => { - let bits = parse_float_into_scalar(*n, *fty, neg) - .ok_or_else(|| { - tcx.dcx().bug(format!("couldn't parse float literal: {:?}", lit_input.lit)) - })? - .assert_int(); + let bits = parse_float_into_scalar(*n, *fty, neg).ok_or_else(|| { + tcx.dcx().bug(format!("couldn't parse float literal: {:?}", lit_input.lit)) + })?; ty::ValTree::from_scalar_int(bits) } (ast::LitKind::Char(c), ty::Char) => ty::ValTree::from_scalar_int((*c).into()), diff --git a/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs b/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs index 36495101d3f..192d706bce2 100644 --- a/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs +++ b/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs @@ -282,8 +282,7 @@ impl<'tcx> ConstToPat<'tcx> { } ty::Adt(adt_def, args) if adt_def.is_enum() => { let (&variant_index, fields) = cv.unwrap_branch().split_first().unwrap(); - let variant_index = - VariantIdx::from_u32(variant_index.unwrap_leaf().try_to_u32().ok().unwrap()); + let variant_index = VariantIdx::from_u32(variant_index.unwrap_leaf().to_u32()); PatKind::Variant { adt_def: *adt_def, args, @@ -371,8 +370,8 @@ impl<'tcx> ConstToPat<'tcx> { let v = cv.unwrap_leaf(); let is_nan = match flt { ty::FloatTy::F16 => unimplemented!("f16_f128"), - ty::FloatTy::F32 => v.try_to_f32().unwrap().is_nan(), - ty::FloatTy::F64 => v.try_to_f64().unwrap().is_nan(), + ty::FloatTy::F32 => v.to_f32().is_nan(), + ty::FloatTy::F64 => v.to_f64().is_nan(), ty::FloatTy::F128 => unimplemented!("f16_f128"), }; if is_nan { diff --git a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs index eba5d13d33f..0fd85eb345d 100644 --- a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs +++ b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs @@ -326,7 +326,7 @@ impl<'tcx> ValueAnalysis<'tcx> for ConstAnalysis<'_, 'tcx> { // This allows the set of visited edges to grow monotonically with the lattice. FlatSet::Bottom => TerminatorEdges::None, FlatSet::Elem(scalar) => { - let choice = scalar.assert_bits(scalar.size()); + let choice = scalar.assert_scalar_int().to_bits_unchecked(); TerminatorEdges::Single(targets.target_for_value(choice)) } FlatSet::Top => TerminatorEdges::SwitchInt { discr, targets }, @@ -609,7 +609,7 @@ fn propagatable_scalar( map: &Map, ) -> Option { if let FlatSet::Elem(value) = state.get_idx(place, map) - && value.try_to_int().is_ok() + && value.try_to_scalar_int().is_ok() { // Do not attempt to propagate pointers, as we may fail to preserve their identity. 
Some(value) @@ -670,7 +670,7 @@ fn try_write_constant<'tcx>( let FlatSet::Elem(Scalar::Int(discr)) = state.get_idx(discr, map) else { throw_machine_stop_str!("discriminant with provenance") }; - let discr_bits = discr.assert_bits(discr.size()); + let discr_bits = discr.to_bits(discr.size()); let Some((variant, _)) = def.discriminants(*ecx.tcx).find(|(_, var)| discr_bits == var.val) else { throw_machine_stop_str!("illegal discriminant for enum") }; diff --git a/compiler/rustc_mir_transform/src/gvn.rs b/compiler/rustc_mir_transform/src/gvn.rs index ebfb372329e..ab7b210b4d0 100644 --- a/compiler/rustc_mir_transform/src/gvn.rs +++ b/compiler/rustc_mir_transform/src/gvn.rs @@ -471,7 +471,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { let slice = self.evaluated[slice].as_ref()?; let usize_layout = self.ecx.layout_of(self.tcx.types.usize).unwrap(); let len = slice.len(&self.ecx).ok()?; - let imm = ImmTy::try_from_uint(len, usize_layout)?; + let imm = ImmTy::from_uint(len, usize_layout); imm.into() } NullaryOp(null_op, ty) => { @@ -492,7 +492,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { NullOp::UbChecks => return None, }; let usize_layout = self.ecx.layout_of(self.tcx.types.usize).unwrap(); - let imm = ImmTy::try_from_uint(val, usize_layout)?; + let imm = ImmTy::from_uint(val, usize_layout); imm.into() } UnaryOp(un_op, operand) => { @@ -1180,7 +1180,7 @@ fn op_to_prop_const<'tcx>( // If this constant has scalar ABI, return it as a `ConstValue::Scalar`. if let Abi::Scalar(abi::Scalar::Initialized { .. }) = op.layout.abi && let Ok(scalar) = ecx.read_scalar(op) - && scalar.try_to_int().is_ok() + && scalar.try_to_scalar_int().is_ok() { return Some(ConstValue::Scalar(scalar)); } diff --git a/compiler/rustc_mir_transform/src/known_panics_lint.rs b/compiler/rustc_mir_transform/src/known_panics_lint.rs index 47bbddbc31d..6a20b46e7f9 100644 --- a/compiler/rustc_mir_transform/src/known_panics_lint.rs +++ b/compiler/rustc_mir_transform/src/known_panics_lint.rs @@ -356,15 +356,12 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> { debug!("check_binary_op: reporting assert for {:?}", location); let panic = AssertKind::Overflow( op, - match l { - Some(l) => l.to_const_int(), - // Invent a dummy value, the diagnostic ignores it anyway - None => ConstInt::new( - ScalarInt::try_from_uint(1_u8, left_size).unwrap(), - left_ty.is_signed(), - left_ty.is_ptr_sized_integral(), - ), - }, + // Invent a dummy value, the diagnostic ignores it anyway + ConstInt::new( + ScalarInt::try_from_uint(1_u8, left_size).unwrap(), + left_ty.is_signed(), + left_ty.is_ptr_sized_integral(), + ), r.to_const_int(), ); self.report_assert_as_lint(location, AssertLintKind::ArithmeticOverflow, panic); @@ -787,8 +784,7 @@ impl<'tcx> Visitor<'tcx> for ConstPropagator<'_, 'tcx> { TerminatorKind::SwitchInt { ref discr, ref targets } => { if let Some(ref value) = self.eval_operand(discr) && let Some(value_const) = self.use_ecx(|this| this.ecx.read_scalar(value)) - && let Ok(constant) = value_const.try_to_int() - && let Ok(constant) = constant.try_to_bits(constant.size()) + && let Ok(constant) = value_const.to_bits(value_const.size()) { // We managed to evaluate the discriminant, so we know we only need to visit // one target. 
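The `known_panics_lint` hunk above collapses the old two-step `try_to_int` + `try_to_bits` into a single fallible `to_bits` call on the `Scalar`: the only failure left is "this is a pointer, not an int", and passing `value_const.size()` makes the size check trivially hold. A toy model of that shape (not the real `Scalar` type):

#[derive(Clone, Copy, Debug)]
enum ToyScalar {
    Int { data: u128, size: u64 },
    Ptr,
}

impl ToyScalar {
    fn size(self) -> u64 {
        match self {
            ToyScalar::Int { size, .. } => size,
            ToyScalar::Ptr => 8,
        }
    }

    // Mirrors the shape of `Scalar::to_bits` after this patch: pointers are an error,
    // while a size mismatch would be a bug rather than an `Err`.
    fn to_bits(self, target_size: u64) -> Result<u128, &'static str> {
        match self {
            ToyScalar::Int { data, size } => {
                assert_eq!(size, target_size, "size mismatch is a compiler bug");
                Ok(data)
            }
            ToyScalar::Ptr => Err("read pointer as int"),
        }
    }
}

fn main() {
    let discr = ToyScalar::Int { data: 3, size: 4 };
    // One call replaces the old try_to_int + try_to_bits pair; passing `discr.size()`
    // makes the size check trivially succeed.
    if let Ok(constant) = discr.to_bits(discr.size()) {
        assert_eq!(constant, 3);
    }
    assert!(ToyScalar::Ptr.to_bits(8).is_err());
}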
diff --git a/compiler/rustc_mir_transform/src/match_branches.rs b/compiler/rustc_mir_transform/src/match_branches.rs index 1411d9be223..6ab4ec6fe7e 100644 --- a/compiler/rustc_mir_transform/src/match_branches.rs +++ b/compiler/rustc_mir_transform/src/match_branches.rs @@ -372,7 +372,7 @@ impl<'tcx> SimplifyMatch<'tcx> for SimplifyToExp { } fn int_equal(l: ScalarInt, r: impl Into, size: Size) -> bool { - l.assert_int(l.size()) == ScalarInt::try_from_uint(r, size).unwrap().assert_int(size) + l.to_bits_unchecked() == ScalarInt::try_from_uint(r, size).unwrap().to_bits_unchecked() } // We first compare the two branches, and then the other branches need to fulfill the same conditions. diff --git a/compiler/rustc_mir_transform/src/promote_consts.rs b/compiler/rustc_mir_transform/src/promote_consts.rs index 7ec59cc983f..ecd1179ca99 100644 --- a/compiler/rustc_mir_transform/src/promote_consts.rs +++ b/compiler/rustc_mir_transform/src/promote_consts.rs @@ -500,14 +500,14 @@ impl<'tcx> Validator<'_, 'tcx> { } _ => None, }; - match rhs_val.map(|x| x.assert_uint(sz)) { + match rhs_val.map(|x| x.to_uint(sz)) { // for the zero test, int vs uint does not matter Some(x) if x != 0 => {} // okay _ => return Err(Unpromotable), // value not known or 0 -- not okay } // Furthermore, for signed divison, we also have to exclude `int::MIN / -1`. if lhs_ty.is_signed() { - match rhs_val.map(|x| x.assert_int(sz)) { + match rhs_val.map(|x| x.to_int(sz)) { Some(-1) | None => { // The RHS is -1 or unknown, so we have to be careful. // But is the LHS int::MIN? @@ -518,7 +518,7 @@ impl<'tcx> Validator<'_, 'tcx> { _ => None, }; let lhs_min = sz.signed_int_min(); - match lhs_val.map(|x| x.assert_int(sz)) { + match lhs_val.map(|x| x.to_int(sz)) { Some(x) if x != lhs_min => {} // okay _ => return Err(Unpromotable), // value not known or int::MIN -- not okay } diff --git a/compiler/rustc_mir_transform/src/simplify_comparison_integral.rs b/compiler/rustc_mir_transform/src/simplify_comparison_integral.rs index 03907babf2b..e174cccdad6 100644 --- a/compiler/rustc_mir_transform/src/simplify_comparison_integral.rs +++ b/compiler/rustc_mir_transform/src/simplify_comparison_integral.rs @@ -49,7 +49,7 @@ impl<'tcx> MirPass<'tcx> for SimplifyComparisonIntegral { let layout = tcx .layout_of(param_env.and(opt.branch_value_ty)) .expect("if we have an evaluated constant we must know the layout"); - int.assert_bits(layout.size) + int.to_bits(layout.size) } Scalar::Ptr(..) 
=> continue, }; diff --git a/compiler/rustc_mir_transform/src/validate.rs b/compiler/rustc_mir_transform/src/validate.rs index 3b4d4c93877..586c1254995 100644 --- a/compiler/rustc_mir_transform/src/validate.rs +++ b/compiler/rustc_mir_transform/src/validate.rs @@ -5,12 +5,12 @@ use rustc_index::bit_set::BitSet; use rustc_index::IndexVec; use rustc_infer::traits::Reveal; use rustc_middle::mir::coverage::CoverageKind; -use rustc_middle::mir::interpret::Scalar; use rustc_middle::mir::visit::{NonUseContext, PlaceContext, Visitor}; use rustc_middle::mir::*; use rustc_middle::ty::adjustment::PointerCoercion; use rustc_middle::ty::{ - self, CoroutineArgsExt, InstanceDef, ParamEnv, Ty, TyCtxt, TypeVisitableExt, Variance, + self, CoroutineArgsExt, InstanceDef, ParamEnv, ScalarInt, Ty, TyCtxt, TypeVisitableExt, + Variance, }; use rustc_middle::{bug, span_bug}; use rustc_target::abi::{Size, FIRST_VARIANT}; @@ -1478,7 +1478,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> { }); for (value, _) in targets.iter() { - if Scalar::<()>::try_from_uint(value, size).is_none() { + if ScalarInt::try_from_uint(value, size).is_none() { self.fail( location, format!("the value {value:#x} is not a proper {switch_ty:?}"), diff --git a/compiler/rustc_pattern_analysis/src/rustc.rs b/compiler/rustc_pattern_analysis/src/rustc.rs index 81c5f355231..8391c694c64 100644 --- a/compiler/rustc_pattern_analysis/src/rustc.rs +++ b/compiler/rustc_pattern_analysis/src/rustc.rs @@ -6,11 +6,12 @@ use rustc_hir::def_id::DefId; use rustc_hir::HirId; use rustc_index::{Idx, IndexVec}; use rustc_middle::middle::stability::EvalResult; -use rustc_middle::mir::interpret::Scalar; use rustc_middle::mir::{self, Const}; use rustc_middle::thir::{self, FieldPat, Pat, PatKind, PatRange, PatRangeBoundary}; use rustc_middle::ty::layout::IntegerExt; -use rustc_middle::ty::{self, FieldDef, OpaqueTypeKey, Ty, TyCtxt, TypeVisitableExt, VariantDef}; +use rustc_middle::ty::{ + self, FieldDef, OpaqueTypeKey, ScalarInt, Ty, TyCtxt, TypeVisitableExt, VariantDef, +}; use rustc_middle::{bug, span_bug}; use rustc_session::lint; use rustc_span::{ErrorGuaranteed, Span, DUMMY_SP}; @@ -701,9 +702,9 @@ impl<'p, 'tcx: 'p> RustcPatCtxt<'p, 'tcx> { ty::Int(_) => miint.as_finite_int(size.bits()).unwrap(), _ => miint.as_finite_uint().unwrap(), }; - match Scalar::try_from_uint(bits, size) { + match ScalarInt::try_from_uint(bits, size) { Some(scalar) => { - let value = mir::Const::from_scalar(tcx, scalar, ty.inner()); + let value = mir::Const::from_scalar(tcx, scalar.into(), ty.inner()); PatRangeBoundary::Finite(value) } // The value doesn't fit. 
Since `x >= 0` and 0 always encodes the minimum value diff --git a/compiler/rustc_transmute/src/layout/tree.rs b/compiler/rustc_transmute/src/layout/tree.rs index 604b68d2cd4..eae1a9dfaa2 100644 --- a/compiler/rustc_transmute/src/layout/tree.rs +++ b/compiler/rustc_transmute/src/layout/tree.rs @@ -420,7 +420,7 @@ pub(crate) mod rustc { fn from_tag(tag: ScalarInt, tcx: TyCtxt<'tcx>) -> Self { use rustc_target::abi::Endian; let size = tag.size(); - let bits = tag.assert_bits(size); + let bits = tag.to_bits(size); let bytes: [u8; 16]; let bytes = match tcx.data_layout.endian { Endian::Little => { diff --git a/compiler/rustc_ty_utils/src/consts.rs b/compiler/rustc_ty_utils/src/consts.rs index 1aec40e95f6..58f812fc7cf 100644 --- a/compiler/rustc_ty_utils/src/consts.rs +++ b/compiler/rustc_ty_utils/src/consts.rs @@ -47,7 +47,7 @@ fn destructure_const<'tcx>( ty::Adt(def, args) => { let (variant_idx, branches) = if def.is_enum() { let (head, rest) = branches.split_first().unwrap(); - (VariantIdx::from_u32(head.unwrap_leaf().try_to_u32().unwrap()), rest) + (VariantIdx::from_u32(head.unwrap_leaf().to_u32()), rest) } else { (FIRST_VARIANT, branches) }; diff --git a/src/librustdoc/clean/utils.rs b/src/librustdoc/clean/utils.rs index 7fc3d4508d7..7c83d438719 100644 --- a/src/librustdoc/clean/utils.rs +++ b/src/librustdoc/clean/utils.rs @@ -431,8 +431,7 @@ fn print_const_with_custom_print_scalar<'tcx>( (mir::Const::Val(mir::ConstValue::Scalar(int), _), ty::Int(i)) => { let ty = ct.ty(); let size = tcx.layout_of(ty::ParamEnv::empty().and(ty)).unwrap().size; - let data = int.assert_bits(size); - let sign_extended_data = size.sign_extend(data) as i128; + let sign_extended_data = int.assert_scalar_int().to_int(size); let mut output = if with_underscores { format_integer_with_underscore_sep(&sign_extended_data.to_string()) } else { diff --git a/src/tools/clippy/clippy_lints/src/large_const_arrays.rs b/src/tools/clippy/clippy_lints/src/large_const_arrays.rs index 77d05020c82..7f8197c0cc0 100644 --- a/src/tools/clippy/clippy_lints/src/large_const_arrays.rs +++ b/src/tools/clippy/clippy_lints/src/large_const_arrays.rs @@ -55,7 +55,7 @@ impl<'tcx> LateLintPass<'tcx> for LargeConstArrays { && let ty = cx.tcx.type_of(item.owner_id).instantiate_identity() && let ty::Array(element_type, cst) = ty.kind() && let ConstKind::Value(_, ty::ValTree::Leaf(element_count)) = cst.kind() - && let Ok(element_count) = element_count.try_to_target_usize(cx.tcx) + && let element_count = element_count.to_target_usize(cx.tcx) && let Ok(element_size) = cx.layout_of(*element_type).map(|l| l.size.bytes()) && self.maximum_allowed_size < u128::from(element_count) * u128::from(element_size) { diff --git a/src/tools/clippy/clippy_lints/src/large_stack_arrays.rs b/src/tools/clippy/clippy_lints/src/large_stack_arrays.rs index f0f3f53647b..c9bfc9c85d9 100644 --- a/src/tools/clippy/clippy_lints/src/large_stack_arrays.rs +++ b/src/tools/clippy/clippy_lints/src/large_stack_arrays.rs @@ -65,7 +65,7 @@ impl<'tcx> LateLintPass<'tcx> for LargeStackArrays { && !self.is_from_vec_macro(cx, expr.span) && let ty::Array(element_type, cst) = cx.typeck_results().expr_ty(expr).kind() && let ConstKind::Value(_, ty::ValTree::Leaf(element_count)) = cst.kind() - && let Ok(element_count) = element_count.try_to_target_usize(cx.tcx) + && let element_count = element_count.to_target_usize(cx.tcx) && let Ok(element_size) = cx.layout_of(*element_type).map(|l| l.size.bytes()) && !cx.tcx.hir().parent_iter(expr.hir_id).any(|(_, node)| { matches!( diff --git 
a/src/tools/clippy/clippy_lints/src/non_copy_const.rs b/src/tools/clippy/clippy_lints/src/non_copy_const.rs index 76d9cee18aa..20a97645af9 100644 --- a/src/tools/clippy/clippy_lints/src/non_copy_const.rs +++ b/src/tools/clippy/clippy_lints/src/non_copy_const.rs @@ -199,7 +199,7 @@ impl<'tcx> NonCopyConst<'tcx> { .any(|field| Self::is_value_unfrozen_raw_inner(cx, *field, ty)), ty::Adt(def, args) if def.is_enum() => { let (&variant_index, fields) = val.unwrap_branch().split_first().unwrap(); - let variant_index = VariantIdx::from_u32(variant_index.unwrap_leaf().try_to_u32().ok().unwrap()); + let variant_index = VariantIdx::from_u32(variant_index.unwrap_leaf().to_u32()); fields .iter() .copied() diff --git a/src/tools/clippy/clippy_lints/src/zero_repeat_side_effects.rs b/src/tools/clippy/clippy_lints/src/zero_repeat_side_effects.rs index 848b49130dc..8796b8f61d1 100644 --- a/src/tools/clippy/clippy_lints/src/zero_repeat_side_effects.rs +++ b/src/tools/clippy/clippy_lints/src/zero_repeat_side_effects.rs @@ -56,8 +56,7 @@ impl LateLintPass<'_> for ZeroRepeatSideEffects { } else if let ExprKind::Repeat(inner_expr, _) = expr.kind && let ty::Array(_, cst) = cx.typeck_results().expr_ty(expr).kind() && let ConstKind::Value(_, ty::ValTree::Leaf(element_count)) = cst.kind() - && let Ok(element_count) = element_count.try_to_target_usize(cx.tcx) - && element_count == 0 + && element_count.to_target_usize(cx.tcx) == 0 { inner_check(cx, expr, inner_expr, false); } diff --git a/src/tools/clippy/clippy_utils/src/consts.rs b/src/tools/clippy/clippy_utils/src/consts.rs index 5c9cad2b45d..e9e1aa7e445 100644 --- a/src/tools/clippy/clippy_utils/src/consts.rs +++ b/src/tools/clippy/clippy_utils/src/consts.rs @@ -810,14 +810,14 @@ pub fn mir_to_const<'tcx>(lcx: &LateContext<'tcx>, result: mir::Const<'tcx>) -> (ConstValue::Scalar(Scalar::Int(int)), _) => match result.ty().kind() { ty::Adt(adt_def, _) if adt_def.is_struct() => Some(Constant::Adt(result)), ty::Bool => Some(Constant::Bool(int == ScalarInt::TRUE)), - ty::Uint(_) | ty::Int(_) => Some(Constant::Int(int.assert_bits(int.size()))), + ty::Uint(_) | ty::Int(_) => Some(Constant::Int(int.to_bits(int.size()))), ty::Float(FloatTy::F32) => Some(Constant::F32(f32::from_bits( int.try_into().expect("invalid f32 bit representation"), ))), ty::Float(FloatTy::F64) => Some(Constant::F64(f64::from_bits( int.try_into().expect("invalid f64 bit representation"), ))), - ty::RawPtr(_, _) => Some(Constant::RawPtr(int.assert_bits(int.size()))), + ty::RawPtr(_, _) => Some(Constant::RawPtr(int.to_bits(int.size()))), _ => None, }, (_, ty::Ref(_, inner_ty, _)) if matches!(inner_ty.kind(), ty::Str) => { diff --git a/src/tools/clippy/clippy_utils/src/ty.rs b/src/tools/clippy/clippy_utils/src/ty.rs index f0dac6f5d9c..6e5626297c9 100644 --- a/src/tools/clippy/clippy_utils/src/ty.rs +++ b/src/tools/clippy/clippy_utils/src/ty.rs @@ -23,7 +23,7 @@ use rustc_middle::ty::{ }; use rustc_span::symbol::Ident; use rustc_span::{sym, Span, Symbol, DUMMY_SP}; -use rustc_target::abi::{Size, VariantIdx}; +use rustc_target::abi::VariantIdx; use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt as _; use rustc_trait_selection::traits::query::normalize::QueryNormalizeExt; use rustc_trait_selection::traits::{Obligation, ObligationCause}; @@ -865,22 +865,8 @@ impl core::ops::Add for EnumValue { pub fn read_explicit_enum_value(tcx: TyCtxt<'_>, id: DefId) -> Option { if let Ok(ConstValue::Scalar(Scalar::Int(value))) = tcx.const_eval_poly(id) { match 
tcx.type_of(id).instantiate_identity().kind() { - ty::Int(_) => Some(EnumValue::Signed(match value.size().bytes() { - 1 => i128::from(value.assert_bits(Size::from_bytes(1)) as u8 as i8), - 2 => i128::from(value.assert_bits(Size::from_bytes(2)) as u16 as i16), - 4 => i128::from(value.assert_bits(Size::from_bytes(4)) as u32 as i32), - 8 => i128::from(value.assert_bits(Size::from_bytes(8)) as u64 as i64), - 16 => value.assert_bits(Size::from_bytes(16)) as i128, - _ => return None, - })), - ty::Uint(_) => Some(EnumValue::Unsigned(match value.size().bytes() { - 1 => value.assert_bits(Size::from_bytes(1)), - 2 => value.assert_bits(Size::from_bytes(2)), - 4 => value.assert_bits(Size::from_bytes(4)), - 8 => value.assert_bits(Size::from_bytes(8)), - 16 => value.assert_bits(Size::from_bytes(16)), - _ => return None, - })), + ty::Int(_) => Some(EnumValue::Signed(value.to_int(value.size()))), + ty::Uint(_) => Some(EnumValue::Unsigned(value.to_uint(value.size()))), _ => None, } } else { diff --git a/src/tools/miri/src/intrinsics/simd.rs b/src/tools/miri/src/intrinsics/simd.rs index 8ba4964ff89..acdfc6ab67d 100644 --- a/src/tools/miri/src/intrinsics/simd.rs +++ b/src/tools/miri/src/intrinsics/simd.rs @@ -600,8 +600,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { for i in 0..dest_len { let src_index: u64 = index[usize::try_from(i).unwrap()] .unwrap_leaf() - .try_to_u32() - .unwrap() + .to_u32() .into(); let dest = this.project_index(&dest, i)?; diff --git a/src/tools/miri/src/shims/unix/socket.rs b/src/tools/miri/src/shims/unix/socket.rs index a0fa3bcee34..93ad7d24294 100644 --- a/src/tools/miri/src/shims/unix/socket.rs +++ b/src/tools/miri/src/shims/unix/socket.rs @@ -51,9 +51,9 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { let fds = &mut this.machine.fds; let sv0 = fds.insert_fd(FileDescriptor::new(SocketPair)); - let sv0 = Scalar::try_from_int(sv0, sv.layout.size).unwrap(); + let sv0 = Scalar::from_int(sv0, sv.layout.size); let sv1 = fds.insert_fd(FileDescriptor::new(SocketPair)); - let sv1 = Scalar::try_from_int(sv1, sv.layout.size).unwrap(); + let sv1 = Scalar::from_int(sv1, sv.layout.size); this.write_scalar(sv0, &sv)?; this.write_scalar(sv1, &sv.offset(sv.layout.size, sv.layout, this)?)?;
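The constructor side follows the same convention as the accessors: `Scalar::from_int` replaces `Scalar::try_from_int(..).unwrap()`, panicking when the value does not fit the target size, which the miri socketpair shim already guarantees. A standalone sketch with a hypothetical `from_int` over plain integers (the real version works on `Size` and `ScalarInt`):

// Toy stand-ins for Size/Scalar; the real types live in the compiler crates.
fn signed_fits(value: i128, size_bits: u32) -> bool {
    assert!((1..128).contains(&size_bits), "sketch only handles 1..=127 bit sizes");
    let min = -(1i128 << (size_bits - 1));
    let max = (1i128 << (size_bits - 1)) - 1;
    (min..=max).contains(&value)
}

// Mirrors the shape of `Scalar::from_int` after this patch: an out-of-range value is a
// bug at the call site, not an error for the caller to unwrap.
fn from_int(value: i128, size_bits: u32) -> u128 {
    assert!(signed_fits(value, size_bits), "Signed value {value:#x} does not fit in {size_bits} bits");
    // Truncate to the target width (two's complement).
    (value as u128) & (u128::MAX >> (128 - size_bits))
}

fn main() {
    // E.g. a file-descriptor number written into an i32-sized scalar, as in the socketpair shim.
    let sv0: i128 = 5;
    assert_eq!(from_int(sv0, 32), 5);
}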