Mirror of https://github.com/rust-lang/rust.git
Auto merge of #77227 - oli-obk:const_val_🌳_prelude, r=RalfJung
Refactorings in preparation for const value trees. cc #72396

This PR changes the `Scalar::Raw { data: u128, size: u8 }` variant into `Scalar::Int(ScalarInt)`, where `ScalarInt` carries the same information but is `repr(packed)`. The packed struct lets the enum layout keep `Scalar` at its original size instead of adding another word of padding. Beyond that, the PR removes all direct inspection of the variant's internal fields, which was frankly scary: those fields have invariants that we need to uphold, and we cannot enforce them without making the fields private.

r? `@ghost`
Commit 75f1db1102
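The `repr(packed)` rationale from the description is easy to check in isolation. The following minimal, self-contained sketch (none of these are rustc's real types; the names are made up for illustration) shows that packing the `u128` + `u8` payload keeps an enum that also carries a pointer-sized variant smaller than the padded layout would be:

```rust
use std::mem::size_of;

// `ScalarInt`-like payload with `repr(packed)`: 16 + 1 bytes, no padding, alignment 1.
#[allow(dead_code)]
#[repr(packed)]
struct PackedInt {
    data: u128,
    size: u8,
}

// The same two fields without packing: the `u128` alignment pads the struct out.
#[allow(dead_code)]
struct UnpackedInt {
    data: u128,
    size: u8,
}

// Stand-ins for the two `Scalar` variants, `Int` and `Ptr`.
#[allow(dead_code)]
enum PackedScalar {
    Int(PackedInt),
    Ptr(usize),
}

#[allow(dead_code)]
enum UnpackedScalar {
    Int(UnpackedInt),
    Ptr(usize),
}

fn main() {
    // The packed payload is exactly 17 bytes and has alignment 1.
    assert_eq!(size_of::<PackedInt>(), 17);
    // The padded payload is larger, and the enum has to grow with it.
    assert!(size_of::<PackedScalar>() < size_of::<UnpackedScalar>());
    println!(
        "payload: {} vs {} bytes, enum: {} vs {} bytes",
        size_of::<PackedInt>(),
        size_of::<UnpackedInt>(),
        size_of::<PackedScalar>(),
        size_of::<UnpackedScalar>()
    );
}
```

On a typical 64-bit target the packed payload has alignment 1, so the enum does not have to round the integer variant up to the `u128` alignment.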
@@ -499,7 +499,7 @@ fn codegen_stmt<'tcx>(
UnOp::Neg => match layout.ty.kind() {
ty::Int(IntTy::I128) => {
// FIXME remove this case once ineg.i128 works
-let zero = CValue::const_val(fx, layout, 0);
+let zero = CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
crate::num::codegen_int_binop(fx, BinOp::Sub, zero, operand)
}
ty::Int(_) => CValue::by_val(fx.bcx.ins().ineg(val), layout),

@@ -585,13 +585,11 @@ fn codegen_stmt<'tcx>(
.discriminant_for_variant(fx.tcx, *index)
.unwrap();
let discr = if discr.ty.is_signed() {
-rustc_middle::mir::interpret::sign_extend(
-discr.val,
-fx.layout_of(discr.ty).size,
-)
+fx.layout_of(discr.ty).size.sign_extend(discr.val)
} else {
discr.val
};
+let discr = discr.into();

let discr = CValue::const_val(fx, fx.layout_of(to_ty), discr);
lval.write_cvalue(fx, discr);

@@ -186,9 +186,8 @@ pub(crate) fn codegen_const_value<'tcx>(
}

match x {
-Scalar::Raw { data, size } => {
-assert_eq!(u64::from(size), layout.size.bytes());
-CValue::const_val(fx, layout, data)
+Scalar::Int(int) => {
+CValue::const_val(fx, layout, int)
}
Scalar::Ptr(ptr) => {
let alloc_kind = fx.tcx.get_global_alloc(ptr.alloc_id);

@@ -30,7 +30,8 @@ pub(crate) fn codegen_set_discriminant<'tcx>(
.ty
.discriminant_for_variant(fx.tcx, variant_index)
.unwrap()
-.val;
+.val
+.into();
let discr = CValue::const_val(fx, ptr.layout(), to);
ptr.write_cvalue(fx, discr);
}
@@ -49,7 +50,7 @@ pub(crate) fn codegen_set_discriminant<'tcx>(
let niche = place.place_field(fx, mir::Field::new(tag_field));
let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
let niche_value = u128::from(niche_value).wrapping_add(niche_start);
-let niche_llval = CValue::const_val(fx, niche.layout(), niche_value);
+let niche_llval = CValue::const_val(fx, niche.layout(), niche_value.into());
niche.write_cvalue(fx, niche_llval);
}
}
@@ -77,7 +78,7 @@ pub(crate) fn codegen_get_discriminant<'tcx>(
.ty
.discriminant_for_variant(fx.tcx, *index)
.map_or(u128::from(index.as_u32()), |discr| discr.val);
-return CValue::const_val(fx, dest_layout, discr_val);
+return CValue::const_val(fx, dest_layout, discr_val.into());
}
Variants::Multiple {
tag,

@@ -1064,7 +1064,8 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(

fx.bcx.ins().call_indirect(f_sig, f, &[data]);

-let ret_val = CValue::const_val(fx, ret.layout(), 0);
+let layout = ret.layout();
+let ret_val = CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
ret.write_cvalue(fx, ret_val);
};

@@ -231,15 +231,16 @@ impl<'tcx> CValue<'tcx> {
pub(crate) fn const_val(
fx: &mut FunctionCx<'_, 'tcx, impl Module>,
layout: TyAndLayout<'tcx>,
-const_val: u128,
+const_val: ty::ScalarInt,
) -> CValue<'tcx> {
+assert_eq!(const_val.size(), layout.size);
use cranelift_codegen::ir::immediates::{Ieee32, Ieee64};

let clif_ty = fx.clif_type(layout.ty).unwrap();

if let ty::Bool = layout.ty.kind() {
assert!(
-const_val == 0 || const_val == 1,
+const_val == ty::ScalarInt::FALSE || const_val == ty::ScalarInt::TRUE,
"Invalid bool 0x{:032X}",
const_val
);
@@ -247,6 +248,7 @@ impl<'tcx> CValue<'tcx> {

let val = match layout.ty.kind() {
ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
+let const_val = const_val.to_bits(layout.size).unwrap();
let lsb = fx.bcx.ins().iconst(types::I64, const_val as u64 as i64);
let msb = fx
.bcx
@@ -259,7 +261,7 @@ impl<'tcx> CValue<'tcx> {
fx
.bcx
.ins()
-.iconst(clif_ty, u64::try_from(const_val).expect("uint") as i64)
+.iconst(clif_ty, const_val.to_bits(layout.size).unwrap() as i64)
}
ty::Float(FloatTy::F32) => {
fx.bcx.ins().f32const(Ieee32::with_bits(u32::try_from(const_val).unwrap()))
@@ -12,7 +12,7 @@ use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::*;
use rustc_middle::bug;
use rustc_middle::mir::interpret::{Allocation, GlobalAlloc, Scalar};
-use rustc_middle::ty::layout::TyAndLayout;
+use rustc_middle::ty::{layout::TyAndLayout, ScalarInt};
use rustc_span::symbol::Symbol;
use rustc_target::abi::{self, AddressSpace, HasDataLayout, LayoutOf, Pointer, Size};

@@ -230,12 +230,12 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
fn scalar_to_backend(&self, cv: Scalar, layout: &abi::Scalar, llty: &'ll Type) -> &'ll Value {
let bitsize = if layout.is_bool() { 1 } else { layout.value.size(self).bits() };
match cv {
-Scalar::Raw { size: 0, .. } => {
+Scalar::Int(ScalarInt::ZST) => {
assert_eq!(0, layout.value.size(self).bytes());
self.const_undef(self.type_ix(0))
}
-Scalar::Raw { data, size } => {
-assert_eq!(size as u64, layout.value.size(self).bytes());
+Scalar::Int(int) => {
+let data = int.assert_bits(layout.value.size(self));
let llval = self.const_uint_big(self.type_ix(bitsize), data);
if layout.value == Pointer {
unsafe { llvm::LLVMConstIntToPtr(llval, llty) }

@@ -29,7 +29,6 @@ use rustc_hir::def::CtorKind;
use rustc_hir::def_id::{DefId, LOCAL_CRATE};
use rustc_index::vec::{Idx, IndexVec};
use rustc_middle::ich::NodeIdHashingMode;
-use rustc_middle::mir::interpret::truncate;
use rustc_middle::mir::{self, Field, GeneratorLayout};
use rustc_middle::ty::layout::{self, IntegerExt, PrimitiveExt, TyAndLayout};
use rustc_middle::ty::subst::GenericArgKind;
@@ -1693,7 +1692,7 @@ impl EnumMemberDescriptionFactory<'ll, 'tcx> {
let value = (i.as_u32() as u128)
.wrapping_sub(niche_variants.start().as_u32() as u128)
.wrapping_add(niche_start);
-let value = truncate(value, tag.value.size(cx));
+let value = tag.value.size(cx).truncate(value);
// NOTE(eddyb) do *NOT* remove this assert, until
// we pass the full 128-bit value to LLVM, otherwise
// truncation will be silent and remain undetected.

@@ -6,7 +6,6 @@ use rustc_errors::Applicability;
use rustc_hir as hir;
use rustc_hir::{is_range_literal, ExprKind, Node};
use rustc_index::vec::Idx;
-use rustc_middle::mir::interpret::{sign_extend, truncate};
use rustc_middle::ty::layout::{IntegerExt, SizeSkeleton};
use rustc_middle::ty::subst::SubstsRef;
use rustc_middle::ty::{self, AdtKind, Ty, TyCtxt, TypeFoldable};
@@ -218,11 +217,11 @@ fn report_bin_hex_error(
cx.struct_span_lint(OVERFLOWING_LITERALS, expr.span, |lint| {
let (t, actually) = match ty {
attr::IntType::SignedInt(t) => {
-let actually = sign_extend(val, size) as i128;
+let actually = size.sign_extend(val) as i128;
(t.name_str(), actually.to_string())
}
attr::IntType::UnsignedInt(t) => {
-let actually = truncate(val, size);
+let actually = size.truncate(val);
(t.name_str(), actually.to_string())
}
};
@@ -24,6 +24,7 @@

#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
#![feature(array_windows)]
+#![feature(assoc_char_funcs)]
#![feature(backtrace)]
#![feature(bool_to_option)]
#![feature(box_patterns)]

@@ -110,7 +110,7 @@ use rustc_hir::def_id::DefId;
use rustc_macros::HashStable;
use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_serialize::{Decodable, Encodable};
-use rustc_target::abi::{Endian, Size};
+use rustc_target::abi::Endian;

use crate::mir;
use crate::ty::codec::{TyDecoder, TyEncoder};
@@ -590,39 +590,6 @@ pub fn read_target_uint(endianness: Endian, mut source: &[u8]) -> Result<u128, i
uint
}

-////////////////////////////////////////////////////////////////////////////////
-// Methods to facilitate working with signed integers stored in a u128
-////////////////////////////////////////////////////////////////////////////////

-/// Truncates `value` to `size` bits and then sign-extend it to 128 bits
-/// (i.e., if it is negative, fill with 1's on the left).
-#[inline]
-pub fn sign_extend(value: u128, size: Size) -> u128 {
-let size = size.bits();
-if size == 0 {
-// Truncated until nothing is left.
-return 0;
-}
-// Sign-extend it.
-let shift = 128 - size;
-// Shift the unsigned value to the left, then shift back to the right as signed
-// (essentially fills with FF on the left).
-(((value << shift) as i128) >> shift) as u128
-}

-/// Truncates `value` to `size` bits.
-#[inline]
-pub fn truncate(value: u128, size: Size) -> u128 {
-let size = size.bits();
-if size == 0 {
-// Truncated until nothing is left.
-return 0;
-}
-let shift = 128 - size;
-// Truncate (shift left to drop out leftover values, shift right to fill with zeroes).
-(value << shift) >> shift
-}

/// Computes the unsigned absolute value without wrapping or panicking.
#[inline]
pub fn uabs(value: i64) -> u64 {
@@ -8,9 +8,9 @@ use rustc_apfloat::{
use rustc_macros::HashStable;
use rustc_target::abi::{HasDataLayout, Size, TargetDataLayout};

-use crate::ty::{ParamEnv, Ty, TyCtxt};
+use crate::ty::{ParamEnv, ScalarInt, Ty, TyCtxt};

-use super::{sign_extend, truncate, AllocId, Allocation, InterpResult, Pointer, PointerArithmetic};
+use super::{AllocId, Allocation, InterpResult, Pointer, PointerArithmetic};

/// Represents the result of const evaluation via the `eval_to_allocation` query.
#[derive(Clone, HashStable, TyEncodable, TyDecodable)]
@@ -103,12 +103,7 @@ impl<'tcx> ConstValue<'tcx> {
#[derive(HashStable)]
pub enum Scalar<Tag = ()> {
/// The raw bytes of a simple value.
-Raw {
-/// The first `size` bytes of `data` are the value.
-/// Do not try to read less or more bytes than that. The remaining bytes must be 0.
-data: u128,
-size: u8,
-},
+Int(ScalarInt),

/// A pointer into an `Allocation`. An `Allocation` in the `memory` module has a list of
/// relocations, but a `Scalar` is only large enough to contain one, so we just represent the
@@ -125,16 +120,7 @@ impl<Tag: fmt::Debug> fmt::Debug for Scalar<Tag> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Scalar::Ptr(ptr) => write!(f, "{:?}", ptr),
-&Scalar::Raw { data, size } => {
-Scalar::check_data(data, size);
-if size == 0 {
-write!(f, "<ZST>")
-} else {
-// Format as hex number wide enough to fit any value of the given `size`.
-// So data=20, size=1 will be "0x14", but with size=4 it'll be "0x00000014".
-write!(f, "0x{:>0width$x}", data, width = (size * 2) as usize)
-}
-}
+Scalar::Int(int) => write!(f, "{:?}", int),
}
}
}
@@ -143,7 +129,7 @@ impl<Tag: fmt::Debug> fmt::Display for Scalar<Tag> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Scalar::Ptr(ptr) => write!(f, "pointer to {}", ptr),
-Scalar::Raw { .. } => fmt::Debug::fmt(self, f),
+Scalar::Int { .. } => fmt::Debug::fmt(self, f),
}
}
}
@@ -163,21 +149,6 @@ impl<Tag> From<Double> for Scalar<Tag> {
}

impl Scalar<()> {
-/// Make sure the `data` fits in `size`.
-/// This is guaranteed by all constructors here, but since the enum variants are public,
-/// it could still be violated (even though no code outside this file should
-/// construct `Scalar`s).
-#[inline(always)]
-fn check_data(data: u128, size: u8) {
-debug_assert_eq!(
-truncate(data, Size::from_bytes(u64::from(size))),
-data,
-"Scalar value {:#x} exceeds size of {} bytes",
-data,
-size
-);
-}

/// Tag this scalar with `new_tag` if it is a pointer, leave it unchanged otherwise.
///
/// Used by `MemPlace::replace_tag`.
@@ -185,12 +156,14 @@ impl Scalar<()> {
pub fn with_tag<Tag>(self, new_tag: Tag) -> Scalar<Tag> {
match self {
Scalar::Ptr(ptr) => Scalar::Ptr(ptr.with_tag(new_tag)),
-Scalar::Raw { data, size } => Scalar::Raw { data, size },
+Scalar::Int(int) => Scalar::Int(int),
}
}
}

impl<'tcx, Tag> Scalar<Tag> {
+pub const ZST: Self = Scalar::Int(ScalarInt::ZST);

/// Erase the tag from the scalar, if any.
///
/// Used by error reporting code to avoid having the error type depend on `Tag`.
@@ -198,18 +171,13 @@ impl<'tcx, Tag> Scalar<Tag> {
pub fn erase_tag(self) -> Scalar {
match self {
Scalar::Ptr(ptr) => Scalar::Ptr(ptr.erase_tag()),
-Scalar::Raw { data, size } => Scalar::Raw { data, size },
+Scalar::Int(int) => Scalar::Int(int),
}
}

#[inline]
pub fn null_ptr(cx: &impl HasDataLayout) -> Self {
-Scalar::Raw { data: 0, size: cx.data_layout().pointer_size.bytes() as u8 }
-}

-#[inline]
-pub fn zst() -> Self {
-Scalar::Raw { data: 0, size: 0 }
+Scalar::Int(ScalarInt::null(cx.data_layout().pointer_size))
}

#[inline(always)]
@@ -220,10 +188,7 @@ impl<'tcx, Tag> Scalar<Tag> {
f_ptr: impl FnOnce(Pointer<Tag>) -> InterpResult<'tcx, Pointer<Tag>>,
) -> InterpResult<'tcx, Self> {
match self {
-Scalar::Raw { data, size } => {
-assert_eq!(u64::from(size), dl.pointer_size.bytes());
-Ok(Scalar::Raw { data: u128::from(f_int(u64::try_from(data).unwrap())?), size })
-}
+Scalar::Int(int) => Ok(Scalar::Int(int.ptr_sized_op(dl, f_int)?)),
Scalar::Ptr(ptr) => Ok(Scalar::Ptr(f_ptr(ptr)?)),
}
}
@@ -264,24 +229,17 @@ impl<'tcx, Tag> Scalar<Tag> {

#[inline]
pub fn from_bool(b: bool) -> Self {
-// Guaranteed to be truncated and does not need sign extension.
-Scalar::Raw { data: b as u128, size: 1 }
+Scalar::Int(b.into())
}

#[inline]
pub fn from_char(c: char) -> Self {
-// Guaranteed to be truncated and does not need sign extension.
-Scalar::Raw { data: c as u128, size: 4 }
+Scalar::Int(c.into())
}

#[inline]
pub fn try_from_uint(i: impl Into<u128>, size: Size) -> Option<Self> {
-let i = i.into();
-if truncate(i, size) == i {
-Some(Scalar::Raw { data: i, size: size.bytes() as u8 })
-} else {
-None
-}
+ScalarInt::try_from_uint(i, size).map(Scalar::Int)
}

#[inline]
@@ -293,26 +251,22 @@ impl<'tcx, Tag> Scalar<Tag> {

#[inline]
pub fn from_u8(i: u8) -> Self {
-// Guaranteed to be truncated and does not need sign extension.
-Scalar::Raw { data: i.into(), size: 1 }
+Scalar::Int(i.into())
}

#[inline]
pub fn from_u16(i: u16) -> Self {
-// Guaranteed to be truncated and does not need sign extension.
-Scalar::Raw { data: i.into(), size: 2 }
+Scalar::Int(i.into())
}

#[inline]
pub fn from_u32(i: u32) -> Self {
-// Guaranteed to be truncated and does not need sign extension.
-Scalar::Raw { data: i.into(), size: 4 }
+Scalar::Int(i.into())
}

#[inline]
pub fn from_u64(i: u64) -> Self {
-// Guaranteed to be truncated and does not need sign extension.
-Scalar::Raw { data: i.into(), size: 8 }
+Scalar::Int(i.into())
}

#[inline]
@@ -322,14 +276,7 @@ impl<'tcx, Tag> Scalar<Tag> {

#[inline]
pub fn try_from_int(i: impl Into<i128>, size: Size) -> Option<Self> {
-let i = i.into();
-// `into` performed sign extension, we have to truncate
-let truncated = truncate(i as u128, size);
-if sign_extend(truncated, size) as i128 == i {
-Some(Scalar::Raw { data: truncated, size: size.bytes() as u8 })
-} else {
-None
-}
+ScalarInt::try_from_int(i, size).map(Scalar::Int)
}

#[inline]
@@ -366,14 +313,12 @@ impl<'tcx, Tag> Scalar<Tag> {

#[inline]
pub fn from_f32(f: Single) -> Self {
-// We trust apfloat to give us properly truncated data.
-Scalar::Raw { data: f.to_bits(), size: 4 }
+Scalar::Int(f.into())
}

#[inline]
pub fn from_f64(f: Double) -> Self {
-// We trust apfloat to give us properly truncated data.
-Scalar::Raw { data: f.to_bits(), size: 8 }
+Scalar::Int(f.into())
}

/// This is very rarely the method you want! You should dispatch on the type
@@ -388,11 +333,7 @@ impl<'tcx, Tag> Scalar<Tag> {
) -> Result<u128, Pointer<Tag>> {
assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST");
match self {
-Scalar::Raw { data, size } => {
-assert_eq!(target_size.bytes(), u64::from(size));
-Scalar::check_data(data, size);
-Ok(data)
-}
+Scalar::Int(int) => Ok(int.assert_bits(target_size)),
Scalar::Ptr(ptr) => {
assert_eq!(target_size, cx.data_layout().pointer_size);
Err(ptr)
@@ -406,16 +347,13 @@ impl<'tcx, Tag> Scalar<Tag> {
fn to_bits(self, target_size: Size) -> InterpResult<'tcx, u128> {
assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST");
match self {
-Scalar::Raw { data, size } => {
-if target_size.bytes() != u64::from(size) {
-throw_ub!(ScalarSizeMismatch {
-target_size: target_size.bytes(),
-data_size: u64::from(size),
-});
-}
-Scalar::check_data(data, size);
-Ok(data)
-}
+Scalar::Int(int) => int.to_bits(target_size).map_err(|size| {
+err_ub!(ScalarSizeMismatch {
+target_size: target_size.bytes(),
+data_size: size.bytes(),
+})
+.into()
+}),
Scalar::Ptr(_) => throw_unsup!(ReadPointerAsBytes),
}
}
@@ -425,18 +363,26 @@ impl<'tcx, Tag> Scalar<Tag> {
self.to_bits(target_size).expect("expected Raw bits but got a Pointer")
}

+#[inline]
+pub fn assert_int(self) -> ScalarInt {
+match self {
+Scalar::Ptr(_) => bug!("expected an int but got an abstract pointer"),
+Scalar::Int(int) => int,
+}
+}

#[inline]
pub fn assert_ptr(self) -> Pointer<Tag> {
match self {
Scalar::Ptr(p) => p,
-Scalar::Raw { .. } => bug!("expected a Pointer but got Raw bits"),
+Scalar::Int { .. } => bug!("expected a Pointer but got Raw bits"),
}
}

/// Do not call this method! Dispatch based on the type instead.
#[inline]
pub fn is_bits(self) -> bool {
-matches!(self, Scalar::Raw { .. })
+matches!(self, Scalar::Int { .. })
}

/// Do not call this method! Dispatch based on the type instead.
@@ -502,7 +448,7 @@ impl<'tcx, Tag> Scalar<Tag> {
fn to_signed_with_bit_width(self, bits: u64) -> InterpResult<'static, i128> {
let sz = Size::from_bits(bits);
let b = self.to_bits(sz)?;
-Ok(sign_extend(b, sz) as i128)
+Ok(sz.sign_extend(b) as i128)
}

/// Converts the scalar to produce an `i8`. Fails if the scalar is a pointer.
@@ -533,7 +479,7 @@ impl<'tcx, Tag> Scalar<Tag> {
pub fn to_machine_isize(self, cx: &impl HasDataLayout) -> InterpResult<'static, i64> {
let sz = cx.data_layout().pointer_size;
let b = self.to_bits(sz)?;
-let b = sign_extend(b, sz) as i128;
+let b = sz.sign_extend(b) as i128;
Ok(i64::try_from(b).unwrap())
}
@@ -28,7 +28,6 @@ use rustc_index::vec::{Idx, IndexVec};
use rustc_serialize::{Decodable, Encodable};
use rustc_span::symbol::Symbol;
use rustc_span::{Span, DUMMY_SP};
-use rustc_target::abi;
use rustc_target::asm::InlineAsmRegOrRegClass;
use std::borrow::Cow;
use std::fmt::{self, Debug, Display, Formatter, Write};
@@ -1952,10 +1951,10 @@ impl<'tcx> Operand<'tcx> {
.layout_of(param_env_and_ty)
.unwrap_or_else(|e| panic!("could not compute layout for {:?}: {:?}", ty, e))
.size;
-let scalar_size = abi::Size::from_bytes(match val {
-Scalar::Raw { size, .. } => size,
+let scalar_size = match val {
+Scalar::Int(int) => int.size(),
_ => panic!("Invalid scalar type {:?}", val),
-});
+};
scalar_size == type_size
});
Operand::Constant(box Constant {

@@ -132,7 +132,7 @@ impl<'tcx> Const<'tcx> {
#[inline]
/// Creates an interned zst constant.
pub fn zero_sized(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> &'tcx Self {
-Self::from_scalar(tcx, Scalar::zst(), ty)
+Self::from_scalar(tcx, Scalar::ZST, ty)
}

#[inline]
@@ -1,31 +1,32 @@
-use crate::mir::interpret::truncate;
-use rustc_target::abi::Size;
+use rustc_apfloat::ieee::{Double, Single};
+use rustc_apfloat::Float;
+use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
+use rustc_target::abi::{Size, TargetDataLayout};
+use std::convert::{TryFrom, TryInto};
+use std::fmt;

#[derive(Copy, Clone)]
/// A type for representing any integer. Only used for printing.
// FIXME: Use this for the integer-tree representation needed for type level ints and
// const generics?
pub struct ConstInt {
-/// Number of bytes of the integer. Only 1, 2, 4, 8, 16 are legal values.
-size: u8,
+/// The "untyped" variant of `ConstInt`.
+int: ScalarInt,
/// Whether the value is of a signed integer type.
signed: bool,
/// Whether the value is a `usize` or `isize` type.
is_ptr_sized_integral: bool,
-/// Raw memory of the integer. All bytes beyond the `size` are unused and must be zero.
-raw: u128,
}

impl ConstInt {
-pub fn new(raw: u128, size: Size, signed: bool, is_ptr_sized_integral: bool) -> Self {
-assert!(raw <= truncate(u128::MAX, size));
-Self { raw, size: size.bytes() as u8, signed, is_ptr_sized_integral }
+pub fn new(int: ScalarInt, signed: bool, is_ptr_sized_integral: bool) -> Self {
+Self { int, signed, is_ptr_sized_integral }
}
}

impl std::fmt::Debug for ConstInt {
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-let Self { size, signed, raw, is_ptr_sized_integral } = *self;
+let Self { int, signed, is_ptr_sized_integral } = *self;
+let size = int.size().bytes();
+let raw = int.data;
if signed {
let bit_size = size * 8;
let min = 1u128 << (bit_size - 1);
@@ -73,7 +74,7 @@ impl std::fmt::Debug for ConstInt {
Ok(())
}
} else {
-let max = truncate(u128::MAX, Size::from_bytes(size));
+let max = Size::from_bytes(size).truncate(u128::MAX);
if raw == max {
match (size, is_ptr_sized_integral) {
(_, true) => write!(fmt, "usize::MAX"),
@@ -109,3 +110,257 @@ impl std::fmt::Debug for ConstInt {
}
}
}

+/// The raw bytes of a simple value.
+///
+/// This is a packed struct in order to allow this type to be optimally embedded in enums
+/// (like Scalar).
+#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash)]
+#[repr(packed)]
+pub struct ScalarInt {
+/// The first `size` bytes of `data` are the value.
+/// Do not try to read less or more bytes than that. The remaining bytes must be 0.
+data: u128,
+size: u8,
+}

+// Cannot derive these, as the derives take references to the fields, and we
+// can't take references to fields of packed structs.
+impl<CTX> crate::ty::HashStable<CTX> for ScalarInt {
+fn hash_stable(&self, hcx: &mut CTX, hasher: &mut crate::ty::StableHasher) {
+// Using a block `{self.data}` here to force a copy instead of using `self.data`
+// directly, because `hash_stable` takes `&self` and would thus borrow `self.data`.
+// Since `Self` is a packed struct, that would create a possibly unaligned reference,
+// which is UB.
+{ self.data }.hash_stable(hcx, hasher);
+self.size.hash_stable(hcx, hasher);
+}
+}

+impl<S: Encoder> Encodable<S> for ScalarInt {
+fn encode(&self, s: &mut S) -> Result<(), S::Error> {
+s.emit_u128(self.data)?;
+s.emit_u8(self.size)
+}
+}

+impl<D: Decoder> Decodable<D> for ScalarInt {
+fn decode(d: &mut D) -> Result<ScalarInt, D::Error> {
+Ok(ScalarInt { data: d.read_u128()?, size: d.read_u8()? })
+}
+}

+impl ScalarInt {
+pub const TRUE: ScalarInt = ScalarInt { data: 1_u128, size: 1 };

+pub const FALSE: ScalarInt = ScalarInt { data: 0_u128, size: 1 };

+pub const ZST: ScalarInt = ScalarInt { data: 0_u128, size: 0 };

+#[inline]
+pub fn size(self) -> Size {
+Size::from_bytes(self.size)
+}

+/// Make sure the `data` fits in `size`.
+/// This is guaranteed by all constructors here, but having had this check saved us from
+/// bugs many times in the past, so keeping it around is definitely worth it.
+#[inline(always)]
+fn check_data(self) {
+// Using a block `{self.data}` here to force a copy instead of using `self.data`
+// directly, because `assert_eq` takes references to its arguments and formatting
+// arguments and would thus borrow `self.data`. Since `Self`
+// is a packed struct, that would create a possibly unaligned reference, which
+// is UB.
+debug_assert_eq!(
+self.size().truncate(self.data),
+{ self.data },
+"Scalar value {:#x} exceeds size of {} bytes",
+{ self.data },
+self.size
+);
+}

+#[inline]
+pub fn null(size: Size) -> Self {
+Self { data: 0, size: size.bytes() as u8 }
+}

+#[inline]
+pub fn is_null(self) -> bool {
+self.data == 0
+}

+pub(crate) fn ptr_sized_op<E>(
+self,
+dl: &TargetDataLayout,
+f_int: impl FnOnce(u64) -> Result<u64, E>,
+) -> Result<Self, E> {
+assert_eq!(u64::from(self.size), dl.pointer_size.bytes());
+Ok(Self::try_from_uint(f_int(u64::try_from(self.data).unwrap())?, self.size()).unwrap())
+}

+#[inline]
+pub fn try_from_uint(i: impl Into<u128>, size: Size) -> Option<Self> {
+let data = i.into();
+if size.truncate(data) == data {
+Some(Self { data, size: size.bytes() as u8 })
+} else {
+None
+}
+}

+#[inline]
+pub fn try_from_int(i: impl Into<i128>, size: Size) -> Option<Self> {
+let i = i.into();
+// `into` performed sign extension, we have to truncate
+let truncated = size.truncate(i as u128);
+if size.sign_extend(truncated) as i128 == i {
+Some(Self { data: truncated, size: size.bytes() as u8 })
+} else {
+None
+}
+}

+#[inline]
+pub fn assert_bits(self, target_size: Size) -> u128 {
+self.to_bits(target_size).unwrap_or_else(|size| {
+bug!("expected int of size {}, but got size {}", target_size.bytes(), size.bytes())
+})
+}

+#[inline]
+pub fn to_bits(self, target_size: Size) -> Result<u128, Size> {
+assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST");
+if target_size.bytes() == u64::from(self.size) {
+self.check_data();
+Ok(self.data)
+} else {
+Err(self.size())
+}
+}
+}

+macro_rules! from {
+($($ty:ty),*) => {
+$(
+impl From<$ty> for ScalarInt {
+#[inline]
+fn from(u: $ty) -> Self {
+Self {
+data: u128::from(u),
+size: std::mem::size_of::<$ty>() as u8,
+}
+}
+}
+)*
+}
+}

+macro_rules! try_from {
+($($ty:ty),*) => {
+$(
+impl TryFrom<ScalarInt> for $ty {
+type Error = Size;
+#[inline]
+fn try_from(int: ScalarInt) -> Result<Self, Size> {
+// The `unwrap` cannot fail because to_bits (if it succeeds)
+// is guaranteed to return a value that fits into the size.
+int.to_bits(Size::from_bytes(std::mem::size_of::<$ty>()))
+.map(|u| u.try_into().unwrap())
+}
+}
+)*
+}
+}

+from!(u8, u16, u32, u64, u128, bool);
+try_from!(u8, u16, u32, u64, u128);

+impl From<char> for ScalarInt {
+#[inline]
+fn from(c: char) -> Self {
+Self { data: c as u128, size: std::mem::size_of::<char>() as u8 }
+}
+}

+impl TryFrom<ScalarInt> for char {
+type Error = Size;
+#[inline]
+fn try_from(int: ScalarInt) -> Result<Self, Size> {
+int.to_bits(Size::from_bytes(std::mem::size_of::<char>()))
+.map(|u| char::from_u32(u.try_into().unwrap()).unwrap())
+}
+}

+impl From<Single> for ScalarInt {
+#[inline]
+fn from(f: Single) -> Self {
+// We trust apfloat to give us properly truncated data.
+Self { data: f.to_bits(), size: 4 }
+}
+}

+impl TryFrom<ScalarInt> for Single {
+type Error = Size;
+#[inline]
+fn try_from(int: ScalarInt) -> Result<Self, Size> {
+int.to_bits(Size::from_bytes(4)).map(Self::from_bits)
+}
+}

+impl From<Double> for ScalarInt {
+#[inline]
+fn from(f: Double) -> Self {
+// We trust apfloat to give us properly truncated data.
+Self { data: f.to_bits(), size: 8 }
+}
+}

+impl TryFrom<ScalarInt> for Double {
+type Error = Size;
+#[inline]
+fn try_from(int: ScalarInt) -> Result<Self, Size> {
+int.to_bits(Size::from_bytes(8)).map(Self::from_bits)
+}
+}

+impl fmt::Debug for ScalarInt {
+fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+if self.size == 0 {
+self.check_data();
+write!(f, "<ZST>")
+} else {
+// Dispatch to LowerHex below.
+write!(f, "0x{:x}", self)
+}
+}
+}

+impl fmt::LowerHex for ScalarInt {
+fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+self.check_data();
+// Format as hex number wide enough to fit any value of the given `size`.
+// So data=20, size=1 will be "0x14", but with size=4 it'll be "0x00000014".
+// Using a block `{self.data}` here to force a copy instead of using `self.data`
+// directly, because `write!` takes references to its formatting arguments and
+// would thus borrow `self.data`. Since `Self`
+// is a packed struct, that would create a possibly unaligned reference, which
+// is UB.
+write!(f, "{:01$x}", { self.data }, self.size as usize * 2)
+}
+}

+impl fmt::UpperHex for ScalarInt {
+fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+self.check_data();
+// Format as hex number wide enough to fit any value of the given `size`.
+// So data=20, size=1 will be "0x14", but with size=4 it'll be "0x00000014".
+// Using a block `{self.data}` here to force a copy instead of using `self.data`
+// directly, because `write!` takes references to its formatting arguments and
+// would thus borrow `self.data`. Since `Self`
+// is a packed struct, that would create a possibly unaligned reference, which
+// is UB.
+write!(f, "{:01$X}", { self.data }, self.size as usize * 2)
+}
+}
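To make the `ScalarInt` contract concrete ("the remaining bytes must be 0", and reads must use exactly the stored size), here is a minimal, self-contained sketch of the same idea using plain integers instead of rustc's `Size` type. It is not the real implementation above, just the invariant it enforces:

```rust
// Minimal model of the ScalarInt contract: `data` must already fit in `size`
// bytes, and it can only be read back at exactly that size. This mirrors
// `try_from_uint` and `to_bits` above, minus `repr(packed)` and rustc's `Size`.
#[derive(Clone, Copy)]
struct MiniScalarInt {
    data: u128,
    size: u8, // in bytes, at most 16
}

impl MiniScalarInt {
    fn truncate(value: u128, size_bytes: u8) -> u128 {
        let bits = u32::from(size_bytes) * 8;
        if bits == 0 {
            return 0;
        }
        let shift = 128 - bits;
        (value << shift) >> shift
    }

    // Only store values whose upper bytes are already zero.
    fn try_from_uint(value: u128, size_bytes: u8) -> Option<Self> {
        if Self::truncate(value, size_bytes) == value {
            Some(Self { data: value, size: size_bytes })
        } else {
            None
        }
    }

    // Reading at a different size is an error, mirroring ScalarSizeMismatch.
    fn to_bits(self, target_size_bytes: u8) -> Result<u128, u8> {
        if target_size_bytes == self.size { Ok(self.data) } else { Err(self.size) }
    }
}

fn main() {
    let x = MiniScalarInt::try_from_uint(0x14, 1).unwrap();
    assert_eq!(x.to_bits(1), Ok(0x14));
    assert_eq!(x.to_bits(4), Err(1)); // wrong size: rejected, not reinterpreted
    assert!(MiniScalarInt::try_from_uint(0x100, 1).is_none()); // does not fit in one byte
    println!("invariant sketch ok");
}
```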
@@ -844,7 +844,7 @@ impl<'tcx> CommonConsts<'tcx> {

CommonConsts {
unit: mk_const(ty::Const {
-val: ty::ConstKind::Value(ConstValue::Scalar(Scalar::zst())),
+val: ty::ConstKind::Value(ConstValue::Scalar(Scalar::ZST)),
ty: types.unit,
}),
}

@@ -87,7 +87,7 @@ pub use self::trait_def::TraitDef;

pub use self::query::queries;

-pub use self::consts::{Const, ConstInt, ConstKind, InferConst};
+pub use self::consts::{Const, ConstInt, ConstKind, InferConst, ScalarInt};

pub mod _match;
pub mod adjustment;

@@ -1,12 +1,9 @@
use crate::middle::cstore::{ExternCrate, ExternCrateSource};
use crate::mir::interpret::{AllocId, ConstValue, GlobalAlloc, Pointer, Scalar};
-use crate::ty::layout::IntegerExt;
use crate::ty::subst::{GenericArg, GenericArgKind, Subst};
-use crate::ty::{self, ConstInt, DefIdTree, ParamConst, Ty, TyCtxt, TypeFoldable};
+use crate::ty::{self, ConstInt, DefIdTree, ParamConst, ScalarInt, Ty, TyCtxt, TypeFoldable};
use rustc_apfloat::ieee::{Double, Single};
use rustc_apfloat::Float;
use rustc_ast as ast;
-use rustc_attr::{SignedInt, UnsignedInt};
use rustc_data_structures::fx::FxHashMap;
use rustc_hir as hir;
use rustc_hir::def::{self, CtorKind, DefKind, Namespace};
@@ -15,12 +12,13 @@ use rustc_hir::definitions::{DefPathData, DefPathDataName, DisambiguatedDefPathD
use rustc_hir::ItemKind;
use rustc_session::config::TrimmedDefPaths;
use rustc_span::symbol::{kw, Ident, Symbol};
-use rustc_target::abi::{Integer, Size};
+use rustc_target::abi::Size;
use rustc_target::spec::abi::Abi;

use std::cell::Cell;
use std::char;
use std::collections::BTreeMap;
+use std::convert::TryFrom;
use std::fmt::{self, Write as _};
use std::ops::{ControlFlow, Deref, DerefMut};

@@ -960,11 +958,7 @@ pub trait PrettyPrinter<'tcx>:
ty::Array(
ty::TyS { kind: ty::Uint(ast::UintTy::U8), .. },
ty::Const {
-val:
-ty::ConstKind::Value(ConstValue::Scalar(Scalar::Raw {
-data,
-..
-})),
+val: ty::ConstKind::Value(ConstValue::Scalar(int)),
..
},
),
@@ -974,8 +968,9 @@ pub trait PrettyPrinter<'tcx>:
),
) => match self.tcx().get_global_alloc(ptr.alloc_id) {
Some(GlobalAlloc::Memory(alloc)) => {
-if let Ok(byte_str) = alloc.get_bytes(&self.tcx(), ptr, Size::from_bytes(*data))
-{
+let bytes = int.assert_bits(self.tcx().data_layout.pointer_size);
+let size = Size::from_bytes(bytes);
+if let Ok(byte_str) = alloc.get_bytes(&self.tcx(), ptr, size) {
p!(pretty_print_byte_str(byte_str))
} else {
p!("<too short allocation>")
@@ -987,32 +982,28 @@ pub trait PrettyPrinter<'tcx>:
None => p!("<dangling pointer>"),
},
// Bool
-(Scalar::Raw { data: 0, .. }, ty::Bool) => p!("false"),
-(Scalar::Raw { data: 1, .. }, ty::Bool) => p!("true"),
+(Scalar::Int(int), ty::Bool) if int == ScalarInt::FALSE => p!("false"),
+(Scalar::Int(int), ty::Bool) if int == ScalarInt::TRUE => p!("true"),
// Float
-(Scalar::Raw { data, .. }, ty::Float(ast::FloatTy::F32)) => {
-p!(write("{}f32", Single::from_bits(data)))
+(Scalar::Int(int), ty::Float(ast::FloatTy::F32)) => {
+p!(write("{}f32", Single::try_from(int).unwrap()))
}
-(Scalar::Raw { data, .. }, ty::Float(ast::FloatTy::F64)) => {
-p!(write("{}f64", Double::from_bits(data)))
+(Scalar::Int(int), ty::Float(ast::FloatTy::F64)) => {
+p!(write("{}f64", Double::try_from(int).unwrap()))
}
// Int
-(Scalar::Raw { data, .. }, ty::Uint(ui)) => {
-let size = Integer::from_attr(&self.tcx(), UnsignedInt(*ui)).size();
-let int = ConstInt::new(data, size, false, ty.is_ptr_sized_integral());
-if print_ty { p!(write("{:#?}", int)) } else { p!(write("{:?}", int)) }
-}
-(Scalar::Raw { data, .. }, ty::Int(i)) => {
-let size = Integer::from_attr(&self.tcx(), SignedInt(*i)).size();
-let int = ConstInt::new(data, size, true, ty.is_ptr_sized_integral());
+(Scalar::Int(int), ty::Uint(_) | ty::Int(_)) => {
+let int =
+ConstInt::new(int, matches!(ty.kind(), ty::Int(_)), ty.is_ptr_sized_integral());
if print_ty { p!(write("{:#?}", int)) } else { p!(write("{:?}", int)) }
}
// Char
-(Scalar::Raw { data, .. }, ty::Char) if char::from_u32(data as u32).is_some() => {
-p!(write("{:?}", char::from_u32(data as u32).unwrap()))
+(Scalar::Int(int), ty::Char) if char::try_from(int).is_ok() => {
+p!(write("{:?}", char::try_from(int).unwrap()))
}
// Raw pointers
-(Scalar::Raw { data, .. }, ty::RawPtr(_)) => {
+(Scalar::Int(int), ty::RawPtr(_)) => {
+let data = int.assert_bits(self.tcx().data_layout.pointer_size);
self = self.typed_value(
|mut this| {
write!(this, "0x{:x}", data)?;
@@ -1034,14 +1025,16 @@ pub trait PrettyPrinter<'tcx>:
)?;
}
// For function type zsts just printing the path is enough
-(Scalar::Raw { size: 0, .. }, ty::FnDef(d, s)) => p!(print_value_path(*d, s)),
+(Scalar::Int(int), ty::FnDef(d, s)) if int == ScalarInt::ZST => {
+p!(print_value_path(*d, s))
+}
// Nontrivial types with scalar bit representation
-(Scalar::Raw { data, size }, _) => {
+(Scalar::Int(int), _) => {
let print = |mut this: Self| {
-if size == 0 {
+if int.size() == Size::ZERO {
write!(this, "transmute(())")?;
} else {
-write!(this, "transmute(0x{:01$x})", data, size as usize * 2)?;
+write!(this, "transmute(0x{:x})", int)?;
}
Ok(this)
};
@@ -2,7 +2,6 @@

use crate::ich::NodeIdHashingMode;
use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
-use crate::mir::interpret::{sign_extend, truncate};
use crate::ty::fold::TypeFolder;
use crate::ty::layout::IntegerExt;
use crate::ty::query::TyCtxtAt;
@@ -38,7 +37,7 @@ impl<'tcx> fmt::Display for Discr<'tcx> {
let size = ty::tls::with(|tcx| Integer::from_attr(&tcx, SignedInt(ity)).size());
let x = self.val;
// sign extend the raw representation to be an i128
-let x = sign_extend(x, size) as i128;
+let x = size.sign_extend(x) as i128;
write!(fmt, "{}", x)
}
_ => write!(fmt, "{}", self.val),
@@ -47,7 +46,7 @@ impl<'tcx> fmt::Display for Discr<'tcx> {
}

fn signed_min(size: Size) -> i128 {
-sign_extend(1_u128 << (size.bits() - 1), size) as i128
+size.sign_extend(1_u128 << (size.bits() - 1)) as i128
}

fn signed_max(size: Size) -> i128 {
@@ -77,14 +76,14 @@ impl<'tcx> Discr<'tcx> {
let (val, oflo) = if signed {
let min = signed_min(size);
let max = signed_max(size);
-let val = sign_extend(self.val, size) as i128;
+let val = size.sign_extend(self.val) as i128;
assert!(n < (i128::MAX as u128));
let n = n as i128;
let oflo = val > max - n;
let val = if oflo { min + (n - (max - val) - 1) } else { val + n };
// zero the upper bits
let val = val as u128;
-let val = truncate(val, size);
+let val = size.truncate(val);
(val, oflo)
} else {
let max = unsigned_max(size);
@@ -650,7 +649,7 @@ impl<'tcx> ty::TyS<'tcx> {
let val = match self.kind() {
ty::Int(_) | ty::Uint(_) => {
let (size, signed) = int_size_and_signed(tcx, self);
-let val = if signed { truncate(signed_min(size) as u128, size) } else { 0 };
+let val = if signed { size.truncate(signed_min(size) as u128) } else { 0 };
Some(val)
}
ty::Char => Some(0),

@@ -14,7 +14,7 @@ use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_middle::ty::{self, subst::Subst, TyCtxt};
use rustc_span::source_map::Span;
use rustc_target::abi::{Abi, LayoutOf};
-use std::convert::{TryFrom, TryInto};
+use std::convert::TryInto;

pub fn note_on_undefined_behavior_error() -> &'static str {
"The rules on what exactly is undefined behavior aren't clear, \
@@ -137,15 +137,16 @@ pub(super) fn op_to_const<'tcx>(
let alloc = ecx.tcx.global_alloc(ptr.alloc_id).unwrap_memory();
ConstValue::ByRef { alloc, offset: ptr.offset }
}
-Scalar::Raw { data, .. } => {
+Scalar::Int(int) => {
assert!(mplace.layout.is_zst());
assert_eq!(
-u64::try_from(data).unwrap() % mplace.layout.align.abi.bytes(),
+int.assert_bits(ecx.tcx.data_layout.pointer_size)
+% u128::from(mplace.layout.align.abi.bytes()),
0,
"this MPlaceTy must come from a validated constant, thus we can assume the \
alignment is correct",
);
-ConstValue::Scalar(Scalar::zst())
+ConstValue::Scalar(Scalar::ZST)
}
};
match immediate {
@@ -161,7 +162,7 @@ pub(super) fn op_to_const<'tcx>(
Scalar::Ptr(ptr) => {
(ecx.tcx.global_alloc(ptr.alloc_id).unwrap_memory(), ptr.offset.bytes())
}
-Scalar::Raw { .. } => (
+Scalar::Int { .. } => (
ecx.tcx
.intern_const_alloc(Allocation::from_byte_aligned_bytes(b"" as &[u8])),
0,

@@ -181,9 +181,9 @@ impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> {
fn guaranteed_eq(&mut self, a: Scalar, b: Scalar) -> bool {
match (a, b) {
// Comparisons between integers are always known.
-(Scalar::Raw { .. }, Scalar::Raw { .. }) => a == b,
+(Scalar::Int { .. }, Scalar::Int { .. }) => a == b,
// Equality with integers can never be known for sure.
-(Scalar::Raw { .. }, Scalar::Ptr(_)) | (Scalar::Ptr(_), Scalar::Raw { .. }) => false,
+(Scalar::Int { .. }, Scalar::Ptr(_)) | (Scalar::Ptr(_), Scalar::Int { .. }) => false,
// FIXME: return `true` for when both sides are the same pointer, *except* that
// some things (like functions and vtables) do not have stable addresses
// so we need to be careful around them (see e.g. #73722).
@@ -194,13 +194,13 @@ impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> {
fn guaranteed_ne(&mut self, a: Scalar, b: Scalar) -> bool {
match (a, b) {
// Comparisons between integers are always known.
-(Scalar::Raw { .. }, Scalar::Raw { .. }) => a != b,
+(Scalar::Int(_), Scalar::Int(_)) => a != b,
// Comparisons of abstract pointers with null pointers are known if the pointer
// is in bounds, because if they are in bounds, the pointer can't be null.
-(Scalar::Raw { data: 0, .. }, Scalar::Ptr(ptr))
-| (Scalar::Ptr(ptr), Scalar::Raw { data: 0, .. }) => !self.memory.ptr_may_be_null(ptr),
// Inequality with integers other than null can never be known for sure.
-(Scalar::Raw { .. }, Scalar::Ptr(_)) | (Scalar::Ptr(_), Scalar::Raw { .. }) => false,
+(Scalar::Int(int), Scalar::Ptr(ptr)) | (Scalar::Ptr(ptr), Scalar::Int(int)) => {
+int.is_null() && !self.memory.ptr_may_be_null(ptr)
+}
// FIXME: return `true` for at least some comparisons where we can reliably
// determine the result of runtime inequality tests at compile-time.
// Examples include comparison of addresses in different static items.

@@ -13,8 +13,7 @@ use rustc_span::symbol::sym;
use rustc_target::abi::{Integer, LayoutOf, Variants};

use super::{
-truncate, util::ensure_monomorphic_enough, FnVal, ImmTy, Immediate, InterpCx, Machine, OpTy,
-PlaceTy,
+util::ensure_monomorphic_enough, FnVal, ImmTy, Immediate, InterpCx, Machine, OpTy, PlaceTy,
};

impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
@@ -209,7 +208,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
RawPtr(_) => self.pointer_size(),
_ => bug!(),
};
-let v = truncate(v, size);
+let v = size.truncate(v);
Scalar::from_uint(v, size)
}

@@ -9,9 +9,7 @@ use rustc_index::vec::IndexVec;
use rustc_macros::HashStable;
use rustc_middle::ich::StableHashingContext;
use rustc_middle::mir;
-use rustc_middle::mir::interpret::{
-sign_extend, truncate, GlobalId, InterpResult, Pointer, Scalar,
-};
+use rustc_middle::mir::interpret::{GlobalId, InterpResult, Pointer, Scalar};
use rustc_middle::ty::layout::{self, TyAndLayout};
use rustc_middle::ty::{
self, query::TyCtxtAt, subst::SubstsRef, ParamEnv, Ty, TyCtxt, TypeFoldable,
@@ -443,12 +441,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
#[inline(always)]
pub fn sign_extend(&self, value: u128, ty: TyAndLayout<'_>) -> u128 {
assert!(ty.abi.is_signed());
-sign_extend(value, ty.size)
+ty.size.sign_extend(value)
}

#[inline(always)]
pub fn truncate(&self, value: u128, ty: TyAndLayout<'_>) -> u128 {
-truncate(value, ty.size)
+ty.size.truncate(value)
}

#[inline]
@@ -211,14 +211,8 @@ impl<'tcx, Tag: Copy> ImmTy<'tcx, Tag> {
#[inline]
pub fn to_const_int(self) -> ConstInt {
assert!(self.layout.ty.is_integral());
-ConstInt::new(
-self.to_scalar()
-.expect("to_const_int doesn't work on scalar pairs")
-.assert_bits(self.layout.size),
-self.layout.size,
-self.layout.ty.is_signed(),
-self.layout.ty.is_ptr_sized_integral(),
-)
+let int = self.to_scalar().expect("to_const_int doesn't work on scalar pairs").assert_int();
+ConstInt::new(int, self.layout.ty.is_signed(), self.layout.ty.is_ptr_sized_integral())
}
}

@@ -262,7 +256,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
return Ok(Some(ImmTy {
// zero-sized type
-imm: Scalar::zst().into(),
+imm: Scalar::ZST.into(),
layout: mplace.layout,
}));
}
@@ -361,7 +355,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {

let field_layout = op.layout.field(self, field)?;
if field_layout.is_zst() {
-let immediate = Scalar::zst().into();
+let immediate = Scalar::ZST.into();
return Ok(OpTy { op: Operand::Immediate(immediate), layout: field_layout });
}
let offset = op.layout.fields.offset(field);
@@ -446,7 +440,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let layout = self.layout_of_local(frame, local, layout)?;
let op = if layout.is_zst() {
// Do not read from ZST, they might not be initialized
-Operand::Immediate(Scalar::zst().into())
+Operand::Immediate(Scalar::ZST.into())
} else {
M::access_local(&self, frame, local)?
};
@@ -544,7 +538,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let tag_scalar = |scalar| -> InterpResult<'tcx, _> {
Ok(match scalar {
Scalar::Ptr(ptr) => Scalar::Ptr(self.global_base_pointer(ptr)?),
-Scalar::Raw { data, size } => Scalar::Raw { data, size },
+Scalar::Int(int) => Scalar::Int(int),
})
};
// Early-return cases.

@@ -14,9 +14,9 @@ use rustc_target::abi::{Abi, Align, FieldsShape, TagEncoding};
use rustc_target::abi::{HasDataLayout, LayoutOf, Size, VariantIdx, Variants};

use super::{
-mir_assign_valid_types, truncate, AllocId, AllocMap, Allocation, AllocationExtra, ConstAlloc,
-ImmTy, Immediate, InterpCx, InterpResult, LocalValue, Machine, MemoryKind, OpTy, Operand,
-Pointer, PointerArithmetic, Scalar, ScalarMaybeUninit,
+mir_assign_valid_types, AllocId, AllocMap, Allocation, AllocationExtra, ConstAlloc, ImmTy,
+Immediate, InterpCx, InterpResult, LocalValue, Machine, MemoryKind, OpTy, Operand, Pointer,
+PointerArithmetic, Scalar, ScalarMaybeUninit,
};

#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable)]
@@ -721,12 +721,8 @@ where
dest.layout.size,
"Size mismatch when writing pointer"
),
-Immediate::Scalar(ScalarMaybeUninit::Scalar(Scalar::Raw { size, .. })) => {
-assert_eq!(
-Size::from_bytes(size),
-dest.layout.size,
-"Size mismatch when writing bits"
-)
+Immediate::Scalar(ScalarMaybeUninit::Scalar(Scalar::Int(int))) => {
+assert_eq!(int.size(), dest.layout.size, "Size mismatch when writing bits")
}
Immediate::Scalar(ScalarMaybeUninit::Uninit) => {} // uninit can have any size
Immediate::ScalarPair(_, _) => {
@@ -1077,7 +1073,7 @@ where
// their computation, but the in-memory tag is the smallest possible
// representation
let size = tag_layout.value.size(self);
-let tag_val = truncate(discr_val, size);
+let tag_val = size.truncate(discr_val);

let tag_dest = self.place_field(dest, tag_field)?;
self.write_scalar(Scalar::from_uint(tag_val, size), tag_dest)?;

@@ -19,7 +19,9 @@ use rustc_middle::mir::{
};
use rustc_middle::ty::layout::{HasTyCtxt, LayoutError, TyAndLayout};
use rustc_middle::ty::subst::{InternalSubsts, Subst};
-use rustc_middle::ty::{self, ConstInt, ConstKind, Instance, ParamEnv, Ty, TyCtxt, TypeFoldable};
+use rustc_middle::ty::{
+self, ConstInt, ConstKind, Instance, ParamEnv, ScalarInt, Ty, TyCtxt, TypeFoldable,
+};
use rustc_session::lint;
use rustc_span::{def_id::DefId, Span};
use rustc_target::abi::{HasDataLayout, LayoutOf, Size, TargetDataLayout};
@@ -27,10 +29,9 @@ use rustc_trait_selection::traits;

use crate::const_eval::ConstEvalErr;
use crate::interpret::{
-self, compile_time_machine, truncate, AllocId, Allocation, ConstValue, CtfeValidationMode,
-Frame, ImmTy, Immediate, InterpCx, InterpResult, LocalState, LocalValue, MemPlace, Memory,
-MemoryKind, OpTy, Operand as InterpOperand, PlaceTy, Pointer, Scalar, ScalarMaybeUninit,
-StackPopCleanup,
+self, compile_time_machine, AllocId, Allocation, ConstValue, CtfeValidationMode, Frame, ImmTy,
+Immediate, InterpCx, InterpResult, LocalState, LocalValue, MemPlace, Memory, MemoryKind, OpTy,
+Operand as InterpOperand, PlaceTy, Pointer, Scalar, ScalarMaybeUninit, StackPopCleanup,
};
use crate::transform::MirPass;

@@ -578,8 +579,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
Some(l) => l.to_const_int(),
// Invent a dummy value, the diagnostic ignores it anyway
None => ConstInt::new(
-1,
-left_size,
+ScalarInt::try_from_uint(1_u8, left_size).unwrap(),
left_ty.is_signed(),
left_ty.is_ptr_sized_integral(),
),
@@ -745,7 +745,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
}
}
BinOp::BitOr => {
-if arg_value == truncate(u128::MAX, const_arg.layout.size)
+if arg_value == const_arg.layout.size.truncate(u128::MAX)
|| (const_arg.layout.ty.is_bool() && arg_value == 1)
{
this.ecx.write_immediate(*const_arg, dest)?;

@@ -26,22 +26,26 @@ use rustc_middle::{
pub struct SimplifyComparisonIntegral;

impl<'tcx> MirPass<'tcx> for SimplifyComparisonIntegral {
-fn run_pass(&self, _: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
trace!("Running SimplifyComparisonIntegral on {:?}", body.source);

let helper = OptimizationFinder { body };
let opts = helper.find_optimizations();
let mut storage_deads_to_insert = vec![];
let mut storage_deads_to_remove: Vec<(usize, BasicBlock)> = vec![];
+let param_env = tcx.param_env(body.source.def_id());
for opt in opts {
trace!("SUCCESS: Applying {:?}", opt);
// replace terminator with a switchInt that switches on the integer directly
let bbs = &mut body.basic_blocks_mut();
let bb = &mut bbs[opt.bb_idx];
// We only use the bits for the untyped, not length checked `values` field. Thus we are
// not using any of the convenience wrappers here and directly access the bits.
let new_value = match opt.branch_value_scalar {
-Scalar::Raw { data, .. } => data,
+Scalar::Int(int) => {
+let layout = tcx
+.layout_of(param_env.and(opt.branch_value_ty))
+.expect("if we have an evaluated constant we must know the layout");
+int.assert_bits(layout.size)
+}
Scalar::Ptr(_) => continue,
};
const FALSE: u128 = 0;
@ -630,7 +630,7 @@ pub fn write_allocations<'tcx>(
|
||||
ConstValue::Scalar(interpret::Scalar::Ptr(ptr)) => {
|
||||
Either::Left(Either::Left(std::iter::once(ptr.alloc_id)))
|
||||
}
|
||||
ConstValue::Scalar(interpret::Scalar::Raw { .. }) => {
|
||||
ConstValue::Scalar(interpret::Scalar::Int { .. }) => {
|
||||
Either::Left(Either::Right(std::iter::empty()))
|
||||
}
|
||||
ConstValue::ByRef { alloc, .. } | ConstValue::Slice { data: alloc, .. } => {
|
||||
|
@ -17,7 +17,6 @@ use crate::build::Builder;
|
||||
use crate::thir::{self, *};
|
||||
use rustc_attr::{SignedInt, UnsignedInt};
|
||||
use rustc_hir::RangeEnd;
|
||||
use rustc_middle::mir::interpret::truncate;
|
||||
use rustc_middle::mir::Place;
|
||||
use rustc_middle::ty;
|
||||
use rustc_middle::ty::layout::IntegerExt;
|
||||
@ -161,13 +160,13 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
|
||||
}
|
||||
ty::Int(ity) => {
|
||||
let size = Integer::from_attr(&tcx, SignedInt(ity)).size();
|
||||
let max = truncate(u128::MAX, size);
|
||||
let max = size.truncate(u128::MAX);
|
||||
let bias = 1u128 << (size.bits() - 1);
|
||||
(Some((0, max, size)), bias)
|
||||
}
|
||||
ty::Uint(uty) => {
|
||||
let size = Integer::from_attr(&tcx, UnsignedInt(uty)).size();
|
||||
let max = truncate(u128::MAX, size);
|
||||
let max = size.truncate(u128::MAX);
|
||||
(Some((0, max, size)), 0)
|
||||
}
|
||||
_ => (None, 0),
|
||||
|

@ -1,6 +1,6 @@
use rustc_ast as ast;
use rustc_middle::mir::interpret::{
truncate, Allocation, ConstValue, LitToConstError, LitToConstInput, Scalar,
Allocation, ConstValue, LitToConstError, LitToConstInput, Scalar,
};
use rustc_middle::ty::{self, ParamEnv, TyCtxt};
use rustc_span::symbol::Symbol;
@ -16,7 +16,7 @@ crate fn lit_to_const<'tcx>(
let param_ty = ParamEnv::reveal_all().and(ty);
let width = tcx.layout_of(param_ty).map_err(|_| LitToConstError::Reported)?.size;
trace!("trunc {} with size {} and shift {}", n, width.bits(), 128 - width.bits());
let result = truncate(n, width);
let result = width.truncate(n);
trace!("trunc result: {}", result);
Ok(ConstValue::Scalar(Scalar::from_uint(result, width)))
};

@ -304,7 +304,7 @@ use rustc_arena::TypedArena;
use rustc_attr::{SignedInt, UnsignedInt};
use rustc_hir::def_id::DefId;
use rustc_hir::{HirId, RangeEnd};
use rustc_middle::mir::interpret::{truncate, ConstValue};
use rustc_middle::mir::interpret::ConstValue;
use rustc_middle::mir::Field;
use rustc_middle::ty::layout::IntegerExt;
use rustc_middle::ty::{self, Const, Ty, TyCtxt};
@ -1608,7 +1608,7 @@ fn all_constructors<'p, 'tcx>(pcx: PatCtxt<'_, 'p, 'tcx>) -> Vec<Constructor<'tc
}
&ty::Uint(uty) => {
let size = Integer::from_attr(&cx.tcx, UnsignedInt(uty)).size();
let max = truncate(u128::MAX, size);
let max = size.truncate(u128::MAX);
vec![make_range(0, max)]
}
_ if cx.is_uninhabited(pcx.ty) => vec![],

@ -15,7 +15,7 @@ use rustc_hir::def::{CtorKind, CtorOf, DefKind, Res};
use rustc_hir::pat_util::EnumerateAndAdjustIterator;
use rustc_hir::RangeEnd;
use rustc_index::vec::Idx;
use rustc_middle::mir::interpret::{get_slice_bytes, sign_extend, ConstValue};
use rustc_middle::mir::interpret::{get_slice_bytes, ConstValue};
use rustc_middle::mir::interpret::{ErrorHandled, LitToConstError, LitToConstInput};
use rustc_middle::mir::UserTypeProjection;
use rustc_middle::mir::{BorrowKind, Field, Mutability};
@ -1082,8 +1082,8 @@ crate fn compare_const_vals<'tcx>(
use rustc_attr::SignedInt;
use rustc_middle::ty::layout::IntegerExt;
let size = rustc_target::abi::Integer::from_attr(&tcx, SignedInt(ity)).size();
let a = sign_extend(a, size);
let b = sign_extend(b, size);
let a = size.sign_extend(a);
let b = size.sign_extend(b);
Some((a as i128).cmp(&(b as i128)))
}
_ => Some(a.cmp(&b)),
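
`compare_const_vals` sign-extends both operands before casting to `i128` because the raw bit patterns of negative values compare as large unsigned numbers. A standalone sketch of the same arithmetic with a plain function (not the `Size` method):

    // Sign-extend the low `bits` bits of `value` to the full 128 bits.
    fn sign_extend(value: u128, bits: u32) -> u128 {
        if bits == 0 {
            return 0;
        }
        let shift = 128 - bits;
        (((value << shift) as i128) >> shift) as u128
    }

    fn main() {
        // -1i8 and 1i8 as raw 8-bit patterns.
        let (a, b) = (0xFFu128, 0x01u128);
        // Comparing the raw bits gets the order backwards...
        assert!(a > b);
        // ...sign-extending first and comparing as i128 restores the signed order.
        assert!((sign_extend(a, 8) as i128) < (sign_extend(b, 8) as i128));
    }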

@ -237,7 +237,7 @@ impl Printer<'tcx> for SymbolPrinter<'tcx> {

fn print_const(mut self, ct: &'tcx ty::Const<'tcx>) -> Result<Self::Const, Self::Error> {
// only print integers
if let ty::ConstKind::Value(ConstValue::Scalar(Scalar::Raw { .. })) = ct.val {
if let ty::ConstKind::Value(ConstValue::Scalar(Scalar::Int { .. })) = ct.val {
if ct.ty.is_integral() {
return self.pretty_print_const(ct, true);
}

@ -4,7 +4,6 @@ use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_hir as hir;
use rustc_hir::def_id::{CrateNum, DefId};
use rustc_hir::definitions::{DefPathData, DisambiguatedDefPathData};
use rustc_middle::mir::interpret::sign_extend;
use rustc_middle::ty::print::{Print, Printer};
use rustc_middle::ty::subst::{GenericArg, GenericArgKind, Subst};
use rustc_middle::ty::{self, Instance, Ty, TyCtxt, TypeFoldable};
@ -527,7 +526,7 @@ impl Printer<'tcx> for SymbolMangler<'tcx> {
let param_env = ty::ParamEnv::reveal_all();
ct.try_eval_bits(self.tcx, param_env, ct.ty).and_then(|b| {
let sz = self.tcx.layout_of(param_env.and(ct.ty)).ok()?.size;
let val = sign_extend(b, sz) as i128;
let val = sz.sign_extend(b) as i128;
if val < 0 {
neg = true;
}

@ -306,6 +306,35 @@ impl Size {
let bytes = self.bytes().checked_mul(count)?;
if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
}

/// Truncates `value` to `self` bits and then sign-extends it to 128 bits
/// (i.e., if it is negative, fill with 1's on the left).
#[inline]
pub fn sign_extend(self, value: u128) -> u128 {
let size = self.bits();
if size == 0 {
// Truncated until nothing is left.
return 0;
}
// Sign-extend it.
let shift = 128 - size;
// Shift the unsigned value to the left, then shift back to the right as signed
// (essentially fills with sign bit on the left).
(((value << shift) as i128) >> shift) as u128
}

/// Truncates `value` to `self` bits.
#[inline]
pub fn truncate(self, value: u128) -> u128 {
let size = self.bits();
if size == 0 {
// Truncated until nothing is left.
return 0;
}
let shift = 128 - size;
// Truncate (shift left to drop out leftover values, shift right to fill with zeroes).
(value << shift) >> shift
}
}

// Panicking addition, subtraction and multiplication for convenience.
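
A quick check of the shift arithmetic in the two methods above, with the 8-bit case written out inline:

    fn main() {
        let shift = 128 - 8;
        // 0xFF is the 8-bit pattern of -1; sign-extending fills the upper bits with 1s.
        assert_eq!((((0xFFu128 << shift) as i128) >> shift) as u128, u128::MAX);
        // Truncating 0x1FF to 8 bits drops the ninth bit and leaves 0xFF.
        assert_eq!((0x1FFu128 << shift) >> shift, 0xFF);
    }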

@ -14,7 +14,7 @@ use rustc_data_structures::fx::FxHashSet;
use rustc_hir as hir;
use rustc_hir::def::{DefKind, Res};
use rustc_hir::def_id::{DefId, LOCAL_CRATE};
use rustc_middle::mir::interpret::{sign_extend, ConstValue, Scalar};
use rustc_middle::mir::interpret::ConstValue;
use rustc_middle::ty::subst::{GenericArgKind, SubstsRef};
use rustc_middle::ty::{self, DefIdTree, Ty};
use rustc_span::symbol::{kw, sym, Symbol};
@ -499,13 +499,14 @@ fn print_const_with_custom_print_scalar(cx: &DocContext<'_>, ct: &'tcx ty::Const
// Use a slightly different format for integer types which always shows the actual value.
// For all other types, fallback to the original `pretty_print_const`.
match (ct.val, ct.ty.kind()) {
(ty::ConstKind::Value(ConstValue::Scalar(Scalar::Raw { data, .. })), ty::Uint(ui)) => {
format!("{}{}", format_integer_with_underscore_sep(&data.to_string()), ui.name_str())
(ty::ConstKind::Value(ConstValue::Scalar(int)), ty::Uint(ui)) => {
format!("{}{}", format_integer_with_underscore_sep(&int.to_string()), ui.name_str())
}
(ty::ConstKind::Value(ConstValue::Scalar(Scalar::Raw { data, .. })), ty::Int(i)) => {
(ty::ConstKind::Value(ConstValue::Scalar(int)), ty::Int(i)) => {
let ty = cx.tcx.lift(ct.ty).unwrap();
let size = cx.tcx.layout_of(ty::ParamEnv::empty().and(ty)).unwrap().size;
let sign_extended_data = sign_extend(data, size) as i128;
let data = int.assert_bits(size);
let sign_extended_data = size.sign_extend(data) as i128;

format!(
"{}{}",
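
For signed integer constants the hunk above recovers the value by asserting the bits at the type's size and then sign-extending before formatting. A standalone sketch with a hypothetical helper (not rustdoc's actual formatting code), fixed to `i8` for brevity:

    // Hypothetical helper: take the raw 8-bit pattern of an `i8` constant and
    // render it as a signed value with a type suffix.
    fn print_i8_const(bits: u128) -> String {
        let shift = 128 - 8;
        let value = ((bits << shift) as i128) >> shift; // sign-extend to i128
        format!("{}i8", value)
    }

    fn main() {
        assert_eq!(print_i8_const(0x80), "-128i8");
        assert_eq!(print_i8_const(0x2a), "42i8");
    }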

@ -8,8 +8,9 @@ use rustc_hir::def::{DefKind, Res};
use rustc_hir::{BinOp, BinOpKind, Block, Expr, ExprKind, HirId, QPath, UnOp};
use rustc_lint::LateContext;
use rustc_middle::ty::subst::{Subst, SubstsRef};
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_middle::ty::{self, Ty, TyCtxt, ScalarInt};
use rustc_middle::{bug, span_bug};
use rustc_middle::mir::interpret::Scalar;
use rustc_span::symbol::Symbol;
use std::cmp::Ordering::{self, Equal};
use std::convert::TryInto;
@ -500,21 +501,21 @@ impl<'a, 'tcx> ConstEvalLateContext<'a, 'tcx> {
}

pub fn miri_to_const(result: &ty::Const<'_>) -> Option<Constant> {
use rustc_middle::mir::interpret::{ConstValue, Scalar};
use rustc_middle::mir::interpret::{ConstValue};
match result.val {
ty::ConstKind::Value(ConstValue::Scalar(Scalar::Raw { data: d, .. })) => {
ty::ConstKind::Value(ConstValue::Scalar(Scalar::Int(int))) => {
match result.ty.kind() {
ty::Bool => Some(Constant::Bool(d == 1)),
ty::Uint(_) | ty::Int(_) => Some(Constant::Int(d)),
ty::Bool => Some(Constant::Bool(int == ScalarInt::TRUE)),
ty::Uint(_) | ty::Int(_) => Some(Constant::Int(int.assert_bits(int.size()))),
ty::Float(FloatTy::F32) => Some(Constant::F32(f32::from_bits(
d.try_into().expect("invalid f32 bit representation"),
int.try_into().expect("invalid f32 bit representation"),
))),
ty::Float(FloatTy::F64) => Some(Constant::F64(f64::from_bits(
d.try_into().expect("invalid f64 bit representation"),
int.try_into().expect("invalid f64 bit representation"),
))),
ty::RawPtr(type_and_mut) => {
if let ty::Uint(_) = type_and_mut.ty.kind() {
return Some(Constant::RawPtr(d));
return Some(Constant::RawPtr(int.assert_bits(int.size())));
}
None
},
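
The float arms above narrow the value down to the float's bit width with `try_into` before calling `from_bits`. A standalone sketch of that round trip for `f32`, using a plain `u128` in place of the interpreter's integer type:

    use std::convert::TryInto;

    fn main() {
        // 1.5f32 stored as raw bits widened to u128, roughly as the interpreter
        // would hand a float constant to `miri_to_const`.
        let bits: u128 = f32::to_bits(1.5) as u128;
        // Narrow back to u32 (failing if it would not fit) and reinterpret.
        let narrowed: u32 = bits.try_into().expect("invalid f32 bit representation");
        assert_eq!(f32::from_bits(narrowed), 1.5);
    }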