CTFE/Miri engine Pointer type overhaul: make Scalar-to-Pointer conversion infallible

This resolves all the problems we had around "normalizing" the representation of a Scalar in case it carries a Pointer value: we can just use Pointer whenever we want a value that we are sure is already normalized.
Ralf Jung 2021-07-12 18:22:15 +02:00
parent 5aff6dd07a
commit d4f7dd6702
34 changed files with 839 additions and 724 deletions
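
For orientation, here is a rough, self-contained sketch of the shape this change gives the core types. These are toy definitions, not the actual rustc ones; the point is that a Scalar holding an integer can always be turned into a Pointer whose provenance is None, which is what makes the Scalar-to-Pointer conversion infallible, and the reverse direction restores the plain-integer form, so either representation can be normalized on demand.

// Toy versions of the types involved (simplified; not the rustc definitions).
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
struct AllocId(u64);

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
struct Pointer<Tag> {
    offset: u64, // meaning depends on Tag: relative to the allocation when Tag = AllocId
    provenance: Tag,
}

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum Scalar<Tag> {
    Int(u128),
    Ptr(Pointer<Tag>),
}

// The conversion this change makes infallible: an integer Scalar simply
// becomes a Pointer without provenance (`Memory::scalar_to_ptr` in the commit).
fn scalar_to_ptr<Tag>(s: Scalar<Tag>) -> Pointer<Option<Tag>> {
    match s {
        Scalar::Ptr(p) => Pointer { offset: p.offset, provenance: Some(p.provenance) },
        Scalar::Int(addr) => Pointer { offset: addr as u64, provenance: None },
    }
}

// The reverse direction (`Scalar::from_maybe_pointer` in the commit): a pointer
// without provenance turns back into a plain integer.
fn scalar_from_maybe_ptr<Tag>(p: Pointer<Option<Tag>>) -> Scalar<Tag> {
    match p.provenance {
        Some(tag) => Scalar::Ptr(Pointer { offset: p.offset, provenance: tag }),
        None => Scalar::Int(p.offset as u128),
    }
}

fn main() {
    let s: Scalar<AllocId> = Scalar::Ptr(Pointer { offset: 8, provenance: AllocId(1) });
    // Round-tripping either representation yields a normalized value.
    assert_eq!(scalar_from_maybe_ptr(scalar_to_ptr(s)), s);
    assert_eq!(scalar_to_ptr(Scalar::<AllocId>::Int(16)).provenance, None);
}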


@ -244,7 +244,8 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
}
}
Scalar::Ptr(ptr) => {
let (base_addr, base_addr_space) = match self.tcx.global_alloc(ptr.alloc_id) {
let (alloc_id, offset) = ptr.into_parts();
let (base_addr, base_addr_space) = match self.tcx.global_alloc(alloc_id) {
GlobalAlloc::Memory(alloc) => {
let init = const_alloc_to_llvm(self, alloc);
let value = match alloc.mutability {
@ -252,7 +253,7 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
_ => self.static_addr_of(init, alloc.align, None),
};
if !self.sess().fewer_names() {
llvm::set_value_name(value, format!("{:?}", ptr.alloc_id).as_bytes());
llvm::set_value_name(value, format!("{:?}", alloc_id).as_bytes());
}
(value, AddressSpace::DATA)
}
@ -269,7 +270,7 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
let llval = unsafe {
llvm::LLVMConstInBoundsGEP(
self.const_bitcast(base_addr, self.type_i8p_ext(base_addr_space)),
&self.const_usize(ptr.offset.bytes()),
&self.const_usize(offset.bytes()),
1,
)
};


@ -25,7 +25,7 @@ pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll
let pointer_size = dl.pointer_size.bytes() as usize;
let mut next_offset = 0;
for &(offset, ((), alloc_id)) in alloc.relocations().iter() {
for &(offset, alloc_id) in alloc.relocations().iter() {
let offset = offset.bytes();
assert_eq!(offset as usize as u64, offset);
let offset = offset as usize;


@ -25,7 +25,7 @@ use crate::ty;
/// module provides higher-level access.
#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, TyEncodable, TyDecodable)]
#[derive(HashStable)]
pub struct Allocation<Tag = (), Extra = ()> {
pub struct Allocation<Tag = AllocId, Extra = ()> {
/// The actual bytes of the allocation.
/// Note that the bytes of a pointer represent the offset of the pointer.
bytes: Vec<u8>,
@ -154,25 +154,17 @@ impl<Tag> Allocation<Tag> {
}
}
impl Allocation<()> {
/// Add Tag and Extra fields
pub fn with_tags_and_extra<T, E>(
impl Allocation {
/// Convert Tag and add Extra fields
pub fn with_prov_and_extra<Tag, Extra>(
self,
mut tagger: impl FnMut(AllocId) -> T,
extra: E,
) -> Allocation<T, E> {
mut tagger: impl FnMut(AllocId) -> Tag,
extra: Extra,
) -> Allocation<Tag, Extra> {
Allocation {
bytes: self.bytes,
relocations: Relocations::from_presorted(
self.relocations
.iter()
// The allocations in the relocations (pointers stored *inside* this allocation)
// all get the base pointer tag.
.map(|&(offset, ((), alloc))| {
let tag = tagger(alloc);
(offset, (tag, alloc))
})
.collect(),
self.relocations.iter().map(|&(offset, tag)| (offset, tagger(tag))).collect(),
),
init_mask: self.init_mask,
align: self.align,
@ -339,8 +331,8 @@ impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
self.check_relocations(cx, range)?;
} else {
// Maybe a pointer.
if let Some(&(tag, alloc_id)) = self.relocations.get(&range.start) {
let ptr = Pointer::new_with_tag(alloc_id, Size::from_bytes(bits), tag);
if let Some(&prov) = self.relocations.get(&range.start) {
let ptr = Pointer::new(prov, Size::from_bytes(bits));
return Ok(ScalarMaybeUninit::Scalar(ptr.into()));
}
}
@ -371,9 +363,12 @@ impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
}
};
let bytes = match val.to_bits_or_ptr(range.size, cx) {
Err(val) => u128::from(val.offset.bytes()),
Ok(data) => data,
let (bytes, provenance) = match val.to_bits_or_ptr(range.size, cx) {
Err(val) => {
let (provenance, offset) = val.into_parts();
(u128::from(offset.bytes()), Some(provenance))
}
Ok(data) => (data, None),
};
let endian = cx.data_layout().endian;
@ -381,8 +376,8 @@ impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
write_target_uint(endian, dst, bytes).unwrap();
// See if we have to also write a relocation.
if let Scalar::Ptr(val) = val {
self.relocations.insert(range.start, (val.tag, val.alloc_id));
if let Some(provenance) = provenance {
self.relocations.insert(range.start, provenance);
}
Ok(())
@ -392,11 +387,7 @@ impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
/// Relocations.
impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
/// Returns all relocations overlapping with the given pointer-offset pair.
pub fn get_relocations(
&self,
cx: &impl HasDataLayout,
range: AllocRange,
) -> &[(Size, (Tag, AllocId))] {
pub fn get_relocations(&self, cx: &impl HasDataLayout, range: AllocRange) -> &[(Size, Tag)] {
// We have to go back `pointer_size - 1` bytes, as that one would still overlap with
// the beginning of this range.
let start = range.start.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1);
@ -582,24 +573,24 @@ impl<Tag, Extra> Allocation<Tag, Extra> {
}
}
/// Relocations.
/// "Relocations" stores the provenance information of pointers stored in memory.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
pub struct Relocations<Tag = (), Id = AllocId>(SortedMap<Size, (Tag, Id)>);
pub struct Relocations<Tag = AllocId>(SortedMap<Size, Tag>);
impl<Tag, Id> Relocations<Tag, Id> {
impl<Tag> Relocations<Tag> {
pub fn new() -> Self {
Relocations(SortedMap::new())
}
// The caller must guarantee that the given relocations are already sorted
// by address and contain no duplicates.
pub fn from_presorted(r: Vec<(Size, (Tag, Id))>) -> Self {
pub fn from_presorted(r: Vec<(Size, Tag)>) -> Self {
Relocations(SortedMap::from_presorted_elements(r))
}
}
impl<Tag> Deref for Relocations<Tag> {
type Target = SortedMap<Size, (Tag, AllocId)>;
type Target = SortedMap<Size, Tag>;
fn deref(&self) -> &Self::Target {
&self.0
@ -614,7 +605,7 @@ impl<Tag> DerefMut for Relocations<Tag> {
/// A partial, owned list of relocations to transfer into another allocation.
pub struct AllocationRelocations<Tag> {
relative_relocations: Vec<(Size, (Tag, AllocId))>,
relative_relocations: Vec<(Size, Tag)>,
}
impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
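
The relocations map is what lets a pointer survive a round trip through an Allocation: the raw bytes hold only the pointer's offset, while the provenance is kept on the side, keyed by the position it was written at. Below is a self-contained toy model of that idea (invented names, fixed 8-byte little-endian pointers; not the rustc code).

use std::collections::BTreeMap;

// Toy model: bytes store the offset, `relocations` stores the provenance.
struct ToyAllocation<Tag> {
    bytes: Vec<u8>,
    relocations: BTreeMap<u64, Tag>, // write position -> provenance
}

impl<Tag: Copy> ToyAllocation<Tag> {
    fn write_ptr(&mut self, pos: u64, offset: u64, provenance: Tag) {
        let pos = pos as usize;
        self.bytes[pos..pos + 8].copy_from_slice(&offset.to_le_bytes());
        self.relocations.insert(pos as u64, provenance);
    }

    // Reading the same position back yields the offset plus the provenance,
    // from which a Pointer can be rebuilt; without a relocation entry the
    // bytes are just an integer.
    fn read_ptr(&self, pos: u64) -> (u64, Option<Tag>) {
        let mut buf = [0u8; 8];
        buf.copy_from_slice(&self.bytes[pos as usize..pos as usize + 8]);
        (u64::from_le_bytes(buf), self.relocations.get(&pos).copied())
    }
}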


@ -238,7 +238,9 @@ pub enum UndefinedBehaviorInfo<'tcx> {
PointerUseAfterFree(AllocId),
/// Used a pointer outside the bounds it is valid for.
PointerOutOfBounds {
ptr: Pointer,
alloc_id: AllocId,
offset: Size,
size: Size,
msg: CheckInAllocMsg,
allocation_size: Size,
},
@ -307,19 +309,19 @@ impl fmt::Display for UndefinedBehaviorInfo<'_> {
InvalidVtableAlignment(msg) => write!(f, "invalid vtable: alignment {}", msg),
UnterminatedCString(p) => write!(
f,
"reading a null-terminated string starting at {} with no null found before end of allocation",
"reading a null-terminated string starting at {:?} with no null found before end of allocation",
p,
),
PointerUseAfterFree(a) => {
write!(f, "pointer to {} was dereferenced after this allocation got freed", a)
}
PointerOutOfBounds { ptr, msg, allocation_size } => write!(
PointerOutOfBounds { alloc_id, offset, size, msg, allocation_size } => write!(
f,
"{}pointer must be in-bounds at offset {}, \
but is outside bounds of {} which has size {}",
"{}pointer must be in-bounds for {} bytes at offset {}, but {} has size {}",
msg,
ptr.offset.bytes(),
ptr.alloc_id,
size.bytes(),
offset.bytes(),
alloc_id,
allocation_size.bytes()
),
DanglingIntPointer(0, CheckInAllocMsg::InboundsTest) => {
@ -348,13 +350,13 @@ impl fmt::Display for UndefinedBehaviorInfo<'_> {
}
InvalidTag(val) => write!(f, "enum value has invalid tag: {}", val),
InvalidFunctionPointer(p) => {
write!(f, "using {} as function pointer but it does not point to a function", p)
write!(f, "using {:?} as function pointer but it does not point to a function", p)
}
InvalidStr(err) => write!(f, "this string is not valid UTF-8: {}", err),
InvalidUninitBytes(Some((alloc, access))) => write!(
f,
"reading {} byte{} of memory starting at {}, \
but {} byte{} {} uninitialized starting at {}, \
"reading {} byte{} of memory starting at {:?}, \
but {} byte{} {} uninitialized starting at {:?}, \
and this operation requires initialized memory",
access.access_size.bytes(),
pluralize!(access.access_size.bytes()),


@ -127,7 +127,7 @@ pub use self::value::{get_slice_bytes, ConstAlloc, ConstValue, Scalar, ScalarMay
pub use self::allocation::{alloc_range, AllocRange, Allocation, InitMask, Relocations};
pub use self::pointer::{Pointer, PointerArithmetic};
pub use self::pointer::{Pointer, PointerArithmetic, Provenance};
/// Uniquely identifies one of the following:
/// - A constant


@ -83,55 +83,74 @@ pub trait PointerArithmetic: HasDataLayout {
impl<T: HasDataLayout> PointerArithmetic for T {}
/// This trait abstracts over the kind of provenance that is associated with a `Pointer`. It is
/// mostly opaque; the `Machine` trait extends it with some more operations that also have access to
/// some global state.
pub trait Provenance: Copy {
/// Says whether the `offset` field of `Pointer` is the actual physical address.
/// If `true`, ptr-to-int casts work by simply discarding the provenance.
/// If `false`, ptr-to-int casts are not supported.
const OFFSET_IS_ADDR: bool;
/// Determines how a pointer should be printed.
fn fmt(ptr: &Pointer<Self>, f: &mut fmt::Formatter<'_>) -> fmt::Result
where
Self: Sized;
/// "Erasing" a tag converts it to the default tag type if possible. Used only for formatting purposes!
fn erase_for_fmt(self) -> AllocId;
}
impl Provenance for AllocId {
// With the `AllocId` as provenance, the `offset` is interpreted *relative to the allocation*,
// so ptr-to-int casts are not possible (since we do not know the global physical offset).
const OFFSET_IS_ADDR: bool = false;
fn fmt(ptr: &Pointer<Self>, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Forward `alternate` flag to `alloc_id` printing.
if f.alternate() {
write!(f, "{:#?}", ptr.provenance)?;
} else {
write!(f, "{:?}", ptr.provenance)?;
}
// Print offset only if it is non-zero.
if ptr.offset.bytes() > 0 {
write!(f, "+0x{:x}", ptr.offset.bytes())?;
}
Ok(())
}
fn erase_for_fmt(self) -> AllocId {
self
}
}
/// Represents a pointer in the Miri engine.
///
/// `Pointer` is generic over the `Tag` associated with each pointer,
/// which is used to do provenance tracking during execution.
/// Pointers are "tagged" with provenance information; typically the `AllocId` they belong to.
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, TyEncodable, TyDecodable, Hash)]
#[derive(HashStable)]
pub struct Pointer<Tag = ()> {
pub alloc_id: AllocId,
pub offset: Size,
pub tag: Tag,
pub struct Pointer<Tag = AllocId> {
pub(super) offset: Size, // kept private to avoid accidental misinterpretation (meaning depends on `Tag` type)
pub provenance: Tag,
}
static_assert_size!(Pointer, 16);
/// Print the address of a pointer (without the tag)
fn print_ptr_addr<Tag>(ptr: &Pointer<Tag>, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Forward `alternate` flag to `alloc_id` printing.
if f.alternate() {
write!(f, "{:#?}", ptr.alloc_id)?;
} else {
write!(f, "{:?}", ptr.alloc_id)?;
}
// Print offset only if it is non-zero.
if ptr.offset.bytes() > 0 {
write!(f, "+0x{:x}", ptr.offset.bytes())?;
}
Ok(())
}
//FIXME static_assert_size!(Pointer, 16);
// We want the `Debug` output to be readable as it is used by `derive(Debug)` for
// all the Miri types.
// We have to use `Debug` output for the tag, because `()` does not implement
// `Display` so we cannot specialize that.
impl<Tag: fmt::Debug> fmt::Debug for Pointer<Tag> {
default fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
print_ptr_addr(self, f)?;
write!(f, "[{:?}]", self.tag)
}
}
// Specialization for no tag
impl fmt::Debug for Pointer<()> {
impl<Tag: Provenance> fmt::Debug for Pointer<Tag> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
print_ptr_addr(self, f)
Tag::fmt(self, f)
}
}
impl<Tag: fmt::Debug> fmt::Display for Pointer<Tag> {
impl<Tag: Provenance> fmt::Debug for Pointer<Option<Tag>> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(self, f)
match self.provenance {
Some(tag) => Tag::fmt(&Pointer::new(tag, self.offset), f),
None => write!(f, "0x{:x}", self.offset.bytes()),
}
}
}
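
A concrete, invented example of the trait in use: a hypothetical machine that uses absolute addresses as pointer offsets could implement Provenance roughly as below. This is a self-contained sketch against cut-down copies of the types; MachineTag and its fields are made up for illustration, not part of this commit.

use std::fmt;

#[derive(Copy, Clone, Debug)]
struct AllocId(u64);

struct Pointer<Tag> {
    offset: u64,
    provenance: Tag,
}

// Cut-down copy of the trait above.
trait Provenance: Copy {
    const OFFSET_IS_ADDR: bool;
    fn fmt(ptr: &Pointer<Self>, f: &mut fmt::Formatter<'_>) -> fmt::Result;
    fn erase_for_fmt(self) -> AllocId;
}

#[derive(Copy, Clone)]
struct MachineTag {
    alloc_id: AllocId,
    // per-pointer state (e.g. a Stacked Borrows tag) could live here
}

impl Provenance for MachineTag {
    // Such a machine uses absolute addresses as offsets, so a ptr-to-int cast
    // can simply drop the provenance.
    const OFFSET_IS_ADDR: bool = true;

    fn fmt(ptr: &Pointer<Self>, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Print the absolute address, plus the allocation the pointer belongs to.
        write!(f, "0x{:x}[{:?}]", ptr.offset, ptr.provenance.alloc_id)
    }

    fn erase_for_fmt(self) -> AllocId {
        self.alloc_id
    }
}
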
@ -143,37 +162,66 @@ impl From<AllocId> for Pointer {
}
}
impl Pointer<()> {
impl<Tag> From<Pointer<Tag>> for Pointer<Option<Tag>> {
#[inline(always)]
pub fn new(alloc_id: AllocId, offset: Size) -> Self {
Pointer { alloc_id, offset, tag: () }
fn from(ptr: Pointer<Tag>) -> Self {
let (tag, offset) = ptr.into_parts();
Pointer::new(Some(tag), offset)
}
}
impl<Tag> Pointer<Option<Tag>> {
pub fn into_pointer_or_offset(self) -> Result<Pointer<Tag>, Size> {
match self.provenance {
Some(tag) => Ok(Pointer::new(tag, self.offset)),
None => Err(self.offset),
}
}
#[inline(always)]
pub fn with_tag<Tag>(self, tag: Tag) -> Pointer<Tag> {
Pointer::new_with_tag(self.alloc_id, self.offset, tag)
pub fn map_erase_for_fmt(self) -> Pointer<Option<AllocId>>
where
Tag: Provenance,
{
Pointer { offset: self.offset, provenance: self.provenance.map(Provenance::erase_for_fmt) }
}
}
impl<'tcx, Tag> Pointer<Tag> {
#[inline(always)]
pub fn new_with_tag(alloc_id: AllocId, offset: Size, tag: Tag) -> Self {
Pointer { alloc_id, offset, tag }
pub fn new(provenance: Tag, offset: Size) -> Self {
Pointer { provenance, offset }
}
/// Obtain the constituents of this pointer. Note that the meaning of the offset depends on the type `Tag`!
/// This function must only be used in the implementation of `Machine::ptr_get_alloc`,
/// and when a `Pointer` is taken apart to be stored efficiently in an `Allocation`.
#[inline(always)]
pub fn into_parts(self) -> (Tag, Size) {
(self.provenance, self.offset)
}
#[inline(always)]
pub fn erase_for_fmt(self) -> Pointer
where
Tag: Provenance,
{
Pointer { offset: self.offset, provenance: self.provenance.erase_for_fmt() }
}
#[inline]
pub fn offset(self, i: Size, cx: &impl HasDataLayout) -> InterpResult<'tcx, Self> {
Ok(Pointer::new_with_tag(
self.alloc_id,
Size::from_bytes(cx.data_layout().offset(self.offset.bytes(), i.bytes())?),
self.tag,
))
Ok(Pointer {
offset: Size::from_bytes(cx.data_layout().offset(self.offset.bytes(), i.bytes())?),
..self
})
}
#[inline]
pub fn overflowing_offset(self, i: Size, cx: &impl HasDataLayout) -> (Self, bool) {
let (res, over) = cx.data_layout().overflowing_offset(self.offset.bytes(), i.bytes());
(Pointer::new_with_tag(self.alloc_id, Size::from_bytes(res), self.tag), over)
let ptr = Pointer { offset: Size::from_bytes(res), ..self };
(ptr, over)
}
#[inline(always)]
@ -183,26 +231,21 @@ impl<'tcx, Tag> Pointer<Tag> {
#[inline]
pub fn signed_offset(self, i: i64, cx: &impl HasDataLayout) -> InterpResult<'tcx, Self> {
Ok(Pointer::new_with_tag(
self.alloc_id,
Size::from_bytes(cx.data_layout().signed_offset(self.offset.bytes(), i)?),
self.tag,
))
Ok(Pointer {
offset: Size::from_bytes(cx.data_layout().signed_offset(self.offset.bytes(), i)?),
..self
})
}
#[inline]
pub fn overflowing_signed_offset(self, i: i64, cx: &impl HasDataLayout) -> (Self, bool) {
let (res, over) = cx.data_layout().overflowing_signed_offset(self.offset.bytes(), i);
(Pointer::new_with_tag(self.alloc_id, Size::from_bytes(res), self.tag), over)
let ptr = Pointer { offset: Size::from_bytes(res), ..self };
(ptr, over)
}
#[inline(always)]
pub fn wrapping_signed_offset(self, i: i64, cx: &impl HasDataLayout) -> Self {
self.overflowing_signed_offset(i, cx).0
}
#[inline(always)]
pub fn erase_tag(self) -> Pointer {
Pointer { alloc_id: self.alloc_id, offset: self.offset, tag: () }
}
}


@ -10,7 +10,9 @@ use rustc_target::abi::{HasDataLayout, Size, TargetDataLayout};
use crate::ty::{Lift, ParamEnv, ScalarInt, Ty, TyCtxt};
use super::{AllocId, AllocRange, Allocation, InterpResult, Pointer, PointerArithmetic};
use super::{
AllocId, AllocRange, Allocation, InterpResult, Pointer, PointerArithmetic, Provenance,
};
/// Represents the result of const evaluation via the `eval_to_allocation` query.
#[derive(Copy, Clone, HashStable, TyEncodable, TyDecodable, Debug, Hash, Eq, PartialEq)]
@ -47,12 +49,6 @@ pub enum ConstValue<'tcx> {
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
static_assert_size!(ConstValue<'_>, 32);
impl From<Scalar> for ConstValue<'tcx> {
fn from(s: Scalar) -> Self {
Self::Scalar(s)
}
}
impl<'a, 'tcx> Lift<'tcx> for ConstValue<'a> {
type Lifted = ConstValue<'tcx>;
fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<ConstValue<'tcx>> {
@ -70,7 +66,7 @@ impl<'a, 'tcx> Lift<'tcx> for ConstValue<'a> {
impl<'tcx> ConstValue<'tcx> {
#[inline]
pub fn try_to_scalar(&self) -> Option<Scalar> {
pub fn try_to_scalar(&self) -> Option<Scalar<AllocId>> {
match *self {
ConstValue::ByRef { .. } | ConstValue::Slice { .. } => None,
ConstValue::Scalar(val) => Some(val),
@ -120,9 +116,12 @@ impl<'tcx> ConstValue<'tcx> {
/// `memory::Allocation`. It is in many ways like a small chunk of an `Allocation`, up to 16 bytes in
/// size. Like a range of bytes in an `Allocation`, a `Scalar` can either represent the raw bytes
/// of a simple value or a pointer into another `Allocation`
///
/// These variants would be private if there was a convenient way to achieve that in Rust.
/// Do *not* match on a `Scalar`! Use the various `to_*` methods instead.
#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, TyEncodable, TyDecodable, Hash)]
#[derive(HashStable)]
pub enum Scalar<Tag = ()> {
pub enum Scalar<Tag = AllocId> {
/// The raw bytes of a simple value.
Int(ScalarInt),
@ -133,11 +132,11 @@ pub enum Scalar<Tag = ()> {
}
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
static_assert_size!(Scalar, 24);
//FIXME static_assert_size!(Scalar, 24);
// We want the `Debug` output to be readable as it is used by `derive(Debug)` for
// all the Miri types.
impl<Tag: fmt::Debug> fmt::Debug for Scalar<Tag> {
impl<Tag: Provenance> fmt::Debug for Scalar<Tag> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Scalar::Ptr(ptr) => write!(f, "{:?}", ptr),
@ -146,11 +145,11 @@ impl<Tag: fmt::Debug> fmt::Debug for Scalar<Tag> {
}
}
impl<Tag: fmt::Debug> fmt::Display for Scalar<Tag> {
impl<Tag: Provenance> fmt::Display for Scalar<Tag> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Scalar::Ptr(ptr) => write!(f, "pointer to {}", ptr),
Scalar::Int { .. } => fmt::Debug::fmt(self, f),
Scalar::Ptr(ptr) => write!(f, "pointer to {:?}", ptr),
Scalar::Int(int) => write!(f, "{:?}", int),
}
}
}
@ -169,38 +168,38 @@ impl<Tag> From<Double> for Scalar<Tag> {
}
}
impl Scalar<()> {
/// Tag this scalar with `new_tag` if it is a pointer, leave it unchanged otherwise.
///
/// Used by `MemPlace::replace_tag`.
#[inline]
pub fn with_tag<Tag>(self, new_tag: Tag) -> Scalar<Tag> {
match self {
Scalar::Ptr(ptr) => Scalar::Ptr(ptr.with_tag(new_tag)),
Scalar::Int(int) => Scalar::Int(int),
}
impl<Tag> From<Pointer<Tag>> for Scalar<Tag> {
#[inline(always)]
fn from(ptr: Pointer<Tag>) -> Self {
Scalar::Ptr(ptr)
}
}
impl<Tag> From<ScalarInt> for Scalar<Tag> {
#[inline(always)]
fn from(ptr: ScalarInt) -> Self {
Scalar::Int(ptr)
}
}
impl<'tcx, Tag> Scalar<Tag> {
pub const ZST: Self = Scalar::Int(ScalarInt::ZST);
/// Erase the tag from the scalar, if any.
///
/// Used by error reporting code to avoid having the error type depend on `Tag`.
#[inline]
pub fn erase_tag(self) -> Scalar {
match self {
Scalar::Ptr(ptr) => Scalar::Ptr(ptr.erase_tag()),
Scalar::Int(int) => Scalar::Int(int),
}
}
#[inline]
pub fn null_ptr(cx: &impl HasDataLayout) -> Self {
Scalar::Int(ScalarInt::null(cx.data_layout().pointer_size))
}
/// Create a Scalar from a pointer with an `Option<_>` tag (where `None` represents a plain integer).
pub fn from_maybe_pointer(ptr: Pointer<Option<Tag>>, cx: &impl HasDataLayout) -> Self {
match ptr.into_parts() {
(Some(tag), offset) => Scalar::Ptr(Pointer::new(tag, offset)),
(None, offset) => {
Scalar::Int(ScalarInt::try_from_uint(offset.bytes(), cx.pointer_size()).unwrap())
}
}
}
#[inline(always)]
fn ptr_op(
self,
@ -332,10 +331,11 @@ impl<'tcx, Tag> Scalar<Tag> {
Scalar::Int(f.into())
}
/// This is very rarely the method you want! You should dispatch on the type
/// and use `force_bits`/`assert_bits`/`force_ptr`/`assert_ptr`.
/// This is almost certainly not the method you want! You should dispatch on the type
/// and use `to_{u8,u16,...}`/`scalar_to_ptr` to perform ptr-to-int / int-to-ptr casts as needed.
///
/// This method only exists for the benefit of low-level memory operations
/// as well as the implementation of the `force_*` methods.
/// as well as the implementation of the above methods.
#[inline]
pub fn to_bits_or_ptr(
self,
@ -352,28 +352,13 @@ impl<'tcx, Tag> Scalar<Tag> {
}
}
/// This method is intentionally private!
/// It is just a helper for other methods in this file.
#[inline]
fn to_bits(self, target_size: Size) -> InterpResult<'tcx, u128> {
assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST");
match self {
Scalar::Int(int) => int.to_bits(target_size).map_err(|size| {
err_ub!(ScalarSizeMismatch {
target_size: target_size.bytes(),
data_size: size.bytes(),
})
.into()
}),
Scalar::Ptr(_) => throw_unsup!(ReadPointerAsBytes),
}
}
/// Do not call this method! It does not do ptr-to-int casts when needed.
#[inline(always)]
pub fn assert_bits(self, target_size: Size) -> u128 {
self.to_bits(target_size).expect("expected Raw bits but got a Pointer")
self.assert_int().assert_bits(target_size)
}
/// Do not call this method! It does not do ptr-to-int casts when needed.
#[inline]
pub fn assert_int(self) -> ScalarInt {
match self {
@ -382,6 +367,7 @@ impl<'tcx, Tag> Scalar<Tag> {
}
}
/// Do not call this method! It does not do int-to-ptr casts when needed.
#[inline]
pub fn assert_ptr(self) -> Pointer<Tag> {
match self {
@ -401,6 +387,44 @@ impl<'tcx, Tag> Scalar<Tag> {
pub fn is_ptr(self) -> bool {
matches!(self, Scalar::Ptr(_))
}
}
impl<'tcx, Tag: Provenance> Scalar<Tag> {
/// Erase the tag from the scalar, if any.
///
/// Used by error reporting code to avoid having the error type depend on `Tag`.
#[inline]
pub fn erase_for_fmt(self) -> Scalar {
match self {
Scalar::Ptr(ptr) => Scalar::Ptr(ptr.erase_for_fmt()),
Scalar::Int(int) => Scalar::Int(int),
}
}
/// Fundamental scalar-to-int (cast) operation. Many convenience wrappers exist below, that you
/// likely want to use instead.
///
/// Will perform ptr-to-int casts if needed and possible.
#[inline]
pub fn to_bits(self, target_size: Size) -> InterpResult<'tcx, u128> {
assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST");
match self {
Scalar::Int(int) => int.to_bits(target_size).map_err(|size| {
err_ub!(ScalarSizeMismatch {
target_size: target_size.bytes(),
data_size: size.bytes(),
})
.into()
}),
Scalar::Ptr(ptr) => {
if Tag::OFFSET_IS_ADDR {
Ok(ptr.offset.bytes().into())
} else {
throw_unsup!(ReadPointerAsBytes)
}
}
}
}
pub fn to_bool(self) -> InterpResult<'tcx, bool> {
let val = self.to_u8()?;
@ -507,28 +531,14 @@ impl<'tcx, Tag> Scalar<Tag> {
}
}
impl<Tag> From<Pointer<Tag>> for Scalar<Tag> {
#[inline(always)]
fn from(ptr: Pointer<Tag>) -> Self {
Scalar::Ptr(ptr)
}
}
impl<Tag> From<ScalarInt> for Scalar<Tag> {
#[inline(always)]
fn from(ptr: ScalarInt) -> Self {
Scalar::Int(ptr)
}
}
#[derive(Clone, Copy, Eq, PartialEq, TyEncodable, TyDecodable, HashStable, Hash)]
pub enum ScalarMaybeUninit<Tag = ()> {
pub enum ScalarMaybeUninit<Tag = AllocId> {
Scalar(Scalar<Tag>),
Uninit,
}
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
static_assert_size!(ScalarMaybeUninit, 24);
//FIXME static_assert_size!(ScalarMaybeUninit, 24);
impl<Tag> From<Scalar<Tag>> for ScalarMaybeUninit<Tag> {
#[inline(always)]
@ -546,7 +556,7 @@ impl<Tag> From<Pointer<Tag>> for ScalarMaybeUninit<Tag> {
// We want the `Debug` output to be readable as it is used by `derive(Debug)` for
// all the Miri types.
impl<Tag: fmt::Debug> fmt::Debug for ScalarMaybeUninit<Tag> {
impl<Tag: Provenance> fmt::Debug for ScalarMaybeUninit<Tag> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
ScalarMaybeUninit::Uninit => write!(f, "<uninitialized>"),
@ -555,7 +565,7 @@ impl<Tag: fmt::Debug> fmt::Debug for ScalarMaybeUninit<Tag> {
}
}
impl<Tag: fmt::Debug> fmt::Display for ScalarMaybeUninit<Tag> {
impl<Tag: Provenance> fmt::Display for ScalarMaybeUninit<Tag> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
ScalarMaybeUninit::Uninit => write!(f, "uninitialized bytes"),
@ -564,18 +574,7 @@ impl<Tag: fmt::Debug> fmt::Display for ScalarMaybeUninit<Tag> {
}
}
impl<'tcx, Tag> ScalarMaybeUninit<Tag> {
/// Erase the tag from the scalar, if any.
///
/// Used by error reporting code to avoid having the error type depend on `Tag`.
#[inline]
pub fn erase_tag(self) -> ScalarMaybeUninit {
match self {
ScalarMaybeUninit::Scalar(s) => ScalarMaybeUninit::Scalar(s.erase_tag()),
ScalarMaybeUninit::Uninit => ScalarMaybeUninit::Uninit,
}
}
impl<Tag> ScalarMaybeUninit<Tag> {
#[inline]
pub fn check_init(self) -> InterpResult<'static, Scalar<Tag>> {
match self {
@ -583,6 +582,19 @@ impl<'tcx, Tag> ScalarMaybeUninit<Tag> {
ScalarMaybeUninit::Uninit => throw_ub!(InvalidUninitBytes(None)),
}
}
}
impl<'tcx, Tag: Provenance> ScalarMaybeUninit<Tag> {
/// Erase the tag from the scalar, if any.
///
/// Used by error reporting code to avoid having the error type depend on `Tag`.
#[inline]
pub fn erase_for_fmt(self) -> ScalarMaybeUninit {
match self {
ScalarMaybeUninit::Scalar(s) => ScalarMaybeUninit::Scalar(s.erase_for_fmt()),
ScalarMaybeUninit::Uninit => ScalarMaybeUninit::Uninit,
}
}
#[inline(always)]
pub fn to_bool(self) -> InterpResult<'tcx, bool> {


@ -3,7 +3,7 @@
//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/mir/index.html
use crate::mir::coverage::{CodeRegion, CoverageKind};
use crate::mir::interpret::{Allocation, GlobalAlloc, Scalar};
use crate::mir::interpret::{Allocation, ConstValue, GlobalAlloc, Scalar};
use crate::mir::visit::MirVisitable;
use crate::ty::adjustment::PointerCast;
use crate::ty::codec::{TyDecoder, TyEncoder};
@ -2095,7 +2095,7 @@ impl<'tcx> Operand<'tcx> {
Operand::Constant(box Constant {
span,
user_ty: None,
literal: ConstantKind::Val(val.into(), ty),
literal: ConstantKind::Val(ConstValue::Scalar(val), ty),
})
}
@ -2458,7 +2458,7 @@ pub enum ConstantKind<'tcx> {
impl Constant<'tcx> {
pub fn check_static_ptr(&self, tcx: TyCtxt<'_>) -> Option<DefId> {
match self.literal.const_for_ty()?.val.try_to_scalar() {
Some(Scalar::Ptr(ptr)) => match tcx.global_alloc(ptr.alloc_id) {
Some(Scalar::Ptr(ptr)) => match tcx.global_alloc(ptr.provenance) {
GlobalAlloc::Static(def_id) => {
assert!(!tcx.is_thread_local_static(def_id));
Some(def_id)


@ -1,7 +1,6 @@
use std::convert::TryInto;
use crate::mir::interpret::ConstValue;
use crate::mir::interpret::Scalar;
use crate::mir::interpret::{AllocId, ConstValue, Scalar};
use crate::mir::Promoted;
use crate::ty::subst::{InternalSubsts, SubstsRef};
use crate::ty::ParamEnv;
@ -59,7 +58,7 @@ impl<'tcx> ConstKind<'tcx> {
}
#[inline]
pub fn try_to_scalar(self) -> Option<Scalar> {
pub fn try_to_scalar(self) -> Option<Scalar<AllocId>> {
self.try_to_value()?.try_to_scalar()
}


@ -987,6 +987,7 @@ pub trait PrettyPrinter<'tcx>:
) -> Result<Self::Const, Self::Error> {
define_scoped_cx!(self);
let (alloc_id, offset) = ptr.into_parts();
match ty.kind() {
// Byte strings (&[u8; N])
ty::Ref(
@ -1002,10 +1003,10 @@ pub trait PrettyPrinter<'tcx>:
..
},
_,
) => match self.tcx().get_global_alloc(ptr.alloc_id) {
) => match self.tcx().get_global_alloc(alloc_id) {
Some(GlobalAlloc::Memory(alloc)) => {
let len = int.assert_bits(self.tcx().data_layout.pointer_size);
let range = AllocRange { start: ptr.offset, size: Size::from_bytes(len) };
let range = AllocRange { start: offset, size: Size::from_bytes(len) };
if let Ok(byte_str) = alloc.get_bytes(&self.tcx(), range) {
p!(pretty_print_byte_str(byte_str))
} else {
@ -1020,7 +1021,7 @@ pub trait PrettyPrinter<'tcx>:
ty::FnPtr(_) => {
// FIXME: We should probably have a helper method to share code with the "Byte strings"
// printing above (which also has to handle pointers to all sorts of things).
match self.tcx().get_global_alloc(ptr.alloc_id) {
match self.tcx().get_global_alloc(alloc_id) {
Some(GlobalAlloc::Function(instance)) => {
self = self.typed_value(
|this| this.print_value_path(instance.def_id(), instance.substs),
@ -1068,8 +1069,8 @@ pub trait PrettyPrinter<'tcx>:
ty::Char if char::try_from(int).is_ok() => {
p!(write("{:?}", char::try_from(int).unwrap()))
}
// Raw pointers
ty::RawPtr(_) | ty::FnPtr(_) => {
// Pointer types
ty::Ref(..) | ty::RawPtr(_) | ty::FnPtr(_) => {
let data = int.assert_bits(self.tcx().data_layout.pointer_size);
self = self.typed_value(
|mut this| {


@ -597,7 +597,7 @@ fn check_const_value_eq<R: TypeRelation<'tcx>>(
}
(ConstValue::Scalar(Scalar::Ptr(a_val)), ConstValue::Scalar(Scalar::Ptr(b_val))) => {
a_val == b_val
|| match (tcx.global_alloc(a_val.alloc_id), tcx.global_alloc(b_val.alloc_id)) {
|| match (tcx.global_alloc(a_val.provenance), tcx.global_alloc(b_val.provenance)) {
(GlobalAlloc::Function(a_instance), GlobalAlloc::Function(b_instance)) => {
a_instance == b_instance
}


@ -16,7 +16,6 @@ use crate::interpret::{
#[derive(Clone, Debug)]
pub enum ConstEvalErrKind {
NeedsRfc(String),
PtrToIntCast,
ConstAccessesStatic,
ModifiedGlobal,
AssertFailure(AssertKind<ConstInt>),
@ -49,12 +48,6 @@ impl fmt::Display for ConstEvalErrKind {
NeedsRfc(ref msg) => {
write!(f, "\"{}\" needs an rfc before being allowed inside constants", msg)
}
PtrToIntCast => {
write!(
f,
"cannot cast pointer to integer because it was not created by cast from integer"
)
}
ConstAccessesStatic => write!(f, "constant accesses static"),
ModifiedGlobal => {
write!(f, "modifying a static's initial value from another static's initializer")


@ -136,19 +136,18 @@ pub(super) fn op_to_const<'tcx>(
// by-val is if we are in destructure_const, i.e., if this is (a field of) something that we
// "tried to make immediate" before. We wouldn't do that for non-slice scalar pairs or
// structs containing such.
op.try_as_mplace(ecx)
op.try_as_mplace()
};
let to_const_value = |mplace: &MPlaceTy<'_>| match mplace.ptr {
Scalar::Ptr(ptr) => {
let alloc = ecx.tcx.global_alloc(ptr.alloc_id).unwrap_memory();
ConstValue::ByRef { alloc, offset: ptr.offset }
let to_const_value = |mplace: &MPlaceTy<'_>| match mplace.ptr.into_parts() {
(Some(alloc_id), offset) => {
let alloc = ecx.tcx.global_alloc(alloc_id).unwrap_memory();
ConstValue::ByRef { alloc, offset }
}
Scalar::Int(int) => {
(None, offset) => {
assert!(mplace.layout.is_zst());
assert_eq!(
int.assert_bits(ecx.tcx.data_layout.pointer_size)
% u128::from(mplace.layout.align.abi.bytes()),
offset.bytes() % mplace.layout.align.abi.bytes(),
0,
"this MPlaceTy must come from a validated constant, thus we can assume the \
alignment is correct",
@ -162,14 +161,14 @@ pub(super) fn op_to_const<'tcx>(
Err(imm) => match *imm {
Immediate::Scalar(x) => match x {
ScalarMaybeUninit::Scalar(s) => ConstValue::Scalar(s),
ScalarMaybeUninit::Uninit => to_const_value(&op.assert_mem_place(ecx)),
ScalarMaybeUninit::Uninit => to_const_value(&op.assert_mem_place()),
},
Immediate::ScalarPair(a, b) => {
let (data, start) = match a.check_init().unwrap() {
Scalar::Ptr(ptr) => {
(ecx.tcx.global_alloc(ptr.alloc_id).unwrap_memory(), ptr.offset.bytes())
let (data, start) = match ecx.scalar_to_ptr(a.check_init().unwrap()).into_parts() {
(Some(alloc_id), offset) => {
(ecx.tcx.global_alloc(alloc_id).unwrap_memory(), offset.bytes())
}
Scalar::Int { .. } => (
(None, _offset) => (
ecx.tcx.intern_const_alloc(Allocation::from_bytes_byte_aligned_immutable(
b"" as &[u8],
)),
@ -369,6 +368,7 @@ pub fn eval_to_allocation_raw_provider<'tcx>(
inner = true;
}
};
let alloc_id = mplace.ptr.provenance.unwrap();
if let Err(error) = validation {
// Validation failed, report an error. This is always a hard error.
let err = ConstEvalErr::new(&ecx, error, None);
@ -381,9 +381,7 @@ pub fn eval_to_allocation_raw_provider<'tcx>(
"the raw bytes of the constant ({}",
display_allocation(
*ecx.tcx,
ecx.tcx
.global_alloc(mplace.ptr.assert_ptr().alloc_id)
.unwrap_memory()
ecx.tcx.global_alloc(alloc_id).unwrap_memory()
)
));
diag.emit();
@ -391,7 +389,7 @@ pub fn eval_to_allocation_raw_provider<'tcx>(
))
} else {
// Convert to raw constant
Ok(ConstAlloc { alloc_id: mplace.ptr.assert_ptr().alloc_id, ty: mplace.layout.ty })
Ok(ConstAlloc { alloc_id, ty: mplace.layout.ty })
}
}
}


@ -16,8 +16,8 @@ use rustc_target::abi::{Align, Size};
use rustc_target::spec::abi::Abi;
use crate::interpret::{
self, compile_time_machine, AllocId, Allocation, Frame, ImmTy, InterpCx, InterpResult, Memory,
OpTy, PlaceTy, Pointer, Scalar, StackPopUnwind,
self, compile_time_machine, AllocId, Allocation, Frame, ImmTy, InterpCx, InterpResult, OpTy,
PlaceTy, Scalar, StackPopUnwind,
};
use super::error::*;
@ -59,7 +59,7 @@ pub struct CompileTimeInterpreter<'mir, 'tcx> {
pub steps_remaining: usize,
/// The virtual call stack.
pub(crate) stack: Vec<Frame<'mir, 'tcx, (), ()>>,
pub(crate) stack: Vec<Frame<'mir, 'tcx, AllocId, ()>>,
}
#[derive(Copy, Clone, Debug)]
@ -184,7 +184,7 @@ impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> {
// is in bounds, because if they are in bounds, the pointer can't be null.
// Inequality with integers other than null can never be known for sure.
(Scalar::Int(int), Scalar::Ptr(ptr)) | (Scalar::Ptr(ptr), Scalar::Int(int)) => {
int.is_null() && !self.memory.ptr_may_be_null(ptr)
int.is_null() && !self.memory.ptr_may_be_null(ptr.into())
}
// FIXME: return `true` for at least some comparisons where we can reliably
// determine the result of runtime inequality tests at compile-time.
@ -356,10 +356,6 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
Err(ConstEvalErrKind::Abort(msg).into())
}
fn ptr_to_int(_mem: &Memory<'mir, 'tcx, Self>, _ptr: Pointer) -> InterpResult<'tcx, u64> {
Err(ConstEvalErrKind::PtrToIntCast.into())
}
fn binary_ptr_op(
_ecx: &InterpCx<'mir, 'tcx, Self>,
_bin_op: mir::BinOp,


@ -35,7 +35,7 @@ pub(crate) fn const_caller_location(
if intern_const_alloc_recursive(&mut ecx, InternKind::Constant, &loc_place).is_err() {
bug!("intern_const_alloc_recursive should not error in this case")
}
ConstValue::Scalar(loc_place.ptr)
ConstValue::Scalar(Scalar::Ptr(loc_place.ptr.into_pointer_or_offset().unwrap()))
}
/// Convert an evaluated constant to a type level constant
@ -179,9 +179,9 @@ pub(crate) fn deref_const<'tcx>(
let ecx = mk_eval_cx(tcx, DUMMY_SP, param_env, false);
let op = ecx.const_to_op(val, None).unwrap();
let mplace = ecx.deref_operand(&op).unwrap();
if let Scalar::Ptr(ptr) = mplace.ptr {
if let Some(alloc_id) = mplace.ptr.provenance {
assert_eq!(
tcx.get_global_alloc(ptr.alloc_id).unwrap().unwrap_memory().mutability,
tcx.get_global_alloc(alloc_id).unwrap().unwrap_memory().mutability,
Mutability::Not,
"deref_const cannot be used with mutable allocations as \
that could allow pattern matching to observe mutable statics",


@ -175,7 +175,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// (a) cast a raw ptr to usize, or
// (b) cast from an integer-like (including bool, char, enums).
// In both cases we want the bits.
let bits = self.force_bits(src.to_scalar()?, src.layout.size)?;
let bits = src.to_scalar()?.to_bits(src.layout.size)?;
Ok(self.cast_from_scalar(bits, src.layout, cast_ty).into())
}


@ -8,7 +8,6 @@ use rustc_index::vec::IndexVec;
use rustc_macros::HashStable;
use rustc_middle::ich::StableHashingContext;
use rustc_middle::mir;
use rustc_middle::mir::interpret::{GlobalId, InterpResult, Pointer, Scalar};
use rustc_middle::ty::layout::{self, TyAndLayout};
use rustc_middle::ty::{
self, query::TyCtxtAt, subst::SubstsRef, ParamEnv, Ty, TyCtxt, TypeFoldable,
@ -18,8 +17,9 @@ use rustc_span::{Pos, Span};
use rustc_target::abi::{Align, HasDataLayout, LayoutOf, Size, TargetDataLayout};
use super::{
Immediate, MPlaceTy, Machine, MemPlace, MemPlaceMeta, Memory, MemoryKind, Operand, Place,
PlaceTy, ScalarMaybeUninit, StackPopJump,
AllocId, GlobalId, Immediate, InterpResult, MPlaceTy, Machine, MemPlace, MemPlaceMeta, Memory,
MemoryKind, Operand, Place, PlaceTy, Pointer, Provenance, Scalar, ScalarMaybeUninit,
StackPopJump,
};
use crate::transform::validate::equal_up_to_regions;
use crate::util::storage::AlwaysLiveLocals;
@ -80,7 +80,7 @@ impl Drop for SpanGuard {
}
/// A stack frame.
pub struct Frame<'mir, 'tcx, Tag = (), Extra = ()> {
pub struct Frame<'mir, 'tcx, Tag = AllocId, Extra = ()> {
////////////////////////////////////////////////////////////////////////////////
// Function and callsite information
////////////////////////////////////////////////////////////////////////////////
@ -161,7 +161,7 @@ pub enum StackPopCleanup {
/// State of a local variable including a memoized layout
#[derive(Clone, PartialEq, Eq, HashStable)]
pub struct LocalState<'tcx, Tag = ()> {
pub struct LocalState<'tcx, Tag = AllocId> {
pub value: LocalValue<Tag>,
/// Don't modify if `Some`, this is only used to prevent computing the layout twice
#[stable_hasher(ignore)]
@ -169,8 +169,8 @@ pub struct LocalState<'tcx, Tag = ()> {
}
/// Current value of a local variable
#[derive(Copy, Clone, PartialEq, Eq, Debug, HashStable)] // Miri debug-prints these
pub enum LocalValue<Tag = ()> {
#[derive(Copy, Clone, PartialEq, Eq, HashStable)]
pub enum LocalValue<Tag = AllocId> {
/// This local is not currently alive, and cannot be used at all.
Dead,
/// This local is alive but not yet initialized. It can be written to
@ -186,6 +186,18 @@ pub enum LocalValue<Tag = ()> {
Live(Operand<Tag>),
}
impl<Tag: Provenance> std::fmt::Debug for LocalValue<Tag> {
// Miri debug-prints these
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
use LocalValue::*;
match self {
Dead => f.debug_tuple("Dead").finish(),
Uninitialized => f.debug_tuple("Uninitialized").finish(),
Live(o) => f.debug_tuple("Live").field(o).finish(),
}
}
}
impl<'tcx, Tag: Copy + 'static> LocalState<'tcx, Tag> {
/// Read the local's value or error if the local is not yet live or not live anymore.
///
@ -406,20 +418,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
#[inline(always)]
pub fn force_ptr(
&self,
scalar: Scalar<M::PointerTag>,
) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
self.memory.force_ptr(scalar)
}
#[inline(always)]
pub fn force_bits(
&self,
scalar: Scalar<M::PointerTag>,
size: Size,
) -> InterpResult<'tcx, u128> {
self.memory.force_bits(scalar, size)
pub fn scalar_to_ptr(&self, scalar: Scalar<M::PointerTag>) -> Pointer<Option<M::PointerTag>> {
self.memory.scalar_to_ptr(scalar)
}
/// Call this to turn untagged "global" pointers (obtained via `tcx`) into
@ -650,7 +650,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Ok(Some((size, align)))
}
ty::Dynamic(..) => {
let vtable = metadata.unwrap_meta();
let vtable = self.scalar_to_ptr(metadata.unwrap_meta());
// Read size and align from vtable (already checks size).
Ok(Some(self.read_size_and_align_from_vtable(vtable)?))
}
@ -898,8 +898,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
if let LocalValue::Live(Operand::Indirect(MemPlace { ptr, .. })) = local {
// All locals have a backing allocation, even if the allocation is empty
// due to the local having ZST type.
let ptr = ptr.assert_ptr();
trace!("deallocating local: {:?}", self.memory.dump_alloc(ptr.alloc_id));
trace!(
"deallocating local {:?}: {:?}",
local,
self.memory.dump_alloc(ptr.provenance.unwrap().erase_for_fmt())
);
self.memory.deallocate(ptr, None, MemoryKind::Stack)?;
};
Ok(())
@ -975,46 +978,45 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> std::fmt::Debug
match self.ecx.stack()[frame].locals[local].value {
LocalValue::Dead => write!(fmt, " is dead")?,
LocalValue::Uninitialized => write!(fmt, " is uninitialized")?,
LocalValue::Live(Operand::Indirect(mplace)) => match mplace.ptr {
Scalar::Ptr(ptr) => {
write!(
fmt,
" by align({}){} ref:",
mplace.align.bytes(),
match mplace.meta {
MemPlaceMeta::Meta(meta) => format!(" meta({:?})", meta),
MemPlaceMeta::Poison | MemPlaceMeta::None => String::new(),
}
)?;
allocs.push(ptr.alloc_id);
}
ptr => write!(fmt, " by integral ref: {:?}", ptr)?,
},
LocalValue::Live(Operand::Indirect(mplace)) => {
write!(
fmt,
" by align({}){} ref {:?}:",
mplace.align.bytes(),
match mplace.meta {
MemPlaceMeta::Meta(meta) => format!(" meta({:?})", meta),
MemPlaceMeta::Poison | MemPlaceMeta::None => String::new(),
},
mplace.ptr,
)?;
allocs.extend(mplace.ptr.map_erase_for_fmt().provenance);
}
LocalValue::Live(Operand::Immediate(Immediate::Scalar(val))) => {
write!(fmt, " {:?}", val)?;
if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr)) = val {
allocs.push(ptr.alloc_id);
allocs.push(ptr.provenance.erase_for_fmt());
}
}
LocalValue::Live(Operand::Immediate(Immediate::ScalarPair(val1, val2))) => {
write!(fmt, " ({:?}, {:?})", val1, val2)?;
if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr)) = val1 {
allocs.push(ptr.alloc_id);
allocs.push(ptr.provenance.erase_for_fmt());
}
if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr)) = val2 {
allocs.push(ptr.alloc_id);
allocs.push(ptr.provenance.erase_for_fmt());
}
}
}
write!(fmt, ": {:?}", self.ecx.memory.dump_allocs(allocs))
}
Place::Ptr(mplace) => match mplace.ptr {
Scalar::Ptr(ptr) => write!(
Place::Ptr(mplace) => match mplace.ptr.map_erase_for_fmt().provenance {
Some(alloc_id) => write!(
fmt,
"by align({}) ref: {:?}",
"by align({}) ref {:?}: {:?}",
mplace.align.bytes(),
self.ecx.memory.dump_alloc(ptr.alloc_id)
mplace.ptr,
self.ecx.memory.dump_alloc(alloc_id)
),
ptr => write!(fmt, " integral by ref: {:?}", ptr),
},


@ -20,18 +20,17 @@ use rustc_errors::ErrorReported;
use rustc_hir as hir;
use rustc_middle::mir::interpret::InterpResult;
use rustc_middle::ty::{self, layout::TyAndLayout, Ty};
use rustc_target::abi::Size;
use rustc_ast::Mutability;
use super::{AllocId, Allocation, InterpCx, MPlaceTy, Machine, MemoryKind, Scalar, ValueVisitor};
use super::{AllocId, Allocation, InterpCx, MPlaceTy, Machine, MemoryKind, ValueVisitor};
use crate::const_eval;
pub trait CompileTimeMachine<'mir, 'tcx, T> = Machine<
'mir,
'tcx,
MemoryKind = T,
PointerTag = (),
PointerTag = AllocId,
ExtraFnVal = !,
FrameExtra = (),
AllocExtra = (),
@ -136,7 +135,7 @@ fn intern_shallow<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx, const_eval:
};
// link the alloc id to the actual allocation
let alloc = tcx.intern_const_alloc(alloc);
leftover_allocations.extend(alloc.relocations().iter().map(|&(_, ((), reloc))| reloc));
leftover_allocations.extend(alloc.relocations().iter().map(|&(_, alloc_id)| alloc_id));
tcx.set_alloc_id_memory(alloc_id, alloc);
None
}
@ -203,10 +202,11 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory
if let ty::Dynamic(..) =
tcx.struct_tail_erasing_lifetimes(referenced_ty, self.ecx.param_env).kind()
{
if let Scalar::Ptr(vtable) = mplace.meta.unwrap_meta() {
let ptr = self.ecx.scalar_to_ptr(mplace.meta.unwrap_meta());
if let Some(alloc_id) = ptr.provenance {
// Explicitly choose const mode here, since vtables are immutable, even
// if the reference of the fat pointer is mutable.
self.intern_shallow(vtable.alloc_id, InternMode::Const, None);
self.intern_shallow(alloc_id, InternMode::Const, None);
} else {
// Validation will error (with a better message) on an invalid vtable pointer.
// Let validation show the error message, but make sure it *does* error.
@ -216,7 +216,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory
}
// Check if we have encountered this pointer+layout combination before.
// Only recurse for allocation-backed pointers.
if let Scalar::Ptr(ptr) = mplace.ptr {
if let Some(alloc_id) = mplace.ptr.provenance {
// Compute the mode with which we intern this. Our goal here is to make as many
// statics as we can immutable so they can be placed in read-only memory by LLVM.
let ref_mode = match self.mode {
@ -259,7 +259,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory
InternMode::Const
}
};
match self.intern_shallow(ptr.alloc_id, ref_mode, Some(referenced_ty)) {
match self.intern_shallow(alloc_id, ref_mode, Some(referenced_ty)) {
// No need to recurse, these are interned already and statics may have
// cycles, so we don't want to recurse there
Some(IsStaticOrFn) => {}
@ -321,7 +321,7 @@ where
leftover_allocations,
// The outermost allocation must exist, because we allocated it with
// `Memory::allocate`.
ret.ptr.assert_ptr().alloc_id,
ret.ptr.provenance.unwrap(),
base_intern_mode,
Some(ret.layout.ty),
);
@ -395,9 +395,9 @@ where
}
let alloc = tcx.intern_const_alloc(alloc);
tcx.set_alloc_id_memory(alloc_id, alloc);
for &(_, ((), reloc)) in alloc.relocations().iter() {
if leftover_allocations.insert(reloc) {
todo.push(reloc);
for &(_, alloc_id) in alloc.relocations().iter() {
if leftover_allocations.insert(alloc_id) {
todo.push(alloc_id);
}
}
} else if ecx.memory.dead_alloc_map.contains_key(&alloc_id) {
@ -430,9 +430,7 @@ impl<'mir, 'tcx: 'mir, M: super::intern::CompileTimeMachine<'mir, 'tcx, !>>
) -> InterpResult<'tcx, &'tcx Allocation> {
let dest = self.allocate(layout, MemoryKind::Stack)?;
f(self, &dest)?;
let ptr = dest.ptr.assert_ptr();
assert_eq!(ptr.offset, Size::ZERO);
let mut alloc = self.memory.alloc_map.remove(&ptr.alloc_id).unwrap().1;
let mut alloc = self.memory.alloc_map.remove(&dest.ptr.provenance.unwrap()).unwrap().1;
alloc.mutability = Mutability::Not;
Ok(self.tcx.intern_const_alloc(alloc))
}


@ -18,6 +18,7 @@ use rustc_target::abi::{Abi, Align, LayoutOf as _, Primitive, Size};
use super::{
util::ensure_monomorphic_enough, CheckInAllocMsg, ImmTy, InterpCx, Machine, OpTy, PlaceTy,
Pointer,
};
mod caller_location;
@ -138,7 +139,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
sym::caller_location => {
let span = self.find_closest_untracked_caller_location();
let location = self.alloc_caller_location_for_span(span);
self.write_scalar(location.ptr, dest)?;
self.write_immediate(location.to_ref(self), dest)?;
}
sym::min_align_of_val | sym::size_of_val => {
@ -190,7 +191,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let ty = substs.type_at(0);
let layout_of = self.layout_of(ty)?;
let val = self.read_scalar(&args[0])?.check_init()?;
let bits = self.force_bits(val, layout_of.size)?;
let bits = val.to_bits(layout_of.size)?;
let kind = match layout_of.abi {
Abi::Scalar(ref scalar) => scalar.value,
_ => span_bug!(
@ -238,7 +239,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// term since the sign of the second term can be inferred from this and
// the fact that the operation has overflowed (if either is 0 no
// overflow can occur)
let first_term: u128 = self.force_bits(l.to_scalar()?, l.layout.size)?;
let first_term: u128 = l.to_scalar()?.to_bits(l.layout.size)?;
let first_term_positive = first_term & (1 << (num_bits - 1)) == 0;
if first_term_positive {
// Negative overflow not possible since the positive first term
@ -298,7 +299,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let (val, overflowed, _ty) = self.overflowing_binary_op(bin_op, &l, &r)?;
if overflowed {
let layout = self.layout_of(substs.type_at(0))?;
let r_val = self.force_bits(r.to_scalar()?, layout.size)?;
let r_val = r.to_scalar()?.to_bits(layout.size)?;
if let sym::unchecked_shl | sym::unchecked_shr = intrinsic_name {
throw_ub_format!("overflowing shift by {} in `{}`", r_val, intrinsic_name);
} else {
@ -312,9 +313,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW))
let layout = self.layout_of(substs.type_at(0))?;
let val = self.read_scalar(&args[0])?.check_init()?;
let val_bits = self.force_bits(val, layout.size)?;
let val_bits = val.to_bits(layout.size)?;
let raw_shift = self.read_scalar(&args[1])?.check_init()?;
let raw_shift_bits = self.force_bits(raw_shift, layout.size)?;
let raw_shift_bits = raw_shift.to_bits(layout.size)?;
let width_bits = u128::from(layout.size.bits());
let shift_bits = raw_shift_bits % width_bits;
let inv_shift_bits = (width_bits - shift_bits) % width_bits;
@ -331,12 +332,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
self.copy_intrinsic(&args[0], &args[1], &args[2], /*nonoverlapping*/ false)?;
}
sym::offset => {
let ptr = self.read_scalar(&args[0])?.check_init()?;
let ptr = self.read_pointer(&args[0])?;
let offset_count = self.read_scalar(&args[1])?.to_machine_isize(self)?;
let pointee_ty = substs.type_at(0);
let offset_ptr = self.ptr_offset_inbounds(ptr, pointee_ty, offset_count)?;
self.write_scalar(offset_ptr, dest)?;
self.write_scalar(Scalar::from_maybe_pointer(offset_ptr, self), dest)?;
}
sym::arith_offset => {
let ptr = self.read_scalar(&args[0])?.check_init()?;
@ -376,9 +377,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
if !done {
// General case: we need two pointers.
let a = self.force_ptr(a)?;
let b = self.force_ptr(b)?;
if a.alloc_id != b.alloc_id {
let a = self.scalar_to_ptr(a);
let b = self.scalar_to_ptr(b);
let (a_alloc_id, a_offset, _) = self.memory.ptr_force_alloc(a)?;
let (b_alloc_id, b_offset, _) = self.memory.ptr_force_alloc(b)?;
if a_alloc_id != b_alloc_id {
throw_ub_format!(
"ptr_offset_from cannot compute offset of pointers into different \
allocations.",
@ -386,8 +389,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
let usize_layout = self.layout_of(self.tcx.types.usize)?;
let isize_layout = self.layout_of(self.tcx.types.isize)?;
let a_offset = ImmTy::from_uint(a.offset.bytes(), usize_layout);
let b_offset = ImmTy::from_uint(b.offset.bytes(), usize_layout);
let a_offset = ImmTy::from_uint(a_offset.bytes(), usize_layout);
let b_offset = ImmTy::from_uint(b_offset.bytes(), usize_layout);
let (val, _overflowed, _ty) =
self.overflowing_binary_op(BinOp::Sub, &a_offset, &b_offset)?;
let pointee_layout = self.layout_of(substs.type_at(0))?;
@ -513,10 +516,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// 0, so offset-by-0 (and only 0) is okay -- except that null cannot be offset by _any_ value.
pub fn ptr_offset_inbounds(
&self,
ptr: Scalar<M::PointerTag>,
ptr: Pointer<Option<M::PointerTag>>,
pointee_ty: Ty<'tcx>,
offset_count: i64,
) -> InterpResult<'tcx, Scalar<M::PointerTag>> {
) -> InterpResult<'tcx, Pointer<Option<M::PointerTag>>> {
// We cannot overflow i64 as a type's size must be <= isize::MAX.
let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
// The computed offset, in bytes, cannot overflow an isize.
@ -524,7 +527,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
offset_count.checked_mul(pointee_size).ok_or(err_ub!(PointerArithOverflow))?;
// The offset being in bounds cannot rely on "wrapping around" the address space.
// So, first rule out overflows in the pointer arithmetic.
let offset_ptr = ptr.ptr_signed_offset(offset_bytes, self)?;
let offset_ptr = ptr.signed_offset(offset_bytes, self)?;
// ptr and offset_ptr must be in bounds of the same allocated object. This means all of the
// memory between these pointers must be accessible. Note that we do not require the
// pointers to be properly aligned (unlike a read/write operation).
@ -558,8 +561,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
)
})?;
let src = self.read_scalar(&src)?.check_init()?;
let dst = self.read_scalar(&dst)?.check_init()?;
let src = self.read_pointer(&src)?;
let dst = self.read_pointer(&dst)?;
self.memory.copy(src, align, dst, align, size, nonoverlapping)
}
@ -572,8 +575,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let layout = self.layout_of(lhs.layout.ty.builtin_deref(true).unwrap().ty)?;
assert!(!layout.is_unsized());
let lhs = self.read_scalar(lhs)?.check_init()?;
let rhs = self.read_scalar(rhs)?.check_init()?;
let lhs = self.read_pointer(lhs)?;
let rhs = self.read_pointer(rhs)?;
let lhs_bytes = self.memory.read_bytes(lhs, layout.size)?;
let rhs_bytes = self.memory.read_bytes(rhs, layout.size)?;
Ok(Scalar::from_bool(lhs_bytes == rhs_bytes))


@ -96,7 +96,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let location = self.allocate(loc_layout, MemoryKind::CallerLocation).unwrap();
// Initialize fields.
self.write_immediate(file.to_ref(), &self.mplace_field(&location, 0).unwrap().into())
self.write_immediate(file.to_ref(self), &self.mplace_field(&location, 0).unwrap().into())
.expect("writing to memory we just allocated cannot fail");
self.write_scalar(line, &self.mplace_field(&location, 1).unwrap().into())
.expect("writing to memory we just allocated cannot fail");


@ -13,8 +13,8 @@ use rustc_target::abi::Size;
use rustc_target::spec::abi::Abi;
use super::{
AllocId, Allocation, CheckInAllocMsg, Frame, ImmTy, InterpCx, InterpResult, LocalValue,
MemPlace, Memory, MemoryKind, OpTy, Operand, PlaceTy, Pointer, Scalar, StackPopUnwind,
AllocId, Allocation, Frame, ImmTy, InterpCx, InterpResult, LocalValue, MemPlace, Memory,
MemoryKind, OpTy, Operand, PlaceTy, Pointer, Provenance, Scalar, StackPopUnwind,
};
/// Data returned by Machine::stack_pop,
@ -84,12 +84,8 @@ pub trait Machine<'mir, 'tcx>: Sized {
/// Additional memory kinds a machine wishes to distinguish from the builtin ones
type MemoryKind: Debug + std::fmt::Display + MayLeak + Eq + 'static;
/// Tag tracked alongside every pointer. This is used to implement "Stacked Borrows"
/// <https://www.ralfj.de/blog/2018/08/07/stacked-borrows.html>.
/// The `default()` is used for pointers to consts, statics, vtables and functions.
/// The `Debug` formatting is used for displaying pointers; we cannot use `Display`
/// as `()` does not implement that, but it should be "nice" output.
type PointerTag: Debug + Copy + Eq + Hash + 'static;
/// Pointers are "tagged" with provenance information; typically the `AllocId` they belong to.
type PointerTag: Provenance + Eq + Hash + 'static;
/// Machines can define extra (non-instance) things that represent values of function pointers.
/// For example, Miri uses this to return a function pointer from `dlsym`
@ -287,7 +283,10 @@ pub trait Machine<'mir, 'tcx>: Sized {
/// this will return an unusable tag (i.e., accesses will be UB)!
///
/// Called on the id returned by `thread_local_static_alloc_id` and `extern_static_alloc_id`, if needed.
fn tag_global_base_pointer(memory_extra: &Self::MemoryExtra, id: AllocId) -> Self::PointerTag;
fn tag_global_base_pointer(
memory_extra: &Self::MemoryExtra,
ptr: Pointer,
) -> Pointer<Self::PointerTag>;
/// Called to initialize the "extra" state of an allocation and make the pointers
/// it contains (in relocations) tagged. The way we construct allocations is
@ -400,31 +399,24 @@ pub trait Machine<'mir, 'tcx>: Sized {
Ok(StackPopJump::Normal)
}
fn int_to_ptr(
_mem: &Memory<'mir, 'tcx, Self>,
int: u64,
) -> InterpResult<'tcx, Pointer<Self::PointerTag>> {
Err((if int == 0 {
// This is UB, seriously.
// (`DanglingIntPointer` with these exact arguments has special printing code.)
err_ub!(DanglingIntPointer(0, CheckInAllocMsg::InboundsTest))
} else {
// This is just something we cannot support during const-eval.
err_unsup!(ReadBytesAsPointer)
})
.into())
}
/// "Int-to-pointer cast"
fn ptr_from_addr(
mem: &Memory<'mir, 'tcx, Self>,
addr: u64,
) -> Pointer<Option<Self::PointerTag>>;
fn ptr_to_int(
_mem: &Memory<'mir, 'tcx, Self>,
_ptr: Pointer<Self::PointerTag>,
) -> InterpResult<'tcx, u64>;
/// Convert a pointer with provenance into an allocation-offset pair,
/// or a `None` with an absolute address if that conversion is not possible.
fn ptr_get_alloc(
mem: &Memory<'mir, 'tcx, Self>,
ptr: Pointer<Self::PointerTag>,
) -> (Option<AllocId>, Size);
}
// A lot of the flexibility above is just needed for `Miri`, but all "compile-time" machines
// (CTFE and ConstProp) use the same instance. Here, we share that code.
pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {
type PointerTag = ();
type PointerTag = AllocId;
type ExtraFnVal = !;
type MemoryMap =
@ -467,19 +459,33 @@ pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {
#[inline(always)]
fn init_allocation_extra<'b>(
_memory_extra: &Self::MemoryExtra,
_id: AllocId,
id: AllocId,
alloc: Cow<'b, Allocation>,
_kind: Option<MemoryKind<Self::MemoryKind>>,
) -> (Cow<'b, Allocation<Self::PointerTag>>, Self::PointerTag) {
// We do not use a tag so we can just cheaply forward the allocation
(alloc, ())
(alloc, id)
}
#[inline(always)]
fn tag_global_base_pointer(
_memory_extra: &Self::MemoryExtra,
_id: AllocId,
) -> Self::PointerTag {
()
ptr: Pointer<AllocId>,
) -> Pointer<AllocId> {
ptr
}
#[inline(always)]
fn ptr_from_addr(_mem: &Memory<$mir, $tcx, Self>, addr: u64) -> Pointer<Option<AllocId>> {
Pointer::new(None, Size::from_bytes(addr))
}
#[inline(always)]
fn ptr_get_alloc(
_mem: &Memory<$mir, $tcx, Self>,
ptr: Pointer<AllocId>,
) -> (Option<AllocId>, Size) {
let (alloc_id, offset) = ptr.into_parts();
(Some(alloc_id), offset)
}
}

View File

@ -8,7 +8,7 @@
use std::borrow::Cow;
use std::collections::VecDeque;
use std::convert::{TryFrom, TryInto};
use std::convert::TryFrom;
use std::fmt;
use std::ptr;
@ -19,7 +19,8 @@ use rustc_target::abi::{Align, HasDataLayout, Size, TargetDataLayout};
use super::{
alloc_range, AllocId, AllocMap, AllocRange, Allocation, CheckInAllocMsg, GlobalAlloc,
InterpResult, Machine, MayLeak, Pointer, PointerArithmetic, Scalar, ScalarMaybeUninit,
InterpResult, Machine, MayLeak, Pointer, PointerArithmetic, Provenance, Scalar,
ScalarMaybeUninit,
};
use crate::util::pretty;
@ -162,25 +163,24 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
#[inline]
pub fn global_base_pointer(
&self,
mut ptr: Pointer,
ptr: Pointer<AllocId>,
) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
let (alloc_id, offset) = ptr.into_parts();
// We need to handle `extern static`.
let ptr = match self.tcx.get_global_alloc(ptr.alloc_id) {
let alloc_id = match self.tcx.get_global_alloc(alloc_id) {
Some(GlobalAlloc::Static(def_id)) if self.tcx.is_thread_local_static(def_id) => {
bug!("global memory cannot point to thread-local static")
}
Some(GlobalAlloc::Static(def_id)) if self.tcx.is_foreign_item(def_id) => {
ptr.alloc_id = M::extern_static_alloc_id(self, def_id)?;
ptr
M::extern_static_alloc_id(self, def_id)?
}
_ => {
// No need to change the `AllocId`.
ptr
alloc_id
}
};
// And we need to get the tag.
let tag = M::tag_global_base_pointer(&self.extra, ptr.alloc_id);
Ok(ptr.with_tag(tag))
Ok(M::tag_global_base_pointer(&self.extra, Pointer::new(alloc_id, offset)))
}
pub fn create_fn_alloc(
@ -237,18 +237,19 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
// This is a new allocation, not a new global one, so no `global_base_ptr`.
let (alloc, tag) = M::init_allocation_extra(&self.extra, id, Cow::Owned(alloc), Some(kind));
self.alloc_map.insert(id, (kind, alloc.into_owned()));
Pointer::from(id).with_tag(tag)
Pointer::new(tag, Size::ZERO)
}
pub fn reallocate(
&mut self,
ptr: Pointer<M::PointerTag>,
ptr: Pointer<Option<M::PointerTag>>,
old_size_and_align: Option<(Size, Align)>,
new_size: Size,
new_align: Align,
kind: MemoryKind<M::MemoryKind>,
) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
if ptr.offset.bytes() != 0 {
let (alloc_id, offset, ptr) = self.ptr_force_alloc(ptr)?;
if offset.bytes() != 0 {
throw_ub_format!(
"reallocating {:?} which does not point to the beginning of an object",
ptr
@ -260,7 +261,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
let new_ptr = self.allocate(new_size, new_align, kind)?;
let old_size = match old_size_and_align {
Some((size, _align)) => size,
None => self.get_raw(ptr.alloc_id)?.size(),
None => self.get_raw(alloc_id)?.size(),
};
// This will also call the access hooks.
self.copy(
@ -271,50 +272,51 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
old_size.min(new_size),
/*nonoverlapping*/ true,
)?;
self.deallocate(ptr, old_size_and_align, kind)?;
self.deallocate(ptr.into(), old_size_and_align, kind)?;
Ok(new_ptr)
}
pub fn deallocate(
&mut self,
ptr: Pointer<M::PointerTag>,
ptr: Pointer<Option<M::PointerTag>>,
old_size_and_align: Option<(Size, Align)>,
kind: MemoryKind<M::MemoryKind>,
) -> InterpResult<'tcx> {
trace!("deallocating: {}", ptr.alloc_id);
let (alloc_id, offset, ptr) = self.ptr_force_alloc(ptr)?;
trace!("deallocating: {}", alloc_id);
if ptr.offset.bytes() != 0 {
if offset.bytes() != 0 {
throw_ub_format!(
"deallocating {:?} which does not point to the beginning of an object",
ptr
);
}
let (alloc_kind, mut alloc) = match self.alloc_map.remove(&ptr.alloc_id) {
let (alloc_kind, mut alloc) = match self.alloc_map.remove(&alloc_id) {
Some(alloc) => alloc,
None => {
// Deallocating global memory -- always an error
return Err(match self.tcx.get_global_alloc(ptr.alloc_id) {
return Err(match self.tcx.get_global_alloc(alloc_id) {
Some(GlobalAlloc::Function(..)) => {
err_ub_format!("deallocating {}, which is a function", ptr.alloc_id)
err_ub_format!("deallocating {}, which is a function", alloc_id)
}
Some(GlobalAlloc::Static(..) | GlobalAlloc::Memory(..)) => {
err_ub_format!("deallocating {}, which is static memory", ptr.alloc_id)
err_ub_format!("deallocating {}, which is static memory", alloc_id)
}
None => err_ub!(PointerUseAfterFree(ptr.alloc_id)),
None => err_ub!(PointerUseAfterFree(alloc_id)),
}
.into());
}
};
if alloc.mutability == Mutability::Not {
throw_ub_format!("deallocating immutable allocation {}", ptr.alloc_id);
throw_ub_format!("deallocating immutable allocation {}", alloc_id);
}
if alloc_kind != kind {
throw_ub_format!(
"deallocating {}, which is {} memory, using {} deallocation operation",
ptr.alloc_id,
alloc_id,
alloc_kind,
kind
);
@ -323,7 +325,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
if size != alloc.size() || align != alloc.align {
throw_ub_format!(
"incorrect layout on deallocation: {} has size {} and alignment {}, but gave size {} and alignment {}",
ptr.alloc_id,
alloc_id,
alloc.size().bytes(),
alloc.align.bytes(),
size.bytes(),
@ -337,7 +339,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
M::memory_deallocated(&mut self.extra, &mut alloc.extra, ptr, size)?;
// Don't forget to remember size and align of this now-dead allocation
let old = self.dead_alloc_map.insert(ptr.alloc_id, (size, alloc.align));
let old = self.dead_alloc_map.insert(alloc_id, (size, alloc.align));
if old.is_some() {
bug!("Nothing can be deallocated twice");
}
@ -345,52 +347,61 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
Ok(())
}
/// Internal helper function for APIs that offer memory access based on `Scalar` pointers.
/// Internal helper function to determine the allocation and offset of a pointer (if any).
#[inline(always)]
pub(super) fn check_ptr_access(
fn get_ptr_access(
&self,
sptr: Scalar<M::PointerTag>,
ptr: Pointer<Option<M::PointerTag>>,
size: Size,
align: Align,
) -> InterpResult<'tcx, Option<Pointer<M::PointerTag>>> {
) -> InterpResult<'tcx, Option<(AllocId, Size, Pointer<M::PointerTag>)>> {
let align = M::enforce_alignment(&self.extra).then_some(align);
self.check_and_deref_ptr(sptr, size, align, CheckInAllocMsg::MemoryAccessTest, |ptr| {
let (size, align) =
self.get_size_and_align(ptr.alloc_id, AllocCheck::Dereferenceable)?;
Ok((size, align, ptr))
})
self.check_and_deref_ptr(
ptr,
size,
align,
CheckInAllocMsg::MemoryAccessTest,
|alloc_id, offset, ptr| {
let (size, align) =
self.get_size_and_align(alloc_id, AllocCheck::Dereferenceable)?;
Ok((size, align, (alloc_id, offset, ptr)))
},
)
}
/// Check if the given scalar is allowed to do a memory access of given `size` and `align`
/// Check if the given pointer is allowed to do a memory access of given `size` and `align`
/// (ignoring `M::enforce_alignment`). The caller can control the error message for the
/// out-of-bounds case.
#[inline(always)]
pub fn check_ptr_access_align(
&self,
sptr: Scalar<M::PointerTag>,
ptr: Pointer<Option<M::PointerTag>>,
size: Size,
align: Align,
msg: CheckInAllocMsg,
) -> InterpResult<'tcx> {
self.check_and_deref_ptr(sptr, size, Some(align), msg, |ptr| {
let (size, align) =
self.get_size_and_align(ptr.alloc_id, AllocCheck::Dereferenceable)?;
self.check_and_deref_ptr(ptr, size, Some(align), msg, |alloc_id, _, _| {
let (size, align) = self.get_size_and_align(alloc_id, AllocCheck::Dereferenceable)?;
Ok((size, align, ()))
})?;
Ok(())
}
/// Low-level helper function to check if a ptr is in-bounds and potentially return a reference
/// to the allocation it points to. Supports both shared and mutable references, to the actual
/// to the allocation it points to. Supports both shared and mutable references, as the actual
/// checking is offloaded to a helper closure. `align` defines whether and which alignment check
/// is done. Returns `None` for size 0, and otherwise `Some` of what `alloc_size` returned.
fn check_and_deref_ptr<T>(
&self,
sptr: Scalar<M::PointerTag>,
ptr: Pointer<Option<M::PointerTag>>,
size: Size,
align: Option<Align>,
msg: CheckInAllocMsg,
alloc_size: impl FnOnce(Pointer<M::PointerTag>) -> InterpResult<'tcx, (Size, Align, T)>,
alloc_size: impl FnOnce(
AllocId,
Size,
Pointer<M::PointerTag>,
) -> InterpResult<'tcx, (Size, Align, T)>,
) -> InterpResult<'tcx, Option<T>> {
fn check_offset_align(offset: u64, align: Align) -> InterpResult<'static> {
if offset % align.bytes() == 0 {
@ -405,53 +416,50 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
}
}
// Normalize to a `Pointer` if we definitely need one.
let normalized = if size.bytes() == 0 {
// Can be an integer, just take what we got. We do NOT `force_bits` here;
// if this is already a `Pointer` we want to do the bounds checks!
sptr
// Extract from the pointer an `Option<AllocId>` and an offset, which is relative to the
// allocation or (if that is `None`) an absolute address.
let ptr_or_addr = if size.bytes() == 0 {
// Let's see what we can do, but don't throw errors if there's nothing there.
self.ptr_try_get_alloc(ptr)
} else {
// A "real" access, we must get a pointer to be able to check the bounds.
Scalar::from(self.force_ptr(sptr)?)
// A "real" access, we insist on getting an `AllocId`.
Ok(self.ptr_force_alloc(ptr)?)
};
Ok(match normalized.to_bits_or_ptr(self.pointer_size(), self) {
Ok(bits) => {
let bits = u64::try_from(bits).unwrap(); // it's ptr-sized
assert!(size.bytes() == 0);
Ok(match ptr_or_addr {
Err(addr) => {
// No memory is actually being accessed.
debug_assert!(size.bytes() == 0);
// Must be non-null.
if bits == 0 {
if addr == 0 {
throw_ub!(DanglingIntPointer(0, msg))
}
// Must be aligned.
if let Some(align) = align {
check_offset_align(bits, align)?;
check_offset_align(addr, align)?;
}
None
}
Err(ptr) => {
let (allocation_size, alloc_align, ret_val) = alloc_size(ptr)?;
Ok((alloc_id, offset, ptr)) => {
let (allocation_size, alloc_align, ret_val) = alloc_size(alloc_id, offset, ptr)?;
// Test bounds. This also ensures non-null.
// It is sufficient to check this for the end pointer. The addition
// checks for overflow.
let end_ptr = ptr.offset(size, self)?;
if end_ptr.offset > allocation_size {
// equal is okay!
throw_ub!(PointerOutOfBounds { ptr: end_ptr.erase_tag(), msg, allocation_size })
// It is sufficient to check this for the end pointer. Also check for overflow!
if offset.checked_add(size, &self.tcx).map_or(true, |end| end > allocation_size) {
throw_ub!(PointerOutOfBounds { alloc_id, offset, size, allocation_size, msg })
}
// Test align. Check this last; if both bounds and alignment are violated
// we want the error to be about the bounds.
if let Some(align) = align {
if M::force_int_for_alignment_check(&self.extra) {
let bits = self
.force_bits(ptr.into(), self.pointer_size())
let addr = Scalar::from(ptr)
.to_machine_usize(&self.tcx)
.expect("ptr-to-int cast for align check should never fail");
check_offset_align(bits.try_into().unwrap(), align)?;
check_offset_align(addr, align)?;
} else {
// Check allocation alignment and offset alignment.
if alloc_align.bytes() < align.bytes() {
throw_ub!(AlignmentCheckFailed { has: alloc_align, required: align });
}
check_offset_align(ptr.offset.bytes(), align)?;
check_offset_align(offset.bytes(), align)?;
}
}
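In prose, the check above splits on provenance: a pointer that carries no `AllocId` is only acceptable for zero-sized accesses and must merely be non-null and aligned as an absolute address, while a pointer into a known allocation is bounds-checked (overflow-checked addition, one-past-the-end allowed) before its alignment is validated. A condensed standalone sketch of just that decision follows; it drops the `force_int_for_alignment_check` branch and the machine-specific error types, so treat it as illustrative only.

// Illustrative-only condensation of the bounds/alignment decision.
fn offset_aligned(offset: u64, align: u64) -> bool {
    offset % align == 0
}

// `Err(addr)`: address without provenance; `Ok((alloc_size, alloc_align, offset))`:
// pointer into a known allocation.
fn check_access(
    ptr_or_addr: Result<(u64, u64, u64), u64>,
    size: u64,
    align: u64,
) -> Result<(), &'static str> {
    match ptr_or_addr {
        Err(addr) => {
            // No memory is accessed, but the address must be non-null and aligned.
            assert_eq!(size, 0);
            if addr == 0 {
                return Err("dangling null pointer");
            }
            if !offset_aligned(addr, align) {
                return Err("misaligned pointer");
            }
            Ok(())
        }
        Ok((alloc_size, alloc_align, offset)) => {
            // Bounds first (so combined violations report out-of-bounds);
            // `checked_add` also catches overflow, and `end == alloc_size` is fine.
            if offset.checked_add(size).map_or(true, |end| end > alloc_size) {
                return Err("pointer out of bounds");
            }
            // Both the allocation's alignment and the offset must satisfy `align`.
            if alloc_align < align || !offset_aligned(offset, align) {
                return Err("misaligned pointer");
            }
            Ok(())
        }
    }
}

fn main() {
    assert!(check_access(Err(8), 0, 4).is_ok()); // ZST access at an aligned address
    assert!(check_access(Ok((16, 8, 12)), 8, 4).is_err()); // 12 + 8 > 16: out of bounds
}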
@ -463,13 +471,18 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
}
/// Test if the pointer might be null.
pub fn ptr_may_be_null(&self, ptr: Pointer<M::PointerTag>) -> bool {
let (size, _align) = self
.get_size_and_align(ptr.alloc_id, AllocCheck::MaybeDead)
.expect("alloc info with MaybeDead cannot fail");
// If the pointer is out-of-bounds, it may be null.
// Note that one-past-the-end (offset == size) is still inbounds, and never null.
ptr.offset > size
pub fn ptr_may_be_null(&self, ptr: Pointer<Option<M::PointerTag>>) -> bool {
match self.ptr_try_get_alloc(ptr) {
Ok((alloc_id, offset, _)) => {
let (size, _align) = self
.get_size_and_align(alloc_id, AllocCheck::MaybeDead)
.expect("alloc info with MaybeDead cannot fail");
// If the pointer is out-of-bounds, it may be null.
// Note that one-past-the-end (offset == size) is still inbounds, and never null.
offset > size
}
Err(offset) => offset == 0,
}
}
}
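Put differently, `ptr_may_be_null` now answers the question per provenance: a pointer with an `AllocId` can only be null if its offset has left the allocation (one-past-the-end is still in-bounds and never null), while a provenance-free address is null exactly when it is zero. A tiny illustrative sketch, with the allocation lookup replaced by a pre-computed size:

// Illustrative sketch; `Ok((alloc_size, offset))` models a pointer with provenance,
// `Err(addr)` an absolute address without provenance.
fn ptr_may_be_null(ptr_or_addr: Result<(u64, u64), u64>) -> bool {
    match ptr_or_addr {
        // Out-of-bounds offsets might wrap around to address 0; in-bounds ones cannot.
        Ok((alloc_size, offset)) => offset > alloc_size,
        Err(addr) => addr == 0,
    }
}

fn main() {
    assert!(!ptr_may_be_null(Ok((8, 8)))); // one-past-the-end: never null
    assert!(ptr_may_be_null(Err(0)));      // the integer 0 is null
}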
@ -522,8 +535,8 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
alloc,
M::GLOBAL_KIND.map(MemoryKind::Machine),
);
// Sanity check that this is the same pointer we would have gotten via `global_base_pointer`.
debug_assert_eq!(tag, M::tag_global_base_pointer(memory_extra, id));
// Sanity check that this is the same tag we would have gotten via `global_base_pointer`.
debug_assert!(tag == M::tag_global_base_pointer(memory_extra, id.into()).provenance);
Ok(alloc)
}
@ -566,30 +579,30 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
/// "Safe" (bounds and align-checked) allocation access.
pub fn get<'a>(
&'a self,
sptr: Scalar<M::PointerTag>,
ptr: Pointer<Option<M::PointerTag>>,
size: Size,
align: Align,
) -> InterpResult<'tcx, Option<AllocRef<'a, 'tcx, M::PointerTag, M::AllocExtra>>> {
let align = M::enforce_alignment(&self.extra).then_some(align);
let ptr_and_alloc = self.check_and_deref_ptr(
sptr,
ptr,
size,
align,
CheckInAllocMsg::MemoryAccessTest,
|ptr| {
let alloc = self.get_raw(ptr.alloc_id)?;
Ok((alloc.size(), alloc.align, (ptr, alloc)))
|alloc_id, offset, ptr| {
let alloc = self.get_raw(alloc_id)?;
Ok((alloc.size(), alloc.align, (alloc_id, offset, ptr, alloc)))
},
)?;
if let Some((ptr, alloc)) = ptr_and_alloc {
if let Some((alloc_id, offset, ptr, alloc)) = ptr_and_alloc {
M::memory_read(&self.extra, &alloc.extra, ptr, size)?;
let range = alloc_range(ptr.offset, size);
Ok(Some(AllocRef { alloc, range, tcx: self.tcx, alloc_id: ptr.alloc_id }))
let range = alloc_range(offset, size);
Ok(Some(AllocRef { alloc, range, tcx: self.tcx, alloc_id }))
} else {
// Even in this branch we have to be sure that we actually access the allocation, in
// order to ensure that `static FOO: Type = FOO;` causes a cycle error instead of
// magically pulling *any* ZST value from the ether. However, the `get_raw` above is
// always called when `sptr` is truly a `Pointer`, so we are good.
// always called when `ptr` has an `AllocId`.
Ok(None)
}
}
@ -638,19 +651,19 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
/// "Safe" (bounds and align-checked) allocation access.
pub fn get_mut<'a>(
&'a mut self,
sptr: Scalar<M::PointerTag>,
ptr: Pointer<Option<M::PointerTag>>,
size: Size,
align: Align,
) -> InterpResult<'tcx, Option<AllocRefMut<'a, 'tcx, M::PointerTag, M::AllocExtra>>> {
let ptr = self.check_ptr_access(sptr, size, align)?;
if let Some(ptr) = ptr {
let parts = self.get_ptr_access(ptr, size, align)?;
if let Some((alloc_id, offset, ptr)) = parts {
let tcx = self.tcx;
// FIXME: can we somehow avoid looking up the allocation twice here?
// We cannot call `get_raw_mut` inside `check_and_deref_ptr` as that would duplicate `&mut self`.
let (alloc, extra) = self.get_raw_mut(ptr.alloc_id)?;
let (alloc, extra) = self.get_raw_mut(alloc_id)?;
M::memory_written(extra, &mut alloc.extra, ptr, size)?;
let range = alloc_range(ptr.offset, size);
Ok(Some(AllocRefMut { alloc, range, tcx, alloc_id: ptr.alloc_id }))
let range = alloc_range(offset, size);
Ok(Some(AllocRefMut { alloc, range, tcx, alloc_id }))
} else {
Ok(None)
}
@ -740,14 +753,14 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
pub fn get_fn(
&self,
ptr: Scalar<M::PointerTag>,
ptr: Pointer<Option<M::PointerTag>>,
) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
let ptr = self.force_ptr(ptr)?; // We definitely need a pointer value.
if ptr.offset.bytes() != 0 {
throw_ub!(InvalidFunctionPointer(ptr.erase_tag()))
let (alloc_id, offset, ptr) = self.ptr_force_alloc(ptr)?;
if offset.bytes() != 0 {
throw_ub!(InvalidFunctionPointer(ptr.erase_for_fmt()))
}
self.get_fn_alloc(ptr.alloc_id)
.ok_or_else(|| err_ub!(InvalidFunctionPointer(ptr.erase_tag())).into())
self.get_fn_alloc(alloc_id)
.ok_or_else(|| err_ub!(InvalidFunctionPointer(ptr.erase_for_fmt())).into())
}
pub fn mark_immutable(&mut self, id: AllocId) -> InterpResult<'tcx> {
@ -786,7 +799,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
if reachable.insert(id) {
// This is a new allocation, add its relocations to `todo`.
if let Some((_, alloc)) = self.alloc_map.get(id) {
todo.extend(alloc.relocations().values().map(|&(_, target_id)| target_id));
todo.extend(alloc.relocations().values().map(|tag| tag.erase_for_fmt()));
}
}
}
@ -820,14 +833,14 @@ pub struct DumpAllocs<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> {
impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> std::fmt::Debug for DumpAllocs<'a, 'mir, 'tcx, M> {
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
// Cannot be a closure because it is generic in `Tag`, `Extra`.
fn write_allocation_track_relocs<'tcx, Tag: Copy + fmt::Debug, Extra>(
fn write_allocation_track_relocs<'tcx, Tag: Provenance, Extra>(
fmt: &mut std::fmt::Formatter<'_>,
tcx: TyCtxt<'tcx>,
allocs_to_print: &mut VecDeque<AllocId>,
alloc: &Allocation<Tag, Extra>,
) -> std::fmt::Result {
for &(_, target_id) in alloc.relocations().values() {
allocs_to_print.push_back(target_id);
for alloc_id in alloc.relocations().values().map(|tag| tag.erase_for_fmt()) {
allocs_to_print.push_back(alloc_id);
}
write!(fmt, "{}", pretty::display_allocation(tcx, alloc))
}
@ -930,8 +943,12 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
/// Reads the given number of bytes from memory. Returns them as a slice.
///
/// Performs appropriate bounds checks.
pub fn read_bytes(&self, sptr: Scalar<M::PointerTag>, size: Size) -> InterpResult<'tcx, &[u8]> {
let alloc_ref = match self.get(sptr, size, Align::ONE)? {
pub fn read_bytes(
&self,
ptr: Pointer<Option<M::PointerTag>>,
size: Size,
) -> InterpResult<'tcx, &[u8]> {
let alloc_ref = match self.get(ptr, size, Align::ONE)? {
Some(a) => a,
None => return Ok(&[]), // zero-sized access
};
@ -948,7 +965,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
/// Performs appropriate bounds checks.
pub fn write_bytes(
&mut self,
sptr: Scalar<M::PointerTag>,
ptr: Pointer<Option<M::PointerTag>>,
src: impl IntoIterator<Item = u8>,
) -> InterpResult<'tcx> {
let mut src = src.into_iter();
@ -957,7 +974,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
assert_eq!(lower, len, "can only write iterators with a precise length");
let size = Size::from_bytes(len);
let alloc_ref = match self.get_mut(sptr, size, Align::ONE)? {
let alloc_ref = match self.get_mut(ptr, size, Align::ONE)? {
Some(alloc_ref) => alloc_ref,
None => {
// zero-sized access
@ -984,9 +1001,9 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
pub fn copy(
&mut self,
src: Scalar<M::PointerTag>,
src: Pointer<Option<M::PointerTag>>,
src_align: Align,
dest: Scalar<M::PointerTag>,
dest: Pointer<Option<M::PointerTag>>,
dest_align: Align,
size: Size,
nonoverlapping: bool,
@ -996,9 +1013,9 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
pub fn copy_repeatedly(
&mut self,
src: Scalar<M::PointerTag>,
src: Pointer<Option<M::PointerTag>>,
src_align: Align,
dest: Scalar<M::PointerTag>,
dest: Pointer<Option<M::PointerTag>>,
dest_align: Align,
size: Size,
num_copies: u64,
@ -1006,22 +1023,22 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
) -> InterpResult<'tcx> {
let tcx = self.tcx;
// We need to do our own bounds-checks.
let src = self.check_ptr_access(src, size, src_align)?;
let dest = self.check_ptr_access(dest, size * num_copies, dest_align)?; // `Size` multiplication
let src_parts = self.get_ptr_access(src, size, src_align)?;
let dest_parts = self.get_ptr_access(dest, size * num_copies, dest_align)?; // `Size` multiplication
// FIXME: we look up both allocations twice here, once before for the `get_ptr_access`
// and once below to get the underlying `&[mut] Allocation`.
// Source alloc preparations and access hooks.
let src = match src {
let (src_alloc_id, src_offset, src) = match src_parts {
None => return Ok(()), // Zero-sized *source*; that means dst is also zero-sized and we have nothing to do.
Some(src_ptr) => src_ptr,
};
let src_alloc = self.get_raw(src.alloc_id)?;
let src_alloc = self.get_raw(src_alloc_id)?;
M::memory_read(&self.extra, &src_alloc.extra, src, size)?;
// We need the `dest` ptr for the next operation, so we get it now.
// We already did the source checks and called the hooks so we are good to return early.
let dest = match dest {
let (dest_alloc_id, dest_offset, dest) = match dest_parts {
None => return Ok(()), // Zero-sized *destination*.
Some(dest_ptr) => dest_ptr,
};
@ -1033,23 +1050,23 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
// relocations overlapping the edges; those would not be handled correctly).
let relocations = src_alloc.prepare_relocation_copy(
self,
alloc_range(src.offset, size),
dest.offset,
alloc_range(src_offset, size),
dest_offset,
num_copies,
);
// Prepare a copy of the initialization mask.
let compressed = src_alloc.compress_uninit_range(alloc_range(src.offset, size));
let compressed = src_alloc.compress_uninit_range(alloc_range(src_offset, size));
// This checks relocation edges on the src.
let src_bytes = src_alloc
.get_bytes_with_uninit_and_ptr(&tcx, alloc_range(src.offset, size))
.map_err(|e| e.to_interp_error(src.alloc_id))?
.get_bytes_with_uninit_and_ptr(&tcx, alloc_range(src_offset, size))
.map_err(|e| e.to_interp_error(src_alloc_id))?
.as_ptr(); // raw ptr, so we can also get a ptr to the destination allocation
// Destination alloc preparations and access hooks.
let (dest_alloc, extra) = self.get_raw_mut(dest.alloc_id)?;
let (dest_alloc, extra) = self.get_raw_mut(dest_alloc_id)?;
M::memory_written(extra, &mut dest_alloc.extra, dest, size * num_copies)?;
let dest_bytes = dest_alloc
.get_bytes_mut_ptr(&tcx, alloc_range(dest.offset, size * num_copies))
.get_bytes_mut_ptr(&tcx, alloc_range(dest_offset, size * num_copies))
.as_mut_ptr();
if compressed.no_bytes_init() {
@ -1059,7 +1076,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
// This also avoids writing to the target bytes so that the backing allocation is never
// touched if the bytes stay uninitialized for the whole interpreter execution. On contemporary
// operating systems this can avoid physically allocating the page.
dest_alloc.mark_init(alloc_range(dest.offset, size * num_copies), false); // `Size` multiplication
dest_alloc.mark_init(alloc_range(dest_offset, size * num_copies), false); // `Size` multiplication
dest_alloc.mark_relocation_range(relocations);
return Ok(());
}
@ -1070,11 +1087,11 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
// The pointers above remain valid even if the `HashMap` table is moved around because they
// point into the `Vec` storing the bytes.
unsafe {
if src.alloc_id == dest.alloc_id {
if src_alloc_id == dest_alloc_id {
if nonoverlapping {
// `Size` additions
if (src.offset <= dest.offset && src.offset + size > dest.offset)
|| (dest.offset <= src.offset && dest.offset + size > src.offset)
if (src_offset <= dest_offset && src_offset + size > dest_offset)
|| (dest_offset <= src_offset && dest_offset + size > src_offset)
{
throw_ub_format!("copy_nonoverlapping called on overlapping ranges")
}
@ -1101,7 +1118,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
// now fill in all the "init" data
dest_alloc.mark_compressed_init_range(
&compressed,
alloc_range(dest.offset, size),
alloc_range(dest_offset, size),
num_copies,
);
// copy the relocations to the destination
@ -1113,24 +1130,44 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
/// Machine pointer introspection.
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
pub fn force_ptr(
&self,
scalar: Scalar<M::PointerTag>,
) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
match scalar {
Scalar::Ptr(ptr) => Ok(ptr),
_ => M::int_to_ptr(&self, scalar.to_machine_usize(self)?),
pub fn scalar_to_ptr(&self, scalar: Scalar<M::PointerTag>) -> Pointer<Option<M::PointerTag>> {
match scalar.to_bits_or_ptr(self.pointer_size(), &self.tcx) {
Err(ptr) => ptr.into(),
Ok(bits) => {
let addr = u64::try_from(bits).unwrap();
M::ptr_from_addr(&self, addr)
}
}
}
pub fn force_bits(
/// Internal helper for turning a "maybe pointer" into a proper pointer (and some information
/// about where it points), or an absolute address.
pub(super) fn ptr_try_get_alloc(
&self,
scalar: Scalar<M::PointerTag>,
size: Size,
) -> InterpResult<'tcx, u128> {
match scalar.to_bits_or_ptr(size, self) {
Ok(bits) => Ok(bits),
Err(ptr) => Ok(M::ptr_to_int(&self, ptr)?.into()),
ptr: Pointer<Option<M::PointerTag>>,
) -> Result<(AllocId, Size, Pointer<M::PointerTag>), u64> {
match ptr.into_pointer_or_offset() {
Ok(ptr) => {
let (alloc_id, offset) = M::ptr_get_alloc(self, ptr);
if let Some(alloc_id) = alloc_id {
Ok((alloc_id, offset, ptr))
} else {
Err(offset.bytes())
}
}
Err(offset) => Err(offset.bytes()),
}
}
/// Internal helper for turning a "maybe pointer" into a proper pointer (and some information
/// about where it points).
#[inline(always)]
pub(super) fn ptr_force_alloc(
&self,
ptr: Pointer<Option<M::PointerTag>>,
) -> InterpResult<'tcx, (AllocId, Size, Pointer<M::PointerTag>)> {
self.ptr_try_get_alloc(ptr).map_err(|offset| {
err_ub!(DanglingIntPointer(offset, CheckInAllocMsg::InboundsTest)).into()
})
}
}
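Taken together, these helpers form the conversion pipeline that replaces the old `force_ptr`/`force_bits` calls: a `Scalar` becomes a `Pointer` with optional provenance without ever failing, and only the step that actually needs an allocation (`ptr_force_alloc`) can raise the dangling-integer-pointer error. A toy end-to-end model, with plain integers standing in for the real `Scalar`, `Pointer`, `AllocId` and `Size` types:

// Toy model of the pipeline: Scalar --scalar_to_ptr--> pointer with optional provenance
//                                   --ptr_try_get_alloc--> allocation or absolute address.
#[derive(Clone, Copy, Debug)]
enum Scalar {
    Int(u64),      // raw bits
    Ptr(u64, u64), // (alloc_id, offset), i.e. a value with provenance
}

#[derive(Clone, Copy, Debug)]
struct Ptr {
    alloc_id: Option<u64>, // None = pure integer address
    offset: u64,
}

// Infallible: integers become pointers without provenance.
fn scalar_to_ptr(s: Scalar) -> Ptr {
    match s {
        Scalar::Int(addr) => Ptr { alloc_id: None, offset: addr },
        Scalar::Ptr(id, offset) => Ptr { alloc_id: Some(id), offset },
    }
}

// `Ok((alloc_id, offset))` if the pointer has provenance, `Err(addr)` otherwise.
fn ptr_try_get_alloc(p: Ptr) -> Result<(u64, u64), u64> {
    match p.alloc_id {
        Some(id) => Ok((id, p.offset)),
        None => Err(p.offset),
    }
}

// Like `ptr_try_get_alloc`, but a provenance-free address becomes a
// "dangling integer pointer" error instead of a fallback value.
fn ptr_force_alloc(p: Ptr) -> Result<(u64, u64), String> {
    ptr_try_get_alloc(p).map_err(|addr| format!("dangling integer pointer 0x{:x}", addr))
}

fn main() {
    assert_eq!(ptr_try_get_alloc(scalar_to_ptr(Scalar::Int(8))), Err(8));
    assert!(ptr_force_alloc(scalar_to_ptr(Scalar::Int(8))).is_err());
    assert_eq!(ptr_try_get_alloc(scalar_to_ptr(Scalar::Ptr(1, 4))), Ok((1, 4)));
}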

View File

@ -15,8 +15,9 @@ use rustc_target::abi::{Abi, HasDataLayout, LayoutOf, Size, TagEncoding};
use rustc_target::abi::{VariantIdx, Variants};
use super::{
alloc_range, from_known_layout, mir_assign_valid_types, ConstValue, GlobalId, InterpCx,
InterpResult, MPlaceTy, Machine, MemPlace, Place, PlaceTy, Pointer, Scalar, ScalarMaybeUninit,
alloc_range, from_known_layout, mir_assign_valid_types, AllocId, ConstValue, GlobalId,
InterpCx, InterpResult, MPlaceTy, Machine, MemPlace, Place, PlaceTy, Pointer, Provenance,
Scalar, ScalarMaybeUninit,
};
/// An `Immediate` represents a single immediate self-contained Rust value.
@ -26,14 +27,24 @@ use super::{
/// operations and wide pointers. This idea was taken from rustc's codegen.
/// In particular, thanks to `ScalarPair`, arithmetic operations and casts can be entirely
/// defined on `Immediate`, and do not have to work with a `Place`.
#[derive(Copy, Clone, Debug, PartialEq, Eq, HashStable, Hash)]
pub enum Immediate<Tag = ()> {
#[derive(Copy, Clone, PartialEq, Eq, HashStable, Hash)]
pub enum Immediate<Tag = AllocId> {
Scalar(ScalarMaybeUninit<Tag>),
ScalarPair(ScalarMaybeUninit<Tag>, ScalarMaybeUninit<Tag>),
}
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(Immediate, 56);
//FIXME rustc_data_structures::static_assert_size!(Immediate, 56);
impl<Tag: Provenance> std::fmt::Debug for Immediate<Tag> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
use Immediate::*;
match self {
Scalar(s) => f.debug_tuple("Scalar").field(s).finish(),
ScalarPair(s1, s2) => f.debug_tuple("ScalarPair").field(s1).field(s2).finish(),
}
}
}
impl<Tag> From<ScalarMaybeUninit<Tag>> for Immediate<Tag> {
#[inline(always)]
@ -81,26 +92,33 @@ impl<'tcx, Tag> Immediate<Tag> {
// ScalarPair needs a type to interpret, so we often have an immediate and a type together
// as input for binary and cast operations.
#[derive(Copy, Clone, Debug)]
pub struct ImmTy<'tcx, Tag = ()> {
#[derive(Copy, Clone)]
pub struct ImmTy<'tcx, Tag = AllocId> {
imm: Immediate<Tag>,
pub layout: TyAndLayout<'tcx>,
}
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(ImmTy<'_>, 72);
//FIXME rustc_data_structures::static_assert_size!(ImmTy<'_>, 72);
impl<Tag: Copy> std::fmt::Display for ImmTy<'tcx, Tag> {
impl<'tcx, Tag: Provenance> std::fmt::Debug for ImmTy<'tcx, Tag> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let ImmTy { imm, layout } = self;
f.debug_struct("ImmTy").field("imm", imm).field("layout", layout).finish()
}
}
impl<Tag: Provenance> std::fmt::Display for ImmTy<'tcx, Tag> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
/// Helper function for printing a scalar to a FmtPrinter
fn p<'a, 'tcx, F: std::fmt::Write, Tag>(
fn p<'a, 'tcx, F: std::fmt::Write, Tag: Provenance>(
cx: FmtPrinter<'a, 'tcx, F>,
s: ScalarMaybeUninit<Tag>,
ty: Ty<'tcx>,
) -> Result<FmtPrinter<'a, 'tcx, F>, std::fmt::Error> {
match s {
ScalarMaybeUninit::Scalar(s) => {
cx.pretty_print_const_scalar(s.erase_tag(), ty, true)
cx.pretty_print_const_scalar(s.erase_for_fmt(), ty, true)
}
ScalarMaybeUninit::Uninit => cx.typed_value(
|mut this| {
@ -120,11 +138,11 @@ impl<Tag: Copy> std::fmt::Display for ImmTy<'tcx, Tag> {
p(cx, s, ty)?;
return Ok(());
}
write!(f, "{}: {}", s.erase_tag(), self.layout.ty)
write!(f, "{}: {}", s.erase_for_fmt(), self.layout.ty)
}
Immediate::ScalarPair(a, b) => {
// FIXME(oli-obk): at least print tuples and slices nicely
write!(f, "({}, {}): {}", a.erase_tag(), b.erase_tag(), self.layout.ty,)
write!(f, "({}, {}): {}", a.erase_for_fmt(), b.erase_for_fmt(), self.layout.ty,)
}
}
})
@ -142,14 +160,24 @@ impl<'tcx, Tag> std::ops::Deref for ImmTy<'tcx, Tag> {
/// An `Operand` is the result of computing a `mir::Operand`. It can be immediate,
/// or still in memory. The latter is an optimization, to delay reading that chunk of
/// memory and to avoid having to store arbitrary-sized data here.
#[derive(Copy, Clone, Debug, PartialEq, Eq, HashStable, Hash)]
pub enum Operand<Tag = ()> {
#[derive(Copy, Clone, PartialEq, Eq, HashStable, Hash)]
pub enum Operand<Tag = AllocId> {
Immediate(Immediate<Tag>),
Indirect(MemPlace<Tag>),
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct OpTy<'tcx, Tag = ()> {
impl<Tag: Provenance> std::fmt::Debug for Operand<Tag> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
use Operand::*;
match self {
Immediate(i) => f.debug_tuple("Immediate").field(i).finish(),
Indirect(p) => f.debug_tuple("Indirect").field(p).finish(),
}
}
}
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
pub struct OpTy<'tcx, Tag = AllocId> {
op: Operand<Tag>, // Keep this private; it helps enforce invariants.
pub layout: TyAndLayout<'tcx>,
}
@ -157,6 +185,13 @@ pub struct OpTy<'tcx, Tag = ()> {
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(OpTy<'_, ()>, 80);
impl<'tcx, Tag: Provenance> std::fmt::Debug for OpTy<'tcx, Tag> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let OpTy { op, layout } = self;
f.debug_struct("OpTy").field("op", op).field("layout", layout).finish()
}
}
impl<'tcx, Tag> std::ops::Deref for OpTy<'tcx, Tag> {
type Target = Operand<Tag>;
#[inline(always)]
@ -225,19 +260,6 @@ impl<'tcx, Tag: Copy> ImmTy<'tcx, Tag> {
}
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// Normalize `place.ptr` to a `Pointer` if this is a place and not a ZST.
/// Can be helpful to avoid lots of `force_ptr` calls later, if this place is used a lot.
#[inline]
pub fn force_op_ptr(
&self,
op: &OpTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
match op.try_as_mplace(self) {
Ok(mplace) => Ok(self.force_mplace_ptr(mplace)?.into()),
Err(imm) => Ok(imm.into()), // Nothing to cast/force
}
}
/// Try reading an immediate in memory; this is interesting particularly for `ScalarPair`.
/// Returns `None` if the layout does not permit loading this as a value.
fn try_read_immediate_from_mplace(
@ -291,7 +313,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
&self,
src: &OpTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, Result<ImmTy<'tcx, M::PointerTag>, MPlaceTy<'tcx, M::PointerTag>>> {
Ok(match src.try_as_mplace(self) {
Ok(match src.try_as_mplace() {
Ok(ref mplace) => {
if let Some(val) = self.try_read_immediate_from_mplace(mplace)? {
Ok(val)
@ -324,6 +346,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Ok(self.read_immediate(op)?.to_scalar_or_uninit())
}
/// Read a pointer from a place.
pub fn read_pointer(
&self,
op: &OpTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, Pointer<Option<M::PointerTag>>> {
Ok(self.scalar_to_ptr(self.read_scalar(op)?.check_init()?))
}
// Turn the wide MPlace into a string (must already be dereferenced!)
pub fn read_str(&self, mplace: &MPlaceTy<'tcx, M::PointerTag>) -> InterpResult<'tcx, &str> {
let len = mplace.len(self)?;
@ -338,7 +368,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
op: &OpTy<'tcx, M::PointerTag>,
field: usize,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
let base = match op.try_as_mplace(self) {
let base = match op.try_as_mplace() {
Ok(ref mplace) => {
// We can reuse the mplace field computation logic for indirect operands.
let field = self.mplace_field(mplace, field)?;
@ -381,7 +411,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
self.operand_field(op, index)
} else {
// Indexing into a big array. This must be an mplace.
let mplace = op.assert_mem_place(self);
let mplace = op.assert_mem_place();
Ok(self.mplace_index(&mplace, index)?.into())
}
}
@ -392,7 +422,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
variant: VariantIdx,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
// Downcasts only change the layout
Ok(match op.try_as_mplace(self) {
Ok(match op.try_as_mplace() {
Ok(ref mplace) => self.mplace_downcast(mplace, variant)?.into(),
Err(..) => {
let layout = op.layout.for_variant(self, variant);
@ -414,7 +444,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Subslice { .. } | ConstantIndex { .. } | Index(_) => {
// The rest should only occur as mplace, we do not use Immediates for types
// allowing such operations. This matches place_projection forcing an allocation.
let mplace = base.assert_mem_place(self);
let mplace = base.assert_mem_place();
self.mplace_projection(&mplace, proj_elem)?.into()
}
})
@ -580,9 +610,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// We rely on mutability being set correctly in that allocation to prevent writes
// where none should happen.
let ptr = self.global_base_pointer(Pointer::new(id, offset))?;
Operand::Indirect(MemPlace::from_ptr(ptr, layout.align.abi))
Operand::Indirect(MemPlace::from_ptr(ptr.into(), layout.align.abi))
}
ConstValue::Scalar(x) => Operand::Immediate(tag_scalar(x)?.into()),
ConstValue::Scalar(x) => Operand::Immediate(tag_scalar(x.into())?.into()),
ConstValue::Slice { data, start, end } => {
// We rely on mutability being set correctly in `data` to prevent writes
// where none should happen.
@ -658,9 +688,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Figure out which discriminant and variant this corresponds to.
Ok(match *tag_encoding {
TagEncoding::Direct => {
let tag_bits = self
.force_bits(tag_val, tag_layout.size)
.map_err(|_| err_ub!(InvalidTag(tag_val.erase_tag())))?;
let tag_bits = tag_val
.to_bits(tag_layout.size)
.map_err(|_| err_ub!(InvalidTag(tag_val.erase_for_fmt())))?;
// Cast bits from tag layout to discriminant layout.
let discr_val = self.cast_from_scalar(tag_bits, tag_layout, discr_layout.ty);
let discr_bits = discr_val.assert_bits(discr_layout.size);
@ -677,7 +707,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
_ => span_bug!(self.cur_span(), "tagged layout for non-adt non-generator"),
}
.ok_or_else(|| err_ub!(InvalidTag(tag_val.erase_tag())))?;
.ok_or_else(|| err_ub!(InvalidTag(tag_val.erase_for_fmt())))?;
// Return the cast value, and the index.
(discr_val, index.0)
}
@ -691,9 +721,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// The niche must be just 0 (which an inbounds pointer value never is)
let ptr_valid = niche_start == 0
&& variants_start == variants_end
&& !self.memory.ptr_may_be_null(ptr);
&& !self.memory.ptr_may_be_null(ptr.into());
if !ptr_valid {
throw_ub!(InvalidTag(tag_val.erase_tag()))
throw_ub!(InvalidTag(tag_val.erase_for_fmt()))
}
dataful_variant
}

View File

@ -318,8 +318,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
right.layout.ty
);
let l = self.force_bits(left.to_scalar()?, left.layout.size)?;
let r = self.force_bits(right.to_scalar()?, right.layout.size)?;
let l = left.to_scalar()?.to_bits(left.layout.size)?;
let r = right.to_scalar()?.to_bits(right.layout.size)?;
self.binary_int_op(bin_op, l, left.layout, r, right.layout)
}
_ if left.layout.ty.is_any_ptr() => {
@ -386,7 +386,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
_ => {
assert!(layout.ty.is_integral());
let val = self.force_bits(val, layout.size)?;
let val = val.to_bits(layout.size)?;
let (res, overflow) = match un_op {
Not => (self.truncate(!val, layout), false), // bitwise negation, then truncate
Neg => {

View File

@ -3,7 +3,6 @@
//! All high-level functions to write to memory work on places as destinations.
use std::convert::TryFrom;
use std::fmt::Debug;
use std::hash::Hash;
use rustc_ast::Mutability;
@ -15,14 +14,14 @@ use rustc_target::abi::{Abi, Align, FieldsShape, TagEncoding};
use rustc_target::abi::{HasDataLayout, LayoutOf, Size, VariantIdx, Variants};
use super::{
alloc_range, mir_assign_valid_types, AllocRef, AllocRefMut, ConstAlloc, ImmTy, Immediate,
InterpCx, InterpResult, LocalValue, Machine, MemoryKind, OpTy, Operand, Pointer,
PointerArithmetic, Scalar, ScalarMaybeUninit,
alloc_range, mir_assign_valid_types, AllocId, AllocRef, AllocRefMut, CheckInAllocMsg,
ConstAlloc, ImmTy, Immediate, InterpCx, InterpResult, LocalValue, Machine, MemoryKind, OpTy,
Operand, Pointer, PointerArithmetic, Provenance, Scalar, ScalarMaybeUninit,
};
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable)]
#[derive(Copy, Clone, Hash, PartialEq, Eq, HashStable)]
/// Information required for the sound usage of a `MemPlace`.
pub enum MemPlaceMeta<Tag = ()> {
pub enum MemPlaceMeta<Tag = AllocId> {
/// The unsized payload (e.g. length for slices or vtable pointer for trait objects).
Meta(Scalar<Tag>),
/// `Sized` types or unsized `extern type`
@ -35,7 +34,18 @@ pub enum MemPlaceMeta<Tag = ()> {
}
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(MemPlaceMeta, 24);
//FIXME rustc_data_structures::static_assert_size!(MemPlaceMeta, 24);
impl<Tag: Provenance> std::fmt::Debug for MemPlaceMeta<Tag> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
use MemPlaceMeta::*;
match self {
Meta(s) => f.debug_tuple("Meta").field(s).finish(),
None => f.debug_tuple("None").finish(),
Poison => f.debug_tuple("Poison").finish(),
}
}
}
impl<Tag> MemPlaceMeta<Tag> {
pub fn unwrap_meta(self) -> Scalar<Tag> {
@ -53,21 +63,22 @@ impl<Tag> MemPlaceMeta<Tag> {
}
}
pub fn erase_tag(self) -> MemPlaceMeta<()> {
pub fn erase_for_fmt(self) -> MemPlaceMeta
where
Tag: Provenance,
{
match self {
Self::Meta(s) => MemPlaceMeta::Meta(s.erase_tag()),
Self::Meta(s) => MemPlaceMeta::Meta(s.erase_for_fmt()),
Self::None => MemPlaceMeta::None,
Self::Poison => MemPlaceMeta::Poison,
}
}
}
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable)]
pub struct MemPlace<Tag = ()> {
/// A place may have an integral pointer for ZSTs, and since it might
/// be turned back into a reference before ever being dereferenced.
/// However, it may never be uninit.
pub ptr: Scalar<Tag>,
#[derive(Copy, Clone, Hash, PartialEq, Eq, HashStable)]
pub struct MemPlace<Tag = AllocId> {
/// The pointer can be a pure integer, with the `None` tag.
pub ptr: Pointer<Option<Tag>>,
pub align: Align,
/// Metadata for unsized places. Interpretation is up to the type.
/// Must not be present for sized types, but can be missing for unsized types
@ -76,10 +87,21 @@ pub struct MemPlace<Tag = ()> {
}
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(MemPlace, 56);
//FIXME rustc_data_structures::static_assert_size!(MemPlace, 56);
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable)]
pub enum Place<Tag = ()> {
impl<Tag: Provenance> std::fmt::Debug for MemPlace<Tag> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let MemPlace { ptr, align, meta } = self;
f.debug_struct("MemPlace")
.field("ptr", ptr)
.field("align", align)
.field("meta", meta)
.finish()
}
}
#[derive(Copy, Clone, Hash, PartialEq, Eq, HashStable)]
pub enum Place<Tag = AllocId> {
/// A place referring to a value allocated in the `Memory` system.
Ptr(MemPlace<Tag>),
@ -89,16 +111,35 @@ pub enum Place<Tag = ()> {
}
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(Place, 64);
//FIXME rustc_data_structures::static_assert_size!(Place, 64);
#[derive(Copy, Clone, Debug)]
pub struct PlaceTy<'tcx, Tag = ()> {
impl<Tag: Provenance> std::fmt::Debug for Place<Tag> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
use Place::*;
match self {
Ptr(p) => f.debug_tuple("Ptr").field(p).finish(),
Local { frame, local } => {
f.debug_struct("Local").field("frame", frame).field("local", local).finish()
}
}
}
}
#[derive(Copy, Clone)]
pub struct PlaceTy<'tcx, Tag = AllocId> {
place: Place<Tag>, // Keep this private; it helps enforce invariants.
pub layout: TyAndLayout<'tcx>,
}
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(PlaceTy<'_>, 80);
//FIXME rustc_data_structures::static_assert_size!(PlaceTy<'_>, 80);
impl<'tcx, Tag: Provenance> std::fmt::Debug for PlaceTy<'tcx, Tag> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let PlaceTy { place, layout } = self;
f.debug_struct("PlaceTy").field("place", place).field("layout", layout).finish()
}
}
impl<'tcx, Tag> std::ops::Deref for PlaceTy<'tcx, Tag> {
type Target = Place<Tag>;
@ -109,14 +150,21 @@ impl<'tcx, Tag> std::ops::Deref for PlaceTy<'tcx, Tag> {
}
/// A MemPlace with its layout. Constructing it is only possible in this module.
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
pub struct MPlaceTy<'tcx, Tag = ()> {
#[derive(Copy, Clone, Hash, Eq, PartialEq)]
pub struct MPlaceTy<'tcx, Tag = AllocId> {
mplace: MemPlace<Tag>,
pub layout: TyAndLayout<'tcx>,
}
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(MPlaceTy<'_>, 72);
//FIXME rustc_data_structures::static_assert_size!(MPlaceTy<'_>, 72);
impl<'tcx, Tag: Provenance> std::fmt::Debug for MPlaceTy<'tcx, Tag> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let MPlaceTy { mplace, layout } = self;
f.debug_struct("MPlaceTy").field("mplace", mplace).field("layout", layout).finish()
}
}
impl<'tcx, Tag> std::ops::Deref for MPlaceTy<'tcx, Tag> {
type Target = MemPlace<Tag>;
@ -134,34 +182,32 @@ impl<'tcx, Tag> From<MPlaceTy<'tcx, Tag>> for PlaceTy<'tcx, Tag> {
}
impl<Tag> MemPlace<Tag> {
/// Replace ptr tag, maintain vtable tag (if any)
#[inline]
pub fn replace_tag(self, new_tag: Tag) -> Self {
MemPlace { ptr: self.ptr.erase_tag().with_tag(new_tag), align: self.align, meta: self.meta }
}
#[inline]
pub fn erase_tag(self) -> MemPlace {
MemPlace { ptr: self.ptr.erase_tag(), align: self.align, meta: self.meta.erase_tag() }
pub fn erase_for_fmt(self) -> MemPlace
where
Tag: Provenance,
{
MemPlace {
ptr: self.ptr.map_erase_for_fmt(),
align: self.align,
meta: self.meta.erase_for_fmt(),
}
}
#[inline(always)]
fn from_scalar_ptr(ptr: Scalar<Tag>, align: Align) -> Self {
pub fn from_ptr(ptr: Pointer<Option<Tag>>, align: Align) -> Self {
MemPlace { ptr, align, meta: MemPlaceMeta::None }
}
#[inline(always)]
pub fn from_ptr(ptr: Pointer<Tag>, align: Align) -> Self {
Self::from_scalar_ptr(ptr.into(), align)
}
/// Turn a mplace into a (thin or wide) pointer, as a reference, pointing to the same space.
/// This is the inverse of `ref_to_mplace`.
#[inline(always)]
pub fn to_ref(self) -> Immediate<Tag> {
pub fn to_ref(self, cx: &impl HasDataLayout) -> Immediate<Tag> {
match self.meta {
MemPlaceMeta::None => Immediate::Scalar(self.ptr.into()),
MemPlaceMeta::Meta(meta) => Immediate::ScalarPair(self.ptr.into(), meta.into()),
MemPlaceMeta::None => Immediate::from(Scalar::from_maybe_pointer(self.ptr, cx)),
MemPlaceMeta::Meta(meta) => {
Immediate::ScalarPair(Scalar::from_maybe_pointer(self.ptr, cx).into(), meta.into())
}
MemPlaceMeta::Poison => bug!(
"MPlaceTy::dangling may never be used to produce a \
place that will have the address of its pointee taken"
@ -177,7 +223,7 @@ impl<Tag> MemPlace<Tag> {
cx: &impl HasDataLayout,
) -> InterpResult<'tcx, Self> {
Ok(MemPlace {
ptr: self.ptr.ptr_offset(offset, cx)?,
ptr: self.ptr.offset(offset, cx)?,
align: self.align.restrict_for_offset(offset),
meta,
})
@ -187,19 +233,13 @@ impl<Tag> MemPlace<Tag> {
impl<'tcx, Tag: Copy> MPlaceTy<'tcx, Tag> {
/// Produces a MemPlace that works for ZST but nothing else
#[inline]
pub fn dangling(layout: TyAndLayout<'tcx>, cx: &impl HasDataLayout) -> Self {
pub fn dangling(layout: TyAndLayout<'tcx>) -> Self {
let align = layout.align.abi;
let ptr = Scalar::from_machine_usize(align.bytes(), cx);
let ptr = Pointer::new(None, Size::from_bytes(align.bytes())); // no provenance, absolute address
// `Poison` this to make sure that the pointer value `ptr` is never observable by the program.
MPlaceTy { mplace: MemPlace { ptr, align, meta: MemPlaceMeta::Poison }, layout }
}
/// Replace ptr tag, maintain vtable tag (if any)
#[inline]
pub fn replace_tag(&self, new_tag: Tag) -> Self {
MPlaceTy { mplace: self.mplace.replace_tag(new_tag), layout: self.layout }
}
#[inline]
pub fn offset(
&self,
@ -212,12 +252,15 @@ impl<'tcx, Tag: Copy> MPlaceTy<'tcx, Tag> {
}
#[inline]
fn from_aligned_ptr(ptr: Pointer<Tag>, layout: TyAndLayout<'tcx>) -> Self {
fn from_aligned_ptr(ptr: Pointer<Option<Tag>>, layout: TyAndLayout<'tcx>) -> Self {
MPlaceTy { mplace: MemPlace::from_ptr(ptr, layout.align.abi), layout }
}
#[inline]
pub(super) fn len(&self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> {
pub(super) fn len(&self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64>
where
Tag: Provenance,
{
if self.layout.is_unsized() {
// We need to consult `meta` metadata
match self.layout.ty.kind() {
@ -244,19 +287,14 @@ impl<'tcx, Tag: Copy> MPlaceTy<'tcx, Tag> {
}
// These are defined here because they produce a place.
impl<'tcx, Tag: Debug + Copy> OpTy<'tcx, Tag> {
impl<'tcx, Tag: Copy> OpTy<'tcx, Tag> {
#[inline(always)]
/// Note: do not call `as_ref` on the resulting place. This function should only be used to
/// read from the resulting mplace, not to get its address back.
pub fn try_as_mplace(
&self,
cx: &impl HasDataLayout,
) -> Result<MPlaceTy<'tcx, Tag>, ImmTy<'tcx, Tag>> {
pub fn try_as_mplace(&self) -> Result<MPlaceTy<'tcx, Tag>, ImmTy<'tcx, Tag>> {
match **self {
Operand::Indirect(mplace) => Ok(MPlaceTy { mplace, layout: self.layout }),
Operand::Immediate(_) if self.layout.is_zst() => {
Ok(MPlaceTy::dangling(self.layout, cx))
}
Operand::Immediate(_) if self.layout.is_zst() => Ok(MPlaceTy::dangling(self.layout)),
Operand::Immediate(imm) => Err(ImmTy::from_immediate(imm, self.layout)),
}
}
@ -264,12 +302,15 @@ impl<'tcx, Tag: Debug + Copy> OpTy<'tcx, Tag> {
#[inline(always)]
/// Note: do not call `as_ref` on the resulting place. This function should only be used to
/// read from the resulting mplace, not to get its address back.
pub fn assert_mem_place(&self, cx: &impl HasDataLayout) -> MPlaceTy<'tcx, Tag> {
self.try_as_mplace(cx).unwrap()
pub fn assert_mem_place(&self) -> MPlaceTy<'tcx, Tag>
where
Tag: Provenance,
{
self.try_as_mplace().unwrap()
}
}
impl<Tag: Debug> Place<Tag> {
impl<Tag: Provenance> Place<Tag> {
#[inline]
pub fn assert_mem_place(self) -> MemPlace<Tag> {
match self {
@ -279,7 +320,7 @@ impl<Tag: Debug> Place<Tag> {
}
}
impl<'tcx, Tag: Debug> PlaceTy<'tcx, Tag> {
impl<'tcx, Tag: Provenance> PlaceTy<'tcx, Tag> {
#[inline]
pub fn assert_mem_place(self) -> MPlaceTy<'tcx, Tag> {
MPlaceTy { mplace: self.place.assert_mem_place(), layout: self.layout }
@ -290,7 +331,7 @@ impl<'tcx, Tag: Debug> PlaceTy<'tcx, Tag> {
impl<'mir, 'tcx: 'mir, Tag, M> InterpCx<'mir, 'tcx, M>
where
// FIXME: Working around https://github.com/rust-lang/rust/issues/54385
Tag: Debug + Copy + Eq + Hash + 'static,
Tag: Provenance + Eq + Hash + 'static,
M: Machine<'mir, 'tcx, PointerTag = Tag>,
{
/// Take a value, which represents a (thin or wide) reference, and make it a place.
@ -307,14 +348,12 @@ where
val.layout.ty.builtin_deref(true).expect("`ref_to_mplace` called on non-ptr type").ty;
let layout = self.layout_of(pointee_type)?;
let (ptr, meta) = match **val {
Immediate::Scalar(ptr) => (ptr.check_init()?, MemPlaceMeta::None),
Immediate::ScalarPair(ptr, meta) => {
(ptr.check_init()?, MemPlaceMeta::Meta(meta.check_init()?))
}
Immediate::Scalar(ptr) => (ptr, MemPlaceMeta::None),
Immediate::ScalarPair(ptr, meta) => (ptr, MemPlaceMeta::Meta(meta.check_init()?)),
};
let mplace = MemPlace {
ptr,
ptr: self.scalar_to_ptr(ptr.check_init()?),
// We could use the run-time alignment here. For now, we do not, because
// the point of tracking the alignment here is to make sure that the *static*
// alignment information emitted with the loads is correct. The run-time
@ -333,8 +372,9 @@ where
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
let val = self.read_immediate(src)?;
trace!("deref to {} on {:?}", val.layout.ty, *val);
let place = self.ref_to_mplace(&val)?;
self.mplace_access_checked(place, None)
let mplace = self.ref_to_mplace(&val)?;
self.check_mplace_access(mplace)?;
Ok(mplace)
}
#[inline]
@ -359,38 +399,20 @@ where
self.memory.get_mut(place.ptr, size, place.align)
}
/// Return the "access-checked" version of this `MPlace`, where for non-ZST
/// this is definitely a `Pointer`.
///
/// `force_align` must only be used when correct alignment does not matter,
/// like in Stacked Borrows.
pub fn mplace_access_checked(
&self,
mut place: MPlaceTy<'tcx, M::PointerTag>,
force_align: Option<Align>,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
/// Check if this mplace is dereferencable and sufficiently aligned.
pub fn check_mplace_access(&self, mplace: MPlaceTy<'tcx, M::PointerTag>) -> InterpResult<'tcx> {
let (size, align) = self
.size_and_align_of_mplace(&place)?
.unwrap_or((place.layout.size, place.layout.align.abi));
assert!(place.mplace.align <= align, "dynamic alignment less strict than static one?");
let align = force_align.unwrap_or(align);
// Record new (stricter, unless forced) alignment requirement in place.
place.mplace.align = align;
// When dereferencing a pointer, it must be non-null, aligned, and live.
if let Some(ptr) = self.memory.check_ptr_access(place.ptr, size, align)? {
place.mplace.ptr = ptr.into();
}
Ok(place)
}
/// Force `place.ptr` to a `Pointer`.
/// Can be helpful to avoid lots of `force_ptr` calls later, if this place is used a lot.
pub(super) fn force_mplace_ptr(
&self,
mut place: MPlaceTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
place.mplace.ptr = self.force_ptr(place.mplace.ptr)?.into();
Ok(place)
.size_and_align_of_mplace(&mplace)?
.unwrap_or((mplace.layout.size, mplace.layout.align.abi));
assert!(mplace.mplace.align <= align, "dynamic alignment less strict than static one?");
let align = M::enforce_alignment(&self.memory.extra).then_some(align);
self.memory.check_ptr_access_align(
mplace.ptr,
size,
align.unwrap_or(Align::ONE),
CheckInAllocMsg::MemoryAccessTest, // FIXME sth more specific?
)?;
Ok(())
}
/// Offset a pointer to project to a field of a struct/union. Unlike `place_field`, this is
@ -558,10 +580,7 @@ where
let layout = self.layout_of(self.tcx.types.usize)?;
let n = self.access_local(self.frame(), local, Some(layout))?;
let n = self.read_scalar(&n)?;
let n = u64::try_from(
self.force_bits(n.check_init()?, self.tcx.data_layout.pointer_size)?,
)
.unwrap();
let n = n.to_machine_usize(self)?;
self.mplace_index(base, n)?
}
@ -1020,7 +1039,7 @@ where
kind: MemoryKind<M::MemoryKind>,
) -> InterpResult<'static, MPlaceTy<'tcx, M::PointerTag>> {
let ptr = self.memory.allocate(layout.size, layout.align.abi, kind)?;
Ok(MPlaceTy::from_aligned_ptr(ptr, layout))
Ok(MPlaceTy::from_aligned_ptr(ptr.into(), layout))
}
/// Returns a wide MPlace of type `&'static [mut] str` to a new 1-aligned allocation.
@ -1125,7 +1144,7 @@ where
let _ = self.tcx.global_alloc(raw.alloc_id);
let ptr = self.global_base_pointer(Pointer::from(raw.alloc_id))?;
let layout = self.layout_of(raw.ty)?;
Ok(MPlaceTy::from_aligned_ptr(ptr, layout))
Ok(MPlaceTy::from_aligned_ptr(ptr.into(), layout))
}
/// Turn a place with a `dyn Trait` type into a place with the actual dynamic type.
@ -1134,7 +1153,7 @@ where
&self,
mplace: &MPlaceTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, (ty::Instance<'tcx>, MPlaceTy<'tcx, M::PointerTag>)> {
let vtable = mplace.vtable(); // also sanity checks the type
let vtable = self.scalar_to_ptr(mplace.vtable()); // also sanity checks the type
let (instance, ty) = self.read_drop_type_from_vtable(vtable)?;
let layout = self.layout_of(ty)?;

View File

@ -240,7 +240,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// of the first element.
let elem_size = first.layout.size;
let first_ptr = first.ptr;
let rest_ptr = first_ptr.ptr_offset(elem_size, self)?;
let rest_ptr = first_ptr.offset(elem_size, self)?;
self.memory.copy_repeatedly(
first_ptr,
first.align,
@ -264,11 +264,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
AddressOf(_, place) | Ref(_, _, place) => {
let src = self.eval_place(place)?;
let place = self.force_allocation(&src)?;
if place.layout.size.bytes() > 0 {
// definitely not a ZST
assert!(place.ptr.is_ptr(), "non-ZST places should be normalized to `Pointer`");
}
self.write_immediate(place.to_ref(), &dest)?;
self.write_immediate(place.to_ref(self), &dest)?;
}
NullaryOp(mir::NullOp::Box, _) => {

View File

@ -12,8 +12,8 @@ use rustc_target::abi::{self, LayoutOf as _};
use rustc_target::spec::abi::Abi;
use super::{
FnVal, ImmTy, InterpCx, InterpResult, MPlaceTy, Machine, OpTy, PlaceTy, StackPopCleanup,
StackPopUnwind,
FnVal, ImmTy, InterpCx, InterpResult, MPlaceTy, Machine, OpTy, PlaceTy, Scalar,
StackPopCleanup, StackPopUnwind,
};
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
@ -72,8 +72,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let (fn_val, abi, caller_can_unwind) = match *func.layout.ty.kind() {
ty::FnPtr(sig) => {
let caller_abi = sig.abi();
let fn_ptr = self.read_scalar(&func)?.check_init()?;
let fn_val = self.memory.get_fn(fn_ptr)?;
let fn_ptr = self.read_pointer(&func)?;
let fn_val = self.memory.get_fn(fn_ptr.into())?;
(
fn_val,
caller_abi,
@ -454,11 +454,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
None => {
// Unsized self.
args[0].assert_mem_place(self)
args[0].assert_mem_place()
}
};
// Find and consult vtable
let vtable = receiver_place.vtable();
let vtable = self.scalar_to_ptr(receiver_place.vtable());
let fn_val = self.get_vtable_slot(vtable, u64::try_from(idx).unwrap())?;
// `*mut receiver_place.layout.ty` is almost the layout that we
@ -468,8 +468,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let receiver_ptr_ty = self.tcx.mk_mut_ptr(receiver_place.layout.ty);
let this_receiver_ptr = self.layout_of(receiver_ptr_ty)?.field(self, 0)?;
// Adjust receiver argument.
args[0] =
OpTy::from(ImmTy::from_immediate(receiver_place.ptr.into(), this_receiver_ptr));
args[0] = OpTy::from(ImmTy::from_immediate(
Scalar::from_maybe_pointer(receiver_place.ptr, self).into(),
this_receiver_ptr,
));
trace!("Patched self operand to {:#?}", args[0]);
// recurse with concrete function
self.eval_fn_call(fn_val, caller_abi, &args, ret, unwind)
@ -499,12 +501,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
};
let arg = ImmTy::from_immediate(
place.to_ref(),
place.to_ref(self),
self.layout_of(self.tcx.mk_mut_ptr(place.layout.ty))?,
);
let ty = self.tcx.mk_unit(); // return type is ()
let dest = MPlaceTy::dangling(self.layout_of(ty)?, self);
let dest = MPlaceTy::dangling(self.layout_of(ty)?);
self.eval_fn_call(
FnVal::Instance(instance),

View File

@ -1,6 +1,6 @@
use std::convert::TryFrom;
use rustc_middle::mir::interpret::{InterpResult, Pointer, PointerArithmetic, Scalar};
use rustc_middle::mir::interpret::{InterpResult, Pointer, PointerArithmetic};
use rustc_middle::ty::{
self, Ty, COMMON_VTABLE_ENTRIES, COMMON_VTABLE_ENTRIES_ALIGN,
COMMON_VTABLE_ENTRIES_DROPINPLACE, COMMON_VTABLE_ENTRIES_SIZE,
@ -42,23 +42,23 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// corresponds to the first method declared in the trait of the provided vtable.
pub fn get_vtable_slot(
&self,
vtable: Scalar<M::PointerTag>,
vtable: Pointer<Option<M::PointerTag>>,
idx: u64,
) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
let ptr_size = self.pointer_size();
let vtable_slot = vtable.ptr_offset(ptr_size * idx, self)?;
let vtable_slot = vtable.offset(ptr_size * idx, self)?;
let vtable_slot = self
.memory
.get(vtable_slot, ptr_size, self.tcx.data_layout.pointer_align.abi)?
.expect("cannot be a ZST");
let fn_ptr = vtable_slot.read_ptr_sized(Size::ZERO)?.check_init()?;
let fn_ptr = self.scalar_to_ptr(vtable_slot.read_ptr_sized(Size::ZERO)?.check_init()?);
self.memory.get_fn(fn_ptr)
}
/// Returns the drop fn instance as well as the actual dynamic type.
pub fn read_drop_type_from_vtable(
&self,
vtable: Scalar<M::PointerTag>,
vtable: Pointer<Option<M::PointerTag>>,
) -> InterpResult<'tcx, (ty::Instance<'tcx>, Ty<'tcx>)> {
let pointer_size = self.pointer_size();
// We don't care about the pointee type; we just want a pointer.
@ -77,7 +77,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
.check_init()?;
// We *need* an instance here, no other kind of function value, to be able
// to determine the type.
let drop_instance = self.memory.get_fn(drop_fn)?.as_instance()?;
let drop_instance = self.memory.get_fn(self.scalar_to_ptr(drop_fn))?.as_instance()?;
trace!("Found drop fn: {:?}", drop_instance);
let fn_sig = drop_instance.ty(*self.tcx, self.param_env).fn_sig(*self.tcx);
let fn_sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, fn_sig);
@ -93,7 +93,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
pub fn read_size_and_align_from_vtable(
&self,
vtable: Scalar<M::PointerTag>,
vtable: Pointer<Option<M::PointerTag>>,
) -> InterpResult<'tcx, (Size, Align)> {
let pointer_size = self.pointer_size();
// We check for `size = 3 * ptr_size`, which covers the drop fn (unused here),
@ -109,11 +109,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let size = vtable
.read_ptr_sized(pointer_size * u64::try_from(COMMON_VTABLE_ENTRIES_SIZE).unwrap())?
.check_init()?;
let size = u64::try_from(self.force_bits(size, pointer_size)?).unwrap();
let size = size.to_machine_usize(self)?;
let align = vtable
.read_ptr_sized(pointer_size * u64::try_from(COMMON_VTABLE_ENTRIES_ALIGN).unwrap())?
.check_init()?;
let align = u64::try_from(self.force_bits(align, pointer_size)?).unwrap();
let align = align.to_machine_usize(self)?;
let align = Align::from_bytes(align).map_err(|e| err_ub!(InvalidVtableAlignment(e)))?;
if size >= self.tcx.data_layout.obj_size_bound() {

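The vtable accessors above all rely on the same slot arithmetic: entry `idx` sits `ptr_size * idx` bytes past the vtable pointer, with the drop fn, size, and align occupying the leading entries. A small sketch of just that arithmetic follows; the 0/1/2 indices are an assumption standing in for the `COMMON_VTABLE_ENTRIES_*` constants the diff imports.

```rust
// Standalone sketch of the vtable slot arithmetic; indices are illustrative.
const DROPINPLACE: u64 = 0;
const SIZE: u64 = 1;
const ALIGN: u64 = 2;

fn vtable_slot_offset(ptr_size: u64, idx: u64) -> u64 {
    // Slot `idx` lives `ptr_size * idx` bytes past the vtable base.
    ptr_size.checked_mul(idx).expect("vtable slot offset overflowed")
}

fn main() {
    let ptr_size = 8; // a 64-bit target
    println!("drop fn at +{}", vtable_slot_offset(ptr_size, DROPINPLACE));
    println!("size    at +{}", vtable_slot_offset(ptr_size, SIZE));
    println!("align   at +{}", vtable_slot_offset(ptr_size, ALIGN));
}
```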
View File

@ -21,7 +21,7 @@ use std::hash::Hash;
use super::{
alloc_range, CheckInAllocMsg, GlobalAlloc, InterpCx, InterpResult, MPlaceTy, Machine,
MemPlaceMeta, OpTy, Scalar, ScalarMaybeUninit, ValueVisitor,
MemPlaceMeta, OpTy, ScalarMaybeUninit, ValueVisitor,
};
macro_rules! throw_validation_failure {
@ -324,7 +324,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
let tail = self.ecx.tcx.struct_tail_erasing_lifetimes(pointee.ty, self.ecx.param_env);
match tail.kind() {
ty::Dynamic(..) => {
let vtable = meta.unwrap_meta();
let vtable = self.ecx.scalar_to_ptr(meta.unwrap_meta());
// Direct call to `check_ptr_access_align` checks alignment even on CTFE machines.
try_validation!(
self.ecx.memory.check_ptr_access_align(
@ -448,17 +448,10 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
if let Some(ref mut ref_tracking) = self.ref_tracking {
// Proceed recursively even for ZST, no reason to skip them!
// `!` is a ZST and we want to validate it.
// Normalize before handing `place` to tracking because that will
// check for duplicates.
let place = if size.bytes() > 0 {
self.ecx.force_mplace_ptr(place).expect("we already bounds-checked")
} else {
place
};
// Skip validation entirely for some external statics
if let Scalar::Ptr(ptr) = place.ptr {
if let Ok((alloc_id, _offset, _ptr)) = self.ecx.memory.ptr_try_get_alloc(place.ptr) {
// not a ZST
let alloc_kind = self.ecx.tcx.get_global_alloc(ptr.alloc_id);
let alloc_kind = self.ecx.tcx.get_global_alloc(alloc_id);
if let Some(GlobalAlloc::Static(did)) = alloc_kind {
assert!(!self.ecx.tcx.is_thread_local_static(did));
assert!(self.ecx.tcx.is_static(did));
@ -601,7 +594,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
// message below.
let value = value.to_scalar_or_uninit();
let _fn = try_validation!(
value.check_init().and_then(|ptr| self.ecx.memory.get_fn(ptr)),
value.check_init().and_then(|ptr| self.ecx.memory.get_fn(self.ecx.scalar_to_ptr(ptr))),
self.path,
err_ub!(DanglingIntPointer(..)) |
err_ub!(InvalidFunctionPointer(..)) |
@ -668,7 +661,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
Err(ptr) => {
if lo == 1 && hi == max_hi {
// Only null is the niche. So make sure the ptr is NOT null.
if self.ecx.memory.ptr_may_be_null(ptr) {
if self.ecx.memory.ptr_may_be_null(ptr.into()) {
throw_validation_failure!(self.path,
{ "a potentially null pointer" }
expected {
@ -832,7 +825,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
) -> InterpResult<'tcx> {
match op.layout.ty.kind() {
ty::Str => {
let mplace = op.assert_mem_place(self.ecx); // strings are never immediate
let mplace = op.assert_mem_place(); // strings are never immediate
let len = mplace.len(self.ecx)?;
try_validation!(
self.ecx.memory.read_bytes(mplace.ptr, Size::from_bytes(len)),
@ -853,7 +846,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
// Optimized handling for arrays of integer/float type.
// Arrays cannot be immediate, slices are never immediate.
let mplace = op.assert_mem_place(self.ecx);
let mplace = op.assert_mem_place();
// This is the length of the array/slice.
let len = mplace.len(self.ecx)?;
// This is the element type size.
@ -940,9 +933,6 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Construct a visitor
let mut visitor = ValidityVisitor { path, ref_tracking, ctfe_mode, ecx: self };
// Try to cast to ptr *once* instead of all the time.
let op = self.force_op_ptr(&op).unwrap_or(*op);
// Run it.
match visitor.visit_value(&op) {
Ok(()) => Ok(()),

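The validator now uses `ptr_try_get_alloc` and `ptr_may_be_null` instead of matching on `Scalar::Ptr`. A minimal sketch of that split, with simplified signatures that are not the rustc API: a pointer with provenance resolves to an allocation plus offset, while one without provenance is just an address, and the null check follows directly from that distinction (assuming allocations never sit at address 0).

```rust
// Standalone sketch, simplified signatures.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct AllocId(u64);

#[derive(Clone, Copy, Debug)]
struct Pointer {
    provenance: Option<AllocId>,
    offset: u64,
}

/// Ok((alloc, offset)) if the pointer has provenance, Err(address) otherwise.
fn ptr_try_get_alloc(ptr: Pointer) -> Result<(AllocId, u64), u64> {
    match ptr.provenance {
        Some(alloc) => Ok((alloc, ptr.offset)),
        None => Err(ptr.offset),
    }
}

fn ptr_may_be_null(ptr: Pointer) -> bool {
    match ptr_try_get_alloc(ptr) {
        // A pointer into an allocation is never null
        // (assumption: allocations never sit at address 0).
        Ok(_) => false,
        // An integer "pointer" is null exactly when the address is 0.
        Err(addr) => addr == 0,
    }
}

fn main() {
    let into_alloc = Pointer { provenance: Some(AllocId(1)), offset: 16 };
    let raw_null = Pointer { provenance: None, offset: 0 };
    assert!(!ptr_may_be_null(into_alloc));
    assert!(ptr_may_be_null(raw_null));
    println!("ok");
}
```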
View File

@ -211,7 +211,8 @@ macro_rules! make_value_visitor {
// If it is a trait object, switch to the real type that was used to create it.
ty::Dynamic(..) => {
// immediate trait objects are not a thing
let dest = v.to_op(self.ecx())?.assert_mem_place(self.ecx());
let op = v.to_op(self.ecx())?;
let dest = op.assert_mem_place();
let inner = self.ecx().unpack_dyn_trait(&dest)?.1;
trace!("walk_value: dyn object layout: {:#?}", inner.layout);
// recurse with the inner type
@ -241,7 +242,8 @@ macro_rules! make_value_visitor {
},
FieldsShape::Array { .. } => {
// Let's get an mplace first.
let mplace = v.to_op(self.ecx())?.assert_mem_place(self.ecx());
let op = v.to_op(self.ecx())?;
let mplace = op.assert_mem_place();
// Now we can go over all the fields.
// This uses the *run-time length*, i.e., if we are a slice,
// the dynamic info from the metadata is used.

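In both hunks above, `assert_mem_place` loses its interpreter-context argument. A placeholder-typed sketch of an accessor with that shape: asserting that an operand lives in memory needs nothing beyond the operand itself.

```rust
// Standalone sketch with placeholder types, not the rustc definitions.
#[derive(Clone, Copy, Debug)]
struct MemPlace {
    addr: u64,
}

#[derive(Clone, Copy, Debug)]
enum Operand {
    Immediate(u128),
    Indirect(MemPlace),
}

#[derive(Clone, Copy, Debug)]
struct OpTy {
    op: Operand,
}

impl OpTy {
    /// Panics if the operand is not in memory; takes no machine/context argument.
    fn assert_mem_place(&self) -> MemPlace {
        match self.op {
            Operand::Indirect(mplace) => mplace,
            Operand::Immediate(_) => panic!("value is not in memory"),
        }
    }
}

fn main() {
    let op = OpTy { op: Operand::Indirect(MemPlace { addr: 0x100 }) };
    println!("{:?}", op.assert_mem_place());
}
```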
View File

@ -403,7 +403,7 @@ fn collect_items_rec<'tcx>(
recursion_depth_reset = None;
if let Ok(alloc) = tcx.eval_static_initializer(def_id) {
for &((), id) in alloc.relocations().values() {
for &id in alloc.relocations().values() {
collect_miri(tcx, id, &mut neighbors);
}
}
@ -1369,7 +1369,7 @@ fn collect_miri<'tcx>(
}
GlobalAlloc::Memory(alloc) => {
trace!("collecting {:?} with {:#?}", alloc_id, alloc);
for &((), inner) in alloc.relocations().values() {
for &inner in alloc.relocations().values() {
rustc_data_structures::stack::ensure_sufficient_stack(|| {
collect_miri(tcx, inner, output);
});
@ -1402,9 +1402,9 @@ fn collect_const_value<'tcx>(
output: &mut Vec<Spanned<MonoItem<'tcx>>>,
) {
match value {
ConstValue::Scalar(Scalar::Ptr(ptr)) => collect_miri(tcx, ptr.alloc_id, output),
ConstValue::Scalar(Scalar::Ptr(ptr)) => collect_miri(tcx, ptr.provenance, output),
ConstValue::Slice { data: alloc, start: _, end: _ } | ConstValue::ByRef { alloc, .. } => {
for &((), id) in alloc.relocations().values() {
for &id in alloc.relocations().values() {
collect_miri(tcx, id, output);
}
}

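The collector hunks above change the relocation map's values from `((), AllocId)` pairs to plain provenance tags, so gathering the allocations a constant points to becomes a straight iteration. A standalone sketch (not the rustc types) of that walk:

```rust
// Standalone sketch: collecting the allocations referenced by a relocation map.
use std::collections::BTreeMap;

#[derive(Clone, Copy, Debug)]
struct AllocId(u64);

/// offset-within-allocation -> provenance of the pointer stored at that offset
type Relocations = BTreeMap<u64, AllocId>;

fn collect_referenced(relocations: &Relocations, output: &mut Vec<AllocId>) {
    // Each value used to be a `((), AllocId)` pair; now it is the id itself.
    for &id in relocations.values() {
        output.push(id);
    }
}

fn main() {
    let mut relocations = Relocations::new();
    relocations.insert(0, AllocId(3));
    relocations.insert(8, AllocId(5));
    let mut out = Vec::new();
    collect_referenced(&relocations, &mut out);
    println!("{:?}", out); // [AllocId(3), AllocId(5)]
}
```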
View File

@ -31,9 +31,8 @@ use rustc_trait_selection::traits;
use crate::const_eval::ConstEvalErr;
use crate::interpret::{
self, compile_time_machine, AllocId, Allocation, ConstValue, CtfeValidationMode, Frame, ImmTy,
Immediate, InterpCx, InterpResult, LocalState, LocalValue, MemPlace, Memory, MemoryKind, OpTy,
Operand as InterpOperand, PlaceTy, Pointer, Scalar, ScalarMaybeUninit, StackPopCleanup,
StackPopUnwind,
Immediate, InterpCx, InterpResult, LocalState, LocalValue, MemPlace, MemoryKind, OpTy,
Operand as InterpOperand, PlaceTy, Scalar, ScalarMaybeUninit, StackPopCleanup, StackPopUnwind,
};
use crate::transform::MirPass;
@ -157,7 +156,7 @@ impl<'tcx> MirPass<'tcx> for ConstProp {
struct ConstPropMachine<'mir, 'tcx> {
/// The virtual call stack.
stack: Vec<Frame<'mir, 'tcx, (), ()>>,
stack: Vec<Frame<'mir, 'tcx>>,
/// `OnlyInsideOwnBlock` locals that were written in the current block get erased at the end.
written_only_inside_own_block_locals: FxHashSet<Local>,
/// Locals that need to be cleared after every block terminates.
@ -223,10 +222,6 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for ConstPropMachine<'mir, 'tcx>
bug!("panics terminators are not evaluated in ConstProp")
}
fn ptr_to_int(_mem: &Memory<'mir, 'tcx, Self>, _ptr: Pointer) -> InterpResult<'tcx, u64> {
throw_unsup!(ReadPointerAsBytes)
}
fn binary_ptr_op(
_ecx: &InterpCx<'mir, 'tcx, Self>,
_bin_op: BinOp,
@ -759,8 +754,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
}
};
let arg_value =
this.ecx.force_bits(const_arg.to_scalar()?, const_arg.layout.size)?;
let arg_value = const_arg.to_scalar()?.to_bits(const_arg.layout.size)?;
let dest = this.ecx.eval_place(place)?;
match op {

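ConstProp above loses its `ptr_to_int` hook and switches from `force_bits` to a plain `to_bits` call: with pointers kept as pointers, turning a scalar into raw bits is an ordinary fallible operation that simply errors on pointer values. A simplified sketch (not the rustc API) of that behaviour:

```rust
// Standalone sketch, simplified types: `to_bits` fails cleanly on pointers.
#[derive(Clone, Copy, Debug)]
struct AllocId(u64);

#[derive(Clone, Copy, Debug)]
enum Scalar {
    Int { bits: u128, size: u8 },
    Ptr(AllocId, u64),
}

#[derive(Debug)]
enum InterpError {
    ReadPointerAsBytes,
    SizeMismatch { expected: u8, found: u8 },
}

impl Scalar {
    fn to_bits(self, target_size: u8) -> Result<u128, InterpError> {
        match self {
            Scalar::Int { bits, size } if size == target_size => Ok(bits),
            Scalar::Int { size, .. } => {
                Err(InterpError::SizeMismatch { expected: target_size, found: size })
            }
            // A pointer's bits depend on where its allocation ends up, so this
            // is just an error during compile-time evaluation.
            Scalar::Ptr(..) => Err(InterpError::ReadPointerAsBytes),
        }
    }
}

fn main() {
    let int = Scalar::Int { bits: 42, size: 8 };
    let ptr = Scalar::Ptr(AllocId(1), 0);
    println!("{:?}", int.to_bits(8)); // Ok(42)
    println!("{:?}", ptr.to_bits(8)); // Err(ReadPointerAsBytes)
}
```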
View File

@ -211,7 +211,7 @@ fn find_branch_value_info<'tcx>(
return None;
};
let branch_value_scalar = branch_value.literal.try_to_scalar()?;
Some((branch_value_scalar, branch_value_ty, *to_switch_on))
Some((branch_value_scalar.into(), branch_value_ty, *to_switch_on))
}
_ => None,
}

View File

@ -1,6 +1,6 @@
use std::collections::BTreeSet;
use std::fmt::Display;
use std::fmt::Write as _;
use std::fmt::{Debug, Display};
use std::fs;
use std::io::{self, Write};
use std::path::{Path, PathBuf};
@ -13,7 +13,7 @@ use rustc_data_structures::fx::FxHashMap;
use rustc_hir::def_id::DefId;
use rustc_index::vec::Idx;
use rustc_middle::mir::interpret::{
read_target_uint, AllocId, Allocation, ConstValue, GlobalAlloc, Pointer,
read_target_uint, AllocId, Allocation, ConstValue, GlobalAlloc, Pointer, Provenance,
};
use rustc_middle::mir::visit::Visitor;
use rustc_middle::mir::*;
@ -665,12 +665,12 @@ pub fn write_allocations<'tcx>(
w: &mut dyn Write,
) -> io::Result<()> {
fn alloc_ids_from_alloc(alloc: &Allocation) -> impl DoubleEndedIterator<Item = AllocId> + '_ {
alloc.relocations().values().map(|(_, id)| *id)
alloc.relocations().values().map(|id| *id)
}
fn alloc_ids_from_const(val: ConstValue<'_>) -> impl Iterator<Item = AllocId> + '_ {
match val {
ConstValue::Scalar(interpret::Scalar::Ptr(ptr)) => {
Either::Left(Either::Left(std::iter::once(ptr.alloc_id)))
Either::Left(Either::Left(std::iter::once(ptr.provenance)))
}
ConstValue::Scalar(interpret::Scalar::Int { .. }) => {
Either::Left(Either::Right(std::iter::empty()))
@ -755,7 +755,7 @@ pub fn write_allocations<'tcx>(
/// After the hex dump, an ascii dump follows, replacing all unprintable characters (control
/// characters or characters whose value is larger than 127) with a `.`
/// This also prints relocations adequately.
pub fn display_allocation<Tag: Copy + Debug, Extra>(
pub fn display_allocation<Tag, Extra>(
tcx: TyCtxt<'tcx>,
alloc: &'a Allocation<Tag, Extra>,
) -> RenderAllocation<'a, 'tcx, Tag, Extra> {
@ -768,7 +768,7 @@ pub struct RenderAllocation<'a, 'tcx, Tag, Extra> {
alloc: &'a Allocation<Tag, Extra>,
}
impl<Tag: Copy + Debug, Extra> std::fmt::Display for RenderAllocation<'a, 'tcx, Tag, Extra> {
impl<Tag: Provenance, Extra> std::fmt::Display for RenderAllocation<'a, 'tcx, Tag, Extra> {
fn fmt(&self, w: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let RenderAllocation { tcx, alloc } = *self;
write!(w, "size: {}, align: {})", alloc.size().bytes(), alloc.align.bytes())?;
@ -811,7 +811,7 @@ fn write_allocation_newline(
/// The `prefix` argument allows callers to add an arbitrary prefix before each line (even if there
/// is only one line). Note that your prefix should contain a trailing space as the lines are
/// printed directly after it.
fn write_allocation_bytes<Tag: Copy + Debug, Extra>(
fn write_allocation_bytes<Tag: Provenance, Extra>(
tcx: TyCtxt<'tcx>,
alloc: &Allocation<Tag, Extra>,
w: &mut dyn std::fmt::Write,
@ -847,7 +847,7 @@ fn write_allocation_bytes<Tag: Copy + Debug, Extra>(
if i != line_start {
write!(w, " ")?;
}
if let Some(&(tag, target_id)) = alloc.relocations().get(&i) {
if let Some(&tag) = alloc.relocations().get(&i) {
// Memory with a relocation must be defined
let j = i.bytes_usize();
let offset = alloc
@ -855,7 +855,7 @@ fn write_allocation_bytes<Tag: Copy + Debug, Extra>(
let offset = read_target_uint(tcx.data_layout.endian, offset).unwrap();
let offset = Size::from_bytes(offset);
let relocation_width = |bytes| bytes * 3;
let ptr = Pointer::new_with_tag(target_id, offset, tag);
let ptr = Pointer::new(tag, offset);
let mut target = format!("{:?}", ptr);
if target.len() > relocation_width(ptr_size.bytes_usize() - 1) {
// This is too long, try to save some space.
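For the allocation dump above, a relocation at byte offset `i` contributes a rendered pointer built from the relocation's tag plus the target offset stored in the allocation's own bytes. A standalone sketch of that reconstruction (not the rustc rendering code; a little-endian, 8-byte pointer size is assumed for brevity):

```rust
// Standalone sketch: pair each relocation's tag with the offset read out of
// the allocation bytes it covers.
use std::collections::BTreeMap;
use std::convert::TryInto;

#[derive(Clone, Copy, Debug)]
struct AllocId(u64);

// Assumed for brevity: a little-endian, 64-bit target.
const PTR_SIZE: usize = 8;

fn relocation_targets(bytes: &[u8], relocations: &BTreeMap<usize, AllocId>) -> Vec<String> {
    relocations
        .iter()
        .map(|(&i, &tag)| {
            // Memory covered by a relocation must be initialized, so the raw
            // offset can be read straight out of the allocation's bytes.
            let raw: [u8; PTR_SIZE] = (&bytes[i..i + PTR_SIZE]).try_into().unwrap();
            let offset = u64::from_le_bytes(raw);
            format!("{:?}+{:#x}", tag, offset)
        })
        .collect()
}

fn main() {
    let mut bytes = vec![0u8; 16];
    bytes[8..16].copy_from_slice(&0x20u64.to_le_bytes());
    let mut relocations = BTreeMap::new();
    relocations.insert(8usize, AllocId(4));
    println!("{:?}", relocation_targets(&bytes, &relocations));
}
```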