Auto merge of #85376 - RalfJung:ptrless-allocs, r=oli-obk

CTFE core engine allocation & memory API improvements

This is a first step towards https://github.com/rust-lang/miri/issues/841.
- make `Allocation` API offset-based (no more making up `Pointer`s just to access an `Allocation`)
- make `Memory` API higher-level (combine checking for access and getting access into one operation)
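In call-site terms the change looks like this (a schematic before/after assembled from the hunks below; `cx`, `alloc`, `offset`, and `size` are placeholders, not standalone code):

// Before: fabricate a `Pointer` with a dummy `AllocId` just to name an offset.
let ptr = Pointer::new(AllocId(0 /* dummy */), offset);
let bytes = alloc.get_bytes(cx, ptr, size)?;

// After: name the offset directly with an `AllocRange`.
let bytes = alloc.get_bytes(cx, alloc_range(offset, size))?;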

The Miri-side PR is at https://github.com/rust-lang/miri/pull/1804.
r? `@oli-obk`
bors 2021-05-19 10:11:28 +00:00
commit 3e827cc21e
19 changed files with 599 additions and 512 deletions


@ -6,7 +6,7 @@ use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_errors::ErrorReported;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_middle::mir::interpret::{
read_target_uint, AllocId, Allocation, ConstValue, ErrorHandled, GlobalAlloc, Pointer, Scalar,
alloc_range, read_target_uint, AllocId, Allocation, ConstValue, ErrorHandled, GlobalAlloc, Scalar,
};
use rustc_middle::ty::ConstKind;
@ -176,8 +176,7 @@ pub(crate) fn codegen_const_value<'tcx>(
std::iter::repeat(0).take(size.bytes_usize()).collect::<Vec<u8>>(),
align,
);
let ptr = Pointer::new(AllocId(!0), Size::ZERO); // The alloc id is never used
alloc.write_scalar(fx, ptr, x.into(), size).unwrap();
alloc.write_scalar(fx, alloc_range(Size::ZERO, size), x.into()).unwrap();
let alloc = fx.tcx.intern_const_alloc(alloc);
return CValue::by_ref(pointer_for_allocation(fx, alloc), layout);
}


@ -86,9 +86,8 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
let idx_bytes = match idx_const {
ConstValue::ByRef { alloc, offset } => {
let ptr = Pointer::new(AllocId(0 /* dummy */), offset);
let size = Size::from_bytes(4 * ret_lane_count /* size_of([u32; ret_lane_count]) */);
alloc.get_bytes(fx, ptr, size).unwrap()
alloc.get_bytes(fx, alloc_range(offset, size)).unwrap()
}
_ => unreachable!("{:?}", idx_const),
};


@ -4,16 +4,22 @@ use std::borrow::Cow;
use std::convert::TryFrom;
use std::iter;
use std::ops::{Deref, DerefMut, Range};
use std::ptr;
use rustc_ast::Mutability;
use rustc_data_structures::sorted_map::SortedMap;
use rustc_target::abi::{Align, HasDataLayout, Size};
use super::{
read_target_uint, write_target_uint, AllocId, InterpResult, Pointer, Scalar, ScalarMaybeUninit,
UninitBytesAccess,
read_target_uint, write_target_uint, AllocId, InterpError, Pointer, Scalar, ScalarMaybeUninit,
UndefinedBehaviorInfo, UninitBytesAccess, UnsupportedOpInfo,
};
/// This type represents an Allocation in the Miri/CTFE core engine.
///
/// Its public API is rather low-level, working directly with allocation offsets and a custom error
/// type to account for the lack of an AllocId on this level. The Miri/CTFE core engine `memory`
/// module provides higher-level access.
#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, TyEncodable, TyDecodable)]
#[derive(HashStable)]
pub struct Allocation<Tag = (), Extra = ()> {
@ -38,49 +44,58 @@ pub struct Allocation<Tag = (), Extra = ()> {
pub extra: Extra,
}
pub trait AllocationExtra<Tag>: std::fmt::Debug + Clone {
// There is no constructor in here because the constructor's type depends
// on `MemoryKind`, and making things sufficiently generic leads to painful
// inference failure.
/// We have our own error type that does not know about the `AllocId`; that information
/// is added when converting to `InterpError`.
#[derive(Debug)]
pub enum AllocError {
/// Encountered a pointer where we needed raw bytes.
ReadPointerAsBytes,
/// Using uninitialized data where it is not allowed.
InvalidUninitBytes(Option<UninitBytesAccess>),
}
pub type AllocResult<T = ()> = Result<T, AllocError>;
/// Hook for performing extra checks on a memory read access.
///
/// Takes read-only access to the allocation so that all memory read
/// operations can take `&self`. Use a `RefCell` in `AllocExtra` if you
/// need to mutate.
#[inline(always)]
fn memory_read(
_alloc: &Allocation<Tag, Self>,
_ptr: Pointer<Tag>,
_size: Size,
) -> InterpResult<'tcx> {
Ok(())
}
/// Hook for performing extra checks on a memory write access.
#[inline(always)]
fn memory_written(
_alloc: &mut Allocation<Tag, Self>,
_ptr: Pointer<Tag>,
_size: Size,
) -> InterpResult<'tcx> {
Ok(())
}
/// Hook for performing extra checks on a memory deallocation.
/// `size` will be the size of the allocation.
#[inline(always)]
fn memory_deallocated(
_alloc: &mut Allocation<Tag, Self>,
_ptr: Pointer<Tag>,
_size: Size,
) -> InterpResult<'tcx> {
Ok(())
impl AllocError {
pub fn to_interp_error<'tcx>(self, alloc_id: AllocId) -> InterpError<'tcx> {
match self {
AllocError::ReadPointerAsBytes => {
InterpError::Unsupported(UnsupportedOpInfo::ReadPointerAsBytes)
}
AllocError::InvalidUninitBytes(info) => InterpError::UndefinedBehavior(
UndefinedBehaviorInfo::InvalidUninitBytes(info.map(|b| (alloc_id, b))),
),
}
}
}
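A minimal standalone model of this two-layer error scheme (the names mirror the diff, but `u64` stands in for `AllocId` and the enums are heavily trimmed; an illustration, not the real rustc definitions):

#[derive(Debug)]
enum AllocError {
    // Allocation-level error: knows offsets, but not which allocation.
    ReadPointerAsBytes,
}

#[derive(Debug)]
enum InterpError {
    // Engine-level error: the memory layer has attached the alloc id.
    ReadPointerAsBytes { alloc_id: u64 },
}

impl AllocError {
    fn to_interp_error(self, alloc_id: u64) -> InterpError {
        match self {
            AllocError::ReadPointerAsBytes => InterpError::ReadPointerAsBytes { alloc_id },
        }
    }
}

fn main() {
    // The Allocation reports an error without an id...
    let err = AllocError::ReadPointerAsBytes;
    // ...and Memory, which knows the id, enriches it on the way out.
    println!("{:?}", err.to_interp_error(7));
}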
// For `Tag = ()` and no extra state, we have a trivial implementation.
impl AllocationExtra<()> for () {}
/// The information that makes up a memory access: offset and size.
#[derive(Copy, Clone, Debug)]
pub struct AllocRange {
pub start: Size,
pub size: Size,
}
/// Free-standing constructor for less syntactic overhead.
#[inline(always)]
pub fn alloc_range(start: Size, size: Size) -> AllocRange {
AllocRange { start, size }
}
impl AllocRange {
#[inline(always)]
pub fn end(self) -> Size {
self.start + self.size // This does overflow checking.
}
/// Returns the `subrange` within this range; panics if it is not a subrange.
#[inline]
pub fn subrange(self, subrange: AllocRange) -> AllocRange {
let sub_start = self.start + subrange.start;
let range = alloc_range(sub_start, subrange.size);
assert!(range.end() <= self.end(), "access outside the bounds for given AllocRange");
range
}
}
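A standalone sketch of the `AllocRange` semantics, with `u64` standing in for `Size` (an assumption for illustration; the real type also derives `HashStable` etc.). Note how `subrange` re-bases a relative range and enforces that it stays within the already-checked window:

#[derive(Copy, Clone, Debug)]
pub struct AllocRange {
    pub start: u64,
    pub size: u64,
}

pub fn alloc_range(start: u64, size: u64) -> AllocRange {
    AllocRange { start, size }
}

impl AllocRange {
    pub fn end(self) -> u64 {
        self.start.checked_add(self.size).expect("overflow") // models checked `Size` addition
    }

    /// The `subrange` argument is relative to `self.start`.
    pub fn subrange(self, subrange: AllocRange) -> AllocRange {
        let range = alloc_range(self.start + subrange.start, subrange.size);
        assert!(range.end() <= self.end(), "access outside the bounds for given AllocRange");
        range
    }
}

fn main() {
    let outer = alloc_range(8, 16); // bytes 8..24 of some allocation
    let inner = outer.subrange(alloc_range(4, 4)); // resolves to bytes 12..16
    assert_eq!((inner.start, inner.end()), (12, 16));
}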
// The constructors are all without extra; the extra gets added by a machine hook later.
impl<Tag> Allocation<Tag> {
@ -99,7 +114,7 @@ impl<Tag> Allocation<Tag> {
}
pub fn from_byte_aligned_bytes<'a>(slice: impl Into<Cow<'a, [u8]>>) -> Self {
Allocation::from_bytes(slice, Align::from_bytes(1).unwrap())
Allocation::from_bytes(slice, Align::ONE)
}
pub fn uninit(size: Size, align: Align) -> Self {
@ -114,7 +129,7 @@ impl<Tag> Allocation<Tag> {
}
}
impl Allocation<(), ()> {
impl Allocation<()> {
/// Add Tag and Extra fields
pub fn with_tags_and_extra<T, E>(
self,
@ -154,7 +169,7 @@ impl<Tag, Extra> Allocation<Tag, Extra> {
/// Looks at a slice which may describe uninitialized bytes or describe a relocation. This differs
/// from `get_bytes_with_uninit_and_ptr` in that it does no relocation checks (even on the
/// edges) at all. It further ignores `AllocationExtra` callbacks.
/// edges) at all.
/// This must not be used for reads affecting the interpreter execution.
pub fn inspect_with_uninit_and_ptr_outside_interpreter(&self, range: Range<usize>) -> &[u8] {
&self.bytes[range]
@ -172,23 +187,7 @@ impl<Tag, Extra> Allocation<Tag, Extra> {
}
/// Byte accessors.
impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
/// Just a small local helper function to avoid a bit of code repetition.
/// Returns the range of this allocation that was meant.
#[inline]
fn check_bounds(&self, offset: Size, size: Size) -> Range<usize> {
let end = offset + size; // This does overflow checking.
let end = usize::try_from(end.bytes()).expect("access too big for this host architecture");
assert!(
end <= self.len(),
"Out-of-bounds access at offset {}, size {} in allocation of size {}",
offset.bytes(),
size.bytes(),
self.len()
);
offset.bytes_usize()..end
}
impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
/// The last argument controls whether we error out when there are uninitialized
/// or pointer bytes. You should never call this, call `get_bytes` or
/// `get_bytes_with_uninit_and_ptr` instead,
@ -201,23 +200,18 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
fn get_bytes_internal(
&self,
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
size: Size,
range: AllocRange,
check_init_and_ptr: bool,
) -> InterpResult<'tcx, &[u8]> {
let range = self.check_bounds(ptr.offset, size);
) -> AllocResult<&[u8]> {
if check_init_and_ptr {
self.check_init(ptr, size)?;
self.check_relocations(cx, ptr, size)?;
self.check_init(range)?;
self.check_relocations(cx, range)?;
} else {
// We still don't want relocations on the *edges*.
self.check_relocation_edges(cx, ptr, size)?;
self.check_relocation_edges(cx, range)?;
}
AllocationExtra::memory_read(self, ptr, size)?;
Ok(&self.bytes[range])
Ok(&self.bytes[range.start.bytes_usize()..range.end().bytes_usize()])
}
/// Checks that these bytes are initialized and not pointer bytes, and then return them
@ -227,13 +221,8 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
/// Most likely, you want to use the `PlaceTy` and `OperandTy`-based methods
/// on `InterpCx` instead.
#[inline]
pub fn get_bytes(
&self,
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
size: Size,
) -> InterpResult<'tcx, &[u8]> {
self.get_bytes_internal(cx, ptr, size, true)
pub fn get_bytes(&self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult<&[u8]> {
self.get_bytes_internal(cx, range, true)
}
/// It is the caller's responsibility to handle uninitialized and pointer bytes.
@ -244,10 +233,9 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
pub fn get_bytes_with_uninit_and_ptr(
&self,
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
size: Size,
) -> InterpResult<'tcx, &[u8]> {
self.get_bytes_internal(cx, ptr, size, false)
range: AllocRange,
) -> AllocResult<&[u8]> {
self.get_bytes_internal(cx, range, false)
}
/// Just calling this already marks everything as defined and removes relocations,
@ -256,69 +244,46 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
/// It is the caller's responsibility to check bounds and alignment beforehand.
/// Most likely, you want to use the `PlaceTy` and `OperandTy`-based methods
/// on `InterpCx` instead.
pub fn get_bytes_mut(
&mut self,
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
size: Size,
) -> InterpResult<'tcx, &mut [u8]> {
let range = self.check_bounds(ptr.offset, size);
pub fn get_bytes_mut(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> &mut [u8] {
self.mark_init(range, true);
self.clear_relocations(cx, range);
self.mark_init(ptr, size, true);
self.clear_relocations(cx, ptr, size);
&mut self.bytes[range.start.bytes_usize()..range.end().bytes_usize()]
}
AllocationExtra::memory_written(self, ptr, size)?;
/// A raw pointer variant of `get_bytes_mut` that avoids invalidating existing aliases into this memory.
pub fn get_bytes_mut_ptr(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> *mut [u8] {
self.mark_init(range, true);
self.clear_relocations(cx, range);
Ok(&mut self.bytes[range])
assert!(range.end().bytes_usize() <= self.bytes.len()); // need to do our own bounds-check
let begin_ptr = self.bytes.as_mut_ptr().wrapping_add(range.start.bytes_usize());
let len = range.end().bytes_usize() - range.start.bytes_usize();
ptr::slice_from_raw_parts_mut(begin_ptr, len)
}
}
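Why a `*mut [u8]` variant exists: `copy_repeatedly` needs a source pointer and a destination pointer into (possibly) the same allocation at once, and creating a fresh `&mut [u8]` would invalidate the existing shared reference under aliasing rules such as Stacked Borrows. A standalone sketch of the pattern (plain slices instead of `Allocation`, an assumption for illustration):

use std::ptr;

fn get_bytes_mut_ptr(bytes: &mut [u8], start: usize, len: usize) -> *mut [u8] {
    assert!(start + len <= bytes.len()); // no slice indexing, so bounds-check by hand
    let begin = bytes.as_mut_ptr().wrapping_add(start);
    ptr::slice_from_raw_parts_mut(begin, len)
}

fn main() {
    let mut buf = [0u8; 8];
    let raw = get_bytes_mut_ptr(&mut buf, 2, 4);
    // Writing through the raw slice does not retag or invalidate other
    // pointers into `buf`, which is what the copy loop relies on.
    unsafe { (*raw)[0] = 0xff };
    assert_eq!(buf[2], 0xff);
}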
/// Reading and writing.
impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
/// Validates that `ptr.offset` and `ptr.offset + size` do not point to the middle of a
/// relocation. If `allow_uninit_and_ptr` is `false`, also enforces that the memory in the
/// given range contains neither relocations nor uninitialized bytes.
pub fn check_bytes(
&self,
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
size: Size,
range: AllocRange,
allow_uninit_and_ptr: bool,
) -> InterpResult<'tcx> {
) -> AllocResult {
// Check bounds and relocations on the edges.
self.get_bytes_with_uninit_and_ptr(cx, ptr, size)?;
self.get_bytes_with_uninit_and_ptr(cx, range)?;
// Check uninit and ptr.
if !allow_uninit_and_ptr {
self.check_init(ptr, size)?;
self.check_relocations(cx, ptr, size)?;
self.check_init(range)?;
self.check_relocations(cx, range)?;
}
Ok(())
}
/// Writes `src` to the memory starting at `ptr.offset`.
///
/// It is the caller's responsibility to check bounds and alignment beforehand.
/// Most likely, you want to call `Memory::write_bytes` instead of this method.
pub fn write_bytes(
&mut self,
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
src: impl IntoIterator<Item = u8>,
) -> InterpResult<'tcx> {
let mut src = src.into_iter();
let (lower, upper) = src.size_hint();
let len = upper.expect("can only write bounded iterators");
assert_eq!(lower, len, "can only write iterators with a precise length");
let bytes = self.get_bytes_mut(cx, ptr, Size::from_bytes(len))?;
// `zip` would stop when the first iterator ends; we want to definitely
// cover all of `bytes`.
for dest in bytes {
*dest = src.next().expect("iterator was shorter than it said it would be");
}
assert_matches!(src.next(), None, "iterator was longer than it said it would be");
Ok(())
}
/// Reads a *non-ZST* scalar.
///
/// ZSTs can't be read because in order to obtain a `Pointer`, we need to check
@ -329,14 +294,13 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
pub fn read_scalar(
&self,
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
size: Size,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
range: AllocRange,
) -> AllocResult<ScalarMaybeUninit<Tag>> {
// `get_bytes_unchecked` tests relocation edges.
let bytes = self.get_bytes_with_uninit_and_ptr(cx, ptr, size)?;
let bytes = self.get_bytes_with_uninit_and_ptr(cx, range)?;
// Uninit check happens *after* we established that the alignment is correct.
// We must not return `Ok()` for unaligned pointers!
if self.is_init(ptr, size).is_err() {
if self.is_init(range).is_err() {
// This inflates uninitialized bytes to the entire scalar, even if only a few
// bytes are uninitialized.
return Ok(ScalarMaybeUninit::Uninit);
@ -344,29 +308,19 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
// Now we do the actual reading.
let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap();
// See if we got a pointer.
if size != cx.data_layout().pointer_size {
if range.size != cx.data_layout().pointer_size {
// Not a pointer.
// *Now*, we better make sure that the inside is free of relocations too.
self.check_relocations(cx, ptr, size)?;
self.check_relocations(cx, range)?;
} else {
if let Some(&(tag, alloc_id)) = self.relocations.get(&ptr.offset) {
// Maybe a pointer.
if let Some(&(tag, alloc_id)) = self.relocations.get(&range.start) {
let ptr = Pointer::new_with_tag(alloc_id, Size::from_bytes(bits), tag);
return Ok(ScalarMaybeUninit::Scalar(ptr.into()));
}
}
// We don't. Just return the bits.
Ok(ScalarMaybeUninit::Scalar(Scalar::from_uint(bits, size)))
}
/// Reads a pointer-sized scalar.
///
/// It is the caller's responsibility to check bounds and alignment beforehand.
/// Most likely, you want to call `InterpCx::read_scalar` instead of this method.
pub fn read_ptr_sized(
&self,
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
self.read_scalar(cx, ptr, cx.data_layout().pointer_size)
Ok(ScalarMaybeUninit::Scalar(Scalar::from_uint(bits, range.size)))
}
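A toy model of the relocation logic in `read_scalar`: only a pointer-sized read at exactly a relocation's offset is decoded as a pointer; any other size must not overlap a relocation at all. (`u64` offsets stand in for `Size`, a `BTreeMap` for the relocation map, and strings for the returned `Scalar`; the real code also checks the range edges earlier, so this is a simplification.)

use std::collections::BTreeMap;

fn read_scalar(
    relocations: &BTreeMap<u64, u64 /* alloc_id */>,
    start: u64,
    size: u64,
    pointer_size: u64,
    bits: u128,
) -> Result<String, &'static str> {
    if size != pointer_size {
        // Not a pointer: the interior must be free of relocations.
        if relocations.range(start..start + size).next().is_some() {
            return Err("ReadPointerAsBytes");
        }
    } else if let Some(&alloc_id) = relocations.get(&start) {
        // Maybe a pointer: reassemble it from the relocation entry plus the bits.
        return Ok(format!("pointer into alloc {} at offset {}", alloc_id, bits));
    }
    Ok(format!("raw bits {:#x}", bits))
}

fn main() {
    let mut relocations = BTreeMap::new();
    relocations.insert(16u64, 7u64); // a pointer is stored at offset 16
    assert_eq!(
        read_scalar(&relocations, 16, 8, 8, 32).unwrap(),
        "pointer into alloc 7 at offset 32"
    );
    // A 4-byte read overlapping that relocation must fail.
    assert!(read_scalar(&relocations, 16, 4, 8, 0).is_err());
}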
/// Writes a *non-ZST* scalar.
@ -379,78 +333,56 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
pub fn write_scalar(
&mut self,
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
range: AllocRange,
val: ScalarMaybeUninit<Tag>,
type_size: Size,
) -> InterpResult<'tcx> {
) -> AllocResult {
let val = match val {
ScalarMaybeUninit::Scalar(scalar) => scalar,
ScalarMaybeUninit::Uninit => {
self.mark_init(ptr, type_size, false);
self.mark_init(range, false);
return Ok(());
}
};
let bytes = match val.to_bits_or_ptr(type_size, cx) {
let bytes = match val.to_bits_or_ptr(range.size, cx) {
Err(val) => u128::from(val.offset.bytes()),
Ok(data) => data,
};
let endian = cx.data_layout().endian;
let dst = self.get_bytes_mut(cx, ptr, type_size)?;
let dst = self.get_bytes_mut(cx, range);
write_target_uint(endian, dst, bytes).unwrap();
// See if we have to also write a relocation.
if let Scalar::Ptr(val) = val {
self.relocations.insert(ptr.offset, (val.tag, val.alloc_id));
self.relocations.insert(range.start, (val.tag, val.alloc_id));
}
Ok(())
}
/// Writes a pointer-sized scalar.
///
/// It is the caller's responsibility to check bounds and alignment beforehand.
/// Most likely, you want to call `InterpCx::write_scalar` instead of this method.
pub fn write_ptr_sized(
&mut self,
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
val: ScalarMaybeUninit<Tag>,
) -> InterpResult<'tcx> {
let ptr_size = cx.data_layout().pointer_size;
self.write_scalar(cx, ptr, val, ptr_size)
}
}
/// Relocations.
impl<'tcx, Tag: Copy, Extra> Allocation<Tag, Extra> {
impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
/// Returns all relocations overlapping with the given pointer-offset pair.
pub fn get_relocations(
&self,
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
size: Size,
range: AllocRange,
) -> &[(Size, (Tag, AllocId))] {
// We have to go back `pointer_size - 1` bytes, as that one would still overlap with
// the beginning of this range.
let start = ptr.offset.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1);
let end = ptr.offset + size; // This does overflow checking.
self.relocations.range(Size::from_bytes(start)..end)
let start = range.start.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1);
self.relocations.range(Size::from_bytes(start)..range.end())
}
/// Checks that there are no relocations overlapping with the given range.
#[inline(always)]
fn check_relocations(
&self,
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
size: Size,
) -> InterpResult<'tcx> {
if self.get_relocations(cx, ptr, size).is_empty() {
fn check_relocations(&self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult {
if self.get_relocations(cx, range).is_empty() {
Ok(())
} else {
throw_unsup!(ReadPointerAsBytes)
Err(AllocError::ReadPointerAsBytes)
}
}
@ -460,11 +392,11 @@ impl<'tcx, Tag: Copy, Extra> Allocation<Tag, Extra> {
/// uninitialized. This is a somewhat odd "spooky action at a distance",
/// but it allows strictly more code to run than if we would just error
/// immediately in that case.
fn clear_relocations(&mut self, cx: &impl HasDataLayout, ptr: Pointer<Tag>, size: Size) {
fn clear_relocations(&mut self, cx: &impl HasDataLayout, range: AllocRange) {
// Find the start and end of the given range and its outermost relocations.
let (first, last) = {
// Find all relocations overlapping the given range.
let relocations = self.get_relocations(cx, ptr, size);
let relocations = self.get_relocations(cx, range);
if relocations.is_empty() {
return;
}
@ -474,8 +406,8 @@ impl<'tcx, Tag: Copy, Extra> Allocation<Tag, Extra> {
relocations.last().unwrap().0 + cx.data_layout().pointer_size,
)
};
let start = ptr.offset;
let end = start + size; // `Size` addition
let start = range.start;
let end = range.end();
// Mark parts of the outermost relocations as uninitialized if they partially fall outside the
// given range.
@ -493,46 +425,41 @@ impl<'tcx, Tag: Copy, Extra> Allocation<Tag, Extra> {
/// Errors if there are relocations overlapping with the edges of the
/// given memory range.
#[inline]
fn check_relocation_edges(
&self,
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
size: Size,
) -> InterpResult<'tcx> {
self.check_relocations(cx, ptr, Size::ZERO)?;
self.check_relocations(cx, ptr.offset(size, cx)?, Size::ZERO)?;
fn check_relocation_edges(&self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult {
self.check_relocations(cx, alloc_range(range.start, Size::ZERO))?;
self.check_relocations(cx, alloc_range(range.end(), Size::ZERO))?;
Ok(())
}
}
/// Uninitialized bytes.
impl<'tcx, Tag: Copy, Extra> Allocation<Tag, Extra> {
impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
/// Checks whether the given range is entirely initialized.
///
/// Returns `Ok(())` if it's initialized. Otherwise returns the range of byte
/// indexes of the first contiguous uninitialized access.
fn is_init(&self, ptr: Pointer<Tag>, size: Size) -> Result<(), Range<Size>> {
self.init_mask.is_range_initialized(ptr.offset, ptr.offset + size) // `Size` addition
fn is_init(&self, range: AllocRange) -> Result<(), Range<Size>> {
self.init_mask.is_range_initialized(range.start, range.end()) // `Size` addition
}
/// Checks that a range of bytes is initialized. If not, returns the `InvalidUninitBytes`
/// error which will report the first range of bytes which is uninitialized.
fn check_init(&self, ptr: Pointer<Tag>, size: Size) -> InterpResult<'tcx> {
self.is_init(ptr, size).or_else(|idx_range| {
throw_ub!(InvalidUninitBytes(Some(UninitBytesAccess {
access_ptr: ptr.erase_tag(),
access_size: size,
uninit_ptr: Pointer::new(ptr.alloc_id, idx_range.start),
fn check_init(&self, range: AllocRange) -> AllocResult {
self.is_init(range).or_else(|idx_range| {
Err(AllocError::InvalidUninitBytes(Some(UninitBytesAccess {
access_offset: range.start,
access_size: range.size,
uninit_offset: idx_range.start,
uninit_size: idx_range.end - idx_range.start, // `Size` subtraction
})))
})
}
pub fn mark_init(&mut self, ptr: Pointer<Tag>, size: Size, is_init: bool) {
if size.bytes() == 0 {
pub fn mark_init(&mut self, range: AllocRange, is_init: bool) {
if range.size.bytes() == 0 {
return;
}
self.init_mask.set_range(ptr.offset, ptr.offset + size, is_init);
self.init_mask.set_range(range.start, range.end(), is_init);
}
}
@ -667,25 +594,25 @@ impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
pub fn prepare_relocation_copy(
&self,
cx: &impl HasDataLayout,
src: Pointer<Tag>,
size: Size,
dest: Pointer<Tag>,
length: u64,
src: AllocRange,
dest: Size,
count: u64,
) -> AllocationRelocations<Tag> {
let relocations = self.get_relocations(cx, src, size);
let relocations = self.get_relocations(cx, src);
if relocations.is_empty() {
return AllocationRelocations { relative_relocations: Vec::new() };
}
let mut new_relocations = Vec::with_capacity(relocations.len() * (length as usize));
let size = src.size;
let mut new_relocations = Vec::with_capacity(relocations.len() * (count as usize));
for i in 0..length {
for i in 0..count {
new_relocations.extend(relocations.iter().map(|&(offset, reloc)| {
// compute offset for current repetition
let dest_offset = dest.offset + size * i; // `Size` operations
let dest_offset = dest + size * i; // `Size` operations
(
// shift offsets from source allocation to destination allocation
(offset + dest_offset) - src.offset, // `Size` operations
(offset + dest_offset) - src.start, // `Size` operations
reloc,
)
}));


@ -198,11 +198,11 @@ impl fmt::Display for CheckInAllocMsg {
#[derive(Debug)]
pub struct UninitBytesAccess {
/// Location of the original memory access.
pub access_ptr: Pointer,
pub access_offset: Size,
/// Size of the original memory access.
pub access_size: Size,
/// Location of the first uninitialized byte that was accessed.
pub uninit_ptr: Pointer,
pub uninit_offset: Size,
/// Number of consecutive uninitialized bytes that were accessed.
pub uninit_size: Size,
}
@ -264,7 +264,7 @@ pub enum UndefinedBehaviorInfo<'tcx> {
/// Using a string that is not valid UTF-8,
InvalidStr(std::str::Utf8Error),
/// Using uninitialized data where it is not allowed.
InvalidUninitBytes(Option<UninitBytesAccess>),
InvalidUninitBytes(Option<(AllocId, UninitBytesAccess)>),
/// Working with a local that is not currently live.
DeadLocal,
/// Data size is not equal to target size.
@ -335,18 +335,18 @@ impl fmt::Display for UndefinedBehaviorInfo<'_> {
write!(f, "using {} as function pointer but it does not point to a function", p)
}
InvalidStr(err) => write!(f, "this string is not valid UTF-8: {}", err),
InvalidUninitBytes(Some(access)) => write!(
InvalidUninitBytes(Some((alloc, access))) => write!(
f,
"reading {} byte{} of memory starting at {}, \
but {} byte{} {} uninitialized starting at {}, \
and this operation requires initialized memory",
access.access_size.bytes(),
pluralize!(access.access_size.bytes()),
access.access_ptr,
Pointer::new(*alloc, access.access_offset),
access.uninit_size.bytes(),
pluralize!(access.uninit_size.bytes()),
if access.uninit_size.bytes() != 1 { "are" } else { "is" },
access.uninit_ptr,
Pointer::new(*alloc, access.uninit_offset),
),
InvalidUninitBytes(None) => write!(
f,
@ -446,7 +446,7 @@ impl dyn MachineStopType {
}
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
static_assert_size!(InterpError<'_>, 72);
static_assert_size!(InterpError<'_>, 64);
pub enum InterpError<'tcx> {
/// The program caused undefined behavior.


@ -125,7 +125,7 @@ pub use self::error::{
pub use self::value::{get_slice_bytes, ConstAlloc, ConstValue, Scalar, ScalarMaybeUninit};
pub use self::allocation::{Allocation, AllocationExtra, InitMask, Relocations};
pub use self::allocation::{alloc_range, AllocRange, Allocation, InitMask, Relocations};
pub use self::pointer::{Pointer, PointerArithmetic};


@ -10,7 +10,7 @@ use rustc_target::abi::{HasDataLayout, Size, TargetDataLayout};
use crate::ty::{Lift, ParamEnv, ScalarInt, Ty, TyCtxt};
use super::{AllocId, Allocation, InterpResult, Pointer, PointerArithmetic};
use super::{AllocId, AllocRange, Allocation, InterpResult, Pointer, PointerArithmetic};
/// Represents the result of const evaluation via the `eval_to_allocation` query.
#[derive(Copy, Clone, HashStable, TyEncodable, TyDecodable, Debug, Hash, Eq, PartialEq)]
@ -661,9 +661,7 @@ pub fn get_slice_bytes<'tcx>(cx: &impl HasDataLayout, val: ConstValue<'tcx>) ->
let len = end - start;
data.get_bytes(
cx,
// invent a pointer, only the offset is relevant anyway
Pointer::new(AllocId(0), Size::from_bytes(start)),
Size::from_bytes(len),
AllocRange { start: Size::from_bytes(start), size: Size::from_bytes(len) },
)
.unwrap_or_else(|err| bug!("const slice is invalid: {:?}", err))
} else {


@ -1,5 +1,5 @@
use crate::middle::cstore::{ExternCrate, ExternCrateSource};
use crate::mir::interpret::{AllocId, ConstValue, GlobalAlloc, Pointer, Scalar};
use crate::mir::interpret::{AllocRange, ConstValue, GlobalAlloc, Pointer, Scalar};
use crate::ty::subst::{GenericArg, GenericArgKind, Subst};
use crate::ty::{self, ConstInt, DefIdTree, ParamConst, ScalarInt, Ty, TyCtxt, TypeFoldable};
use rustc_apfloat::ieee::{Double, Single};
@ -1004,9 +1004,9 @@ pub trait PrettyPrinter<'tcx>:
_,
) => match self.tcx().get_global_alloc(ptr.alloc_id) {
Some(GlobalAlloc::Memory(alloc)) => {
let bytes = int.assert_bits(self.tcx().data_layout.pointer_size);
let size = Size::from_bytes(bytes);
if let Ok(byte_str) = alloc.get_bytes(&self.tcx(), ptr, size) {
let len = int.assert_bits(self.tcx().data_layout.pointer_size);
let range = AllocRange { start: ptr.offset, size: Size::from_bytes(len) };
if let Ok(byte_str) = alloc.get_bytes(&self.tcx(), range) {
p!(pretty_print_byte_str(byte_str))
} else {
p!("<too short allocation>")
@ -1181,10 +1181,9 @@ pub trait PrettyPrinter<'tcx>:
(ConstValue::ByRef { alloc, offset }, ty::Array(t, n)) if *t == u8_type => {
let n = n.val.try_to_bits(self.tcx().data_layout.pointer_size).unwrap();
// cast is ok because we already checked for pointer size (32 or 64 bit) above
let n = Size::from_bytes(n);
let ptr = Pointer::new(AllocId(0), offset);
let range = AllocRange { start: offset, size: Size::from_bytes(n) };
let byte_str = alloc.get_bytes(&self.tcx(), ptr, n).unwrap();
let byte_str = alloc.get_bytes(&self.tcx(), range).unwrap();
p!("*");
p!(pretty_print_byte_str(byte_str));
Ok(self)


@ -181,7 +181,7 @@ pub(crate) fn deref_const<'tcx>(
let mplace = ecx.deref_operand(&op).unwrap();
if let Scalar::Ptr(ptr) = mplace.ptr {
assert_eq!(
ecx.memory.get_raw(ptr.alloc_id).unwrap().mutability,
tcx.get_global_alloc(ptr.alloc_id).unwrap().unwrap_memory().mutability,
Mutability::Not,
"deref_const cannot be used with mutable allocations as \
that could allow pattern matching to observe mutable statics",


@ -14,7 +14,7 @@ use rustc_middle::ty;
use rustc_middle::ty::subst::SubstsRef;
use rustc_middle::ty::{Ty, TyCtxt};
use rustc_span::symbol::{sym, Symbol};
use rustc_target::abi::{Abi, LayoutOf as _, Primitive, Size};
use rustc_target::abi::{Abi, Align, LayoutOf as _, Primitive, Size};
use super::{
util::ensure_monomorphic_enough, CheckInAllocMsg, ImmTy, InterpCx, Machine, OpTy, PlaceTy,
@ -525,7 +525,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
self.memory.check_ptr_access_align(
min_ptr,
Size::from_bytes(size),
None,
Align::ONE,
CheckInAllocMsg::PointerArithmeticTest,
)?;
Ok(offset_ptr)
@ -549,17 +549,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
)
})?;
// Make sure we check both pointers for an access of the total size and alignment,
// *even if* the total size is 0.
let src =
self.memory.check_ptr_access(self.read_scalar(&src)?.check_init()?, size, align)?;
let src = self.read_scalar(&src)?.check_init()?;
let dst = self.read_scalar(&dst)?.check_init()?;
let dst =
self.memory.check_ptr_access(self.read_scalar(&dst)?.check_init()?, size, align)?;
if let (Some(src), Some(dst)) = (src, dst) {
self.memory.copy(src, dst, size, nonoverlapping)?;
}
Ok(())
self.memory.copy(src, align, dst, align, size, nonoverlapping)
}
}


@ -13,8 +13,8 @@ use rustc_target::abi::Size;
use rustc_target::spec::abi::Abi;
use super::{
AllocId, Allocation, AllocationExtra, CheckInAllocMsg, Frame, ImmTy, InterpCx, InterpResult,
LocalValue, MemPlace, Memory, MemoryKind, OpTy, Operand, PlaceTy, Pointer, Scalar,
AllocId, Allocation, CheckInAllocMsg, Frame, ImmTy, InterpCx, InterpResult, LocalValue,
MemPlace, Memory, MemoryKind, OpTy, Operand, PlaceTy, Pointer, Scalar,
};
/// Data returned by Machine::stack_pop,
@ -105,7 +105,7 @@ pub trait Machine<'mir, 'tcx>: Sized {
type MemoryExtra;
/// Extra data stored in every allocation.
type AllocExtra: AllocationExtra<Self::PointerTag> + 'static;
type AllocExtra: Debug + Clone + 'static;
/// Memory's allocation map
type MemoryMap: AllocMap<
@ -305,10 +305,38 @@ pub trait Machine<'mir, 'tcx>: Sized {
kind: Option<MemoryKind<Self::MemoryKind>>,
) -> (Cow<'b, Allocation<Self::PointerTag, Self::AllocExtra>>, Self::PointerTag);
/// Called to notify the machine before a deallocation occurs.
fn before_deallocation(
/// Hook for performing extra checks on a memory read access.
///
/// Takes read-only access to the allocation so that all memory read
/// operations can take `&self`. Use a `RefCell` in `AllocExtra` if you
/// need to mutate.
#[inline(always)]
fn memory_read(
_memory_extra: &Self::MemoryExtra,
_alloc: &Allocation<Self::PointerTag, Self::AllocExtra>,
_ptr: Pointer<Self::PointerTag>,
_size: Size,
) -> InterpResult<'tcx> {
Ok(())
}
/// Hook for performing extra checks on a memory write access.
#[inline(always)]
fn memory_written(
_memory_extra: &mut Self::MemoryExtra,
_id: AllocId,
_alloc: &mut Allocation<Self::PointerTag, Self::AllocExtra>,
_ptr: Pointer<Self::PointerTag>,
_size: Size,
) -> InterpResult<'tcx> {
Ok(())
}
/// Hook for performing extra operations on a memory deallocation.
#[inline(always)]
fn memory_deallocated(
_memory_extra: &mut Self::MemoryExtra,
_alloc: &mut Allocation<Self::PointerTag, Self::AllocExtra>,
_ptr: Pointer<Self::PointerTag>,
) -> InterpResult<'tcx> {
Ok(())
}
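The hooks move from `AllocationExtra` (keyed on the allocation's `Extra`) to the `Machine` trait, which also hands them the machine's `MemoryExtra` state. A standalone sketch of that shape with trimmed signatures (the real hooks also receive the `Allocation`, a `Pointer`, and a `Size`):

trait Machine {
    type MemoryExtra;

    fn memory_read(_extra: &Self::MemoryExtra, _offset: u64, _size: u64) -> Result<(), String> {
        Ok(()) // default: no extra checks
    }

    fn memory_written(_extra: &mut Self::MemoryExtra, _offset: u64, _size: u64) -> Result<(), String> {
        Ok(())
    }
}

// A machine that counts reads, roughly how a Miri-style instrumentation might.
struct CountingMachine;

impl Machine for CountingMachine {
    type MemoryExtra = std::cell::Cell<u64>;

    fn memory_read(extra: &Self::MemoryExtra, _offset: u64, _size: u64) -> Result<(), String> {
        extra.set(extra.get() + 1); // reads only get `&self`, hence interior mutability
        Ok(())
    }
}

fn main() {
    let reads = std::cell::Cell::new(0);
    CountingMachine::memory_read(&reads, 0, 8).unwrap();
    assert_eq!(reads.get(), 1);
}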
@ -322,7 +350,7 @@ pub trait Machine<'mir, 'tcx>: Sized {
Ok(())
}
/// Executes a retagging operation
/// Executes a retagging operation.
#[inline]
fn retag(
_ecx: &mut InterpCx<'mir, 'tcx, Self>,


@ -18,8 +18,8 @@ use rustc_middle::ty::{Instance, ParamEnv, TyCtxt};
use rustc_target::abi::{Align, HasDataLayout, Size, TargetDataLayout};
use super::{
AllocId, AllocMap, Allocation, AllocationExtra, CheckInAllocMsg, GlobalAlloc, InterpResult,
Machine, MayLeak, Pointer, PointerArithmetic, Scalar,
alloc_range, AllocId, AllocMap, AllocRange, Allocation, CheckInAllocMsg, GlobalAlloc,
InterpResult, Machine, MayLeak, Pointer, PointerArithmetic, Scalar, ScalarMaybeUninit,
};
use crate::util::pretty;
@ -125,6 +125,24 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> HasDataLayout for Memory<'mir, 'tcx, M>
}
}
/// A reference to some allocation that was already bounds-checked for the given region
/// and had the on-access machine hooks run.
#[derive(Copy, Clone)]
pub struct AllocRef<'a, 'tcx, Tag, Extra> {
alloc: &'a Allocation<Tag, Extra>,
range: AllocRange,
tcx: TyCtxt<'tcx>,
alloc_id: AllocId,
}
/// A reference to some allocation that was already bounds-checked for the given region
/// and had the on-access machine hooks run.
pub struct AllocRefMut<'a, 'tcx, Tag, Extra> {
alloc: &'a mut Allocation<Tag, Extra>,
range: AllocRange,
tcx: TyCtxt<'tcx>,
alloc_id: AllocId,
}
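A toy model of what `AllocRef` buys: the handle remembers the range that was already bounds-checked and hook-notified, so subsequent reads take offsets relative to that window and can only shrink it (plain `&[u8]`/`u64` stand in for the real types):

struct AllocRef<'a> {
    bytes: &'a [u8],
    start: u64,
    size: u64,
}

impl<'a> AllocRef<'a> {
    /// `offset` is relative to the checked window, mirroring `range.subrange`.
    fn read(&self, offset: u64, len: u64) -> &'a [u8] {
        assert!(offset + len <= self.size, "access outside the checked range");
        let abs = (self.start + offset) as usize;
        &self.bytes[abs..abs + len as usize]
    }
}

fn main() {
    let alloc = [0u8, 1, 2, 3, 4, 5, 6, 7];
    // Pretend bounds/alignment checks and the read hook already ran for bytes 2..6.
    let aref = AllocRef { bytes: &alloc, start: 2, size: 4 };
    assert_eq!(aref.read(1, 2), &[3, 4]);
}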
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
pub fn new(tcx: TyCtxt<'tcx>, extra: M::MemoryExtra) -> Self {
Memory {
@ -246,7 +264,15 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
Some((size, _align)) => size,
None => self.get_raw(ptr.alloc_id)?.size(),
};
self.copy(ptr, new_ptr, old_size.min(new_size), /*nonoverlapping*/ true)?;
// This will also call the access hooks.
self.copy(
ptr.into(),
Align::ONE,
new_ptr.into(),
Align::ONE,
old_size.min(new_size),
/*nonoverlapping*/ true,
)?;
self.deallocate(ptr, old_size_and_align, kind)?;
Ok(new_ptr)
@ -278,8 +304,6 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
);
}
M::before_deallocation(&mut self.extra, ptr.alloc_id)?;
let (alloc_kind, mut alloc) = match self.alloc_map.remove(&ptr.alloc_id) {
Some(alloc) => alloc,
None => {
@ -319,8 +343,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
}
// Let the machine take some extra action
let size = alloc.size();
AllocationExtra::memory_deallocated(&mut alloc, ptr, size)?;
M::memory_deallocated(&mut self.extra, &mut alloc, ptr)?;
// Don't forget to remember size and align of this now-dead allocation
let old = self.dead_alloc_map.insert(ptr.alloc_id, (alloc.size(), alloc.align));
@ -331,40 +354,53 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
Ok(())
}
/// Check if the given scalar is allowed to do a memory access of given `size`
/// and `align`. On success, returns `None` for zero-sized accesses (where
/// nothing else is left to do) and a `Pointer` to use for the actual access otherwise.
/// Crucially, if the input is a `Pointer`, we will test it for liveness
/// *even if* the size is 0.
///
/// Everyone accessing memory based on a `Scalar` should use this method to get the
/// `Pointer` they need. And even if you already have a `Pointer`, call this method
/// to make sure it is sufficiently aligned and not dangling. Not doing that may
/// cause ICEs.
///
/// Most of the time you should use `check_mplace_access`, but when you just have a pointer,
/// this method is still appropriate.
/// Internal helper function for APIs that offer memory access based on `Scalar` pointers.
#[inline(always)]
pub fn check_ptr_access(
pub(super) fn check_ptr_access(
&self,
sptr: Scalar<M::PointerTag>,
size: Size,
align: Align,
) -> InterpResult<'tcx, Option<Pointer<M::PointerTag>>> {
let align = M::enforce_alignment(&self.extra).then_some(align);
self.check_ptr_access_align(sptr, size, align, CheckInAllocMsg::MemoryAccessTest)
self.check_and_deref_ptr(sptr, size, align, CheckInAllocMsg::MemoryAccessTest, |ptr| {
let (size, align) =
self.get_size_and_align(ptr.alloc_id, AllocCheck::Dereferenceable)?;
Ok((size, align, ptr))
})
}
/// Like `check_ptr_access`, but *definitely* checks alignment when `align`
/// is `Some` (overriding `M::enforce_alignment`). Also lets the caller control
/// the error message for the out-of-bounds case.
/// Check if the given scalar is allowed to do a memory access of given `size` and `align`
/// (ignoring `M::enforce_alignment`). The caller can control the error message for the
/// out-of-bounds case.
#[inline(always)]
pub fn check_ptr_access_align(
&self,
sptr: Scalar<M::PointerTag>,
size: Size,
align: Align,
msg: CheckInAllocMsg,
) -> InterpResult<'tcx> {
self.check_and_deref_ptr(sptr, size, Some(align), msg, |ptr| {
let (size, align) =
self.get_size_and_align(ptr.alloc_id, AllocCheck::Dereferenceable)?;
Ok((size, align, ()))
})?;
Ok(())
}
/// Low-level helper function to check if a ptr is in-bounds and potentially return a reference
/// to the allocation it points to. Supports both shared and mutable references, as the actual
/// checking is offloaded to a helper closure. `align` defines whether and which alignment check
/// is done. Returns `None` for size 0, and otherwise `Some` of what `alloc_size` returned.
fn check_and_deref_ptr<T>(
&self,
sptr: Scalar<M::PointerTag>,
size: Size,
align: Option<Align>,
msg: CheckInAllocMsg,
) -> InterpResult<'tcx, Option<Pointer<M::PointerTag>>> {
alloc_size: impl FnOnce(Pointer<M::PointerTag>) -> InterpResult<'tcx, (Size, Align, T)>,
) -> InterpResult<'tcx, Option<T>> {
fn check_offset_align(offset: u64, align: Align) -> InterpResult<'static> {
if offset % align.bytes() == 0 {
Ok(())
@ -402,8 +438,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
None
}
Err(ptr) => {
let (allocation_size, alloc_align) =
self.get_size_and_align(ptr.alloc_id, AllocCheck::Dereferenceable)?;
let (allocation_size, alloc_align, ret_val) = alloc_size(ptr)?;
// Test bounds. This also ensures non-null.
// It is sufficient to check this for the end pointer. The addition
// checks for overflow.
@ -431,7 +466,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
// We can still be zero-sized in this branch, in which case we have to
// return `None`.
if size.bytes() == 0 { None } else { Some(ptr) }
if size.bytes() == 0 { None } else { Some(ret_val) }
}
})
}
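The `alloc_size` closure is the trick that lets one bounds-checking routine serve several callers: the shared-reference path returns the allocation it already looked up, while alignment-only callers return `()`. A simplified standalone model of that control flow (illustrative types; the real function also handles integer addresses and null checks):

fn check_and_deref<T>(
    offset: u64,
    size: u64,
    alloc_size: impl FnOnce() -> Result<(u64, T), String>,
) -> Result<Option<T>, String> {
    // Always consult the allocation, even for size 0, to test liveness.
    let (allocation_size, ret_val) = alloc_size()?;
    if offset + size > allocation_size {
        return Err("pointer out of bounds".into());
    }
    // Zero-sized accesses yield `None`; there is nothing left to do.
    Ok(if size == 0 { None } else { Some(ret_val) })
}

fn main() {
    let alloc = vec![0u8; 16];
    // Shared path: the closure returns the data the caller keeps using.
    let r = check_and_deref(4, 8, || Ok((alloc.len() as u64, &alloc))).unwrap();
    assert!(r.is_some());
    // Alignment-only path: the closure returns `()`.
    let unit = check_and_deref(0, 0, || Ok((16u64, ()))).unwrap();
    assert!(unit.is_none());
}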
@ -502,8 +537,8 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
}
/// Gives raw access to the `Allocation`, without bounds or alignment checks.
/// Use the higher-level, `PlaceTy`- and `OpTy`-based APIs in `InterpCx` instead!
pub fn get_raw(
/// The caller is responsible for calling the access hooks!
fn get_raw(
&self,
id: AllocId,
) -> InterpResult<'tcx, &Allocation<M::PointerTag, M::AllocExtra>> {
@ -537,14 +572,54 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
}
}
/// "Safe" (bounds and align-checked) allocation access.
pub fn get<'a>(
&'a self,
sptr: Scalar<M::PointerTag>,
size: Size,
align: Align,
) -> InterpResult<'tcx, Option<AllocRef<'a, 'tcx, M::PointerTag, M::AllocExtra>>> {
let align = M::enforce_alignment(&self.extra).then_some(align);
let ptr_and_alloc = self.check_and_deref_ptr(
sptr,
size,
align,
CheckInAllocMsg::MemoryAccessTest,
|ptr| {
let alloc = self.get_raw(ptr.alloc_id)?;
Ok((alloc.size(), alloc.align, (ptr, alloc)))
},
)?;
if let Some((ptr, alloc)) = ptr_and_alloc {
M::memory_read(&self.extra, alloc, ptr, size)?;
let range = alloc_range(ptr.offset, size);
Ok(Some(AllocRef { alloc, range, tcx: self.tcx, alloc_id: ptr.alloc_id }))
} else {
// Even in this branch we have to be sure that we actually access the allocation, in
// order to ensure that `static FOO: Type = FOO;` causes a cycle error instead of
// magically pulling *any* ZST value from the ether. However, the `get_raw` above is
// always called when `sptr` is truly a `Pointer`, so we are good.
Ok(None)
}
}
/// Return the `extra` field of the given allocation.
pub fn get_alloc_extra<'a>(&'a self, id: AllocId) -> InterpResult<'tcx, &'a M::AllocExtra> {
Ok(&self.get_raw(id)?.extra)
}
/// Gives raw mutable access to the `Allocation`, without bounds or alignment checks.
/// Use the higher-level, `PlaceTy`- and `OpTy`-based APIs in `InterpCx` instead!
pub fn get_raw_mut(
/// The caller is responsible for calling the access hooks!
///
/// Also returns a ptr to `self.extra` so that the caller can use it in parallel with the
/// allocation.
fn get_raw_mut(
&mut self,
id: AllocId,
) -> InterpResult<'tcx, &mut Allocation<M::PointerTag, M::AllocExtra>> {
) -> InterpResult<'tcx, (&mut Allocation<M::PointerTag, M::AllocExtra>, &mut M::MemoryExtra)>
{
let tcx = self.tcx;
let memory_extra = &self.extra;
let memory_extra = &mut self.extra;
let a = self.alloc_map.get_mut_or(id, || {
// Need to make a copy, even if `get_global_alloc` is able
// to give us a cheap reference.
@ -567,11 +642,40 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
if a.mutability == Mutability::Not {
throw_ub!(WriteToReadOnly(id))
}
Ok(a)
Ok((a, memory_extra))
}
}
}
/// "Safe" (bounds and align-checked) allocation access.
pub fn get_mut<'a>(
&'a mut self,
sptr: Scalar<M::PointerTag>,
size: Size,
align: Align,
) -> InterpResult<'tcx, Option<AllocRefMut<'a, 'tcx, M::PointerTag, M::AllocExtra>>> {
let ptr = self.check_ptr_access(sptr, size, align)?;
if let Some(ptr) = ptr {
let tcx = self.tcx;
// FIXME: can we somehow avoid looking up the allocation twice here?
// We cannot call `get_raw_mut` inside `check_and_deref_ptr` as that would duplicate `&mut self`.
let (alloc, extra) = self.get_raw_mut(ptr.alloc_id)?;
M::memory_written(extra, alloc, ptr, size)?;
let range = alloc_range(ptr.offset, size);
Ok(Some(AllocRefMut { alloc, range, tcx, alloc_id: ptr.alloc_id }))
} else {
Ok(None)
}
}
/// Return the `extra` field of the given allocation.
pub fn get_alloc_extra_mut<'a>(
&'a mut self,
id: AllocId,
) -> InterpResult<'tcx, &'a mut M::AllocExtra> {
Ok(&mut self.get_raw_mut(id)?.0.extra)
}
/// Obtain the size and alignment of an allocation, even if that allocation has
/// been deallocated.
///
@ -596,7 +700,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
// The caller requested no function pointers.
throw_ub!(DerefFunctionPointer(id))
} else {
Ok((Size::ZERO, Align::from_bytes(1).unwrap()))
Ok((Size::ZERO, Align::ONE))
};
}
@ -658,7 +762,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
}
pub fn mark_immutable(&mut self, id: AllocId) -> InterpResult<'tcx> {
self.get_raw_mut(id)?.mutability = Mutability::Not;
self.get_raw_mut(id)?.0.mutability = Mutability::Not;
Ok(())
}
@ -792,16 +896,62 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> std::fmt::Debug for DumpAllocs<'a,
}
/// Reading and writing.
impl<'tcx, 'a, Tag: Copy, Extra> AllocRefMut<'a, 'tcx, Tag, Extra> {
pub fn write_scalar(
&mut self,
range: AllocRange,
val: ScalarMaybeUninit<Tag>,
) -> InterpResult<'tcx> {
Ok(self
.alloc
.write_scalar(&self.tcx, self.range.subrange(range), val)
.map_err(|e| e.to_interp_error(self.alloc_id))?)
}
pub fn write_ptr_sized(
&mut self,
offset: Size,
val: ScalarMaybeUninit<Tag>,
) -> InterpResult<'tcx> {
self.write_scalar(alloc_range(offset, self.tcx.data_layout().pointer_size), val)
}
}
impl<'tcx, 'a, Tag: Copy, Extra> AllocRef<'a, 'tcx, Tag, Extra> {
pub fn read_scalar(&self, range: AllocRange) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
Ok(self
.alloc
.read_scalar(&self.tcx, self.range.subrange(range))
.map_err(|e| e.to_interp_error(self.alloc_id))?)
}
pub fn read_ptr_sized(&self, offset: Size) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
self.read_scalar(alloc_range(offset, self.tcx.data_layout().pointer_size))
}
pub fn check_bytes(&self, range: AllocRange, allow_uninit_and_ptr: bool) -> InterpResult<'tcx> {
Ok(self
.alloc
.check_bytes(&self.tcx, self.range.subrange(range), allow_uninit_and_ptr)
.map_err(|e| e.to_interp_error(self.alloc_id))?)
}
}
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
/// Reads the given number of bytes from memory. Returns them as a slice.
///
/// Performs appropriate bounds checks.
pub fn read_bytes(&self, ptr: Scalar<M::PointerTag>, size: Size) -> InterpResult<'tcx, &[u8]> {
let ptr = match self.check_ptr_access(ptr, size, Align::from_bytes(1).unwrap())? {
Some(ptr) => ptr,
pub fn read_bytes(&self, sptr: Scalar<M::PointerTag>, size: Size) -> InterpResult<'tcx, &[u8]> {
let alloc_ref = match self.get(sptr, size, Align::ONE)? {
Some(a) => a,
None => return Ok(&[]), // zero-sized access
};
self.get_raw(ptr.alloc_id)?.get_bytes(self, ptr, size)
// Side-step AllocRef and directly access the underlying bytes more efficiently.
// (We are staying inside the bounds here so all is good.)
Ok(alloc_ref
.alloc
.get_bytes(&alloc_ref.tcx, alloc_ref.range)
.map_err(|e| e.to_interp_error(alloc_ref.alloc_id))?)
}
/// Writes the given stream of bytes into memory.
@ -809,14 +959,17 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
/// Performs appropriate bounds checks.
pub fn write_bytes(
&mut self,
ptr: Scalar<M::PointerTag>,
sptr: Scalar<M::PointerTag>,
src: impl IntoIterator<Item = u8>,
) -> InterpResult<'tcx> {
let mut src = src.into_iter();
let size = Size::from_bytes(src.size_hint().0);
// `write_bytes` checks that this lower bound `size` matches the upper bound and reality.
let ptr = match self.check_ptr_access(ptr, size, Align::from_bytes(1).unwrap())? {
Some(ptr) => ptr,
let (lower, upper) = src.size_hint();
let len = upper.expect("can only write bounded iterators");
assert_eq!(lower, len, "can only write iterators with a precise length");
let size = Size::from_bytes(len);
let alloc_ref = match self.get_mut(sptr, size, Align::ONE)? {
Some(alloc_ref) => alloc_ref,
None => {
// zero-sized access
assert_matches!(
@ -827,56 +980,88 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
return Ok(());
}
};
let tcx = self.tcx;
self.get_raw_mut(ptr.alloc_id)?.write_bytes(&tcx, ptr, src)
// Side-step AllocRef and directly access the underlying bytes more efficiently.
// (We are staying inside the bounds here so all is good.)
let bytes = alloc_ref.alloc.get_bytes_mut(&alloc_ref.tcx, alloc_ref.range);
// `zip` would stop when the first iterator ends; we want to definitely
// cover all of `bytes`.
for dest in bytes {
*dest = src.next().expect("iterator was shorter than it said it would be");
}
assert_matches!(src.next(), None, "iterator was longer than it said it would be");
Ok(())
}
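`write_bytes` now owns the exact-size-iterator contract that used to live in `Allocation::write_bytes`. A standalone sketch of that contract (plain slices instead of `AllocRef`, an assumption for illustration):

fn write_bytes(dest: &mut [u8], src: impl IntoIterator<Item = u8>) {
    let mut src = src.into_iter();
    let (lower, upper) = src.size_hint();
    let len = upper.expect("can only write bounded iterators");
    assert_eq!(lower, len, "can only write iterators with a precise length");
    assert_eq!(len, dest.len());
    // `zip` would stop at the shorter side; we insist on covering all of `dest`.
    for slot in dest.iter_mut() {
        *slot = src.next().expect("iterator was shorter than it said it would be");
    }
    assert!(src.next().is_none(), "iterator was longer than it said it would be");
}

fn main() {
    let mut buf = [0u8; 4];
    write_bytes(&mut buf, [1, 2, 3, 4]);
    assert_eq!(buf, [1, 2, 3, 4]);
}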
/// Expects the caller to have checked bounds and alignment.
pub fn copy(
&mut self,
src: Pointer<M::PointerTag>,
dest: Pointer<M::PointerTag>,
src: Scalar<M::PointerTag>,
src_align: Align,
dest: Scalar<M::PointerTag>,
dest_align: Align,
size: Size,
nonoverlapping: bool,
) -> InterpResult<'tcx> {
self.copy_repeatedly(src, dest, size, 1, nonoverlapping)
self.copy_repeatedly(src, src_align, dest, dest_align, size, 1, nonoverlapping)
}
/// Expects the caller to have checked bounds and alignment.
pub fn copy_repeatedly(
&mut self,
src: Pointer<M::PointerTag>,
dest: Pointer<M::PointerTag>,
src: Scalar<M::PointerTag>,
src_align: Align,
dest: Scalar<M::PointerTag>,
dest_align: Align,
size: Size,
length: u64,
num_copies: u64,
nonoverlapping: bool,
) -> InterpResult<'tcx> {
let tcx = self.tcx;
// We need to do our own bounds-checks.
let src = self.check_ptr_access(src, size, src_align)?;
let dest = self.check_ptr_access(dest, size * num_copies, dest_align)?; // `Size` multiplication
// FIXME: we look up both allocations twice here, once before for the `check_ptr_access`
// and once below to get the underlying `&[mut] Allocation`.
// Source alloc preparations and access hooks.
let src = match src {
None => return Ok(()), // Zero-sized *source*, that means dst is also zero-sized and we have nothing to do.
Some(src_ptr) => src_ptr,
};
let src_alloc = self.get_raw(src.alloc_id)?;
M::memory_read(&self.extra, src_alloc, src, size)?;
// We need the `dest` ptr for the next operation, so we get it now.
// We already did the source checks and called the hooks so we are good to return early.
let dest = match dest {
None => return Ok(()), // Zero-sized *destination*.
Some(dest_ptr) => dest_ptr,
};
// first copy the relocations to a temporary buffer, because
// `get_bytes_mut` will clear the relocations, which is correct,
// since we don't want to keep any relocations at the target.
// (`get_bytes_with_uninit_and_ptr` below checks that there are no
// relocations overlapping the edges; those would not be handled correctly).
let relocations =
self.get_raw(src.alloc_id)?.prepare_relocation_copy(self, src, size, dest, length);
let tcx = self.tcx;
// This checks relocation edges on the src.
let src_bytes =
self.get_raw(src.alloc_id)?.get_bytes_with_uninit_and_ptr(&tcx, src, size)?.as_ptr();
let dest_bytes =
self.get_raw_mut(dest.alloc_id)?.get_bytes_mut(&tcx, dest, size * length)?; // `Size` multiplication
// If `dest_bytes` is empty we just optimize to not run anything for zsts.
// See #67539
if dest_bytes.is_empty() {
return Ok(());
}
let dest_bytes = dest_bytes.as_mut_ptr();
let relocations = src_alloc.prepare_relocation_copy(
self,
alloc_range(src.offset, size),
dest.offset,
num_copies,
);
// Prepare a copy of the initialization mask.
let compressed = self.get_raw(src.alloc_id)?.compress_uninit_range(src, size);
let compressed = src_alloc.compress_uninit_range(src, size);
// This checks relocation edges on the src.
let src_bytes = src_alloc
.get_bytes_with_uninit_and_ptr(&tcx, alloc_range(src.offset, size))
.map_err(|e| e.to_interp_error(src.alloc_id))?
.as_ptr(); // raw ptr, so we can also get a ptr to the destination allocation
// Destination alloc preparations and access hooks.
let (dest_alloc, extra) = self.get_raw_mut(dest.alloc_id)?;
M::memory_written(extra, dest_alloc, dest, size * num_copies)?;
let dest_bytes = dest_alloc
.get_bytes_mut_ptr(&tcx, alloc_range(dest.offset, size * num_copies))
.as_mut_ptr();
if compressed.no_bytes_init() {
// Fast path: If all bytes are `uninit` then there is nothing to copy. The target range
@ -885,8 +1070,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
// This also avoids writing to the target bytes so that the backing allocation is never
// touched if the bytes stay uninitialized for the whole interpreter execution. On contemporary
// operating systems this can avoid physically allocating the page.
let dest_alloc = self.get_raw_mut(dest.alloc_id)?;
dest_alloc.mark_init(dest, size * length, false); // `Size` multiplication
dest_alloc.mark_init(alloc_range(dest.offset, size * num_copies), false); // `Size` multiplication
dest_alloc.mark_relocation_range(relocations);
return Ok(());
}
@ -907,7 +1091,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
}
}
for i in 0..length {
for i in 0..num_copies {
ptr::copy(
src_bytes,
dest_bytes.add((size * i).bytes_usize()), // `Size` multiplication
@ -915,7 +1099,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
);
}
} else {
for i in 0..length {
for i in 0..num_copies {
ptr::copy_nonoverlapping(
src_bytes,
dest_bytes.add((size * i).bytes_usize()), // `Size` multiplication
@ -925,16 +1109,10 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
}
}
// now fill in all the data
self.get_raw_mut(dest.alloc_id)?.mark_compressed_init_range(
&compressed,
dest,
size,
length,
);
// now fill in all the "init" data
dest_alloc.mark_compressed_init_range(&compressed, dest, size, num_copies);
// copy the relocations to the destination
self.get_raw_mut(dest.alloc_id)?.mark_relocation_range(relocations);
dest_alloc.mark_relocation_range(relocations);
Ok(())
}
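A toy model of the repetition loop at the heart of `copy_repeatedly`: one bounds-checked source read fanned out to `num_copies` adjacent destination windows through raw pointers (plain slices here; the real code also copies relocations and the init mask):

use std::ptr;

fn copy_repeatedly(src: &[u8], dest: &mut [u8], num_copies: usize) {
    let size = src.len();
    assert!(size * num_copies <= dest.len());
    let src_bytes = src.as_ptr();
    let dest_bytes = dest.as_mut_ptr();
    for i in 0..num_copies {
        // Nonoverlapping is sound here: src and dest are distinct borrows.
        unsafe { ptr::copy_nonoverlapping(src_bytes, dest_bytes.add(size * i), size) };
    }
}

fn main() {
    let src = [0xab, 0xcd];
    let mut dest = [0u8; 6];
    copy_repeatedly(&src, &mut dest, 3);
    assert_eq!(dest, [0xab, 0xcd, 0xab, 0xcd, 0xab, 0xcd]);
}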


@ -21,7 +21,7 @@ pub use rustc_middle::mir::interpret::*; // have all the `interpret` symbols in
pub use self::eval_context::{Frame, FrameInfo, InterpCx, LocalState, LocalValue, StackPopCleanup};
pub use self::intern::{intern_const_alloc_recursive, InternKind};
pub use self::machine::{compile_time_machine, AllocMap, Machine, MayLeak, StackPopJump};
pub use self::memory::{AllocCheck, FnVal, Memory, MemoryKind};
pub use self::memory::{AllocCheck, AllocRef, AllocRefMut, FnVal, Memory, MemoryKind};
pub use self::operand::{ImmTy, Immediate, OpTy, Operand};
pub use self::place::{MPlaceTy, MemPlace, MemPlaceMeta, Place, PlaceTy};
pub use self::validity::{CtfeValidationMode, RefTracking};


@ -15,8 +15,8 @@ use rustc_target::abi::{Abi, HasDataLayout, LayoutOf, Size, TagEncoding};
use rustc_target::abi::{VariantIdx, Variants};
use super::{
from_known_layout, mir_assign_valid_types, ConstValue, GlobalId, InterpCx, InterpResult,
MPlaceTy, Machine, MemPlace, Place, PlaceTy, Pointer, Scalar, ScalarMaybeUninit,
alloc_range, from_known_layout, mir_assign_valid_types, ConstValue, GlobalId, InterpCx,
InterpResult, MPlaceTy, Machine, MemPlace, Place, PlaceTy, Pointer, Scalar, ScalarMaybeUninit,
};
/// An `Immediate` represents a single immediate self-contained Rust value.
@ -249,19 +249,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
return Ok(None);
}
let ptr = match self
.check_mplace_access(mplace, None)
.expect("places should be checked on creation")
{
let alloc = match self.get_alloc(mplace)? {
Some(ptr) => ptr,
None => {
if let Scalar::Ptr(ptr) = mplace.ptr {
// We may be reading from a static.
// In order to ensure that `static FOO: Type = FOO;` causes a cycle error
// instead of magically pulling *any* ZST value from the ether, we need to
// actually access the referenced allocation.
self.memory.get_raw(ptr.alloc_id)?;
}
return Ok(Some(ImmTy {
// zero-sized type
imm: Scalar::ZST.into(),
@ -270,11 +260,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
};
let alloc = self.memory.get_raw(ptr.alloc_id)?;
match mplace.layout.abi {
Abi::Scalar(..) => {
let scalar = alloc.read_scalar(self, ptr, mplace.layout.size)?;
let scalar = alloc.read_scalar(alloc_range(Size::ZERO, mplace.layout.size))?;
Ok(Some(ImmTy { imm: scalar.into(), layout: mplace.layout }))
}
Abi::ScalarPair(ref a, ref b) => {
@ -283,12 +271,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
let (a, b) = (&a.value, &b.value);
let (a_size, b_size) = (a.size(self), b.size(self));
let a_ptr = ptr;
let b_offset = a_size.align_to(b.align(self).abi);
assert!(b_offset.bytes() > 0); // we later use the offset to tell apart the fields
let b_ptr = ptr.offset(b_offset, self)?;
let a_val = alloc.read_scalar(self, a_ptr, a_size)?;
let b_val = alloc.read_scalar(self, b_ptr, b_size)?;
let a_val = alloc.read_scalar(alloc_range(Size::ZERO, a_size))?;
let b_val = alloc.read_scalar(alloc_range(b_offset, b_size))?;
Ok(Some(ImmTy { imm: Immediate::ScalarPair(a_val, b_val), layout: mplace.layout }))
}
_ => Ok(None),


@ -14,9 +14,9 @@ use rustc_target::abi::{Abi, Align, FieldsShape, TagEncoding};
use rustc_target::abi::{HasDataLayout, LayoutOf, Size, VariantIdx, Variants};
use super::{
mir_assign_valid_types, AllocId, AllocMap, Allocation, AllocationExtra, ConstAlloc, ImmTy,
Immediate, InterpCx, InterpResult, LocalValue, Machine, MemoryKind, OpTy, Operand, Pointer,
PointerArithmetic, Scalar, ScalarMaybeUninit,
alloc_range, mir_assign_valid_types, AllocId, AllocMap, AllocRef, AllocRefMut, Allocation,
ConstAlloc, ImmTy, Immediate, InterpCx, InterpResult, LocalValue, Machine, MemoryKind, OpTy,
Operand, Pointer, PointerArithmetic, Scalar, ScalarMaybeUninit,
};
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable)]
@ -293,7 +293,6 @@ where
M: Machine<'mir, 'tcx, PointerTag = Tag>,
// FIXME: Working around https://github.com/rust-lang/rust/issues/24159
M::MemoryMap: AllocMap<AllocId, (MemoryKind<M::MemoryKind>, Allocation<Tag, M::AllocExtra>)>,
M::AllocExtra: AllocationExtra<Tag>,
{
/// Take a value, which represents a (thin or wide) reference, and make it a place.
/// Alignment is just based on the type. This is the inverse of `MemPlace::to_ref()`.
@ -339,24 +338,26 @@ where
self.mplace_access_checked(place, None)
}
/// Check if the given place is good for memory access with the given
/// size, falling back to the layout's size if `None` (in the latter case,
/// this must be a statically sized type).
///
/// On success, returns `None` for zero-sized accesses (where nothing else is
/// left to do) and a `Pointer` to use for the actual access otherwise.
#[inline]
pub(super) fn check_mplace_access(
pub(super) fn get_alloc(
&self,
place: &MPlaceTy<'tcx, M::PointerTag>,
size: Option<Size>,
) -> InterpResult<'tcx, Option<Pointer<M::PointerTag>>> {
let size = size.unwrap_or_else(|| {
assert!(!place.layout.is_unsized());
assert!(!place.meta.has_meta());
place.layout.size
});
self.memory.check_ptr_access(place.ptr, size, place.align)
) -> InterpResult<'tcx, Option<AllocRef<'_, 'tcx, M::PointerTag, M::AllocExtra>>> {
assert!(!place.layout.is_unsized());
assert!(!place.meta.has_meta());
let size = place.layout.size;
self.memory.get(place.ptr, size, place.align)
}
#[inline]
pub(super) fn get_alloc_mut(
&mut self,
place: &MPlaceTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, Option<AllocRefMut<'_, 'tcx, M::PointerTag, M::AllocExtra>>> {
assert!(!place.layout.is_unsized());
assert!(!place.meta.has_meta());
let size = place.layout.size;
self.memory.get_mut(place.ptr, size, place.align)
}
/// Return the "access-checked" version of this `MPlace`, where for non-ZST
@ -373,10 +374,11 @@ where
.size_and_align_of_mplace(&place)?
.unwrap_or((place.layout.size, place.layout.align.abi));
assert!(place.mplace.align <= align, "dynamic alignment less strict than static one?");
// Check (stricter) dynamic alignment, unless forced otherwise.
place.mplace.align = force_align.unwrap_or(align);
let align = force_align.unwrap_or(align);
// Record new (stricter, unless forced) alignment requirement in place.
place.mplace.align = align;
// When dereferencing a pointer, it must be non-null, aligned, and live.
if let Some(ptr) = self.check_mplace_access(&place, Some(size))? {
if let Some(ptr) = self.memory.check_ptr_access(place.ptr, size, align)? {
place.mplace.ptr = ptr.into();
}
Ok(place)
@ -786,12 +788,12 @@ where
// wrong type.
// Invalid places are a thing: the return place of a diverging function
let ptr = match self.check_mplace_access(dest, None)? {
Some(ptr) => ptr,
let tcx = *self.tcx;
let mut alloc = match self.get_alloc_mut(dest)? {
Some(a) => a,
None => return Ok(()), // zero-sized access
};
let tcx = *self.tcx;
// FIXME: We should check that there are dest.layout.size many bytes available in
// memory. The code below is not sufficient; with enough padding it might not
// cover all the bytes!
@ -805,12 +807,7 @@ where
dest.layout
),
}
self.memory.get_raw_mut(ptr.alloc_id)?.write_scalar(
&tcx,
ptr,
scalar,
dest.layout.size,
)
alloc.write_scalar(alloc_range(Size::ZERO, dest.layout.size), scalar)
}
Immediate::ScalarPair(a_val, b_val) => {
// We checked `ptr_align` above, so all fields will have the alignment they need.
@ -824,16 +821,15 @@ where
dest.layout
),
};
let (a_size, b_size) = (a.size(self), b.size(self));
let b_offset = a_size.align_to(b.align(self).abi);
let b_ptr = ptr.offset(b_offset, self)?;
let (a_size, b_size) = (a.size(&tcx), b.size(&tcx));
let b_offset = a_size.align_to(b.align(&tcx).abi);
// It is tempting to verify `b_offset` against `layout.fields.offset(1)`,
// but that does not work: We could be a newtype around a pair, then the
// fields do not match the `ScalarPair` components.
self.memory.get_raw_mut(ptr.alloc_id)?.write_scalar(&tcx, ptr, a_val, a_size)?;
self.memory.get_raw_mut(b_ptr.alloc_id)?.write_scalar(&tcx, b_ptr, b_val, b_size)
alloc.write_scalar(alloc_range(Size::ZERO, a_size), a_val)?;
alloc.write_scalar(alloc_range(b_offset, b_size), b_val)
}
}
}
@ -902,19 +898,8 @@ where
});
assert_eq!(src.meta, dest.meta, "Can only copy between equally-sized instances");
let src = self
.check_mplace_access(&src, Some(size))
.expect("places should be checked on creation");
let dest = self
.check_mplace_access(&dest, Some(size))
.expect("places should be checked on creation");
let (src_ptr, dest_ptr) = match (src, dest) {
(Some(src_ptr), Some(dest_ptr)) => (src_ptr, dest_ptr),
(None, None) => return Ok(()), // zero-sized copy
_ => bug!("The pointers should both be Some or both None"),
};
self.memory.copy(src_ptr, dest_ptr, size, /*nonoverlapping*/ true)
self.memory
.copy(src.ptr, src.align, dest.ptr, dest.align, size, /*nonoverlapping*/ true)
}
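
The reworked `memory.copy` takes a pointer plus alignment for each endpoint and does the checking itself, rather than having the caller pre-validate both places and pass blessed pointers. A toy model of that one-call shape (all names invented, single flat buffer, overlap semantics not modeled):

// Bounds and alignment for both endpoints are validated inside the call.
fn copy_checked(
    mem: &mut [u8],
    src: usize, src_align: usize,
    dest: usize, dest_align: usize,
    size: usize,
) -> Result<(), String> {
    if src % src_align != 0 || dest % dest_align != 0 {
        return Err("misaligned access".into());
    }
    if src + size > mem.len() || dest + size > mem.len() {
        return Err("out of bounds".into());
    }
    mem.copy_within(src..src + size, dest); // memmove-like; the real call can demand nonoverlapping
    Ok(())
}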
/// Copies the data from an operand to a place. The layouts may disagree, but they must
@ -1047,11 +1032,8 @@ where
) -> MPlaceTy<'tcx, M::PointerTag> {
let ptr = self.memory.allocate_bytes(str.as_bytes(), kind);
let meta = Scalar::from_machine_usize(u64::try_from(str.len()).unwrap(), self);
let mplace = MemPlace {
ptr: ptr.into(),
align: Align::from_bytes(1).unwrap(),
meta: MemPlaceMeta::Meta(meta),
};
let mplace =
MemPlace { ptr: ptr.into(), align: Align::ONE, meta: MemPlaceMeta::Meta(meta) };
let layout = self.layout_of(self.tcx.mk_static_str()).unwrap();
MPlaceTy { mplace, layout }

View File

@ -222,28 +222,34 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
Repeat(ref operand, _) => {
let op = self.eval_operand(operand, None)?;
let src = self.eval_operand(operand, None)?;
assert!(!src.layout.is_unsized());
let dest = self.force_allocation(&dest)?;
let length = dest.len(self)?;
if let Some(first_ptr) = self.check_mplace_access(&dest, None)? {
// Write the first.
if length == 0 {
// Nothing to copy... but let's still make sure that `dest` as a place is valid.
self.get_alloc_mut(&dest)?;
} else {
// Write the src to the first element.
let first = self.mplace_field(&dest, 0)?;
self.copy_op(&op, &first.into())?;
self.copy_op(&src, &first.into())?;
if length > 1 {
let elem_size = first.layout.size;
// Copy the rest. This is performance-sensitive code
// for big static/const arrays!
let rest_ptr = first_ptr.offset(elem_size, self)?;
self.memory.copy_repeatedly(
first_ptr,
rest_ptr,
elem_size,
length - 1,
/*nonoverlapping:*/ true,
)?;
}
// This is performance-sensitive code for big static/const arrays! So we
// avoid writing each operand individually and instead just make many copies
// of the first element.
let elem_size = first.layout.size;
let first_ptr = first.ptr;
let rest_ptr = first_ptr.ptr_offset(elem_size, self)?;
self.memory.copy_repeatedly(
first_ptr,
first.align,
rest_ptr,
first.align,
elem_size,
length - 1,
/*nonoverlapping:*/ true,
)?;
}
}
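
The `Repeat` fast path above evaluates the operand once, writes it into element 0, and then bulk-copies that element `length - 1` times. The same idea in miniature, detached from interpreter types (`repeat_fill` is an invented name):

// Replicate the first element's byte image instead of re-evaluating
// the operand per element — the win for big static/const arrays.
fn repeat_fill(first_elem: &[u8], length: usize) -> Vec<u8> {
    let mut out = Vec::with_capacity(first_elem.len() * length);
    for _ in 0..length {
        out.extend_from_slice(first_elem); // every copy reuses element 0's bytes
    }
    out
}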

View File

@ -62,32 +62,32 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let drop = Instance::resolve_drop_in_place(tcx, ty);
let drop = self.memory.create_fn_alloc(FnVal::Instance(drop));
// Prepare the fn ptrs we will write into the vtable later.
let fn_ptrs = methods
.iter()
.enumerate() // remember the original position
.filter_map(|(i, method)| {
if let Some((def_id, substs)) = method { Some((i, def_id, substs)) } else { None }
})
.map(|(i, def_id, substs)| {
let instance =
ty::Instance::resolve_for_vtable(tcx, self.param_env, *def_id, substs)
.ok_or_else(|| err_inval!(TooGeneric))?;
Ok((i, self.memory.create_fn_alloc(FnVal::Instance(instance))))
})
.collect::<InterpResult<'tcx, Vec<(usize, Pointer<M::PointerTag>)>>>()?;
// No need to do any alignment checks on the memory accesses below, because we know the
// allocation is correctly aligned as we created it above. Also we're only offsetting by
// multiples of `ptr_align`, which means that it will stay aligned to `ptr_align`.
let vtable_alloc = self.memory.get_raw_mut(vtable.alloc_id)?;
vtable_alloc.write_ptr_sized(&tcx, vtable, drop.into())?;
let mut vtable_alloc =
self.memory.get_mut(vtable.into(), vtable_size, ptr_align)?.expect("not a ZST");
vtable_alloc.write_ptr_sized(ptr_size * 0, drop.into())?;
vtable_alloc.write_ptr_sized(ptr_size * 1, Scalar::from_uint(size, ptr_size).into())?;
vtable_alloc.write_ptr_sized(ptr_size * 2, Scalar::from_uint(align, ptr_size).into())?;
let size_ptr = vtable.offset(ptr_size, &tcx)?;
vtable_alloc.write_ptr_sized(&tcx, size_ptr, Scalar::from_uint(size, ptr_size).into())?;
let align_ptr = vtable.offset(ptr_size * 2, &tcx)?;
vtable_alloc.write_ptr_sized(&tcx, align_ptr, Scalar::from_uint(align, ptr_size).into())?;
for (i, method) in methods.iter().enumerate() {
if let Some((def_id, substs)) = *method {
// resolve for vtable: insert shims where needed
let instance =
ty::Instance::resolve_for_vtable(tcx, self.param_env, def_id, substs)
.ok_or_else(|| err_inval!(TooGeneric))?;
let fn_ptr = self.memory.create_fn_alloc(FnVal::Instance(instance));
// We cannot use `vtable_alloc` as we are creating fn ptrs in this loop.
let method_ptr = vtable.offset(ptr_size * (3 + i as u64), &tcx)?;
self.memory.get_raw_mut(vtable.alloc_id)?.write_ptr_sized(
&tcx,
method_ptr,
fn_ptr.into(),
)?;
}
for (i, fn_ptr) in fn_ptrs.into_iter() {
vtable_alloc.write_ptr_sized(ptr_size * (3 + i as u64), fn_ptr.into())?;
}
M::after_static_mem_initialized(self, vtable, vtable_size)?;
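
The pointer-sized offsets used above all fall out of one layout, which the `ptr_size * 0/1/2` and `ptr_size * (3 + i)` writes make explicit. A small sketch of that slot arithmetic (the function name is invented):

// Vtable slot layout assumed by the writes above (slot k at byte k * ptr_size):
//   slot 0:     drop-in-place fn ptr
//   slot 1:     size of the concrete type
//   slot 2:     align of the concrete type
//   slot 3 + i: fn ptr of the i-th trait method
fn vtable_slot_offset(ptr_size: u64, method_index: u64) -> u64 {
    (3 + method_index) * ptr_size // methods start after drop/size/align
}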
@ -111,13 +111,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let vtable_slot = vtable.ptr_offset(ptr_size * idx.checked_add(3).unwrap(), self)?;
let vtable_slot = self
.memory
.check_ptr_access(vtable_slot, ptr_size, self.tcx.data_layout.pointer_align.abi)?
.get(vtable_slot, ptr_size, self.tcx.data_layout.pointer_align.abi)?
.expect("cannot be a ZST");
let fn_ptr = self
.memory
.get_raw(vtable_slot.alloc_id)?
.read_ptr_sized(self, vtable_slot)?
.check_init()?;
let fn_ptr = vtable_slot.read_ptr_sized(Size::ZERO)?.check_init()?;
self.memory.get_fn(fn_ptr)
}
@ -129,14 +125,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// We don't care about the pointee type; we just want a pointer.
let vtable = self
.memory
.check_ptr_access(
vtable,
self.tcx.data_layout.pointer_size,
self.tcx.data_layout.pointer_align.abi,
)?
.get(vtable, self.tcx.data_layout.pointer_size, self.tcx.data_layout.pointer_align.abi)?
.expect("cannot be a ZST");
let drop_fn =
self.memory.get_raw(vtable.alloc_id)?.read_ptr_sized(self, vtable)?.check_init()?;
let drop_fn = vtable.read_ptr_sized(Size::ZERO)?.check_init()?;
// We *need* an instance here, no other kind of function value, to be able
// to determine the type.
let drop_instance = self.memory.get_fn(drop_fn)?.as_instance()?;
@ -161,13 +152,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// the size, and the align (which we read below).
let vtable = self
.memory
.check_ptr_access(vtable, 3 * pointer_size, self.tcx.data_layout.pointer_align.abi)?
.get(vtable, 3 * pointer_size, self.tcx.data_layout.pointer_align.abi)?
.expect("cannot be a ZST");
let alloc = self.memory.get_raw(vtable.alloc_id)?;
let size = alloc.read_ptr_sized(self, vtable.offset(pointer_size, self)?)?.check_init()?;
let size = vtable.read_ptr_sized(pointer_size)?.check_init()?;
let size = u64::try_from(self.force_bits(size, pointer_size)?).unwrap();
let align =
alloc.read_ptr_sized(self, vtable.offset(pointer_size * 2, self)?)?.check_init()?;
let align = vtable.read_ptr_sized(pointer_size * 2)?.check_init()?;
let align = u64::try_from(self.force_bits(align, pointer_size)?).unwrap();
if size >= self.tcx.data_layout.obj_size_bound() {

View File

@ -15,13 +15,13 @@ use rustc_middle::mir::interpret::InterpError;
use rustc_middle::ty;
use rustc_middle::ty::layout::TyAndLayout;
use rustc_span::symbol::{sym, Symbol};
use rustc_target::abi::{Abi, LayoutOf, Scalar, Size, VariantIdx, Variants};
use rustc_target::abi::{Abi, LayoutOf, Scalar as ScalarAbi, Size, VariantIdx, Variants};
use std::hash::Hash;
use super::{
CheckInAllocMsg, GlobalAlloc, InterpCx, InterpResult, MPlaceTy, Machine, MemPlaceMeta, OpTy,
ScalarMaybeUninit, ValueVisitor,
alloc_range, CheckInAllocMsg, GlobalAlloc, InterpCx, InterpResult, MPlaceTy, Machine,
MemPlaceMeta, OpTy, Scalar, ScalarMaybeUninit, ValueVisitor,
};
macro_rules! throw_validation_failure {
@ -329,7 +329,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
self.ecx.memory.check_ptr_access_align(
vtable,
3 * self.ecx.tcx.data_layout.pointer_size, // drop, size, align
Some(self.ecx.tcx.data_layout.pointer_align.abi),
self.ecx.tcx.data_layout.pointer_align.abi,
CheckInAllocMsg::InboundsTest, // will be replaced by a validity message anyway
),
self.path,
@ -411,11 +411,11 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
// alignment should take attributes into account).
.unwrap_or_else(|| (place.layout.size, place.layout.align.abi));
// Direct call to `check_ptr_access_align` checks alignment even on CTFE machines.
let ptr: Option<_> = try_validation!(
try_validation!(
self.ecx.memory.check_ptr_access_align(
place.ptr,
size,
Some(align),
align,
CheckInAllocMsg::InboundsTest, // will be replaced by a validity message anyway
),
self.path,
@ -441,9 +441,18 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
);
// Recursive checking
if let Some(ref mut ref_tracking) = self.ref_tracking {
if let Some(ptr) = ptr {
// Proceed recursively even for ZSTs; there is no reason to skip them!
// `!` is a ZST and we want to validate it.
// Normalize before handing `place` to tracking because that will
// check for duplicates.
let place = if size.bytes() > 0 {
self.ecx.force_mplace_ptr(place).expect("we already bounds-checked")
} else {
place
};
// Skip validation entirely for some external statics
if let Scalar::Ptr(ptr) = place.ptr {
// not a ZST
// Skip validation entirely for some external statics
let alloc_kind = self.ecx.tcx.get_global_alloc(ptr.alloc_id);
if let Some(GlobalAlloc::Static(did)) = alloc_kind {
assert!(!self.ecx.tcx.is_thread_local_static(did));
@ -473,15 +482,6 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
return Ok(());
}
}
// Proceed recursively even for ZSTs; there is no reason to skip them!
// `!` is a ZST and we want to validate it.
// Normalize before handing `place` to tracking because that will
// check for duplicates.
let place = if size.bytes() > 0 {
self.ecx.force_mplace_ptr(place).expect("we already bounds-checked")
} else {
place
};
let path = &self.path;
ref_tracking.track(place, || {
// We need to clone the path anyway, make sure it gets created
@ -638,7 +638,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
fn visit_scalar(
&mut self,
op: &OpTy<'tcx, M::PointerTag>,
scalar_layout: &Scalar,
scalar_layout: &ScalarAbi,
) -> InterpResult<'tcx> {
let value = self.read_scalar(op)?;
let valid_range = &scalar_layout.valid_range;
@ -851,16 +851,10 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
let mplace = op.assert_mem_place(self.ecx);
// This is the length of the array/slice.
let len = mplace.len(self.ecx)?;
// Zero length slices have nothing to be checked.
if len == 0 {
return Ok(());
}
// This is the element type size.
let layout = self.ecx.layout_of(tys)?;
// This is the size in bytes of the whole array. (This checks for overflow.)
let size = layout.size * len;
// Size is not 0, get a pointer.
let ptr = self.ecx.force_ptr(mplace.ptr)?;
// Optimization: we just check the entire range at once.
// NOTE: Keep this in sync with the handling of integer and float
@ -872,10 +866,16 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
// to reject those pointers, we just do not have the machinery to
// talk about parts of a pointer.
// We also accept uninit, for consistency with the slow path.
match self.ecx.memory.get_raw(ptr.alloc_id)?.check_bytes(
self.ecx,
ptr,
size,
let alloc = match self.ecx.memory.get(mplace.ptr, size, mplace.align)? {
Some(a) => a,
None => {
// Size 0, nothing more to check.
return Ok(());
}
};
match alloc.check_bytes(
alloc_range(Size::ZERO, size),
/*allow_uninit_and_ptr*/ self.ctfe_mode.is_none(),
) {
// In the happy case, we needn't check anything else.
@ -885,12 +885,12 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
// For some errors we might be able to provide extra information.
// (This custom logic does not fit the `try_validation!` macro.)
match err.kind() {
err_ub!(InvalidUninitBytes(Some(access))) => {
err_ub!(InvalidUninitBytes(Some((_alloc_id, access)))) => {
// Some byte was uninitialized, determine which
// element that byte belongs to so we can
// provide an index.
let i = usize::try_from(
access.uninit_ptr.offset.bytes() / layout.size.bytes(),
access.uninit_offset.bytes() / layout.size.bytes(),
)
.unwrap();
self.path.push(PathElem::ArrayElem(i));
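
The index recovery above is plain integer division: the first uninitialized byte sits inside the element whose range covers that offset. Sketched on bare `u64`s (`uninit_element_index` is an invented name):

// E.g. elem_size = 4 and uninit_offset = 10 point into element 2 (bytes 8..12).
fn uninit_element_index(uninit_offset: u64, elem_size: u64) -> u64 {
    uninit_offset / elem_size
}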

View File

@ -21,6 +21,8 @@ Rust MIR: a lowered representation of Rust.
#![feature(never_type)]
#![feature(map_try_insert)]
#![feature(min_specialization)]
#![feature(slice_ptr_len)]
#![feature(slice_ptr_get)]
#![feature(trusted_len)]
#![feature(try_blocks)]
#![feature(associated_type_defaults)]

View File

@ -441,6 +441,8 @@ pub struct Align {
}
impl Align {
pub const ONE: Align = Align { pow2: 0 };
#[inline]
pub fn from_bits(bits: u64) -> Result<Align, String> {
Align::from_bytes(Size::from_bits(bits).bytes())
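
`Align` stores only a power-of-two exponent, which is why `ONE` is simply `pow2: 0`. A sketch of the bytes-to-exponent mapping, assuming `pow2` is the base-2 exponent as the `Align::ONE` definition above suggests (`align_exponent` is an invented name):

fn align_exponent(bytes: u64) -> u32 {
    assert!(bytes.is_power_of_two());
    bytes.trailing_zeros() // 1 -> 0, 2 -> 1, 4 -> 2, 8 -> 3, ...
}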
@ -450,7 +452,7 @@ impl Align {
pub fn from_bytes(align: u64) -> Result<Align, String> {
// Treat an alignment of 0 bytes like 1-byte alignment.
if align == 0 {
return Ok(Align { pow2: 0 });
return Ok(Align::ONE);
}
#[cold]