Auto merge of #70404 - Dylan-DPC:rollup-iikcm6r, r=Dylan-DPC
Rollup of 5 pull requests

Successful merges:

 - #70226 (use checked casts and arithmetic in Miri engine)
 - #70319 (correctly normalize constants)
 - #70352 (Add long error explanation for E0710)
 - #70366 (Implement Fuse with Option)
 - #70379 (fix incorrect type name in doc comments)

Failed merges:

 - #70375 (avoid catching InterpError)

r? @ghost
This commit is contained in: commit a5fb9ae5b2
src/libcore/iter/adapters/fuse.rs (new file, +342)
@@ -0,0 +1,342 @@
+use crate::intrinsics;
+use crate::iter::{
+    DoubleEndedIterator, ExactSizeIterator, FusedIterator, Iterator, TrustedRandomAccess,
+};
+use crate::ops::Try;
+
+/// An iterator that yields `None` forever after the underlying iterator
+/// yields `None` once.
+///
+/// This `struct` is created by the [`fuse`] method on [`Iterator`]. See its
+/// documentation for more.
+///
+/// [`fuse`]: trait.Iterator.html#method.fuse
+/// [`Iterator`]: trait.Iterator.html
+#[derive(Clone, Debug)]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Fuse<I> {
+    // NOTE: for `I: FusedIterator`, this is always assumed `Some`!
+    iter: Option<I>,
+}
+impl<I> Fuse<I> {
+    pub(in crate::iter) fn new(iter: I) -> Fuse<I> {
+        Fuse { iter: Some(iter) }
+    }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<I> FusedIterator for Fuse<I> where I: Iterator {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I> Iterator for Fuse<I>
+where
+    I: Iterator,
+{
+    type Item = <I as Iterator>::Item;
+
+    #[inline]
+    default fn next(&mut self) -> Option<<I as Iterator>::Item> {
+        let next = self.iter.as_mut()?.next();
+        if next.is_none() {
+            self.iter = None;
+        }
+        next
+    }
+
+    #[inline]
+    default fn nth(&mut self, n: usize) -> Option<I::Item> {
+        let nth = self.iter.as_mut()?.nth(n);
+        if nth.is_none() {
+            self.iter = None;
+        }
+        nth
+    }
+
+    #[inline]
+    default fn last(self) -> Option<I::Item> {
+        self.iter?.last()
+    }
+
+    #[inline]
+    default fn count(self) -> usize {
+        self.iter.map_or(0, I::count)
+    }
+
+    #[inline]
+    default fn size_hint(&self) -> (usize, Option<usize>) {
+        self.iter.as_ref().map_or((0, Some(0)), I::size_hint)
+    }
+
+    #[inline]
+    default fn try_fold<Acc, Fold, R>(&mut self, mut acc: Acc, fold: Fold) -> R
+    where
+        Self: Sized,
+        Fold: FnMut(Acc, Self::Item) -> R,
+        R: Try<Ok = Acc>,
+    {
+        if let Some(ref mut iter) = self.iter {
+            acc = iter.try_fold(acc, fold)?;
+            self.iter = None;
+        }
+        Try::from_ok(acc)
+    }
+
+    #[inline]
+    default fn fold<Acc, Fold>(self, mut acc: Acc, fold: Fold) -> Acc
+    where
+        Fold: FnMut(Acc, Self::Item) -> Acc,
+    {
+        if let Some(iter) = self.iter {
+            acc = iter.fold(acc, fold);
+        }
+        acc
+    }
+
+    #[inline]
+    default fn find<P>(&mut self, predicate: P) -> Option<Self::Item>
+    where
+        P: FnMut(&Self::Item) -> bool,
+    {
+        let found = self.iter.as_mut()?.find(predicate);
+        if found.is_none() {
+            self.iter = None;
+        }
+        found
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I> DoubleEndedIterator for Fuse<I>
+where
+    I: DoubleEndedIterator,
+{
+    #[inline]
+    default fn next_back(&mut self) -> Option<<I as Iterator>::Item> {
+        let next = self.iter.as_mut()?.next_back();
+        if next.is_none() {
+            self.iter = None;
+        }
+        next
+    }
+
+    #[inline]
+    default fn nth_back(&mut self, n: usize) -> Option<<I as Iterator>::Item> {
+        let nth = self.iter.as_mut()?.nth_back(n);
+        if nth.is_none() {
+            self.iter = None;
+        }
+        nth
+    }
+
+    #[inline]
+    default fn try_rfold<Acc, Fold, R>(&mut self, mut acc: Acc, fold: Fold) -> R
+    where
+        Self: Sized,
+        Fold: FnMut(Acc, Self::Item) -> R,
+        R: Try<Ok = Acc>,
+    {
+        if let Some(ref mut iter) = self.iter {
+            acc = iter.try_rfold(acc, fold)?;
+            self.iter = None;
+        }
+        Try::from_ok(acc)
+    }
+
+    #[inline]
+    default fn rfold<Acc, Fold>(self, mut acc: Acc, fold: Fold) -> Acc
+    where
+        Fold: FnMut(Acc, Self::Item) -> Acc,
+    {
+        if let Some(iter) = self.iter {
+            acc = iter.rfold(acc, fold);
+        }
+        acc
+    }
+
+    #[inline]
+    default fn rfind<P>(&mut self, predicate: P) -> Option<Self::Item>
+    where
+        P: FnMut(&Self::Item) -> bool,
+    {
+        let found = self.iter.as_mut()?.rfind(predicate);
+        if found.is_none() {
+            self.iter = None;
+        }
+        found
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I> ExactSizeIterator for Fuse<I>
+where
+    I: ExactSizeIterator,
+{
+    default fn len(&self) -> usize {
+        self.iter.as_ref().map_or(0, I::len)
+    }
+
+    default fn is_empty(&self) -> bool {
+        self.iter.as_ref().map_or(true, I::is_empty)
+    }
+}
+
+// NOTE: for `I: FusedIterator`, we assume that the iterator is always `Some`
+impl<I: FusedIterator> Fuse<I> {
+    #[inline(always)]
+    fn as_inner(&self) -> &I {
+        match self.iter {
+            Some(ref iter) => iter,
+            // SAFETY: the specialized iterator never sets `None`
+            None => unsafe { intrinsics::unreachable() },
+        }
+    }
+
+    #[inline(always)]
+    fn as_inner_mut(&mut self) -> &mut I {
+        match self.iter {
+            Some(ref mut iter) => iter,
+            // SAFETY: the specialized iterator never sets `None`
+            None => unsafe { intrinsics::unreachable() },
+        }
+    }
+
+    #[inline(always)]
+    fn into_inner(self) -> I {
+        match self.iter {
+            Some(iter) => iter,
+            // SAFETY: the specialized iterator never sets `None`
+            None => unsafe { intrinsics::unreachable() },
+        }
+    }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<I> Iterator for Fuse<I>
+where
+    I: FusedIterator,
+{
+    #[inline]
+    fn next(&mut self) -> Option<<I as Iterator>::Item> {
+        self.as_inner_mut().next()
+    }
+
+    #[inline]
+    fn nth(&mut self, n: usize) -> Option<I::Item> {
+        self.as_inner_mut().nth(n)
+    }
+
+    #[inline]
+    fn last(self) -> Option<I::Item> {
+        self.into_inner().last()
+    }
+
+    #[inline]
+    fn count(self) -> usize {
+        self.into_inner().count()
+    }
+
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.as_inner().size_hint()
+    }
+
+    #[inline]
+    fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
+    where
+        Self: Sized,
+        Fold: FnMut(Acc, Self::Item) -> R,
+        R: Try<Ok = Acc>,
+    {
+        self.as_inner_mut().try_fold(init, fold)
+    }
+
+    #[inline]
+    fn fold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
+    where
+        Fold: FnMut(Acc, Self::Item) -> Acc,
+    {
+        self.into_inner().fold(init, fold)
+    }
+
+    #[inline]
+    fn find<P>(&mut self, predicate: P) -> Option<Self::Item>
+    where
+        P: FnMut(&Self::Item) -> bool,
+    {
+        self.as_inner_mut().find(predicate)
+    }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<I> DoubleEndedIterator for Fuse<I>
+where
+    I: DoubleEndedIterator + FusedIterator,
+{
+    #[inline]
+    fn next_back(&mut self) -> Option<<I as Iterator>::Item> {
+        self.as_inner_mut().next_back()
+    }
+
+    #[inline]
+    fn nth_back(&mut self, n: usize) -> Option<<I as Iterator>::Item> {
+        self.as_inner_mut().nth_back(n)
+    }
+
+    #[inline]
+    fn try_rfold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
+    where
+        Self: Sized,
+        Fold: FnMut(Acc, Self::Item) -> R,
+        R: Try<Ok = Acc>,
+    {
+        self.as_inner_mut().try_rfold(init, fold)
+    }
+
+    #[inline]
+    fn rfold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
+    where
+        Fold: FnMut(Acc, Self::Item) -> Acc,
+    {
+        self.into_inner().rfold(init, fold)
+    }
+
+    #[inline]
+    fn rfind<P>(&mut self, predicate: P) -> Option<Self::Item>
+    where
+        P: FnMut(&Self::Item) -> bool,
+    {
+        self.as_inner_mut().rfind(predicate)
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I> ExactSizeIterator for Fuse<I>
+where
+    I: ExactSizeIterator + FusedIterator,
+{
+    fn len(&self) -> usize {
+        self.as_inner().len()
+    }
+
+    fn is_empty(&self) -> bool {
+        self.as_inner().is_empty()
+    }
+}
+
+unsafe impl<I> TrustedRandomAccess for Fuse<I>
+where
+    I: TrustedRandomAccess,
+{
+    unsafe fn get_unchecked(&mut self, i: usize) -> I::Item {
+        match self.iter {
+            Some(ref mut iter) => iter.get_unchecked(i),
+            // SAFETY: the caller asserts there is an item at `i`, so we're not exhausted.
+            None => intrinsics::unreachable(),
+        }
+    }
+
+    fn may_have_side_effect() -> bool {
+        I::may_have_side_effect()
+    }
+}
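The `Option`-based rewrite above enforces the `Fuse` contract: after the first `None`, every later call returns `None`, even if the underlying iterator would "resume". A minimal sketch of that contract (the `Flicker` iterator is illustrative, not part of the PR):

```rust
// A deliberately ill-behaved iterator: it recovers after returning None.
struct Flicker(u32);

impl Iterator for Flicker {
    type Item = u32;
    fn next(&mut self) -> Option<u32> {
        self.0 += 1;
        // Yields Some(1), Some(2), None, Some(4), Some(5), None, ...
        if self.0 % 3 == 0 { None } else { Some(self.0) }
    }
}

fn main() {
    let mut raw = Flicker(0);
    assert_eq!(raw.next(), Some(1));
    assert_eq!(raw.next(), Some(2));
    assert_eq!(raw.next(), None);
    assert_eq!(raw.next(), Some(4)); // resumes after None

    let mut fused = Flicker(0).fuse();
    assert_eq!(fused.next(), Some(1));
    assert_eq!(fused.next(), Some(2));
    assert_eq!(fused.next(), None);
    assert_eq!(fused.next(), None); // fused: None forever after
}
```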
@@ -9,11 +9,13 @@ use super::{DoubleEndedIterator, ExactSizeIterator, FusedIterator, Iterator, Tru

 mod chain;
 mod flatten;
+mod fuse;
 mod zip;

 pub use self::chain::Chain;
 #[stable(feature = "rust1", since = "1.0.0")]
 pub use self::flatten::{FlatMap, Flatten};
+pub use self::fuse::Fuse;
 pub(crate) use self::zip::TrustedRandomAccess;
 pub use self::zip::Zip;

@@ -2238,261 +2240,6 @@ where
     }
 }

-/// An iterator that yields `None` forever after the underlying iterator
-/// yields `None` once.
-///
-/// This `struct` is created by the [`fuse`] method on [`Iterator`]. See its
-/// documentation for more.
-///
-/// [`fuse`]: trait.Iterator.html#method.fuse
-/// [`Iterator`]: trait.Iterator.html
-#[derive(Clone, Debug)]
-#[must_use = "iterators are lazy and do nothing unless consumed"]
-#[stable(feature = "rust1", since = "1.0.0")]
-pub struct Fuse<I> {
-    iter: I,
-    done: bool,
-}
-impl<I> Fuse<I> {
-    pub(super) fn new(iter: I) -> Fuse<I> {
-        Fuse { iter, done: false }
-    }
-}
-
-#[stable(feature = "fused", since = "1.26.0")]
-impl<I> FusedIterator for Fuse<I> where I: Iterator {}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<I> Iterator for Fuse<I>
-where
-    I: Iterator,
-{
-    type Item = <I as Iterator>::Item;
-
-    #[inline]
-    default fn next(&mut self) -> Option<<I as Iterator>::Item> {
-        if self.done {
-            None
-        } else {
-            let next = self.iter.next();
-            self.done = next.is_none();
-            next
-        }
-    }
-
-    #[inline]
-    default fn nth(&mut self, n: usize) -> Option<I::Item> {
-        if self.done {
-            None
-        } else {
-            let nth = self.iter.nth(n);
-            self.done = nth.is_none();
-            nth
-        }
-    }
-
-    #[inline]
-    default fn last(self) -> Option<I::Item> {
-        if self.done { None } else { self.iter.last() }
-    }
-
-    #[inline]
-    default fn count(self) -> usize {
-        if self.done { 0 } else { self.iter.count() }
-    }
-
-    #[inline]
-    default fn size_hint(&self) -> (usize, Option<usize>) {
-        if self.done { (0, Some(0)) } else { self.iter.size_hint() }
-    }
-
-    #[inline]
-    default fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
-    where
-        Self: Sized,
-        Fold: FnMut(Acc, Self::Item) -> R,
-        R: Try<Ok = Acc>,
-    {
-        if self.done {
-            Try::from_ok(init)
-        } else {
-            let acc = self.iter.try_fold(init, fold)?;
-            self.done = true;
-            Try::from_ok(acc)
-        }
-    }
-
-    #[inline]
-    default fn fold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
-    where
-        Fold: FnMut(Acc, Self::Item) -> Acc,
-    {
-        if self.done { init } else { self.iter.fold(init, fold) }
-    }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<I> DoubleEndedIterator for Fuse<I>
-where
-    I: DoubleEndedIterator,
-{
-    #[inline]
-    default fn next_back(&mut self) -> Option<<I as Iterator>::Item> {
-        if self.done {
-            None
-        } else {
-            let next = self.iter.next_back();
-            self.done = next.is_none();
-            next
-        }
-    }
-
-    #[inline]
-    default fn nth_back(&mut self, n: usize) -> Option<<I as Iterator>::Item> {
-        if self.done {
-            None
-        } else {
-            let nth = self.iter.nth_back(n);
-            self.done = nth.is_none();
-            nth
-        }
-    }
-
-    #[inline]
-    default fn try_rfold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
-    where
-        Self: Sized,
-        Fold: FnMut(Acc, Self::Item) -> R,
-        R: Try<Ok = Acc>,
-    {
-        if self.done {
-            Try::from_ok(init)
-        } else {
-            let acc = self.iter.try_rfold(init, fold)?;
-            self.done = true;
-            Try::from_ok(acc)
-        }
-    }
-
-    #[inline]
-    default fn rfold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
-    where
-        Fold: FnMut(Acc, Self::Item) -> Acc,
-    {
-        if self.done { init } else { self.iter.rfold(init, fold) }
-    }
-}
-
-unsafe impl<I> TrustedRandomAccess for Fuse<I>
-where
-    I: TrustedRandomAccess,
-{
-    unsafe fn get_unchecked(&mut self, i: usize) -> I::Item {
-        self.iter.get_unchecked(i)
-    }
-
-    fn may_have_side_effect() -> bool {
-        I::may_have_side_effect()
-    }
-}
-
-#[stable(feature = "fused", since = "1.26.0")]
-impl<I> Iterator for Fuse<I>
-where
-    I: FusedIterator,
-{
-    #[inline]
-    fn next(&mut self) -> Option<<I as Iterator>::Item> {
-        self.iter.next()
-    }
-
-    #[inline]
-    fn nth(&mut self, n: usize) -> Option<I::Item> {
-        self.iter.nth(n)
-    }
-
-    #[inline]
-    fn last(self) -> Option<I::Item> {
-        self.iter.last()
-    }
-
-    #[inline]
-    fn count(self) -> usize {
-        self.iter.count()
-    }
-
-    #[inline]
-    fn size_hint(&self) -> (usize, Option<usize>) {
-        self.iter.size_hint()
-    }
-
-    #[inline]
-    fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
-    where
-        Self: Sized,
-        Fold: FnMut(Acc, Self::Item) -> R,
-        R: Try<Ok = Acc>,
-    {
-        self.iter.try_fold(init, fold)
-    }
-
-    #[inline]
-    fn fold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
-    where
-        Fold: FnMut(Acc, Self::Item) -> Acc,
-    {
-        self.iter.fold(init, fold)
-    }
-}
-
-#[stable(feature = "fused", since = "1.26.0")]
-impl<I> DoubleEndedIterator for Fuse<I>
-where
-    I: DoubleEndedIterator + FusedIterator,
-{
-    #[inline]
-    fn next_back(&mut self) -> Option<<I as Iterator>::Item> {
-        self.iter.next_back()
-    }
-
-    #[inline]
-    fn nth_back(&mut self, n: usize) -> Option<<I as Iterator>::Item> {
-        self.iter.nth_back(n)
-    }
-
-    #[inline]
-    fn try_rfold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
-    where
-        Self: Sized,
-        Fold: FnMut(Acc, Self::Item) -> R,
-        R: Try<Ok = Acc>,
-    {
-        self.iter.try_rfold(init, fold)
-    }
-
-    #[inline]
-    fn rfold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
-    where
-        Fold: FnMut(Acc, Self::Item) -> Acc,
-    {
-        self.iter.rfold(init, fold)
-    }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<I> ExactSizeIterator for Fuse<I>
-where
-    I: ExactSizeIterator,
-{
-    fn len(&self) -> usize {
-        self.iter.len()
-    }
-
-    fn is_empty(&self) -> bool {
-        self.iter.is_empty()
-    }
-}
-
 /// An iterator that calls a function with a reference to each element before
 /// yielding it.
 ///
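The removed representation paired the iterator with a `done: bool` flag; the new one wraps it in `Option<I>`, which lets the accessors use `?` and, for iterators with a niche, need not cost extra space. A quick check of that size claim (assumption: niche optimization applies to iterators containing a non-null pointer, such as `slice::Iter`):

```rust
// Option<I> can reuse I's niche, while { iter: I, done: bool } always adds
// a flag (plus padding). This only demonstrates the layout, not the PR's code.
use std::mem::size_of;

fn main() {
    type I<'a> = std::slice::Iter<'a, u8>;
    println!("I = {} bytes, Option<I> = {} bytes", size_of::<I>(), size_of::<Option<I>>());
}
```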
@@ -57,7 +57,7 @@ use crate::traits::query::{
     CanonicalTypeOpAscribeUserTypeGoal, CanonicalTypeOpEqGoal, CanonicalTypeOpNormalizeGoal,
     CanonicalTypeOpProvePredicateGoal, CanonicalTypeOpSubtypeGoal,
 };
-use crate::ty::subst::SubstsRef;
+use crate::ty::subst::{GenericArg, SubstsRef};
 use crate::ty::{self, ParamEnvAnd, Ty, TyCtxt};

 use rustc_data_structures::fingerprint::Fingerprint;
@@ -1,18 +1,20 @@
 //! The virtual memory representation of the MIR interpreter.

+use std::borrow::Cow;
+use std::convert::TryFrom;
+use std::iter;
+use std::ops::{Deref, DerefMut, Range};
+
+use rustc_ast::ast::Mutability;
+use rustc_data_structures::sorted_map::SortedMap;
+use rustc_target::abi::HasDataLayout;
+
 use super::{
     read_target_uint, write_target_uint, AllocId, InterpResult, Pointer, Scalar, ScalarMaybeUndef,
 };
 use crate::ty::layout::{Align, Size};

-use rustc_ast::ast::Mutability;
-use rustc_data_structures::sorted_map::SortedMap;
-use rustc_target::abi::HasDataLayout;
-use std::borrow::Cow;
-use std::iter;
-use std::ops::{Deref, DerefMut, Range};
-
 // NOTE: When adding new fields, make sure to adjust the `Snapshot` impl in
 // `src/librustc_mir/interpret/snapshot.rs`.
 #[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)]
@@ -90,7 +92,7 @@ impl<Tag> Allocation<Tag> {
     /// Creates a read-only allocation initialized by the given bytes
     pub fn from_bytes<'a>(slice: impl Into<Cow<'a, [u8]>>, align: Align) -> Self {
         let bytes = slice.into().into_owned();
-        let size = Size::from_bytes(bytes.len() as u64);
+        let size = Size::from_bytes(bytes.len());
         Self {
             bytes,
             relocations: Relocations::new(),
@@ -107,9 +109,8 @@ impl<Tag> Allocation<Tag> {
     }

     pub fn undef(size: Size, align: Align) -> Self {
-        assert_eq!(size.bytes() as usize as u64, size.bytes());
         Allocation {
-            bytes: vec![0; size.bytes() as usize],
+            bytes: vec![0; size.bytes_usize()],
             relocations: Relocations::new(),
             undef_mask: UndefMask::new(size, false),
             size,
@@ -152,7 +153,7 @@ impl Allocation<(), ()> {
 /// Raw accessors. Provide access to otherwise private bytes.
 impl<Tag, Extra> Allocation<Tag, Extra> {
     pub fn len(&self) -> usize {
-        self.size.bytes() as usize
+        self.size.bytes_usize()
     }

     /// Looks at a slice which may describe undefined bytes or describe a relocation. This differs
@@ -183,12 +184,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
     #[inline]
     fn check_bounds(&self, offset: Size, size: Size) -> Range<usize> {
         let end = offset + size; // This does overflow checking.
-        assert_eq!(
-            end.bytes() as usize as u64,
-            end.bytes(),
-            "cannot handle this access on this host architecture"
-        );
-        let end = end.bytes() as usize;
+        let end = usize::try_from(end.bytes()).expect("access too big for this host architecture");
         assert!(
             end <= self.len(),
             "Out-of-bounds access at offset {}, size {} in allocation of size {}",
@@ -196,7 +192,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
             size.bytes(),
             self.len()
         );
-        (offset.bytes() as usize)..end
+        offset.bytes_usize()..end
     }

     /// The last argument controls whether we error out when there are undefined
@@ -294,11 +290,10 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
         cx: &impl HasDataLayout,
         ptr: Pointer<Tag>,
     ) -> InterpResult<'tcx, &[u8]> {
-        assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes());
-        let offset = ptr.offset.bytes() as usize;
+        let offset = ptr.offset.bytes_usize();
         Ok(match self.bytes[offset..].iter().position(|&c| c == 0) {
             Some(size) => {
-                let size_with_null = Size::from_bytes((size + 1) as u64);
+                let size_with_null = Size::from_bytes(size) + Size::from_bytes(1);
                 // Go through `get_bytes` for checks and AllocationExtra hooks.
                 // We read the null, so we include it in the request, but we want it removed
                 // from the result, so we do subslicing.
@@ -343,7 +338,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
         let (lower, upper) = src.size_hint();
         let len = upper.expect("can only write bounded iterators");
         assert_eq!(lower, len, "can only write iterators with a precise length");
-        let bytes = self.get_bytes_mut(cx, ptr, Size::from_bytes(len as u64))?;
+        let bytes = self.get_bytes_mut(cx, ptr, Size::from_bytes(len))?;
         // `zip` would stop when the first iterator ends; we want to definitely
         // cover all of `bytes`.
         for dest in bytes {
@@ -386,7 +381,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
         } else {
             match self.relocations.get(&ptr.offset) {
                 Some(&(tag, alloc_id)) => {
-                    let ptr = Pointer::new_with_tag(alloc_id, Size::from_bytes(bits as u64), tag);
+                    let ptr = Pointer::new_with_tag(alloc_id, Size::from_bytes(bits), tag);
                     return Ok(ScalarMaybeUndef::Scalar(ptr.into()));
                 }
                 None => {}
@@ -433,7 +428,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
         };

         let bytes = match val.to_bits_or_ptr(type_size, cx) {
-            Err(val) => val.offset.bytes() as u128,
+            Err(val) => u128::from(val.offset.bytes()),
             Ok(data) => data,
         };

@@ -524,7 +519,7 @@ impl<'tcx, Tag: Copy, Extra> Allocation<Tag, Extra> {
             )
         };
         let start = ptr.offset;
-        let end = start + size;
+        let end = start + size; // `Size` addition

         // Mark parts of the outermost relocations as undefined if they partially fall outside the
         // given range.
@@ -563,7 +558,7 @@ impl<'tcx, Tag, Extra> Allocation<Tag, Extra> {
     #[inline]
     fn check_defined(&self, ptr: Pointer<Tag>, size: Size) -> InterpResult<'tcx> {
         self.undef_mask
-            .is_range_defined(ptr.offset, ptr.offset + size)
+            .is_range_defined(ptr.offset, ptr.offset + size) // `Size` addition
            .or_else(|idx| throw_ub!(InvalidUndefBytes(Some(Pointer::new(ptr.alloc_id, idx)))))
     }

@@ -643,7 +638,7 @@ impl<Tag, Extra> Allocation<Tag, Extra> {
         if defined.ranges.len() <= 1 {
             self.undef_mask.set_range_inbounds(
                 dest.offset,
-                dest.offset + size * repeat,
+                dest.offset + size * repeat, // `Size` operations
                 defined.initial,
             );
             return;
@@ -721,10 +716,10 @@ impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
         for i in 0..length {
             new_relocations.extend(relocations.iter().map(|&(offset, reloc)| {
                 // compute offset for current repetition
-                let dest_offset = dest.offset + (i * size);
+                let dest_offset = dest.offset + size * i; // `Size` operations
                 (
                     // shift offsets from source allocation to destination allocation
-                    offset + dest_offset - src.offset,
+                    (offset + dest_offset) - src.offset, // `Size` operations
                     reloc,
                 )
             }));
@@ -861,18 +856,18 @@ impl UndefMask {
         if amount.bytes() == 0 {
             return;
         }
-        let unused_trailing_bits = self.blocks.len() as u64 * Self::BLOCK_SIZE - self.len.bytes();
+        let unused_trailing_bits =
+            u64::try_from(self.blocks.len()).unwrap() * Self::BLOCK_SIZE - self.len.bytes();
         if amount.bytes() > unused_trailing_bits {
             let additional_blocks = amount.bytes() / Self::BLOCK_SIZE + 1;
-            assert_eq!(additional_blocks as usize as u64, additional_blocks);
             self.blocks.extend(
                 // FIXME(oli-obk): optimize this by repeating `new_state as Block`.
-                iter::repeat(0).take(additional_blocks as usize),
+                iter::repeat(0).take(usize::try_from(additional_blocks).unwrap()),
             );
         }
         let start = self.len;
         self.len += amount;
-        self.set_range_inbounds(start, start + amount, new_state);
+        self.set_range_inbounds(start, start + amount, new_state); // `Size` operation
     }
 }

@@ -881,7 +876,5 @@ fn bit_index(bits: Size) -> (usize, usize) {
     let bits = bits.bytes();
     let a = bits / UndefMask::BLOCK_SIZE;
     let b = bits % UndefMask::BLOCK_SIZE;
-    assert_eq!(a as usize as u64, a);
-    assert_eq!(b as usize as u64, b);
-    (a as usize, b as usize)
+    (usize::try_from(a).unwrap(), usize::try_from(b).unwrap())
 }
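The allocation changes all follow one pattern from #70226: replace `as usize`/`as u64` casts, which truncate silently, with checked conversions that fail loudly when the value does not fit the host's pointer width. A minimal sketch of the pattern (the function name here is illustrative, not from the PR):

```rust
use std::convert::TryFrom;

// `as` would wrap silently on a 32-bit host; `try_from` surfaces the bug.
fn alloc_len_to_usize(len: u64) -> usize {
    usize::try_from(len).expect("access too big for this host architecture")
}

fn main() {
    assert_eq!(alloc_len_to_usize(4096), 4096);
}
```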
@@ -95,6 +95,27 @@ mod pointer;
 mod queries;
 mod value;

+use std::convert::TryFrom;
+use std::fmt;
+use std::io;
+use std::num::NonZeroU32;
+use std::sync::atomic::{AtomicU32, Ordering};
+
+use byteorder::{BigEndian, LittleEndian, ReadBytesExt, WriteBytesExt};
+use rustc_ast::ast::LitKind;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::sync::{HashMapExt, Lock};
+use rustc_data_structures::tiny_list::TinyList;
+use rustc_hir::def_id::DefId;
+use rustc_macros::HashStable;
+use rustc_serialize::{Decodable, Encodable, Encoder};
+
+use crate::mir;
+use crate::ty::codec::TyDecoder;
+use crate::ty::layout::{self, Size};
+use crate::ty::subst::GenericArgKind;
+use crate::ty::{self, Instance, Ty, TyCtxt};
+
 pub use self::error::{
     struct_error, ConstEvalErr, ConstEvalRawResult, ConstEvalResult, ErrorHandled, FrameInfo,
     InterpError, InterpErrorInfo, InterpResult, InvalidProgramInfo, MachineStopType,
@@ -107,24 +128,6 @@ pub use self::allocation::{Allocation, AllocationExtra, Relocations, UndefMask};

 pub use self::pointer::{CheckInAllocMsg, Pointer, PointerArithmetic};

-use crate::mir;
-use crate::ty::codec::TyDecoder;
-use crate::ty::layout::{self, Size};
-use crate::ty::subst::GenericArgKind;
-use crate::ty::{self, Instance, Ty, TyCtxt};
-use byteorder::{BigEndian, LittleEndian, ReadBytesExt, WriteBytesExt};
-use rustc_ast::ast::LitKind;
-use rustc_data_structures::fx::FxHashMap;
-use rustc_data_structures::sync::{HashMapExt, Lock};
-use rustc_data_structures::tiny_list::TinyList;
-use rustc_hir::def_id::DefId;
-use rustc_macros::HashStable;
-use rustc_serialize::{Decodable, Encodable, Encoder};
-use std::fmt;
-use std::io;
-use std::num::NonZeroU32;
-use std::sync::atomic::{AtomicU32, Ordering};
-
 /// Uniquely identifies one of the following:
 /// - A constant
 /// - A static
@@ -264,8 +267,8 @@ impl<'s> AllocDecodingSession<'s> {
         D: TyDecoder<'tcx>,
     {
         // Read the index of the allocation.
-        let idx = decoder.read_u32()? as usize;
-        let pos = self.state.data_offsets[idx] as usize;
+        let idx = usize::try_from(decoder.read_u32()?).unwrap();
+        let pos = usize::try_from(self.state.data_offsets[idx]).unwrap();

         // Decode the `AllocDiscriminant` now so that we know if we have to reserve an
         // `AllocId`.
@@ -62,9 +62,9 @@ pub trait PointerArithmetic: layout::HasDataLayout {
     /// This should be called by all the other methods before returning!
     #[inline]
     fn truncate_to_ptr(&self, (val, over): (u64, bool)) -> (u64, bool) {
-        let val = val as u128;
+        let val = u128::from(val);
         let max_ptr_plus_1 = 1u128 << self.pointer_size().bits();
-        ((val % max_ptr_plus_1) as u64, over || val >= max_ptr_plus_1)
+        (u64::try_from(val % max_ptr_plus_1).unwrap(), over || val >= max_ptr_plus_1)
     }

     #[inline]
@@ -73,10 +73,8 @@ pub trait PointerArithmetic: layout::HasDataLayout {
         self.truncate_to_ptr(res)
     }

-    // Overflow checking only works properly on the range from -u64 to +u64.
     #[inline]
-    fn overflowing_signed_offset(&self, val: u64, i: i128) -> (u64, bool) {
-        // FIXME: is it possible to over/underflow here?
+    fn overflowing_signed_offset(&self, val: u64, i: i64) -> (u64, bool) {
         if i < 0 {
             // Trickery to ensure that `i64::MIN` works fine: compute `n = -i`.
             // This formula only works for true negative values; it overflows for zero!
@@ -84,6 +82,7 @@ pub trait PointerArithmetic: layout::HasDataLayout {
             let res = val.overflowing_sub(n);
             self.truncate_to_ptr(res)
         } else {
+            // `i >= 0`, so the cast is safe.
             self.overflowing_offset(val, i as u64)
         }
     }
@@ -96,7 +95,7 @@ pub trait PointerArithmetic: layout::HasDataLayout {

     #[inline]
     fn signed_offset<'tcx>(&self, val: u64, i: i64) -> InterpResult<'tcx, u64> {
-        let (res, over) = self.overflowing_signed_offset(val, i128::from(i));
+        let (res, over) = self.overflowing_signed_offset(val, i);
         if over { throw_ub!(PointerArithOverflow) } else { Ok(res) }
     }
 }
@@ -189,14 +188,14 @@ impl<'tcx, Tag> Pointer<Tag> {
     }

     #[inline]
-    pub fn overflowing_signed_offset(self, i: i128, cx: &impl HasDataLayout) -> (Self, bool) {
+    pub fn overflowing_signed_offset(self, i: i64, cx: &impl HasDataLayout) -> (Self, bool) {
         let (res, over) = cx.data_layout().overflowing_signed_offset(self.offset.bytes(), i);
         (Pointer::new_with_tag(self.alloc_id, Size::from_bytes(res), self.tag), over)
     }

     #[inline(always)]
     pub fn wrapping_signed_offset(self, i: i64, cx: &impl HasDataLayout) -> Self {
-        self.overflowing_signed_offset(i128::from(i), cx).0
+        self.overflowing_signed_offset(i, cx).0
     }

     #[inline(always)]
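The signed-offset change narrows `i` from `i128` to `i64` and relies on the two's-complement identity the comment calls "trickery": for strictly negative `i`, the magnitude `-i` can be computed in `u64` without overflowing, even for `i64::MIN`. A standalone check of that identity (function name illustrative):

```rust
// For i < 0, compute the magnitude -i in u64 via two's complement.
// This is only valid for strictly negative i; for i == 0 it would overflow.
fn magnitude_of_negative(i: i64) -> u64 {
    assert!(i < 0);
    u64::MAX - (i as u64) + 1
}

fn main() {
    assert_eq!(magnitude_of_negative(-1), 1);
    assert_eq!(magnitude_of_negative(-7), 7);
    // i64::MIN has no i64 negation, but its u64 magnitude is 2^63.
    assert_eq!(magnitude_of_negative(i64::MIN), 1u64 << 63);
}
```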
@@ -1,9 +1,12 @@
+use std::convert::TryFrom;
+use std::fmt;
+
 use rustc_apfloat::{
     ieee::{Double, Single},
     Float,
 };
 use rustc_macros::HashStable;
-use std::fmt;
+use rustc_target::abi::TargetDataLayout;

 use crate::ty::{
     layout::{HasDataLayout, Size},
@@ -156,7 +159,7 @@ impl Scalar<()> {
     #[inline(always)]
     fn check_data(data: u128, size: u8) {
         debug_assert_eq!(
-            truncate(data, Size::from_bytes(size as u64)),
+            truncate(data, Size::from_bytes(u64::from(size))),
             data,
             "Scalar value {:#x} exceeds size of {} bytes",
             data,
@@ -198,55 +201,54 @@ impl<'tcx, Tag> Scalar<Tag> {
         Scalar::Raw { data: 0, size: 0 }
     }

+    #[inline(always)]
+    fn ptr_op(
+        self,
+        dl: &TargetDataLayout,
+        f_int: impl FnOnce(u64) -> InterpResult<'tcx, u64>,
+        f_ptr: impl FnOnce(Pointer<Tag>) -> InterpResult<'tcx, Pointer<Tag>>,
+    ) -> InterpResult<'tcx, Self> {
+        match self {
+            Scalar::Raw { data, size } => {
+                assert_eq!(u64::from(size), dl.pointer_size.bytes());
+                Ok(Scalar::Raw { data: u128::from(f_int(u64::try_from(data).unwrap())?), size })
+            }
+            Scalar::Ptr(ptr) => Ok(Scalar::Ptr(f_ptr(ptr)?)),
+        }
+    }
+
     #[inline]
     pub fn ptr_offset(self, i: Size, cx: &impl HasDataLayout) -> InterpResult<'tcx, Self> {
         let dl = cx.data_layout();
-        match self {
-            Scalar::Raw { data, size } => {
-                assert_eq!(size as u64, dl.pointer_size.bytes());
-                Ok(Scalar::Raw { data: dl.offset(data as u64, i.bytes())? as u128, size })
-            }
-            Scalar::Ptr(ptr) => ptr.offset(i, dl).map(Scalar::Ptr),
-        }
+        self.ptr_op(dl, |int| dl.offset(int, i.bytes()), |ptr| ptr.offset(i, dl))
     }

     #[inline]
     pub fn ptr_wrapping_offset(self, i: Size, cx: &impl HasDataLayout) -> Self {
         let dl = cx.data_layout();
-        match self {
-            Scalar::Raw { data, size } => {
-                assert_eq!(size as u64, dl.pointer_size.bytes());
-                Scalar::Raw { data: dl.overflowing_offset(data as u64, i.bytes()).0 as u128, size }
-            }
-            Scalar::Ptr(ptr) => Scalar::Ptr(ptr.wrapping_offset(i, dl)),
-        }
+        self.ptr_op(
+            dl,
+            |int| Ok(dl.overflowing_offset(int, i.bytes()).0),
+            |ptr| Ok(ptr.wrapping_offset(i, dl)),
+        )
+        .unwrap()
     }

     #[inline]
     pub fn ptr_signed_offset(self, i: i64, cx: &impl HasDataLayout) -> InterpResult<'tcx, Self> {
         let dl = cx.data_layout();
-        match self {
-            Scalar::Raw { data, size } => {
-                assert_eq!(size as u64, dl.pointer_size().bytes());
-                Ok(Scalar::Raw { data: dl.signed_offset(data as u64, i)? as u128, size })
-            }
-            Scalar::Ptr(ptr) => ptr.signed_offset(i, dl).map(Scalar::Ptr),
-        }
+        self.ptr_op(dl, |int| dl.signed_offset(int, i), |ptr| ptr.signed_offset(i, dl))
     }

     #[inline]
     pub fn ptr_wrapping_signed_offset(self, i: i64, cx: &impl HasDataLayout) -> Self {
         let dl = cx.data_layout();
-        match self {
-            Scalar::Raw { data, size } => {
-                assert_eq!(size as u64, dl.pointer_size.bytes());
-                Scalar::Raw {
-                    data: dl.overflowing_signed_offset(data as u64, i128::from(i)).0 as u128,
-                    size,
-                }
-            }
-            Scalar::Ptr(ptr) => Scalar::Ptr(ptr.wrapping_signed_offset(i, dl)),
-        }
+        self.ptr_op(
+            dl,
+            |int| Ok(dl.overflowing_signed_offset(int, i).0),
+            |ptr| Ok(ptr.wrapping_signed_offset(i, dl)),
+        )
+        .unwrap()
     }

     #[inline]
@@ -281,25 +283,25 @@ impl<'tcx, Tag> Scalar<Tag> {
     #[inline]
     pub fn from_u8(i: u8) -> Self {
         // Guaranteed to be truncated and does not need sign extension.
-        Scalar::Raw { data: i as u128, size: 1 }
+        Scalar::Raw { data: i.into(), size: 1 }
     }

     #[inline]
     pub fn from_u16(i: u16) -> Self {
         // Guaranteed to be truncated and does not need sign extension.
-        Scalar::Raw { data: i as u128, size: 2 }
+        Scalar::Raw { data: i.into(), size: 2 }
     }

     #[inline]
     pub fn from_u32(i: u32) -> Self {
         // Guaranteed to be truncated and does not need sign extension.
-        Scalar::Raw { data: i as u128, size: 4 }
+        Scalar::Raw { data: i.into(), size: 4 }
     }

     #[inline]
     pub fn from_u64(i: u64) -> Self {
         // Guaranteed to be truncated and does not need sign extension.
-        Scalar::Raw { data: i as u128, size: 8 }
+        Scalar::Raw { data: i.into(), size: 8 }
     }

     #[inline]
@@ -376,7 +378,7 @@ impl<'tcx, Tag> Scalar<Tag> {
         assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST");
         match self {
             Scalar::Raw { data, size } => {
-                assert_eq!(target_size.bytes(), size as u64);
+                assert_eq!(target_size.bytes(), u64::from(size));
                 Scalar::check_data(data, size);
                 Ok(data)
             }
@@ -394,7 +396,7 @@ impl<'tcx, Tag> Scalar<Tag> {
         assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST");
         match self {
             Scalar::Raw { data, size } => {
-                assert_eq!(target_size.bytes(), size as u64);
+                assert_eq!(target_size.bytes(), u64::from(size));
                 Scalar::check_data(data, size);
                 Ok(data)
             }
@@ -458,27 +460,27 @@ impl<'tcx, Tag> Scalar<Tag> {

     /// Converts the scalar to produce an `u8`. Fails if the scalar is a pointer.
     pub fn to_u8(self) -> InterpResult<'static, u8> {
-        self.to_unsigned_with_bit_width(8).map(|v| v as u8)
+        self.to_unsigned_with_bit_width(8).map(|v| u8::try_from(v).unwrap())
     }

     /// Converts the scalar to produce an `u16`. Fails if the scalar is a pointer.
     pub fn to_u16(self) -> InterpResult<'static, u16> {
-        self.to_unsigned_with_bit_width(16).map(|v| v as u16)
+        self.to_unsigned_with_bit_width(16).map(|v| u16::try_from(v).unwrap())
     }

     /// Converts the scalar to produce an `u32`. Fails if the scalar is a pointer.
     pub fn to_u32(self) -> InterpResult<'static, u32> {
-        self.to_unsigned_with_bit_width(32).map(|v| v as u32)
+        self.to_unsigned_with_bit_width(32).map(|v| u32::try_from(v).unwrap())
     }

     /// Converts the scalar to produce an `u64`. Fails if the scalar is a pointer.
     pub fn to_u64(self) -> InterpResult<'static, u64> {
-        self.to_unsigned_with_bit_width(64).map(|v| v as u64)
+        self.to_unsigned_with_bit_width(64).map(|v| u64::try_from(v).unwrap())
     }

     pub fn to_machine_usize(self, cx: &impl HasDataLayout) -> InterpResult<'static, u64> {
         let b = self.to_bits(cx.data_layout().pointer_size)?;
-        Ok(b as u64)
+        Ok(u64::try_from(b).unwrap())
     }

     #[inline]
@@ -490,41 +492,41 @@ impl<'tcx, Tag> Scalar<Tag> {

     /// Converts the scalar to produce an `i8`. Fails if the scalar is a pointer.
     pub fn to_i8(self) -> InterpResult<'static, i8> {
-        self.to_signed_with_bit_width(8).map(|v| v as i8)
+        self.to_signed_with_bit_width(8).map(|v| i8::try_from(v).unwrap())
     }

     /// Converts the scalar to produce an `i16`. Fails if the scalar is a pointer.
     pub fn to_i16(self) -> InterpResult<'static, i16> {
-        self.to_signed_with_bit_width(16).map(|v| v as i16)
+        self.to_signed_with_bit_width(16).map(|v| i16::try_from(v).unwrap())
     }

     /// Converts the scalar to produce an `i32`. Fails if the scalar is a pointer.
     pub fn to_i32(self) -> InterpResult<'static, i32> {
-        self.to_signed_with_bit_width(32).map(|v| v as i32)
+        self.to_signed_with_bit_width(32).map(|v| i32::try_from(v).unwrap())
     }

     /// Converts the scalar to produce an `i64`. Fails if the scalar is a pointer.
     pub fn to_i64(self) -> InterpResult<'static, i64> {
-        self.to_signed_with_bit_width(64).map(|v| v as i64)
+        self.to_signed_with_bit_width(64).map(|v| i64::try_from(v).unwrap())
     }

     pub fn to_machine_isize(self, cx: &impl HasDataLayout) -> InterpResult<'static, i64> {
         let sz = cx.data_layout().pointer_size;
         let b = self.to_bits(sz)?;
         let b = sign_extend(b, sz) as i128;
-        Ok(b as i64)
+        Ok(i64::try_from(b).unwrap())
     }

     #[inline]
     pub fn to_f32(self) -> InterpResult<'static, Single> {
         // Going through `u32` to check size and truncation.
-        Ok(Single::from_bits(self.to_u32()? as u128))
+        Ok(Single::from_bits(self.to_u32()?.into()))
     }

     #[inline]
     pub fn to_f64(self) -> InterpResult<'static, Double> {
         // Going through `u64` to check size and truncation.
-        Ok(Double::from_bits(self.to_u64()? as u128))
+        Ok(Double::from_bits(self.to_u64()?.into()))
     }
 }

@@ -671,8 +673,8 @@ pub fn get_slice_bytes<'tcx>(cx: &impl HasDataLayout, val: ConstValue<'tcx>) ->
         data.get_bytes(
             cx,
             // invent a pointer, only the offset is relevant anyway
-            Pointer::new(AllocId(0), Size::from_bytes(start as u64)),
-            Size::from_bytes(len as u64),
+            Pointer::new(AllocId(0), Size::from_bytes(start)),
+            Size::from_bytes(len),
         )
         .unwrap_or_else(|err| bug!("const slice is invalid: {:?}", err))
     } else {
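The new `ptr_op` helper collapses four near-identical `match` statements into one higher-order function that takes the integer case and the pointer case as closures. A simplified stand-in (toy types, not rustc's) showing the shape of that refactor:

```rust
// Toy stand-in for Scalar: either raw bits or a symbolic pointer.
#[derive(Debug, PartialEq)]
enum Scalar {
    Raw(u64),
    Ptr(&'static str),
}

impl Scalar {
    // One dispatcher replaces a match duplicated in every arithmetic method.
    fn ptr_op(
        self,
        f_int: impl FnOnce(u64) -> Result<u64, String>,
        f_ptr: impl FnOnce(&'static str) -> Result<&'static str, String>,
    ) -> Result<Scalar, String> {
        match self {
            Scalar::Raw(v) => Ok(Scalar::Raw(f_int(v)?)),
            Scalar::Ptr(p) => Ok(Scalar::Ptr(f_ptr(p)?)),
        }
    }
}

fn main() {
    let s = Scalar::Raw(40).ptr_op(|v| Ok(v + 2), |p| Ok(p)).unwrap();
    assert_eq!(s, Scalar::Raw(42));
}
```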
@@ -9,7 +9,7 @@ use crate::traits::query::{
 };
 use crate::ty::query::queries;
 use crate::ty::query::QueryDescription;
-use crate::ty::subst::SubstsRef;
+use crate::ty::subst::{GenericArg, SubstsRef};
 use crate::ty::{self, ParamEnvAnd, Ty, TyCtxt};
 use rustc_hir::def_id::{CrateNum, DefId, LocalDefId};

@@ -1114,10 +1114,10 @@ rustc_queries! {
     }

     /// Do not call this query directly: invoke `normalize_erasing_regions` instead.
-    query normalize_ty_after_erasing_regions(
-        goal: ParamEnvAnd<'tcx, Ty<'tcx>>
-    ) -> Ty<'tcx> {
-        desc { "normalizing `{:?}`", goal }
+    query normalize_generic_arg_after_erasing_regions(
+        goal: ParamEnvAnd<'tcx, GenericArg<'tcx>>
+    ) -> GenericArg<'tcx> {
+        desc { "normalizing `{}`", goal.value }
     }

     query implied_outlives_bounds(
@@ -273,6 +273,20 @@ impl<'tcx> TypeVisitor<'tcx> for BoundNamesCollector {
         t.super_visit_with(self)
     }

+    fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> bool {
+        match c.val {
+            ty::ConstKind::Bound(debruijn, bound_var) if debruijn == self.binder_index => {
+                self.types.insert(
+                    bound_var.as_u32(),
+                    Symbol::intern(&format!("^{}", bound_var.as_u32())),
+                );
+            }
+            _ => (),
+        }
+
+        c.super_visit_with(self)
+    }
+
     fn visit_region(&mut self, r: ty::Region<'tcx>) -> bool {
         match r {
             ty::ReLateBound(index, br) if *index == self.binder_index => match br {
@@ -978,17 +978,27 @@ impl<'tcx> TypeVisitor<'tcx> for LateBoundRegionsCollector {
         // ignore the inputs to a projection, as they may not appear
         // in the normalized form
         if self.just_constrained {
-            match t.kind {
-                ty::Projection(..) | ty::Opaque(..) => {
-                    return false;
-                }
-                _ => {}
+            if let ty::Projection(..) | ty::Opaque(..) = t.kind {
+                return false;
             }
         }

         t.super_visit_with(self)
     }

+    fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> bool {
+        // if we are only looking for "constrained" region, we have to
+        // ignore the inputs of an unevaluated const, as they may not appear
+        // in the normalized form
+        if self.just_constrained {
+            if let ty::ConstKind::Unevaluated(..) = c.val {
+                return false;
+            }
+        }
+
+        c.super_visit_with(self)
+    }
+
     fn visit_region(&mut self, r: ty::Region<'tcx>) -> bool {
         if let ty::ReLateBound(debruijn, br) = *r {
             if debruijn == self.current_index {
@@ -4,8 +4,8 @@
 //!
 //! The methods in this file use a `TypeFolder` to recursively process
 //! contents, invoking the underlying
-//! `normalize_ty_after_erasing_regions` query for each type found
-//! within. (This underlying query is what is cached.)
+//! `normalize_generic_arg_after_erasing_regions` query for each type
+//! or constant found within. (This underlying query is what is cached.)

 use crate::ty::fold::{TypeFoldable, TypeFolder};
 use crate::ty::subst::{Subst, SubstsRef};
@@ -94,6 +94,12 @@ impl TypeFolder<'tcx> for NormalizeAfterErasingRegionsFolder<'tcx> {
     }

     fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
-        self.tcx.normalize_ty_after_erasing_regions(self.param_env.and(ty))
+        let arg = self.param_env.and(ty.into());
+        self.tcx.normalize_generic_arg_after_erasing_regions(arg).expect_ty()
     }
+
+    fn fold_const(&mut self, c: &'tcx ty::Const<'tcx>) -> &'tcx ty::Const<'tcx> {
+        let arg = self.param_env.and(c.into());
+        self.tcx.normalize_generic_arg_after_erasing_regions(arg).expect_const()
+    }
 }
@@ -981,7 +981,7 @@ pub trait PrettyPrinter<'tcx>:
                 .alloc_map
                 .lock()
                 .unwrap_memory(ptr.alloc_id)
-                .get_bytes(&self.tcx(), ptr, Size::from_bytes(*data as u64))
+                .get_bytes(&self.tcx(), ptr, Size::from_bytes(*data))
                 .unwrap();
             p!(pretty_print_byte_str(byte_str));
         }
@@ -1169,7 +1169,7 @@ pub trait PrettyPrinter<'tcx>:
             (ConstValue::ByRef { alloc, offset }, ty::Array(t, n)) if *t == u8_type => {
                 let n = n.val.try_to_bits(self.tcx().data_layout.pointer_size).unwrap();
                 // cast is ok because we already checked for pointer size (32 or 64 bit) above
-                let n = Size::from_bytes(n as u64);
+                let n = Size::from_bytes(n);
                 let ptr = Pointer::new(AllocId(0), offset);

                 let byte_str = alloc.get_bytes(&self.tcx(), ptr, n).unwrap();
@@ -5,7 +5,7 @@ use crate::mir;
 use crate::traits;
 use crate::ty::fast_reject::SimplifiedType;
 use crate::ty::query::caches::DefaultCacheSelector;
-use crate::ty::subst::SubstsRef;
+use crate::ty::subst::{GenericArg, SubstsRef};
 use crate::ty::{self, Ty, TyCtxt};
 use rustc_hir::def_id::{CrateNum, DefId, LocalDefId, LOCAL_CRATE};
 use rustc_span::symbol::Symbol;
@@ -194,6 +194,17 @@ impl<'tcx> Key for ty::PolyTraitRef<'tcx> {
     }
 }

+impl<'tcx> Key for GenericArg<'tcx> {
+    type CacheSelector = DefaultCacheSelector;
+
+    fn query_crate(&self) -> CrateNum {
+        LOCAL_CRATE
+    }
+    fn default_span(&self, _: TyCtxt<'_>) -> Span {
+        DUMMY_SP
+    }
+}
+
 impl<'tcx> Key for &'tcx ty::Const<'tcx> {
     type CacheSelector = DefaultCacheSelector;

@@ -31,7 +31,7 @@ use crate::traits::specialization_graph;
 use crate::traits::Clauses;
 use crate::traits::{self, Vtable};
 use crate::ty::steal::Steal;
-use crate::ty::subst::SubstsRef;
+use crate::ty::subst::{GenericArg, SubstsRef};
 use crate::ty::util::AlwaysRequiresDrop;
 use crate::ty::{self, AdtSizedConstraint, CrateInherentImpls, ParamEnvAnd, Ty, TyCtxt};
 use crate::util::common::ErrorReported;
@@ -128,6 +128,14 @@ impl<'tcx> GenericArg<'tcx> {
             _ => bug!("expected a type, but found another kind"),
         }
     }
+
+    /// Unpack the `GenericArg` as a const when it is known certainly to be a const.
+    pub fn expect_const(self) -> &'tcx ty::Const<'tcx> {
+        match self.unpack() {
+            GenericArgKind::Const(c) => c,
+            _ => bug!("expected a const, but found another kind"),
+        }
+    }
 }

 impl<'a, 'tcx> Lift<'tcx> for GenericArg<'a> {
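`expect_const` mirrors the existing `expect_ty`: unpack a sum type when the caller is certain of the variant, panicking with a clear message otherwise. A sketch of the same accessor pattern on a toy stand-in (names illustrative, not rustc's types):

```rust
// Toy version of the GenericArg kind-accessor pattern.
#[derive(Debug)]
enum Arg {
    Ty(&'static str),
    Const(i64),
}

impl Arg {
    fn expect_ty(self) -> &'static str {
        match self {
            Arg::Ty(t) => t,
            _ => panic!("expected a type, but found another kind"),
        }
    }
    fn expect_const(self) -> i64 {
        match self {
            Arg::Const(c) => c,
            _ => panic!("expected a const, but found another kind"),
        }
    }
}

fn main() {
    assert_eq!(Arg::Const(3).expect_const(), 3);
    assert_eq!(Arg::Ty("u8").expect_ty(), "u8");
}
```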
@@ -1614,7 +1614,7 @@ impl FloatTy {
         }
     }

-    pub fn bit_width(self) -> usize {
+    pub fn bit_width(self) -> u64 {
         match self {
             FloatTy::F32 => 32,
             FloatTy::F64 => 64,
@@ -1663,7 +1663,7 @@ impl IntTy {
         format!("{}{}", val as u128, self.name_str())
     }

-    pub fn bit_width(&self) -> Option<usize> {
+    pub fn bit_width(&self) -> Option<u64> {
         Some(match *self {
             IntTy::Isize => return None,
             IntTy::I8 => 8,
@@ -1725,7 +1725,7 @@ impl UintTy {
         format!("{}{}", val, self.name_str())
     }

-    pub fn bit_width(&self) -> Option<usize> {
+    pub fn bit_width(&self) -> Option<u64> {
         Some(match *self {
             UintTy::Usize => return None,
             UintTy::U8 => 8,
@@ -1172,8 +1172,8 @@ fn generic_simd_intrinsic(
     let m_len = match in_ty.kind {
         // Note that this `.unwrap()` crashes for isize/usize, that's sort
        // of intentional as there's not currently a use case for that.
-        ty::Int(i) => i.bit_width().unwrap() as u64,
-        ty::Uint(i) => i.bit_width().unwrap() as u64,
+        ty::Int(i) => i.bit_width().unwrap(),
+        ty::Uint(i) => i.bit_width().unwrap(),
         _ => return_error!("`{}` is not an integral type", in_ty),
     };
     require_simd!(arg_tys[1], "argument");
@@ -1354,20 +1354,18 @@ fn generic_simd_intrinsic(
         // trailing bits.
         let expected_int_bits = in_len.max(8);
         match ret_ty.kind {
-            ty::Uint(i) if i.bit_width() == Some(expected_int_bits as usize) => (),
+            ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => (),
             _ => return_error!("bitmask `{}`, expected `u{}`", ret_ty, expected_int_bits),
         }

         // Integer vector <i{in_bitwidth} x in_len>:
         let (i_xn, in_elem_bitwidth) = match in_elem.kind {
-            ty::Int(i) => (
-                args[0].immediate(),
-                i.bit_width().unwrap_or(bx.data_layout().pointer_size.bits() as _),
-            ),
-            ty::Uint(i) => (
-                args[0].immediate(),
-                i.bit_width().unwrap_or(bx.data_layout().pointer_size.bits() as _),
-            ),
+            ty::Int(i) => {
+                (args[0].immediate(), i.bit_width().unwrap_or(bx.data_layout().pointer_size.bits()))
+            }
+            ty::Uint(i) => {
+                (args[0].immediate(), i.bit_width().unwrap_or(bx.data_layout().pointer_size.bits()))
+            }
             _ => return_error!(
                 "vector argument `{}`'s element type `{}`, expected integer element type",
                 in_ty,
@@ -1378,16 +1376,16 @@ fn generic_simd_intrinsic(
         // Shift the MSB to the right by "in_elem_bitwidth - 1" into the first bit position.
         let shift_indices =
             vec![
-                bx.cx.const_int(bx.type_ix(in_elem_bitwidth as _), (in_elem_bitwidth - 1) as _);
+                bx.cx.const_int(bx.type_ix(in_elem_bitwidth), (in_elem_bitwidth - 1) as _);
                 in_len as _
             ];
         let i_xn_msb = bx.lshr(i_xn, bx.const_vector(shift_indices.as_slice()));
         // Truncate vector to an <i1 x N>
-        let i1xn = bx.trunc(i_xn_msb, bx.type_vector(bx.type_i1(), in_len as _));
+        let i1xn = bx.trunc(i_xn_msb, bx.type_vector(bx.type_i1(), in_len));
         // Bitcast <i1 x N> to iN:
-        let i_ = bx.bitcast(i1xn, bx.type_ix(in_len as _));
+        let i_ = bx.bitcast(i1xn, bx.type_ix(in_len));
         // Zero-extend iN to the bitmask type:
-        return Ok(bx.zext(i_, bx.type_ix(expected_int_bits as _)));
+        return Ok(bx.zext(i_, bx.type_ix(expected_int_bits)));
     }

     fn simd_simple_float_intrinsic(
@@ -2099,7 +2097,7 @@ fn int_type_width_signed(ty: Ty<'_>, cx: &CodegenCx<'_, '_>) -> Option<(u64, boo
     match ty.kind {
         ty::Int(t) => Some((
             match t {
-                ast::IntTy::Isize => cx.tcx.sess.target.ptr_width as u64,
+                ast::IntTy::Isize => u64::from(cx.tcx.sess.target.ptr_width),
                 ast::IntTy::I8 => 8,
                 ast::IntTy::I16 => 16,
                 ast::IntTy::I32 => 32,
@@ -2110,7 +2108,7 @@ fn int_type_width_signed(ty: Ty<'_>, cx: &CodegenCx<'_, '_>) -> Option<(u64, boo
         )),
         ty::Uint(t) => Some((
             match t {
-                ast::UintTy::Usize => cx.tcx.sess.target.ptr_width as u64,
+                ast::UintTy::Usize => u64::from(cx.tcx.sess.target.ptr_width),
                 ast::UintTy::U8 => 8,
                 ast::UintTy::U16 => 16,
                 ast::UintTy::U32 => 32,
@@ -2127,7 +2125,7 @@ fn int_type_width_signed(ty: Ty<'_>, cx: &CodegenCx<'_, '_>) -> Option<(u64, boo
 // Returns None if the type is not a float
 fn float_type_width(ty: Ty<'_>) -> Option<u64> {
     match ty.kind {
-        ty::Float(t) => Some(t.bit_width() as u64),
+        ty::Float(t) => Some(t.bit_width()),
         _ => None,
     }
 }
@@ -91,7 +91,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
         };
         let a = Scalar::from(Pointer::new(
             bx.tcx().alloc_map.lock().create_memory_alloc(data),
-            Size::from_bytes(start as u64),
+            Size::from_bytes(start),
         ));
         let a_llval = bx.scalar_to_backend(
             a,
@@ -393,6 +393,7 @@ E0701: include_str!("./error_codes/E0701.md"),
 E0704: include_str!("./error_codes/E0704.md"),
 E0705: include_str!("./error_codes/E0705.md"),
 E0706: include_str!("./error_codes/E0706.md"),
+E0710: include_str!("./error_codes/E0710.md"),
 E0712: include_str!("./error_codes/E0712.md"),
 E0713: include_str!("./error_codes/E0713.md"),
 E0714: include_str!("./error_codes/E0714.md"),
@@ -604,7 +605,6 @@ E0748: include_str!("./error_codes/E0748.md"),
     E0708, // `async` non-`move` closures with parameters are not currently
            // supported
  // E0709, // multiple different lifetimes used in arguments of `async fn`
-    E0710, // an unknown tool name found in scoped lint
     E0711, // a feature has been declared with conflicting stability attributes
     E0717, // rustc_promotable without stability attribute
  // E0721, // `await` keyword
src/librustc_error_codes/error_codes/E0710.md (new file, +34)
@@ -0,0 +1,34 @@
+An unknown tool name found in scoped lint.
+
+Erroneous code examples:
+
+```compile_fail,E0710
+#[allow(clipp::filter_map)] // error!
+fn main() {
+    // business logic
+}
+```
+
+```compile_fail,E0710
+#[warn(clipp::filter_map)] // error!
+fn main() {
+    // business logic
+}
+```
+
+Please verify you didn't misspell the tool's name or that you didn't
+forget to import it in your project:
+
+```
+#[allow(clippy::filter_map)] // ok!
+fn main() {
+    // business logic
+}
+```
+
+```
+#[warn(clippy::filter_map)] // ok!
+fn main() {
+    // business logic
+}
+```
@@ -1,5 +1,7 @@
 // Not in interpret to make sure we do not use private implementation details

+use std::convert::TryFrom;
+
 use rustc::mir;
 use rustc::ty::layout::VariantIdx;
 use rustc::ty::{self, TyCtxt};
@@ -37,7 +39,7 @@ pub(crate) fn const_field<'tcx>(
         Some(variant) => ecx.operand_downcast(op, variant).unwrap(),
     };
     // then project
-    let field = ecx.operand_field(down, field.index() as u64).unwrap();
+    let field = ecx.operand_field(down, field.index()).unwrap();
     // and finally move back to the const world, always normalizing because
     // this is not called for statics.
     op_to_const(&ecx, field)
@@ -68,10 +70,11 @@ pub(crate) fn destructure_const<'tcx>(

     let variant = ecx.read_discriminant(op).unwrap().1;

+    // We go to `usize` as we cannot allocate anything bigger anyway.
     let field_count = match val.ty.kind {
-        ty::Array(_, len) => len.eval_usize(tcx, param_env),
-        ty::Adt(def, _) => def.variants[variant].fields.len() as u64,
-        ty::Tuple(substs) => substs.len() as u64,
+        ty::Array(_, len) => usize::try_from(len.eval_usize(tcx, param_env)).unwrap(),
+        ty::Adt(def, _) => def.variants[variant].fields.len(),
+        ty::Tuple(substs) => substs.len(),
         _ => bug!("cannot destructure constant {:?}", val),
     };

@@ -1,3 +1,5 @@
+use std::convert::TryFrom;
+
 use rustc::ty::adjustment::PointerCast;
 use rustc::ty::layout::{self, Size, TyLayout};
 use rustc::ty::{self, Ty, TypeAndMut, TypeFoldable};
@@ -206,8 +208,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {

             Char => {
                 // `u8` to `char` cast
-                assert_eq!(v as u8 as u128, v);
-                Ok(Scalar::from_uint(v, Size::from_bytes(4)))
+                Ok(Scalar::from_uint(u8::try_from(v).unwrap(), Size::from_bytes(4)))
             }

             // Casts to bool are not permitted by rustc, no need to handle them here.
@@ -227,16 +228,16 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         match dest_ty.kind {
             // float -> uint
             Uint(t) => {
-                let width = t.bit_width().unwrap_or_else(|| self.pointer_size().bits() as usize);
-                let v = f.to_u128(width).value;
+                let width = t.bit_width().unwrap_or_else(|| self.pointer_size().bits());
+                let v = f.to_u128(usize::try_from(width).unwrap()).value;
                 // This should already fit the bit width
-                Ok(Scalar::from_uint(v, Size::from_bits(width as u64)))
+                Ok(Scalar::from_uint(v, Size::from_bits(width)))
             }
             // float -> int
             Int(t) => {
-                let width = t.bit_width().unwrap_or_else(|| self.pointer_size().bits() as usize);
-                let v = f.to_i128(width).value;
-                Ok(Scalar::from_int(v, Size::from_bits(width as u64)))
+                let width = t.bit_width().unwrap_or_else(|| self.pointer_size().bits());
+                let v = f.to_i128(usize::try_from(width).unwrap()).value;
+                Ok(Scalar::from_int(v, Size::from_bits(width)))
             }
             // float -> f32
             Float(FloatTy::F32) => Ok(Scalar::from_f32(f.convert(&mut false).value)),
@@ -319,11 +320,11 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 // Example: `Arc<T>` -> `Arc<Trait>`
                 // here we need to increase the size of every &T thin ptr field to a fat ptr
                 for i in 0..src.layout.fields.count() {
-                    let dst_field = self.place_field(dest, i as u64)?;
+                    let dst_field = self.place_field(dest, i)?;
                     if dst_field.layout.is_zst() {
                         continue;
                     }
-                    let src_field = self.operand_field(src, i as u64)?;
+                    let src_field = self.operand_field(src, i)?;
                     if src_field.layout.ty == dst_field.layout.ty {
                         self.copy_op(src_field, dst_field)?;
                     } else {
@ -413,6 +413,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
|
||||
// and it also rounds up to alignment, which we want to avoid,
|
||||
// as the unsized field's alignment could be smaller.
|
||||
assert!(!layout.ty.is_simd());
|
||||
assert!(layout.fields.count() > 0);
|
||||
trace!("DST layout: {:?}", layout);
|
||||
|
||||
let sized_size = layout.fields.offset(layout.fields.count() - 1);
|
||||
@ -452,7 +453,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
|
||||
// here. But this is where the add would go.)
|
||||
|
||||
// Return the sum of sizes and max of aligns.
|
||||
let size = sized_size + unsized_size;
|
||||
let size = sized_size + unsized_size; // `Size` addition
|
||||
|
||||
// Choose max of two known alignments (combined value must
|
||||
// be aligned according to more restrictive of the two).
|
||||
|
@ -29,11 +29,11 @@ fn numeric_intrinsic<'tcx, Tag>(
        Primitive::Int(integer, _) => integer.size(),
        _ => bug!("invalid `{}` argument: {:?}", name, bits),
    };
    let extra = 128 - size.bits() as u128;
    let extra = 128 - u128::from(size.bits());
    let bits_out = match name {
        sym::ctpop => bits.count_ones() as u128,
        sym::ctlz => bits.leading_zeros() as u128 - extra,
        sym::cttz => (bits << extra).trailing_zeros() as u128 - extra,
        sym::ctpop => u128::from(bits.count_ones()),
        sym::ctlz => u128::from(bits.leading_zeros()) - extra,
        sym::cttz => u128::from((bits << extra).trailing_zeros()) - extra,
        sym::bswap => (bits << extra).swap_bytes(),
        sym::bitreverse => (bits << extra).reverse_bits(),
        _ => bug!("not a numeric intrinsic: {}", name),
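The `extra` bookkeeping above is easiest to see on a concrete width. A self-contained sketch of the `cttz` case for an assumed 8-bit operand stored in a `u128` (the function name is made up for illustration):

```rust
// Shifting left by `extra = 128 - 8` appends `extra` zeros at the bottom,
// which are subtracted back out of the count; for an all-zero value this
// yields exactly 8 (the bit width) instead of 128.
fn cttz8(bits: u128) -> u128 {
    let extra: u128 = 128 - 8;
    u128::from((bits << extra).trailing_zeros()) - extra
}

fn main() {
    assert_eq!(cttz8(0b1000_0000), 7);
    assert_eq!(cttz8(0), 8);
}
```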
@ -261,7 +261,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                let val_bits = self.force_bits(val, layout.size)?;
                let raw_shift = self.read_scalar(args[1])?.not_undef()?;
                let raw_shift_bits = self.force_bits(raw_shift, layout.size)?;
                let width_bits = layout.size.bits() as u128;
                let width_bits = u128::from(layout.size.bits());
                let shift_bits = raw_shift_bits % width_bits;
                let inv_shift_bits = (width_bits - shift_bits) % width_bits;
                let result_bits = if intrinsic_name == sym::rotate_left {
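The two `%` reductions are what keep the later shifts in range. The same arithmetic on an assumed 8-bit value, as a standalone sketch:

```rust
// The raw shift is reduced mod the bit width, and the inverse shift is
// reduced again so that a shift count equal to the width (which would be
// a shift overflow) can never reach the `<<`/`>>` operators.
fn rotate_left8(x: u8, raw_shift: u32) -> u8 {
    let width = 8;
    let shift = raw_shift % width;
    let inv_shift = (width - shift) % width;
    (x << shift) | (x >> inv_shift)
}

fn main() {
    assert_eq!(rotate_left8(0b1000_0001, 1), 0b0000_0011);
    assert_eq!(rotate_left8(0xAB, 8), 0xAB); // rotating by the full width is a no-op
}
```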
@ -350,8 +350,8 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                );

                for i in 0..len {
                    let place = self.place_field(dest, i)?;
                    let value = if i == index { elem } else { self.operand_field(input, i)? };
                    let place = self.place_index(dest, i)?;
                    let value = if i == index { elem } else { self.operand_index(input, i)? };
                    self.copy_op(value, place)?;
                }
            }
@ -370,7 +370,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                    "Return type `{}` must match vector element type `{}`",
                    dest.layout.ty, e_ty
                );
                self.copy_op(self.operand_field(args[0], index)?, dest)?;
                self.copy_op(self.operand_index(args[0], index)?, dest)?;
            }
            _ => return Ok(false),
        }

@ -1,3 +1,5 @@
use std::convert::TryFrom;

use rustc::middle::lang_items::PanicLocationLangItem;
use rustc::ty::subst::Subst;
use rustc_span::{Span, Symbol};
@ -59,8 +61,8 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
        let caller = self.tcx.sess.source_map().lookup_char_pos(topmost.lo());
        (
            Symbol::intern(&caller.file.name.to_string()),
            caller.line as u32,
            caller.col_display as u32 + 1,
            u32::try_from(caller.line).unwrap(),
            u32::try_from(caller.col_display).unwrap().checked_add(1).unwrap(),
        )
    }

@ -8,6 +8,7 @@

use std::borrow::Cow;
use std::collections::VecDeque;
use std::convert::TryFrom;
use std::ptr;

use rustc::ty::layout::{Align, HasDataLayout, Size, TargetDataLayout};
@ -346,7 +347,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
        };
        Ok(match normalized.to_bits_or_ptr(self.pointer_size(), self) {
            Ok(bits) => {
                let bits = bits as u64; // it's ptr-sized
                let bits = u64::try_from(bits).unwrap(); // it's ptr-sized
                assert!(size.bytes() == 0);
                // Must be non-NULL.
                if bits == 0 {
@ -473,7 +474,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
    }

    /// Gives raw access to the `Allocation`, without bounds or alignment checks.
    /// Use the higher-level, `PlaceTy`- and `OpTy`-based APIs in `InterpCtx` instead!
    /// Use the higher-level, `PlaceTy`- and `OpTy`-based APIs in `InterpCx` instead!
    pub fn get_raw(
        &self,
        id: AllocId,
@ -510,7 +511,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
    }

    /// Gives raw mutable access to the `Allocation`, without bounds or alignment checks.
    /// Use the higher-level, `PlaceTy`- and `OpTy`-based APIs in `InterpCtx` instead!
    /// Use the higher-level, `PlaceTy`- and `OpTy`-based APIs in `InterpCx` instead!
    pub fn get_raw_mut(
        &mut self,
        id: AllocId,
@ -667,7 +668,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
            }
            if alloc.undef_mask().is_range_defined(i, i + Size::from_bytes(1)).is_ok() {
                // this `as usize` is fine, since `i` came from a `usize`
                let i = i.bytes() as usize;
                let i = i.bytes_usize();

                // Checked definedness (and thus range) and relocations. This access also doesn't
                // influence interpreter execution but is only for debugging.
@ -692,8 +693,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
        let mut pos = Size::ZERO;
        let relocation_width = (self.pointer_size().bytes() - 1) * 3;
        for (i, target_id) in relocations {
            // this `as usize` is fine, since we can't print more chars than `usize::MAX`
            write!(msg, "{:1$}", "", ((i - pos) * 3).bytes() as usize).unwrap();
            write!(msg, "{:1$}", "", ((i - pos) * 3).bytes_usize()).unwrap();
            let target = format!("({})", target_id);
            // this `as usize` is fine, since we can't print more chars than `usize::MAX`
            write!(msg, "└{0:─^1$}┘ ", target, relocation_width as usize).unwrap();
@ -834,8 +834,8 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
        src: impl IntoIterator<Item = u8>,
    ) -> InterpResult<'tcx> {
        let src = src.into_iter();
        let size = Size::from_bytes(src.size_hint().0 as u64);
        // `write_bytes` checks that this lower bound matches the upper bound matches reality.
        let size = Size::from_bytes(src.size_hint().0);
        // `write_bytes` checks that this lower bound `size` matches the upper bound and reality.
        let ptr = match self.check_ptr_access(ptr, size, Align::from_bytes(1).unwrap())? {
            Some(ptr) => ptr,
            None => return Ok(()), // zero-sized access
@ -874,14 +874,11 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {

        let tcx = self.tcx.tcx;

        // The bits have to be saved locally before writing to dest in case src and dest overlap.
        assert_eq!(size.bytes() as usize as u64, size.bytes());

        // This checks relocation edges on the src.
        let src_bytes =
            self.get_raw(src.alloc_id)?.get_bytes_with_undef_and_ptr(&tcx, src, size)?.as_ptr();
        let dest_bytes =
            self.get_raw_mut(dest.alloc_id)?.get_bytes_mut(&tcx, dest, size * length)?;
            self.get_raw_mut(dest.alloc_id)?.get_bytes_mut(&tcx, dest, size * length)?; // `Size` multiplication

        // If `dest_bytes` is empty we just optimize to not run anything for zsts.
        // See #67539
@ -902,7 +899,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
            // touched if the bytes stay undef for the whole interpreter execution. On contemporary
            // operating system this can avoid physically allocating the page.
            let dest_alloc = self.get_raw_mut(dest.alloc_id)?;
            dest_alloc.mark_definedness(dest, size * length, false);
            dest_alloc.mark_definedness(dest, size * length, false); // `Size` multiplication
            dest_alloc.mark_relocation_range(relocations);
            return Ok(());
        }
@ -913,9 +910,9 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
        // The pointers above remain valid even if the `HashMap` table is moved around because they
        // point into the `Vec` storing the bytes.
        unsafe {
            assert_eq!(size.bytes() as usize as u64, size.bytes());
            if src.alloc_id == dest.alloc_id {
                if nonoverlapping {
                    // `Size` additions
                    if (src.offset <= dest.offset && src.offset + size > dest.offset)
                        || (dest.offset <= src.offset && dest.offset + size > src.offset)
                    {
@ -926,16 +923,16 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
                for i in 0..length {
                    ptr::copy(
                        src_bytes,
                        dest_bytes.offset((size.bytes() * i) as isize),
                        size.bytes() as usize,
                        dest_bytes.add((size * i).bytes_usize()), // `Size` multiplication
                        size.bytes_usize(),
                    );
                }
            } else {
                for i in 0..length {
                    ptr::copy_nonoverlapping(
                        src_bytes,
                        dest_bytes.offset((size.bytes() * i) as isize),
                        size.bytes() as usize,
                        dest_bytes.add((size * i).bytes_usize()), // `Size` multiplication
                        size.bytes_usize(),
                    );
                }
            }
@ -975,7 +972,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
    ) -> InterpResult<'tcx, u128> {
        match scalar.to_bits_or_ptr(size, self) {
            Ok(bits) => Ok(bits),
            Err(ptr) => Ok(M::ptr_to_int(&self, ptr)? as u128),
            Err(ptr) => Ok(M::ptr_to_int(&self, ptr)?.into()),
        }
    }
}

@ -1,7 +1,7 @@
//! Functions concerning immediate values and operands, and reading from operands.
//! All high-level functions to read from memory work on operands as sources.

use std::convert::{TryFrom, TryInto};
use std::convert::TryFrom;

use super::{InterpCx, MPlaceTy, Machine, MemPlace, Place, PlaceTy};
pub use rustc::mir::interpret::ScalarMaybeUndef;
@ -341,7 +341,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    // Turn the wide MPlace into a string (must already be dereferenced!)
    pub fn read_str(&self, mplace: MPlaceTy<'tcx, M::PointerTag>) -> InterpResult<'tcx, &str> {
        let len = mplace.len(self)?;
        let bytes = self.memory.read_bytes(mplace.ptr, Size::from_bytes(len as u64))?;
        let bytes = self.memory.read_bytes(mplace.ptr, Size::from_bytes(len))?;
        let str = ::std::str::from_utf8(bytes)
            .map_err(|err| err_ub_format!("this string is not valid UTF-8: {}", err))?;
        Ok(str)
@ -351,7 +351,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    pub fn operand_field(
        &self,
        op: OpTy<'tcx, M::PointerTag>,
        field: u64,
        field: usize,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        let base = match op.try_as_mplace(self) {
            Ok(mplace) => {
@ -362,7 +362,6 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
            Err(value) => value,
        };

        let field = field.try_into().unwrap();
        let field_layout = op.layout.field(self, field)?;
        if field_layout.is_zst() {
            let immediate = Scalar::zst().into();
@ -384,6 +383,21 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
        Ok(OpTy { op: Operand::Immediate(immediate), layout: field_layout })
    }

    pub fn operand_index(
        &self,
        op: OpTy<'tcx, M::PointerTag>,
        index: u64,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        if let Ok(index) = usize::try_from(index) {
            // We can just treat this as a field.
            self.operand_field(op, index)
        } else {
            // Indexing into a big array. This must be an mplace.
            let mplace = op.assert_mem_place(self);
            Ok(self.mplace_index(mplace, index)?.into())
        }
    }

    pub fn operand_downcast(
        &self,
        op: OpTy<'tcx, M::PointerTag>,
@ -406,7 +420,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        use rustc::mir::ProjectionElem::*;
        Ok(match *proj_elem {
            Field(field, _) => self.operand_field(base, field.index() as u64)?,
            Field(field, _) => self.operand_field(base, field.index())?,
            Downcast(_, variant) => self.operand_downcast(base, variant)?,
            Deref => self.deref_operand(base)?.into(),
            Subslice { .. } | ConstantIndex { .. } | Index(_) => {
@ -556,11 +570,11 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
            // where none should happen.
            let ptr = Pointer::new(
                self.tcx.alloc_map.lock().create_memory_alloc(data),
                Size::from_bytes(start as u64), // offset: `start`
                Size::from_bytes(start), // offset: `start`
            );
            Operand::Immediate(Immediate::new_slice(
                self.tag_global_base_pointer(ptr).into(),
                (end - start) as u64, // len: `end - start`
                u64::try_from(end.checked_sub(start).unwrap()).unwrap(), // len: `end - start`
                self,
            ))
        }
@ -581,7 +595,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                .layout
                .ty
                .discriminant_for_variant(*self.tcx, index)
                .map_or(index.as_u32() as u128, |discr| discr.val);
                .map_or(u128::from(index.as_u32()), |discr| discr.val);
            return Ok((discr_val, index));
        }
        layout::Variants::Multiple {
@ -593,7 +607,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
        };

        // read raw discriminant value
        let discr_op = self.operand_field(rval, discr_index as u64)?;
        let discr_op = self.operand_field(rval, discr_index)?;
        let discr_val = self.read_immediate(discr_op)?;
        let raw_discr = discr_val.to_scalar_or_undef();
        trace!("discr value: {:?}", raw_discr);
@ -657,7 +671,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                if !ptr_valid {
                    throw_ub!(InvalidDiscriminant(raw_discr.erase_tag().into()))
                }
                (dataful_variant.as_u32() as u128, dataful_variant)
                (u128::from(dataful_variant.as_u32()), dataful_variant)
            }
            Ok(raw_discr) => {
                // We need to use machine arithmetic to get the relative variant idx:
@ -686,7 +700,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                        .expect("tagged layout for non adt")
                        .variants
                        .len();
                    assert!((variant_index as usize) < variants_len);
                    assert!(usize::try_from(variant_index).unwrap() < variants_len);
                    (u128::from(variant_index), VariantIdx::from_u32(variant_index))
                } else {
                    (u128::from(dataful_variant.as_u32()), dataful_variant)

@ -1,3 +1,5 @@
use std::convert::TryFrom;

use rustc::mir;
use rustc::mir::interpret::{InterpResult, Scalar};
use rustc::ty::{
@ -130,28 +132,27 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
        // Shift ops can have an RHS with a different numeric type.
        if bin_op == Shl || bin_op == Shr {
            let signed = left_layout.abi.is_signed();
            let mut oflo = (r as u32 as u128) != r;
            let mut r = r as u32;
            let size = left_layout.size;
            oflo |= r >= size.bits() as u32;
            r %= size.bits() as u32;
            let size = u128::from(left_layout.size.bits());
            let overflow = r >= size;
            let r = r % size; // mask to type size
            let r = u32::try_from(r).unwrap(); // we masked so this will always fit
            let result = if signed {
                let l = self.sign_extend(l, left_layout) as i128;
                let result = match bin_op {
                    Shl => l << r,
                    Shr => l >> r,
                    Shl => l.checked_shl(r).unwrap(),
                    Shr => l.checked_shr(r).unwrap(),
                    _ => bug!("it has already been checked that this is a shift op"),
                };
                result as u128
            } else {
                match bin_op {
                    Shl => l << r,
                    Shr => l >> r,
                    Shl => l.checked_shl(r).unwrap(),
                    Shr => l.checked_shr(r).unwrap(),
                    _ => bug!("it has already been checked that this is a shift op"),
                }
            };
            let truncated = self.truncate(result, left_layout);
            return Ok((Scalar::from_uint(truncated, size), oflo, left_layout.ty));
            return Ok((Scalar::from_uint(truncated, left_layout.size), overflow, left_layout.ty));
        }

        // For the remaining ops, the types must be the same on both sides
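Because the RHS is masked to the bit width first, the `checked_shl`/`checked_shr` calls can only fail if that masking is wrong, so the `unwrap` doubles as an internal assertion. A standalone sketch of the overflow bookkeeping, assuming a `u8` left operand:

```rust
use std::convert::TryFrom;

// The overflow flag records whether the RHS was out of range; the RHS is
// then masked to the bit width, after which `checked_shl` cannot fail.
fn shl_with_overflow(l: u8, r: u128) -> (u8, bool) {
    let size = 8u128;
    let overflow = r >= size;
    let r = u32::try_from(r % size).unwrap(); // masked, so this always fits
    (l.checked_shl(r).unwrap(), overflow)
}

fn main() {
    assert_eq!(shl_with_overflow(0b0000_0001, 3), (0b0000_1000, false));
    assert_eq!(shl_with_overflow(0b0000_0001, 9), (0b0000_0010, true)); // 9 % 8 == 1
}
```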
@ -193,7 +194,6 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
            _ => None,
        };
        if let Some(op) = op {
            let l128 = self.sign_extend(l, left_layout) as i128;
            let r = self.sign_extend(r, right_layout) as i128;
            // We need a special check for overflowing remainder:
            // "int_min % -1" overflows and returns 0, but after casting things to a larger int
@ -206,8 +206,9 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                }
                _ => {}
            }
            let l = self.sign_extend(l, left_layout) as i128;

            let (result, oflo) = op(l128, r);
            let (result, oflo) = op(l, r);
            // This may be out-of-bounds for the result type, so we have to truncate ourselves.
            // If that truncation loses any information, we have an overflow.
            let result = result as u128;
@ -385,43 +385,20 @@ where
        Ok(place)
    }

    /// Offset a pointer to project to a field. Unlike `place_field`, this is always
    /// possible without allocating, so it can take `&self`. Also return the field's layout.
    /// Offset a pointer to project to a field of a struct/union. Unlike `place_field`, this is
    /// always possible without allocating, so it can take `&self`. Also return the field's layout.
    /// This supports both struct and array fields.
    ///
    /// This also works for arrays, but then the `usize` index type is restricting.
    /// For indexing into arrays, use `mplace_index`.
    #[inline(always)]
    pub fn mplace_field(
        &self,
        base: MPlaceTy<'tcx, M::PointerTag>,
        field: u64,
        field: usize,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
        // Not using the layout method because we want to compute on u64
        let offset = match base.layout.fields {
            layout::FieldPlacement::Arbitrary { ref offsets, .. } => {
                offsets[usize::try_from(field).unwrap()]
            }
            layout::FieldPlacement::Array { stride, .. } => {
                let len = base.len(self)?;
                if field >= len {
                    // This can only be reached in ConstProp and non-rustc-MIR.
                    throw_ub!(BoundsCheckFailed { len, index: field });
                }
                stride * field
            }
            layout::FieldPlacement::Union(count) => {
                assert!(
                    field < count as u64,
                    "Tried to access field {} of union {:#?} with {} fields",
                    field,
                    base.layout,
                    count
                );
                // Offset is always 0
                Size::from_bytes(0)
            }
        };
        // the only way conversion can fail if is this is an array (otherwise we already panicked
        // above). In that case, all fields are equal.
        let field_layout = base.layout.field(self, usize::try_from(field).unwrap_or(0))?;
        let offset = base.layout.fields.offset(field);
        let field_layout = base.layout.field(self, field)?;

        // Offset may need adjustment for unsized fields.
        let (meta, offset) = if field_layout.is_unsized() {
@ -451,6 +428,32 @@ where
        base.offset(offset, meta, field_layout, self)
    }

    /// Index into an array.
    #[inline(always)]
    pub fn mplace_index(
        &self,
        base: MPlaceTy<'tcx, M::PointerTag>,
        index: u64,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
        // Not using the layout method because we want to compute on u64
        match base.layout.fields {
            layout::FieldPlacement::Array { stride, .. } => {
                let len = base.len(self)?;
                if index >= len {
                    // This can only be reached in ConstProp and non-rustc-MIR.
                    throw_ub!(BoundsCheckFailed { len, index });
                }
                let offset = stride * index; // `Size` multiplication
                // All fields have the same layout.
                let field_layout = base.layout.field(self, 0)?;

                assert!(!field_layout.is_unsized());
                base.offset(offset, MemPlaceMeta::None, field_layout, self)
            }
            _ => bug!("`mplace_index` called on non-array type {:?}", base.layout.ty),
        }
    }

    // Iterates over all fields of an array. Much more efficient than doing the
    // same by repeatedly calling `mplace_array`.
    pub(super) fn mplace_array_fields(
@ -465,7 +468,8 @@ where
        };
        let layout = base.layout.field(self, 0)?;
        let dl = &self.tcx.data_layout;
        Ok((0..len).map(move |i| base.offset(i * stride, MemPlaceMeta::None, layout, dl)))
        // `Size` multiplication
        Ok((0..len).map(move |i| base.offset(stride * i, MemPlaceMeta::None, layout, dl)))
    }

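The recurring "`Size` multiplication" comments flag spots that rely on `Size`'s arithmetic being overflow-checked rather than wrapping. A minimal stand-in, assuming (as the `Size` changes later in this commit suggest) that its `Mul` is implemented with `checked_mul`:

```rust
use std::ops::Mul;

// Stand-in for the interpreter's `Size` newtype: multiplication panics on
// overflow instead of silently wrapping, so `stride * index` is an
// assertion as well as an offset computation.
#[derive(Copy, Clone, Debug, PartialEq)]
struct Size {
    raw: u64,
}

impl Mul<u64> for Size {
    type Output = Size;
    fn mul(self, other: u64) -> Size {
        Size { raw: self.raw.checked_mul(other).expect("`Size` multiplication overflow") }
    }
}

fn main() {
    let stride = Size { raw: 8 };
    assert_eq!(stride * 4, Size { raw: 32 });
    // `stride * u64::MAX` would panic rather than wrap around.
}
```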
    fn mplace_subslice(
@ -477,11 +481,11 @@ where
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
        let len = base.len(self)?; // also asserts that we have a type where this makes sense
        let actual_to = if from_end {
            if from + to > len {
            if from.checked_add(to).map_or(true, |to| to > len) {
                // This can only be reached in ConstProp and non-rustc-MIR.
                throw_ub!(BoundsCheckFailed { len: len as u64, index: from as u64 + to as u64 });
                throw_ub!(BoundsCheckFailed { len: len, index: from.saturating_add(to) });
            }
            len - to
            len.checked_sub(to).unwrap()
        } else {
            to
        };
@ -489,12 +493,12 @@ where
        // Not using layout method because that works with usize, and does not work with slices
        // (that have count 0 in their layout).
        let from_offset = match base.layout.fields {
            layout::FieldPlacement::Array { stride, .. } => stride * from,
            layout::FieldPlacement::Array { stride, .. } => stride * from, // `Size` multiplication is checked
            _ => bug!("Unexpected layout of index access: {:#?}", base.layout),
        };

        // Compute meta and new layout
        let inner_len = actual_to - from;
        let inner_len = actual_to.checked_sub(from).unwrap();
        let (meta, ty) = match base.layout.ty.kind {
            // It is not nice to match on the type, but that seems to be the only way to
            // implement this.
@ -527,7 +531,7 @@ where
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
        use rustc::mir::ProjectionElem::*;
        Ok(match *proj_elem {
            Field(field, _) => self.mplace_field(base, field.index() as u64)?,
            Field(field, _) => self.mplace_field(base, field.index())?,
            Downcast(_, variant) => self.mplace_downcast(base, variant)?,
            Deref => self.deref_operand(base.into())?,

@ -535,26 +539,29 @@ where
                let layout = self.layout_of(self.tcx.types.usize)?;
                let n = self.access_local(self.frame(), local, Some(layout))?;
                let n = self.read_scalar(n)?;
                let n = self.force_bits(n.not_undef()?, self.tcx.data_layout.pointer_size)?;
                self.mplace_field(base, u64::try_from(n).unwrap())?
                let n = u64::try_from(
                    self.force_bits(n.not_undef()?, self.tcx.data_layout.pointer_size)?,
                )
                .unwrap();
                self.mplace_index(base, n)?
            }

            ConstantIndex { offset, min_length, from_end } => {
                let n = base.len(self)?;
                if n < min_length as u64 {
                if n < u64::from(min_length) {
                    // This can only be reached in ConstProp and non-rustc-MIR.
                    throw_ub!(BoundsCheckFailed { len: min_length as u64, index: n as u64 });
                    throw_ub!(BoundsCheckFailed { len: min_length.into(), index: n.into() });
                }

                let index = if from_end {
                    assert!(0 < offset && offset - 1 < min_length);
                    n - u64::from(offset)
                    assert!(0 < offset && offset <= min_length);
                    n.checked_sub(u64::from(offset)).unwrap()
                } else {
                    assert!(offset < min_length);
                    u64::from(offset)
                };

                self.mplace_field(base, index)?
                self.mplace_index(base, index)?
            }

            Subslice { from, to, from_end } => {
@ -570,7 +577,7 @@ where
    pub fn place_field(
        &mut self,
        base: PlaceTy<'tcx, M::PointerTag>,
        field: u64,
        field: usize,
    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
        // FIXME: We could try to be smarter and avoid allocation for fields that span the
        // entire place.
@ -578,6 +585,15 @@ where
        Ok(self.mplace_field(mplace, field)?.into())
    }

    pub fn place_index(
        &mut self,
        base: PlaceTy<'tcx, M::PointerTag>,
        index: u64,
    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
        let mplace = self.force_allocation(base)?;
        Ok(self.mplace_index(mplace, index)?.into())
    }

    pub fn place_downcast(
        &self,
        base: PlaceTy<'tcx, M::PointerTag>,
@ -603,7 +619,7 @@ where
    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
        use rustc::mir::ProjectionElem::*;
        Ok(match *proj_elem {
            Field(field, _) => self.place_field(base, field.index() as u64)?,
            Field(field, _) => self.place_field(base, field.index())?,
            Downcast(_, variant) => self.place_downcast(base, variant)?,
            Deref => self.deref_operand(self.place_to_op(base)?)?.into(),
            // For the other variants, we have to force an allocation.
@ -723,7 +739,7 @@ where
            ),
            Immediate::Scalar(ScalarMaybeUndef::Scalar(Scalar::Raw { size, .. })) => {
                assert_eq!(
                    Size::from_bytes(size.into()),
                    Size::from_bytes(size),
                    dest.layout.size,
                    "Size mismatch when writing bits"
                )
@ -1028,7 +1044,7 @@ where
        kind: MemoryKind<M::MemoryKind>,
    ) -> MPlaceTy<'tcx, M::PointerTag> {
        let ptr = self.memory.allocate_bytes(str.as_bytes(), kind);
        let meta = Scalar::from_uint(str.len() as u128, self.pointer_size());
        let meta = Scalar::from_uint(u128::try_from(str.len()).unwrap(), self.pointer_size());
        let mplace = MemPlace {
            ptr: ptr.into(),
            align: Align::from_bytes(1).unwrap(),
@ -1072,7 +1088,7 @@ where
                let size = discr_layout.value.size(self);
                let discr_val = truncate(discr_val, size);

                let discr_dest = self.place_field(dest, discr_index as u64)?;
                let discr_dest = self.place_field(dest, discr_index)?;
                self.write_scalar(Scalar::from_uint(discr_val, size), discr_dest)?;
            }
            layout::Variants::Multiple {
@ -1103,7 +1119,7 @@ where
                    niche_start_val,
                )?;
                // Write result.
                let niche_dest = self.place_field(dest, discr_index as u64)?;
                let niche_dest = self.place_field(dest, discr_index)?;
                self.write_immediate(*discr_val, niche_dest)?;
            }
        }

@ -192,7 +192,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                // Ignore zero-sized fields.
                if !op.layout.is_zst() {
                    let field_index = active_field_index.unwrap_or(i);
                    let field_dest = self.place_field(dest, field_index as u64)?;
                    let field_dest = self.place_field(dest, field_index)?;
                    self.copy_op(op, field_dest)?;
                }
            }

@ -1,4 +1,5 @@
use std::borrow::Cow;
use std::convert::TryFrom;

use rustc::ty::layout::{self, LayoutOf, TyLayout};
use rustc::ty::Instance;
@ -29,6 +30,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
        trace!("SwitchInt({:?})", *discr);

        // Branch to the `otherwise` case by default, if no match is found.
        assert!(targets.len() > 0);
        let mut target_block = targets[targets.len() - 1];

        for (index, &const_int) in values.iter().enumerate() {
@ -307,7 +309,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                    .map(|&a| Ok(a))
                    .chain(
                        (0..untuple_arg.layout.fields.count())
                            .map(|i| self.operand_field(untuple_arg, i as u64)),
                            .map(|i| self.operand_field(untuple_arg, i)),
                    )
                    .collect::<InterpResult<'_, Vec<OpTy<'tcx, M::PointerTag>>>>(
                    )?,
@ -330,7 +332,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                if Some(local) == body.spread_arg {
                    // Must be a tuple
                    for i in 0..dest.layout.fields.count() {
                        let dest = self.place_field(dest, i as u64)?;
                        let dest = self.place_field(dest, i)?;
                        self.pass_argument(rust_abi, &mut caller_iter, dest)?;
                    }
                } else {
@ -392,7 +394,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
        };
        // Find and consult vtable
        let vtable = receiver_place.vtable();
        let drop_fn = self.get_vtable_slot(vtable, idx)?;
        let drop_fn = self.get_vtable_slot(vtable, u64::try_from(idx).unwrap())?;

        // `*mut receiver_place.layout.ty` is almost the layout that we
        // want for args[0]: We have to project to field 0 because we want

@ -1,9 +1,11 @@
use super::{FnVal, InterpCx, Machine, MemoryKind};
use std::convert::TryFrom;

use rustc::mir::interpret::{InterpResult, Pointer, PointerArithmetic, Scalar};
use rustc::ty::layout::{Align, HasDataLayout, LayoutOf, Size};
use rustc::ty::{self, Instance, Ty, TypeFoldable};

use super::{FnVal, InterpCx, Machine, MemoryKind};

impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    /// Creates a dynamic vtable for the given type and vtable origin. This is used only for
    /// objects.
@ -54,7 +56,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
        // `get_vtable` in `rust_codegen_llvm/meth.rs`.
        // /////////////////////////////////////////////////////////////////////////////////////////
        let vtable = self.memory.allocate(
            ptr_size * (3 + methods.len() as u64),
            ptr_size * u64::try_from(methods.len()).unwrap().checked_add(3).unwrap(),
            ptr_align,
            MemoryKind::Vtable,
        );
@ -103,11 +105,11 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    pub fn get_vtable_slot(
        &self,
        vtable: Scalar<M::PointerTag>,
        idx: usize,
        idx: u64,
    ) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
        let ptr_size = self.pointer_size();
        // Skip over the 'drop_ptr', 'size', and 'align' fields.
        let vtable_slot = vtable.ptr_offset(ptr_size * (idx as u64 + 3), self)?;
        let vtable_slot = vtable.ptr_offset(ptr_size * idx.checked_add(3).unwrap(), self)?;
        let vtable_slot = self
            .memory
            .check_ptr_access(vtable_slot, ptr_size, self.tcx.data_layout.pointer_align.abi)?
@ -169,10 +171,10 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
            .expect("cannot be a ZST");
        let alloc = self.memory.get_raw(vtable.alloc_id)?;
        let size = alloc.read_ptr_sized(self, vtable.offset(pointer_size, self)?)?.not_undef()?;
        let size = self.force_bits(size, pointer_size)? as u64;
        let size = u64::try_from(self.force_bits(size, pointer_size)?).unwrap();
        let align =
            alloc.read_ptr_sized(self, vtable.offset(pointer_size * 2, self)?)?.not_undef()?;
        let align = self.force_bits(align, pointer_size)? as u64;
        let align = u64::try_from(self.force_bits(align, pointer_size)?).unwrap();

        if size >= self.tcx.data_layout().obj_size_bound() {
            throw_ub_format!(
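The slot arithmetic in `get_vtable_slot` is easy to check by hand. A sketch in plain integers, assuming the usual layout of three pointer-sized header slots (drop_in_place, size, align) ahead of the method pointers:

```rust
// Byte offset of method `idx`: both the add and the multiply are checked,
// mirroring the checked `Size` arithmetic used in the diff above.
fn vtable_slot_offset(ptr_size: u64, idx: u64) -> u64 {
    ptr_size.checked_mul(idx.checked_add(3).unwrap()).unwrap()
}

fn main() {
    // On an assumed 64-bit target (8-byte pointers), method 0 is at byte 24.
    assert_eq!(vtable_slot_offset(8, 0), 24);
    assert_eq!(vtable_slot_offset(8, 2), 40);
}
```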
@ -4,6 +4,7 @@
//! That's useful because it means other passes (e.g. promotion) can rely on `const`s
//! to be const-safe.

use std::convert::TryFrom;
use std::fmt::Write;
use std::ops::RangeInclusive;

@ -746,7 +747,7 @@ impl<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
                }
                // This is the element type size.
                let layout = self.ecx.layout_of(tys)?;
                // This is the size in bytes of the whole array.
                // This is the size in bytes of the whole array. (This checks for overflow.)
                let size = layout.size * len;
                // Size is not 0, get a pointer.
                let ptr = self.ecx.force_ptr(mplace.ptr)?;
@ -777,7 +778,8 @@ impl<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
                            // Some byte was undefined, determine which
                            // element that byte belongs to so we can
                            // provide an index.
                            let i = (ptr.offset.bytes() / layout.size.bytes()) as usize;
                            let i = usize::try_from(ptr.offset.bytes() / layout.size.bytes())
                                .unwrap();
                            self.path.push(PathElem::ArrayElem(i));

                            throw_validation_failure!("undefined bytes", self.path)

@ -28,7 +28,8 @@ pub trait Value<'mir, 'tcx, M: Machine<'mir, 'tcx>>: Copy {
    ) -> InterpResult<'tcx, Self>;

    /// Projects to the n-th field.
    fn project_field(self, ecx: &InterpCx<'mir, 'tcx, M>, field: u64) -> InterpResult<'tcx, Self>;
    fn project_field(self, ecx: &InterpCx<'mir, 'tcx, M>, field: usize)
        -> InterpResult<'tcx, Self>;
}

// Operands and memory-places are both values.
@ -62,7 +63,11 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Value<'mir, 'tcx, M> for OpTy<'tcx, M::
    }

    #[inline(always)]
    fn project_field(self, ecx: &InterpCx<'mir, 'tcx, M>, field: u64) -> InterpResult<'tcx, Self> {
    fn project_field(
        self,
        ecx: &InterpCx<'mir, 'tcx, M>,
        field: usize,
    ) -> InterpResult<'tcx, Self> {
        ecx.operand_field(self, field)
    }
}
@ -96,7 +101,11 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Value<'mir, 'tcx, M> for MPlaceTy<'tcx,
    }

    #[inline(always)]
    fn project_field(self, ecx: &InterpCx<'mir, 'tcx, M>, field: u64) -> InterpResult<'tcx, Self> {
    fn project_field(
        self,
        ecx: &InterpCx<'mir, 'tcx, M>,
        field: usize,
    ) -> InterpResult<'tcx, Self> {
        ecx.mplace_field(self, field)
    }
}
@ -206,7 +215,7 @@ macro_rules! make_value_visitor {
                        // errors: Projecting to a field needs access to `ecx`.
                        let fields: Vec<InterpResult<'tcx, Self::V>> =
                            (0..offsets.len()).map(|i| {
                                v.project_field(self.ecx(), i as u64)
                                v.project_field(self.ecx(), i)
                            })
                            .collect();
                        self.visit_aggregate(v, fields.into_iter())?;

@ -1920,8 +1920,8 @@ fn slice_pat_covered_by_const<'tcx>(
        }
        (ConstValue::Slice { data, start, end }, ty::Slice(t)) => {
            assert_eq!(*t, tcx.types.u8);
            let ptr = Pointer::new(AllocId(0), Size::from_bytes(start as u64));
            data.get_bytes(&tcx, ptr, Size::from_bytes((end - start) as u64)).unwrap()
            let ptr = Pointer::new(AllocId(0), Size::from_bytes(start));
            data.get_bytes(&tcx, ptr, Size::from_bytes(end - start)).unwrap()
        }
        // FIXME(oli-obk): create a way to extract fat pointers from ByRef
        (_, ty::Slice(_)) => return Ok(false),
@ -2375,7 +2375,7 @@ fn specialize_one_pattern<'p, 'tcx>(
        ty::Slice(t) => {
            match value.val {
                ty::ConstKind::Value(ConstValue::Slice { data, start, end }) => {
                    let offset = Size::from_bytes(start as u64);
                    let offset = Size::from_bytes(start);
                    let n = (end - start) as u64;
                    (Cow::Borrowed(data), offset, n, t)
                }

@ -150,7 +150,7 @@ pub struct PerfStats {
    /// Total number of values canonicalized queries constructed.
    pub queries_canonicalized: AtomicUsize,
    /// Number of times this query is invoked.
    pub normalize_ty_after_erasing_regions: AtomicUsize,
    pub normalize_generic_arg_after_erasing_regions: AtomicUsize,
    /// Number of times this query is invoked.
    pub normalize_projection_ty: AtomicUsize,
}
@ -707,8 +707,8 @@ impl Session {
            self.perf_stats.queries_canonicalized.load(Ordering::Relaxed)
        );
        println!(
            "normalize_ty_after_erasing_regions: {}",
            self.perf_stats.normalize_ty_after_erasing_regions.load(Ordering::Relaxed)
            "normalize_generic_arg_after_erasing_regions: {}",
            self.perf_stats.normalize_generic_arg_after_erasing_regions.load(Ordering::Relaxed)
        );
        println!(
            "normalize_projection_ty: {}",
@ -1080,7 +1080,7 @@ fn build_session_(
            symbol_hash_time: Lock::new(Duration::from_secs(0)),
            decode_def_path_tables_time: Lock::new(Duration::from_secs(0)),
            queries_canonicalized: AtomicUsize::new(0),
            normalize_ty_after_erasing_regions: AtomicUsize::new(0),
            normalize_generic_arg_after_erasing_regions: AtomicUsize::new(0),
            normalize_projection_ty: AtomicUsize::new(0),
        },
        code_stats: Default::default(),

@ -3,6 +3,7 @@ pub use Primitive::*;

use crate::spec::Target;

use std::convert::{TryFrom, TryInto};
use std::ops::{Add, AddAssign, Deref, Mul, Range, RangeInclusive, Sub};

use rustc_index::vec::{Idx, IndexVec};
@ -240,17 +241,18 @@ pub struct Size {
}

impl Size {
    pub const ZERO: Size = Self::from_bytes(0);
    pub const ZERO: Size = Size { raw: 0 };

    #[inline]
    pub fn from_bits(bits: u64) -> Size {
    pub fn from_bits(bits: impl TryInto<u64>) -> Size {
        let bits = bits.try_into().ok().unwrap();
        // Avoid potential overflow from `bits + 7`.
        Size::from_bytes(bits / 8 + ((bits % 8) + 7) / 8)
    }

    #[inline]
    pub const fn from_bytes(bytes: u64) -> Size {
        Size { raw: bytes }
    pub fn from_bytes(bytes: impl TryInto<u64>) -> Size {
        Size { raw: bytes.try_into().ok().unwrap() }
    }

    #[inline]
@ -258,6 +260,11 @@ impl Size {
        self.raw
    }

    #[inline]
    pub fn bytes_usize(self) -> usize {
        self.bytes().try_into().unwrap()
    }

    #[inline]
    pub fn bits(self) -> u64 {
        self.bytes().checked_mul(8).unwrap_or_else(|| {
@ -265,6 +272,11 @@ impl Size {
        })
    }

    #[inline]
    pub fn bits_usize(self) -> usize {
        self.bits().try_into().unwrap()
    }

    #[inline]
    pub fn align_to(self, align: Align) -> Size {
        let mask = align.bytes() - 1;
@ -665,7 +677,7 @@ impl FieldPlacement {
                Size::ZERO
            }
            FieldPlacement::Array { stride, count } => {
                let i = i as u64;
                let i = u64::try_from(i).unwrap();
                assert!(i < count);
                stride * i
            }

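The widened `impl TryInto<u64>` signatures are what let callers elsewhere in this commit pass `usize` lengths to `Size::from_bytes` without an `as` cast. A standalone sketch of the pattern:

```rust
use std::convert::TryInto;

// Stand-in for the widened constructor: any integer type convertible to
// u64 is accepted, and a failed conversion (e.g. a negative value)
// panics instead of silently wrapping.
#[derive(Copy, Clone, Debug, PartialEq)]
struct Size {
    raw: u64,
}

impl Size {
    fn from_bytes(bytes: impl TryInto<u64>) -> Size {
        // `.ok().unwrap()` sidesteps the `Debug` bound on the error type.
        Size { raw: bytes.try_into().ok().unwrap() }
    }
}

fn main() {
    assert_eq!(Size::from_bytes(16usize), Size::from_bytes(16u64));
    // Size::from_bytes(-1i64) would panic here.
}
```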
@ -387,6 +387,7 @@ impl<'a, 'b, 'tcx> TypeFolder<'tcx> for AssocTypeNormalizer<'a, 'b, 'tcx> {
    }

    fn fold_const(&mut self, constant: &'tcx ty::Const<'tcx>) -> &'tcx ty::Const<'tcx> {
        let constant = constant.super_fold_with(self);
        constant.eval(self.selcx.tcx(), self.param_env)
    }
}

@ -191,6 +191,7 @@ impl<'cx, 'tcx> TypeFolder<'tcx> for QueryNormalizer<'cx, 'tcx> {
    }

    fn fold_const(&mut self, constant: &'tcx ty::Const<'tcx>) -> &'tcx ty::Const<'tcx> {
        let constant = constant.super_fold_with(self);
        constant.eval(self.infcx.tcx, self.param_env)
    }
}

@ -1,23 +1,24 @@
use rustc::traits::query::NoSolution;
use rustc::ty::query::Providers;
use rustc::ty::{self, ParamEnvAnd, Ty, TyCtxt};
use rustc::ty::subst::GenericArg;
use rustc::ty::{self, ParamEnvAnd, TyCtxt};
use rustc_infer::infer::TyCtxtInferExt;
use rustc_trait_selection::traits::query::normalize::AtExt;
use rustc_trait_selection::traits::{Normalized, ObligationCause};
use std::sync::atomic::Ordering;

crate fn provide(p: &mut Providers<'_>) {
    *p = Providers { normalize_ty_after_erasing_regions, ..*p };
    *p = Providers { normalize_generic_arg_after_erasing_regions, ..*p };
}

fn normalize_ty_after_erasing_regions<'tcx>(
fn normalize_generic_arg_after_erasing_regions<'tcx>(
    tcx: TyCtxt<'tcx>,
    goal: ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Ty<'tcx> {
    debug!("normalize_ty_after_erasing_regions(goal={:#?})", goal);
    goal: ParamEnvAnd<'tcx, GenericArg<'tcx>>,
) -> GenericArg<'tcx> {
    debug!("normalize_generic_arg_after_erasing_regions(goal={:#?})", goal);

    let ParamEnvAnd { param_env, value } = goal;
    tcx.sess.perf_stats.normalize_ty_after_erasing_regions.fetch_add(1, Ordering::Relaxed);
    tcx.sess.perf_stats.normalize_generic_arg_after_erasing_regions.fetch_add(1, Ordering::Relaxed);
    tcx.infer_ctxt().enter(|infcx| {
        let cause = ObligationCause::dummy();
        match infcx.at(&cause, param_env).normalize(&value) {

@ -1,6 +1,7 @@
// ignore-tidy-linelength
// ignore-wasm32-bare compiled with panic=abort by default
// compile-flags: -Z mir-opt-level=3
// only-64bit FIXME: the mir representation of RawVec depends on ptr size
#![feature(box_syntax)]

fn main() {
@ -55,7 +56,7 @@ fn main() {
// StorageLive(_2);
// _2 = Box(std::vec::Vec<u32>);
// _4 = &mut (*_2);
// ((*_4).0: alloc::raw_vec::RawVec<u32>) = const alloc::raw_vec::RawVec::<u32>::NEW;
// ((*_4).0: alloc::raw_vec::RawVec<u32>) = const ByRef { alloc: Allocation { bytes: [4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], relocations: Relocations(SortedMap { data: [] }), undef_mask: UndefMask { blocks: [65535], len: Size { raw: 16 } }, size: Size { raw: 16 }, align: Align { pow2: 3 }, mutability: Not, extra: () }, offset: Size { raw: 0 } }: alloc::raw_vec::RawVec::<u32>;
// ((*_4).1: usize) = const 0usize;
// _1 = move _2;
// StorageDead(_2);

@ -1,9 +1,9 @@
// build-fail
//~^ ERROR cycle detected when normalizing `<() as Tr>::A`

// Cyclic assoc. const defaults don't error unless *used*
trait Tr {
    const A: u8 = Self::B;
    //~^ ERROR cycle detected when const-evaluating + checking `Tr::A`

    const B: u8 = Self::A;
}

@ -1,30 +1,42 @@
error[E0391]: cycle detected when const-evaluating + checking `Tr::A`
  --> $DIR/defaults-cyclic-fail.rs:5:5
error[E0391]: cycle detected when normalizing `<() as Tr>::A`
   |
note: ...which requires const-evaluating + checking `Tr::A`...
  --> $DIR/defaults-cyclic-fail.rs:6:5
   |
LL |     const A: u8 = Self::B;
   |     ^^^^^^^^^^^^^^^^^^^^^^
   |
note: ...which requires const-evaluating `Tr::A`...
  --> $DIR/defaults-cyclic-fail.rs:5:19
note: ...which requires const-evaluating + checking `Tr::A`...
  --> $DIR/defaults-cyclic-fail.rs:6:5
   |
LL |     const A: u8 = Self::B;
   |                   ^^^^^^^
   |     ^^^^^^^^^^^^^^^^^^^^^^
note: ...which requires const-evaluating `Tr::A`...
  --> $DIR/defaults-cyclic-fail.rs:6:5
   |
LL |     const A: u8 = Self::B;
   |     ^^^^^^^^^^^^^^^^^^^^^^
   = note: ...which requires normalizing `<() as Tr>::B`...
note: ...which requires const-evaluating + checking `Tr::B`...
  --> $DIR/defaults-cyclic-fail.rs:8:5
   |
LL |     const B: u8 = Self::A;
   |     ^^^^^^^^^^^^^^^^^^^^^^
note: ...which requires const-evaluating + checking `Tr::B`...
  --> $DIR/defaults-cyclic-fail.rs:8:5
   |
LL |     const B: u8 = Self::A;
   |     ^^^^^^^^^^^^^^^^^^^^^^
note: ...which requires const-evaluating `Tr::B`...
  --> $DIR/defaults-cyclic-fail.rs:8:19
  --> $DIR/defaults-cyclic-fail.rs:8:5
   |
LL |     const B: u8 = Self::A;
   |                   ^^^^^^^
   = note: ...which again requires const-evaluating + checking `Tr::A`, completing the cycle
   |     ^^^^^^^^^^^^^^^^^^^^^^
   = note: ...which again requires normalizing `<() as Tr>::A`, completing the cycle
note: cycle used when const-evaluating `main`
  --> $DIR/defaults-cyclic-fail.rs:16:16
  --> $DIR/defaults-cyclic-fail.rs:14:1
   |
LL | assert_eq!(<() as Tr>::A, 0);
   |            ^^^^^^^^^^^^^
LL | fn main() {
   | ^^^^^^^^^

error: aborting due to previous error

@ -25,7 +25,7 @@ note: ...which requires const-evaluating + checking `std::intrinsics::size_of`..
LL | pub fn size_of<T>() -> usize;
   | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
   = note: ...which requires computing layout of `Foo`...
   = note: ...which requires normalizing `ParamEnvAnd { param_env: ParamEnv { caller_bounds: [], reveal: All, def_id: None }, value: [u8; _] }`...
   = note: ...which requires normalizing `[u8; _]`...
   = note: ...which again requires const-evaluating + checking `Foo::bytes::{{constant}}#0`, completing the cycle
note: cycle used when processing `Foo`
  --> $DIR/const-size_of-cycle.rs:7:1

@ -18,3 +18,4 @@ LL | #[warn(foo::bar)]

error: aborting due to 3 previous errors

For more information about this error, try `rustc --explain E0710`.

@ -36,3 +36,4 @@ LL | #[allow(foo::bar)]

error: aborting due to 6 previous errors

For more information about this error, try `rustc --explain E0710`.