Auto merge of #45002 - oli-obk:miri, r=eddyb

Validate miri against the HIR const evaluator

r? @eddyb

cc @alexcrichton @arielb1 @RalfJung

The interesting parts are the last few functions in `librustc_const_eval/eval.rs`.

* We warn if miri produces an error while HIR const eval does not.
* We warn if miri produces a value that does not match the value produced by HIR const eval.
* If miri succeeds and HIR const eval fails, nothing is emitted, but we still return the HIR error.
* If both error, nothing is emitted and the HIR const eval error is returned.

So there are no actual changes, except that miri is forced to produce the same values as the old const eval (see the sketch below).
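Concretely, the new `const_eval_provider` (shown in full further down in the diff) compares the two results roughly as follows; the surrounding setup and imports are elided:

    match (miri_result, old_result) {
        // miri errored while HIR const eval succeeded: warn, report the
        // miri error, and still return the HIR value
        ((Err(err), ecx), Ok(ok)) => {
            tcx.sess.span_warn(
                tcx.def_span(key.value.0),
                "miri failed to eval, while ctfe succeeded",
            );
            let () = unwrap_miri(&ecx, Err(err));
            Ok(ok)
        }
        // HIR const eval errored: its error is returned, whatever miri did
        ((_, _), Err(err)) => Err(err),
        // both succeeded: the two values must agree
        ((Ok((miri_val, miri_ty)), mut ecx), Ok(ctfe)) => {
            check_ctfe_against_miri(&mut ecx, miri_val, miri_ty, ctfe.val);
            Ok(ctfe)
        }
    }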

* This does **not** touch the const evaluator in trans at all. That will come in a future PR.
* This does **not** cause any code to compile that didn't compile before. That will also come in the future.

It would be great if someone could start a crater run once Travis passes.
This commit is contained in:
bors 2017-12-14 15:37:39 +00:00
commit 2974104276
40 changed files with 6548 additions and 59 deletions


@ -294,6 +294,7 @@ before_deploy:
cp -r obj/build/dist/* deploy/$TRAVIS_COMMIT;
fi
- travis_retry gem update --system
- ls -la deploy/$TRAVIS_COMMIT
deploy:
- provider: s3

src/Cargo.lock generated (21 changed lines)

@ -164,6 +164,11 @@ dependencies = [
"filetime 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "byteorder"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "cargo"
version = "0.25.0"
@ -1020,6 +1025,14 @@ name = "log"
version = "0.3.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "log_settings"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"lazy_static 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "lzma-sys"
version = "0.1.9"
@ -1598,13 +1611,16 @@ name = "rustc"
version = "0.0.0"
dependencies = [
"arena 0.0.0",
"backtrace 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
"bitflags 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
"byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"flate2 0.2.20 (registry+https://github.com/rust-lang/crates.io-index)",
"fmt_macros 0.0.0",
"graphviz 0.0.0",
"jobserver 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"owning_ref 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc_apfloat 0.0.0",
"rustc_back 0.0.0",
"rustc_const_math 0.0.0",
"rustc_data_structures 0.0.0",
@ -1844,9 +1860,12 @@ name = "rustc_mir"
version = "0.0.0"
dependencies = [
"bitflags 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
"byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"graphviz 0.0.0",
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"log_settings 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc 0.0.0",
"rustc_apfloat 0.0.0",
"rustc_const_eval 0.0.0",
"rustc_const_math 0.0.0",
"rustc_data_structures 0.0.0",
@ -2679,6 +2698,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum bitflags 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4efd02e230a02e18f92fc2735f44597385ed02ad8f831e7c1c1156ee5e1ab3a5"
"checksum bitflags 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b3c30d3802dfb7281680d6285f2ccdaa8c2d8fee41f93805dba5c4cf50dc23cf"
"checksum bufstream 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "f2f382711e76b9de6c744cc00d0497baba02fb00a787f088c879f01d09468e32"
"checksum byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ff81738b726f5d099632ceaffe7fb65b90212e8dce59d518729e7e8634032d3d"
"checksum cargo_metadata 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "be1057b8462184f634c3a208ee35b0f935cfd94b694b26deadccd98732088d7b"
"checksum cargo_metadata 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "1f56ec3e469bca7c276f2eea015aa05c5e381356febdbb0683c2580189604537"
"checksum cc 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a9b13a57efd6b30ecd6598ebdb302cca617930b5470647570468a65d12ef9719"
@ -2749,6 +2769,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum libssh2-sys 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "0db4ec23611747ef772db1c4d650f8bd762f07b461727ec998f953c614024b75"
"checksum libz-sys 1.0.18 (registry+https://github.com/rust-lang/crates.io-index)" = "87f737ad6cc6fd6eefe3d9dc5412f1573865bded441300904d2f42269e140f16"
"checksum log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "880f77541efa6e5cc74e76910c9884d9859683118839d6a1dc3b11e63512565b"
"checksum log_settings 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3d382732ea0fbc09790c4899db3255bdea0fc78b54bf234bd18a63bb603915b6"
"checksum lzma-sys 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "c1b93b78f89e8737dac81837fc8f5521ac162abcba902e1a3db949d55346d1da"
"checksum mac 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c41e0c4fef86961ac6d6f8a82609f55f31b05e4fce149ac5710e439df7619ba4"
"checksum markup5ever 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "047150a0e03b57e638fc45af33a0b63a0362305d5b9f92ecef81df472a4cceb0"


@ -246,6 +246,9 @@ fn main() {
// When running miri tests, we need to generate MIR for all libraries
if env::var("TEST_MIRI").ok().map_or(false, |val| val == "true") {
cmd.arg("-Zalways-encode-mir");
if stage != "0" {
cmd.arg("-Zmiri");
}
cmd.arg("-Zmir-emit-validate=1");
}


@ -499,9 +499,10 @@ impl<'a> Builder<'a> {
if mode != Mode::Tool {
// Tools don't get debuginfo right now, e.g. cargo and rls don't
// get compiled with debuginfo.
cargo.env("RUSTC_DEBUGINFO", self.config.rust_debuginfo.to_string())
.env("RUSTC_DEBUGINFO_LINES", self.config.rust_debuginfo_lines.to_string())
.env("RUSTC_FORCE_UNSTABLE", "1");
// Adding debuginfo increases their sizes by a factor of 3-4.
cargo.env("RUSTC_DEBUGINFO", self.config.rust_debuginfo.to_string());
cargo.env("RUSTC_DEBUGINFO_LINES", self.config.rust_debuginfo_lines.to_string());
cargo.env("RUSTC_FORCE_UNSTABLE", "1");
// Currently the compiler depends on crates from crates.io, and
// then other crates can depend on the compiler (e.g. proc-macro


@ -321,6 +321,7 @@ impl Step for Rustfmt {
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Miri {
stage: u32,
host: Interned<String>,
}
@ -336,6 +337,7 @@ impl Step for Miri {
fn make_run(run: RunConfig) {
run.builder.ensure(Miri {
stage: run.builder.top_stage,
host: run.target,
});
}
@ -343,8 +345,9 @@ impl Step for Miri {
/// Runs `cargo test` for miri.
fn run(self, builder: &Builder) {
let build = builder.build;
let stage = self.stage;
let host = self.host;
let compiler = builder.compiler(1, host);
let compiler = builder.compiler(stage, host);
if let Some(miri) = builder.ensure(tool::Miri { compiler, target: self.host }) {
let mut cargo = builder.cargo(compiler, Mode::Tool, host, "test");
@ -766,6 +769,7 @@ impl Step for Compiletest {
if build.config.rust_debuginfo_tests {
flags.push("-g".to_string());
}
flags.push("-Zmiri -Zunstable-options".to_string());
if let Some(linker) = build.linker(target) {
cmd.arg("--linker").arg(linker);


@ -16,6 +16,7 @@ graphviz = { path = "../libgraphviz" }
jobserver = "0.1"
log = "0.3"
owning_ref = "0.3.3"
rustc_apfloat = { path = "../librustc_apfloat" }
rustc_back = { path = "../librustc_back" }
rustc_const_math = { path = "../librustc_const_math" }
rustc_data_structures = { path = "../librustc_data_structures" }
@ -23,6 +24,9 @@ rustc_errors = { path = "../librustc_errors" }
serialize = { path = "../libserialize" }
syntax = { path = "../libsyntax" }
syntax_pos = { path = "../libsyntax_pos" }
backtrace = "0.3.3"
byteorder = { version = "1.1", features = ["i128"]}
# Note that these dependencies are a lie, they're just here to get linkage to
# work.


@ -64,6 +64,7 @@
#![feature(unboxed_closures)]
#![feature(underscore_lifetimes)]
#![feature(trace_macros)]
#![feature(catch_expr)]
#![feature(test)]
#![recursion_limit="512"]
@ -89,6 +90,10 @@ extern crate jobserver;
extern crate serialize as rustc_serialize; // used by deriving
extern crate rustc_apfloat;
extern crate byteorder;
extern crate backtrace;
// Note that librustc doesn't actually depend on these crates, see the note in
// `Cargo.toml` for this crate about why these are here.
#[allow(unused_extern_crates)]


@ -0,0 +1,322 @@
use std::error::Error;
use std::{fmt, env};
use mir;
use ty::{FnSig, Ty, layout};
use super::{
MemoryPointer, Lock, AccessKind
};
use rustc_const_math::ConstMathErr;
use syntax::codemap::Span;
use backtrace::Backtrace;
#[derive(Debug)]
pub struct EvalError<'tcx> {
pub kind: EvalErrorKind<'tcx>,
pub backtrace: Option<Backtrace>,
}
impl<'tcx> From<EvalErrorKind<'tcx>> for EvalError<'tcx> {
fn from(kind: EvalErrorKind<'tcx>) -> Self {
let backtrace = match env::var("RUST_BACKTRACE") {
Ok(ref val) if !val.is_empty() => Some(Backtrace::new_unresolved()),
_ => None
};
EvalError {
kind,
backtrace,
}
}
}
#[derive(Debug)]
pub enum EvalErrorKind<'tcx> {
/// This variant is used by machines to signal their own errors that do not
/// match an existing variant
MachineError(Box<Error>),
FunctionPointerTyMismatch(FnSig<'tcx>, FnSig<'tcx>),
NoMirFor(String),
UnterminatedCString(MemoryPointer),
DanglingPointerDeref,
DoubleFree,
InvalidMemoryAccess,
InvalidFunctionPointer,
InvalidBool,
InvalidDiscriminant,
PointerOutOfBounds {
ptr: MemoryPointer,
access: bool,
allocation_size: u64,
},
InvalidNullPointerUsage,
ReadPointerAsBytes,
ReadBytesAsPointer,
InvalidPointerMath,
ReadUndefBytes,
DeadLocal,
InvalidBoolOp(mir::BinOp),
Unimplemented(String),
DerefFunctionPointer,
ExecuteMemory,
ArrayIndexOutOfBounds(Span, u64, u64),
Math(Span, ConstMathErr),
Intrinsic(String),
OverflowingMath,
InvalidChar(u128),
OutOfMemory {
allocation_size: u64,
memory_size: u64,
memory_usage: u64,
},
ExecutionTimeLimitReached,
StackFrameLimitReached,
OutOfTls,
TlsOutOfBounds,
AbiViolation(String),
AlignmentCheckFailed {
required: u64,
has: u64,
},
MemoryLockViolation {
ptr: MemoryPointer,
len: u64,
frame: usize,
access: AccessKind,
lock: Lock,
},
MemoryAcquireConflict {
ptr: MemoryPointer,
len: u64,
kind: AccessKind,
lock: Lock,
},
InvalidMemoryLockRelease {
ptr: MemoryPointer,
len: u64,
frame: usize,
lock: Lock,
},
DeallocatedLockedMemory {
ptr: MemoryPointer,
lock: Lock,
},
ValidationFailure(String),
CalledClosureAsFunction,
VtableForArgumentlessMethod,
ModifiedConstantMemory,
AssumptionNotHeld,
InlineAsm,
TypeNotPrimitive(Ty<'tcx>),
ReallocatedWrongMemoryKind(String, String),
DeallocatedWrongMemoryKind(String, String),
ReallocateNonBasePtr,
DeallocateNonBasePtr,
IncorrectAllocationInformation(u64, usize, u64, u64),
Layout(layout::LayoutError<'tcx>),
HeapAllocZeroBytes,
HeapAllocNonPowerOfTwoAlignment(u64),
Unreachable,
Panic,
ReadFromReturnPointer,
PathNotFound(Vec<String>),
UnimplementedTraitSelection,
/// Abort in case type errors are reached
TypeckError,
}
pub type EvalResult<'tcx, T = ()> = Result<T, EvalError<'tcx>>;
impl<'tcx> Error for EvalError<'tcx> {
fn description(&self) -> &str {
use self::EvalErrorKind::*;
match self.kind {
MachineError(ref inner) => inner.description(),
FunctionPointerTyMismatch(..) =>
"tried to call a function through a function pointer of a different type",
InvalidMemoryAccess =>
"tried to access memory through an invalid pointer",
DanglingPointerDeref =>
"dangling pointer was dereferenced",
DoubleFree =>
"tried to deallocate dangling pointer",
InvalidFunctionPointer =>
"tried to use a function pointer after offsetting it",
InvalidBool =>
"invalid boolean value read",
InvalidDiscriminant =>
"invalid enum discriminant value read",
PointerOutOfBounds { .. } =>
"pointer offset outside bounds of allocation",
InvalidNullPointerUsage =>
"invalid use of NULL pointer",
MemoryLockViolation { .. } =>
"memory access conflicts with lock",
MemoryAcquireConflict { .. } =>
"new memory lock conflicts with existing lock",
ValidationFailure(..) =>
"type validation failed",
InvalidMemoryLockRelease { .. } =>
"invalid attempt to release write lock",
DeallocatedLockedMemory { .. } =>
"tried to deallocate memory in conflict with a lock",
ReadPointerAsBytes =>
"a raw memory access tried to access part of a pointer value as raw bytes",
ReadBytesAsPointer =>
"a memory access tried to interpret some bytes as a pointer",
InvalidPointerMath =>
"attempted to do invalid arithmetic on pointers that would leak base addresses, e.g. comparing pointers into different allocations",
ReadUndefBytes =>
"attempted to read undefined bytes",
DeadLocal =>
"tried to access a dead local variable",
InvalidBoolOp(_) =>
"invalid boolean operation",
Unimplemented(ref msg) => msg,
DerefFunctionPointer =>
"tried to dereference a function pointer",
ExecuteMemory =>
"tried to treat a memory pointer as a function pointer",
ArrayIndexOutOfBounds(..) =>
"array index out of bounds",
Math(..) =>
"mathematical operation failed",
Intrinsic(..) =>
"intrinsic failed",
OverflowingMath =>
"attempted to do overflowing math",
NoMirFor(..) =>
"mir not found",
InvalidChar(..) =>
"tried to interpret an invalid 32-bit value as a char",
OutOfMemory{..} =>
"could not allocate more memory",
ExecutionTimeLimitReached =>
"reached the configured maximum execution time",
StackFrameLimitReached =>
"reached the configured maximum number of stack frames",
OutOfTls =>
"reached the maximum number of representable TLS keys",
TlsOutOfBounds =>
"accessed an invalid (unallocated) TLS key",
AbiViolation(ref msg) => msg,
AlignmentCheckFailed{..} =>
"tried to execute a misaligned read or write",
CalledClosureAsFunction =>
"tried to call a closure through a function pointer",
VtableForArgumentlessMethod =>
"tried to call a vtable function without arguments",
ModifiedConstantMemory =>
"tried to modify constant memory",
AssumptionNotHeld =>
"`assume` argument was false",
InlineAsm =>
"miri does not support inline assembly",
TypeNotPrimitive(_) =>
"expected primitive type, got nonprimitive",
ReallocatedWrongMemoryKind(_, _) =>
"tried to reallocate memory from one kind to another",
DeallocatedWrongMemoryKind(_, _) =>
"tried to deallocate memory of the wrong kind",
ReallocateNonBasePtr =>
"tried to reallocate with a pointer not to the beginning of an existing object",
DeallocateNonBasePtr =>
"tried to deallocate with a pointer not to the beginning of an existing object",
IncorrectAllocationInformation(..) =>
"tried to deallocate or reallocate using incorrect alignment or size",
Layout(_) =>
"rustc layout computation failed",
UnterminatedCString(_) =>
"attempted to get length of a null terminated string, but no null found before end of allocation",
HeapAllocZeroBytes =>
"tried to re-, de- or allocate zero bytes on the heap",
HeapAllocNonPowerOfTwoAlignment(_) =>
"tried to re-, de-, or allocate heap memory with alignment that is not a power of two",
Unreachable =>
"entered unreachable code",
Panic =>
"the evaluated program panicked",
ReadFromReturnPointer =>
"tried to read from the return pointer",
EvalErrorKind::PathNotFound(_) =>
"a path could not be resolved, maybe the crate is not loaded",
UnimplementedTraitSelection =>
"there were unresolved type arguments during trait selection",
TypeckError =>
"encountered constants with type errors, stopping evaluation",
}
}
fn cause(&self) -> Option<&Error> {
use self::EvalErrorKind::*;
match self.kind {
MachineError(ref inner) => Some(&**inner),
_ => None,
}
}
}
impl<'tcx> fmt::Display for EvalError<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::EvalErrorKind::*;
match self.kind {
PointerOutOfBounds { ptr, access, allocation_size } => {
write!(f, "{} at offset {}, outside bounds of allocation {} which has size {}",
if access { "memory access" } else { "pointer computed" },
ptr.offset, ptr.alloc_id, allocation_size)
},
MemoryLockViolation { ptr, len, frame, access, ref lock } => {
write!(f, "{:?} access by frame {} at {:?}, size {}, is in conflict with lock {:?}",
access, frame, ptr, len, lock)
}
MemoryAcquireConflict { ptr, len, kind, ref lock } => {
write!(f, "new {:?} lock at {:?}, size {}, is in conflict with lock {:?}",
kind, ptr, len, lock)
}
InvalidMemoryLockRelease { ptr, len, frame, ref lock } => {
write!(f, "frame {} tried to release memory write lock at {:?}, size {}, but cannot release lock {:?}",
frame, ptr, len, lock)
}
DeallocatedLockedMemory { ptr, ref lock } => {
write!(f, "tried to deallocate memory at {:?} in conflict with lock {:?}",
ptr, lock)
}
ValidationFailure(ref err) => {
write!(f, "type validation failed: {}", err)
}
NoMirFor(ref func) => write!(f, "no mir for `{}`", func),
FunctionPointerTyMismatch(sig, got) =>
write!(f, "tried to call a function with sig {} through a function pointer of type {}", sig, got),
ArrayIndexOutOfBounds(span, len, index) =>
write!(f, "index out of bounds: the len is {} but the index is {} at {:?}", len, index, span),
ReallocatedWrongMemoryKind(ref old, ref new) =>
write!(f, "tried to reallocate memory from {} to {}", old, new),
DeallocatedWrongMemoryKind(ref old, ref new) =>
write!(f, "tried to deallocate {} memory but gave {} as the kind", old, new),
Math(span, ref err) =>
write!(f, "{:?} at {:?}", err, span),
Intrinsic(ref err) =>
write!(f, "{}", err),
InvalidChar(c) =>
write!(f, "tried to interpret an invalid 32-bit value as a char: {}", c),
OutOfMemory { allocation_size, memory_size, memory_usage } =>
write!(f, "tried to allocate {} more bytes, but only {} bytes are free of the {} byte memory",
allocation_size, memory_size - memory_usage, memory_size),
AlignmentCheckFailed { required, has } =>
write!(f, "tried to access memory with alignment {}, but alignment {} is required",
has, required),
TypeNotPrimitive(ty) =>
write!(f, "expected primitive type, got {}", ty),
Layout(ref err) =>
write!(f, "rustc layout computation failed: {:?}", err),
PathNotFound(ref path) =>
write!(f, "Cannot find path {:?}", path),
MachineError(ref inner) =>
write!(f, "machine error: {}", inner),
IncorrectAllocationInformation(size, size2, align, align2) =>
write!(f, "incorrect alloc info: expected size {} and align {}, got size {} and align {}", size, align, size2, align2),
_ => write!(f, "{}", self.description()),
}
}
}
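A hypothetical usage sketch (not part of this file): `EvalErrorKind` converts into `EvalError` via the `From` impl above, which captures a backtrace only when `RUST_BACKTRACE` is set to a non-empty value:

use std::error::Error; // for description()

// `DoubleFree` carries no borrowed data, so `'static` is fine here.
let err: EvalError<'static> = EvalErrorKind::DoubleFree.into();
assert_eq!(err.description(), "tried to deallocate dangling pointer");
// `err.backtrace` is `Some(..)` only if RUST_BACKTRACE was non-empty.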


@ -0,0 +1,270 @@
//! An interpreter for MIR used in CTFE and by miri
#[macro_export]
macro_rules! err {
($($tt:tt)*) => { Err($crate::mir::interpret::EvalErrorKind::$($tt)*.into()) };
}
mod error;
mod value;
pub use self::error::{EvalError, EvalResult, EvalErrorKind};
pub use self::value::{PrimVal, PrimValKind, Value, Pointer, PtrAndAlign, bytes_to_f32, bytes_to_f64};
use std::collections::BTreeMap;
use ty::layout::HasDataLayout;
use std::fmt;
use ty::layout;
use mir;
use ty;
use middle::region;
use std::iter;
#[derive(Clone, Debug, PartialEq)]
pub enum Lock {
NoLock,
WriteLock(DynamicLifetime),
/// This should never be empty -- that would be a read lock held and nobody there to release it...
ReadLock(Vec<DynamicLifetime>),
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct DynamicLifetime {
pub frame: usize,
pub region: Option<region::Scope>, // "None" indicates "until the function ends"
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum AccessKind {
Read,
Write,
}
/// Uniquely identifies a specific constant or static.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub struct GlobalId<'tcx> {
/// For a constant or static, the `Instance` of the item itself.
/// For a promoted global, the `Instance` of the function they belong to.
pub instance: ty::Instance<'tcx>,
/// The index for promoted globals within their function's `Mir`.
pub promoted: Option<mir::Promoted>,
}
////////////////////////////////////////////////////////////////////////////////
// Pointer arithmetic
////////////////////////////////////////////////////////////////////////////////
pub trait PointerArithmetic: layout::HasDataLayout {
// These are not supposed to be overridden.
/// Truncate the given value to the pointer size; also return whether there was an overflow
fn truncate_to_ptr(self, val: u128) -> (u64, bool) {
let max_ptr_plus_1 = 1u128 << self.data_layout().pointer_size.bits();
((val % max_ptr_plus_1) as u64, val >= max_ptr_plus_1)
}
// Overflow checking only works properly on the range from -u64 to +u64.
fn overflowing_signed_offset(self, val: u64, i: i128) -> (u64, bool) {
// FIXME: is it possible to over/underflow here?
if i < 0 {
// trickery to ensure that i64::min_value() works fine
// this formula only works for true negative values, it panics for zero!
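// Worked example (hypothetical numbers): for val = 100 and i = -3,
// `i as u64` is 2^64 - 3, so n = u64::MAX - (2^64 - 3) + 1 = 3 (the
// magnitude of i), and 100u64.overflowing_sub(3) yields (97, false).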
let n = u64::max_value() - (i as u64) + 1;
val.overflowing_sub(n)
} else {
self.overflowing_offset(val, i as u64)
}
}
fn overflowing_offset(self, val: u64, i: u64) -> (u64, bool) {
let (res, over1) = val.overflowing_add(i);
let (res, over2) = self.truncate_to_ptr(res as u128);
(res, over1 || over2)
}
fn signed_offset<'tcx>(self, val: u64, i: i64) -> EvalResult<'tcx, u64> {
let (res, over) = self.overflowing_signed_offset(val, i as i128);
if over { err!(OverflowingMath) } else { Ok(res) }
}
fn offset<'tcx>(self, val: u64, i: u64) -> EvalResult<'tcx, u64> {
let (res, over) = self.overflowing_offset(val, i);
if over { err!(OverflowingMath) } else { Ok(res) }
}
fn wrapping_signed_offset(self, val: u64, i: i64) -> u64 {
self.overflowing_signed_offset(val, i as i128).0
}
}
impl<T: layout::HasDataLayout> PointerArithmetic for T {}
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct MemoryPointer {
pub alloc_id: AllocId,
pub offset: u64,
}
impl<'tcx> MemoryPointer {
pub fn new(alloc_id: AllocId, offset: u64) -> Self {
MemoryPointer { alloc_id, offset }
}
pub(crate) fn wrapping_signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> Self {
MemoryPointer::new(
self.alloc_id,
cx.data_layout().wrapping_signed_offset(self.offset, i),
)
}
pub fn overflowing_signed_offset<C: HasDataLayout>(self, i: i128, cx: C) -> (Self, bool) {
let (res, over) = cx.data_layout().overflowing_signed_offset(self.offset, i);
(MemoryPointer::new(self.alloc_id, res), over)
}
pub(crate) fn signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> EvalResult<'tcx, Self> {
Ok(MemoryPointer::new(
self.alloc_id,
cx.data_layout().signed_offset(self.offset, i)?,
))
}
pub fn overflowing_offset<C: HasDataLayout>(self, i: u64, cx: C) -> (Self, bool) {
let (res, over) = cx.data_layout().overflowing_offset(self.offset, i);
(MemoryPointer::new(self.alloc_id, res), over)
}
pub fn offset<C: HasDataLayout>(self, i: u64, cx: C) -> EvalResult<'tcx, Self> {
Ok(MemoryPointer::new(
self.alloc_id,
cx.data_layout().offset(self.offset, i)?,
))
}
}
#[derive(Copy, Clone, Eq, Hash, Ord, PartialEq, PartialOrd, Debug)]
pub struct AllocId(pub u64);
impl fmt::Display for AllocId {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.0)
}
}
#[derive(Debug, Eq, PartialEq, Hash)]
pub struct Allocation {
/// The actual bytes of the allocation.
/// Note that the bytes of a pointer represent the offset of the pointer
pub bytes: Vec<u8>,
/// Maps from byte addresses to allocations.
/// Only the first byte of a pointer is inserted into the map.
pub relocations: BTreeMap<u64, AllocId>,
/// Denotes undefined memory. Reading from undefined memory is forbidden in miri
pub undef_mask: UndefMask,
/// The alignment of the allocation to detect unaligned reads.
pub align: u64,
}
impl Allocation {
pub fn from_bytes(slice: &[u8]) -> Self {
let mut undef_mask = UndefMask::new(0);
undef_mask.grow(slice.len() as u64, true);
Self {
bytes: slice.to_owned(),
relocations: BTreeMap::new(),
undef_mask,
align: 1,
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Undefined byte tracking
////////////////////////////////////////////////////////////////////////////////
type Block = u64;
const BLOCK_SIZE: u64 = 64;
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub struct UndefMask {
blocks: Vec<Block>,
len: u64,
}
impl UndefMask {
pub fn new(size: u64) -> Self {
let mut m = UndefMask {
blocks: vec![],
len: 0,
};
m.grow(size, false);
m
}
/// Check whether the range `start..end` (end-exclusive) is entirely defined.
pub fn is_range_defined(&self, start: u64, end: u64) -> bool {
if end > self.len {
return false;
}
for i in start..end {
if !self.get(i) {
return false;
}
}
true
}
pub fn set_range(&mut self, start: u64, end: u64, new_state: bool) {
let len = self.len;
if end > len {
self.grow(end - len, new_state);
}
self.set_range_inbounds(start, end, new_state);
}
pub fn set_range_inbounds(&mut self, start: u64, end: u64, new_state: bool) {
for i in start..end {
self.set(i, new_state);
}
}
pub fn get(&self, i: u64) -> bool {
let (block, bit) = bit_index(i);
(self.blocks[block] & 1 << bit) != 0
}
pub fn set(&mut self, i: u64, new_state: bool) {
let (block, bit) = bit_index(i);
if new_state {
self.blocks[block] |= 1 << bit;
} else {
self.blocks[block] &= !(1 << bit);
}
}
pub fn grow(&mut self, amount: u64, new_state: bool) {
let unused_trailing_bits = self.blocks.len() as u64 * BLOCK_SIZE - self.len;
if amount > unused_trailing_bits {
let additional_blocks = amount / BLOCK_SIZE + 1;
assert_eq!(additional_blocks as usize as u64, additional_blocks);
self.blocks.extend(
iter::repeat(0).take(additional_blocks as usize),
);
}
let start = self.len;
self.len += amount;
self.set_range_inbounds(start, start + amount, new_state);
}
}
fn bit_index(bits: u64) -> (usize, usize) {
let a = bits / BLOCK_SIZE;
let b = bits % BLOCK_SIZE;
assert_eq!(a as usize as u64, a);
assert_eq!(b as usize as u64, b);
(a as usize, b as usize)
}
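For illustration (a hypothetical snippet, not part of this file): `UndefMask` keeps one definedness bit per byte and is used by `Allocation` above to track which bytes have been written:

let mut mask = UndefMask::new(16);     // 16 bytes, all undefined
assert!(!mask.is_range_defined(0, 16));
mask.set_range(0, 8, true);            // the first 8 bytes become defined
assert!(mask.is_range_defined(0, 8));
assert!(!mask.is_range_defined(0, 9)); // byte 8 is still undefined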


@ -0,0 +1,350 @@
#![allow(unknown_lints)]
use ty::layout::HasDataLayout;
use super::{EvalResult, MemoryPointer, PointerArithmetic};
use syntax::ast::FloatTy;
use rustc_const_math::ConstFloat;
#[derive(Copy, Clone, Debug)]
pub struct PtrAndAlign {
pub ptr: Pointer,
/// Remember whether this place is *supposed* to be aligned.
pub aligned: bool,
}
impl PtrAndAlign {
pub fn to_ptr<'tcx>(self) -> EvalResult<'tcx, MemoryPointer> {
self.ptr.to_ptr()
}
pub fn offset<'tcx, C: HasDataLayout>(self, i: u64, cx: C) -> EvalResult<'tcx, Self> {
Ok(PtrAndAlign {
ptr: self.ptr.offset(i, cx)?,
aligned: self.aligned,
})
}
}
pub fn bytes_to_f32(bits: u128) -> ConstFloat {
ConstFloat {
bits,
ty: FloatTy::F32,
}
}
pub fn bytes_to_f64(bits: u128) -> ConstFloat {
ConstFloat {
bits,
ty: FloatTy::F64,
}
}
/// A `Value` represents a single self-contained Rust value.
///
/// A `Value` can either refer to a block of memory inside an allocation (`ByRef`) or to a primitive
/// value held directly, outside of any allocation (`ByVal`). For `ByRef`-values, we remember
/// whether the pointer is supposed to be aligned or not (also see Place).
///
/// For optimization of a few very common cases, there is also a representation for a pair of
/// primitive values (`ByValPair`). It allows Miri to avoid making allocations for checked binary
/// operations and fat pointers. This idea was taken from rustc's trans.
#[derive(Clone, Copy, Debug)]
pub enum Value {
ByRef(PtrAndAlign),
ByVal(PrimVal),
ByValPair(PrimVal, PrimVal),
}
/// A wrapper type around `PrimVal` that cannot be turned back into a `PrimVal` accidentally.
/// This type clears up a few APIs where having a `PrimVal` argument for something that is
/// potentially an integer pointer or a pointer to an allocation was unclear.
///
/// I (@oli-obk) believe it is less easy to mix up generic primvals and primvals that are just
/// the representation of pointers. Also all the sites that convert between primvals and pointers
/// are explicit now (and rare!)
#[derive(Clone, Copy, Debug)]
pub struct Pointer {
primval: PrimVal,
}
impl<'tcx> Pointer {
pub fn null() -> Self {
PrimVal::Bytes(0).into()
}
pub fn to_ptr(self) -> EvalResult<'tcx, MemoryPointer> {
self.primval.to_ptr()
}
pub fn into_inner_primval(self) -> PrimVal {
self.primval
}
pub fn signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> EvalResult<'tcx, Self> {
let layout = cx.data_layout();
match self.primval {
PrimVal::Bytes(b) => {
assert_eq!(b as u64 as u128, b);
Ok(Pointer::from(
PrimVal::Bytes(layout.signed_offset(b as u64, i)? as u128),
))
}
PrimVal::Ptr(ptr) => ptr.signed_offset(i, layout).map(Pointer::from),
PrimVal::Undef => err!(ReadUndefBytes),
}
}
pub fn offset<C: HasDataLayout>(self, i: u64, cx: C) -> EvalResult<'tcx, Self> {
let layout = cx.data_layout();
match self.primval {
PrimVal::Bytes(b) => {
assert_eq!(b as u64 as u128, b);
Ok(Pointer::from(
PrimVal::Bytes(layout.offset(b as u64, i)? as u128),
))
}
PrimVal::Ptr(ptr) => ptr.offset(i, layout).map(Pointer::from),
PrimVal::Undef => err!(ReadUndefBytes),
}
}
pub fn wrapping_signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> EvalResult<'tcx, Self> {
let layout = cx.data_layout();
match self.primval {
PrimVal::Bytes(b) => {
assert_eq!(b as u64 as u128, b);
Ok(Pointer::from(PrimVal::Bytes(
layout.wrapping_signed_offset(b as u64, i) as u128,
)))
}
PrimVal::Ptr(ptr) => Ok(Pointer::from(ptr.wrapping_signed_offset(i, layout))),
PrimVal::Undef => err!(ReadUndefBytes),
}
}
pub fn is_null(self) -> EvalResult<'tcx, bool> {
match self.primval {
PrimVal::Bytes(b) => Ok(b == 0),
PrimVal::Ptr(_) => Ok(false),
PrimVal::Undef => err!(ReadUndefBytes),
}
}
pub fn to_value_with_len(self, len: u64) -> Value {
Value::ByValPair(self.primval, PrimVal::from_u128(len as u128))
}
pub fn to_value_with_vtable(self, vtable: MemoryPointer) -> Value {
Value::ByValPair(self.primval, PrimVal::Ptr(vtable))
}
pub fn to_value(self) -> Value {
Value::ByVal(self.primval)
}
}
impl ::std::convert::From<PrimVal> for Pointer {
fn from(primval: PrimVal) -> Self {
Pointer { primval }
}
}
impl ::std::convert::From<MemoryPointer> for Pointer {
fn from(ptr: MemoryPointer) -> Self {
PrimVal::Ptr(ptr).into()
}
}
/// A `PrimVal` represents an immediate, primitive value existing outside of a
/// `memory::Allocation`. It is in many ways like a small chunk of a `Allocation`, up to 8 bytes in
/// size. Like a range of bytes in an `Allocation`, a `PrimVal` can either represent the raw bytes
/// of a simple value, a pointer into another `Allocation`, or be undefined.
#[derive(Clone, Copy, Debug)]
pub enum PrimVal {
/// The raw bytes of a simple value.
Bytes(u128),
/// A pointer into an `Allocation`. An `Allocation` in the `memory` module has a list of
/// relocations, but a `PrimVal` is only large enough to contain one, so we just represent the
/// relocation and its associated offset together as a `MemoryPointer` here.
Ptr(MemoryPointer),
/// An undefined `PrimVal`, for representing values that aren't safe to examine, but are safe
/// to copy around, just like undefined bytes in an `Allocation`.
Undef,
}
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PrimValKind {
I8, I16, I32, I64, I128,
U8, U16, U32, U64, U128,
F32, F64,
Ptr, FnPtr,
Bool,
Char,
}
impl<'a, 'tcx: 'a> Value {
#[inline]
pub fn by_ref(ptr: Pointer) -> Self {
Value::ByRef(PtrAndAlign { ptr, aligned: true })
}
}
impl<'tcx> PrimVal {
pub fn from_u128(n: u128) -> Self {
PrimVal::Bytes(n)
}
pub fn from_i128(n: i128) -> Self {
PrimVal::Bytes(n as u128)
}
pub fn from_float(f: ConstFloat) -> Self {
PrimVal::Bytes(f.bits)
}
pub fn from_bool(b: bool) -> Self {
PrimVal::Bytes(b as u128)
}
pub fn from_char(c: char) -> Self {
PrimVal::Bytes(c as u128)
}
pub fn to_bytes(self) -> EvalResult<'tcx, u128> {
match self {
PrimVal::Bytes(b) => Ok(b),
PrimVal::Ptr(_) => err!(ReadPointerAsBytes),
PrimVal::Undef => err!(ReadUndefBytes),
}
}
pub fn to_ptr(self) -> EvalResult<'tcx, MemoryPointer> {
match self {
PrimVal::Bytes(_) => err!(ReadBytesAsPointer),
PrimVal::Ptr(p) => Ok(p),
PrimVal::Undef => err!(ReadUndefBytes),
}
}
pub fn is_bytes(self) -> bool {
match self {
PrimVal::Bytes(_) => true,
_ => false,
}
}
pub fn is_ptr(self) -> bool {
match self {
PrimVal::Ptr(_) => true,
_ => false,
}
}
pub fn is_undef(self) -> bool {
match self {
PrimVal::Undef => true,
_ => false,
}
}
pub fn to_u128(self) -> EvalResult<'tcx, u128> {
self.to_bytes()
}
pub fn to_u64(self) -> EvalResult<'tcx, u64> {
self.to_bytes().map(|b| {
assert_eq!(b as u64 as u128, b);
b as u64
})
}
pub fn to_i32(self) -> EvalResult<'tcx, i32> {
self.to_bytes().map(|b| {
assert_eq!(b as i32 as u128, b);
b as i32
})
}
pub fn to_i128(self) -> EvalResult<'tcx, i128> {
self.to_bytes().map(|b| b as i128)
}
pub fn to_i64(self) -> EvalResult<'tcx, i64> {
self.to_bytes().map(|b| {
assert_eq!(b as i64 as u128, b);
b as i64
})
}
pub fn to_f32(self) -> EvalResult<'tcx, ConstFloat> {
self.to_bytes().map(bytes_to_f32)
}
pub fn to_f64(self) -> EvalResult<'tcx, ConstFloat> {
self.to_bytes().map(bytes_to_f64)
}
pub fn to_bool(self) -> EvalResult<'tcx, bool> {
match self.to_bytes()? {
0 => Ok(false),
1 => Ok(true),
_ => err!(InvalidBool),
}
}
}
impl PrimValKind {
pub fn is_int(self) -> bool {
use self::PrimValKind::*;
match self {
I8 | I16 | I32 | I64 | I128 | U8 | U16 | U32 | U64 | U128 => true,
_ => false,
}
}
pub fn is_signed_int(self) -> bool {
use self::PrimValKind::*;
match self {
I8 | I16 | I32 | I64 | I128 => true,
_ => false,
}
}
pub fn is_float(self) -> bool {
use self::PrimValKind::*;
match self {
F32 | F64 => true,
_ => false,
}
}
pub fn from_uint_size(size: u64) -> Self {
match size {
1 => PrimValKind::U8,
2 => PrimValKind::U16,
4 => PrimValKind::U32,
8 => PrimValKind::U64,
16 => PrimValKind::U128,
_ => bug!("can't make uint with size {}", size),
}
}
pub fn from_int_size(size: u64) -> Self {
match size {
1 => PrimValKind::I8,
2 => PrimValKind::I16,
4 => PrimValKind::I32,
8 => PrimValKind::I64,
16 => PrimValKind::I128,
_ => bug!("can't make int with size {}", size),
}
}
pub fn is_ptr(self) -> bool {
use self::PrimValKind::*;
match self {
Ptr | FnPtr => true,
_ => false,
}
}
}
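A hypothetical round trip through the conversions above; mismatched reads yield `EvalResult` errors rather than panicking:

let b = PrimVal::from_bool(true);
assert_eq!(b.to_bytes().unwrap(), 1);
assert!(b.to_bool().unwrap());
// Raw bytes cannot be read back as a pointer:
assert!(PrimVal::Bytes(42).to_ptr().is_err());
// The null pointer is just PrimVal::Bytes(0) wrapped in a Pointer:
assert!(Pointer::null().is_null().unwrap());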


@ -43,6 +43,7 @@ mod cache;
pub mod tcx;
pub mod visit;
pub mod traversal;
pub mod interpret;
/// Types for locals
type LocalDecls<'tcx> = IndexVec<Local, LocalDecl<'tcx>>;


@ -1158,6 +1158,8 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options,
"print some statistics about MIR"),
always_encode_mir: bool = (false, parse_bool, [TRACKED],
"encode MIR of all functions into the crate metadata"),
miri: bool = (false, parse_bool, [TRACKED],
"check the miri const evaluator against the old ctfe"),
osx_rpath_install_name: bool = (false, parse_bool, [TRACKED],
"pass `-install_name @rpath/...` to the macOS linker"),
sanitizer: Option<Sanitizer> = (None, parse_sanitizer, [TRACKED],


@ -30,9 +30,10 @@ use middle::cstore::EncodedMetadata;
use middle::lang_items;
use middle::resolve_lifetime::{self, ObjectLifetimeDefault};
use middle::stability;
use mir::Mir;
use mir::{Mir, interpret};
use ty::subst::{Kind, Substs};
use ty::ReprOptions;
use ty::Instance;
use traits;
use ty::{self, Ty, TypeAndMut};
use ty::{TyS, TypeVariants, Slice};
@ -87,6 +88,8 @@ pub struct GlobalArenas<'tcx> {
steal_mir: TypedArena<Steal<Mir<'tcx>>>,
mir: TypedArena<Mir<'tcx>>,
tables: TypedArena<ty::TypeckTables<'tcx>>,
/// miri allocations
const_allocs: TypedArena<interpret::Allocation>,
}
impl<'tcx> GlobalArenas<'tcx> {
@ -99,6 +102,7 @@ impl<'tcx> GlobalArenas<'tcx> {
steal_mir: TypedArena::new(),
mir: TypedArena::new(),
tables: TypedArena::new(),
const_allocs: TypedArena::new(),
}
}
}
@ -847,6 +851,8 @@ pub struct GlobalCtxt<'tcx> {
stability_interner: RefCell<FxHashSet<&'tcx attr::Stability>>,
pub interpret_interner: RefCell<InterpretInterner<'tcx>>,
layout_interner: RefCell<FxHashSet<&'tcx LayoutDetails>>,
/// A vector of every trait accessible in the whole crate
@ -866,6 +872,104 @@ pub struct GlobalCtxt<'tcx> {
output_filenames: Arc<OutputFilenames>,
}
/// Everything needed to efficiently work with interned allocations
#[derive(Debug, Default)]
pub struct InterpretInterner<'tcx> {
/// Stores the value of constants (and deduplicates the actual memory)
allocs: FxHashSet<&'tcx interpret::Allocation>,
/// Allows obtaining function instance handles via a unique identifier
functions: FxHashMap<u64, Instance<'tcx>>,
/// Inverse map of `functions`.
/// Used so we don't allocate a new pointer every time we need one
function_cache: FxHashMap<Instance<'tcx>, u64>,
/// Allows obtaining const allocs via a unique identifier
alloc_by_id: FxHashMap<u64, &'tcx interpret::Allocation>,
/// The AllocId to assign to the next new regular allocation.
/// Always incremented, never gets smaller.
next_id: u64,
/// Allows checking whether a constant already has an allocation
///
/// The pointers are to the beginning of an `alloc_by_id` allocation
alloc_cache: FxHashMap<interpret::GlobalId<'tcx>, interpret::PtrAndAlign>,
/// A cache for basic byte allocations keyed by their contents. This is used to deduplicate
/// allocations for string and bytestring literals.
literal_alloc_cache: FxHashMap<Vec<u8>, u64>,
}
impl<'tcx> InterpretInterner<'tcx> {
pub fn create_fn_alloc(&mut self, instance: Instance<'tcx>) -> u64 {
if let Some(&alloc_id) = self.function_cache.get(&instance) {
return alloc_id;
}
let id = self.reserve();
debug!("creating fn ptr: {}", id);
self.functions.insert(id, instance);
self.function_cache.insert(instance, id);
id
}
pub fn get_fn(
&self,
id: u64,
) -> Option<Instance<'tcx>> {
self.functions.get(&id).cloned()
}
pub fn get_alloc(
&self,
id: u64,
) -> Option<&'tcx interpret::Allocation> {
self.alloc_by_id.get(&id).cloned()
}
pub fn get_cached(
&self,
global_id: interpret::GlobalId<'tcx>,
) -> Option<interpret::PtrAndAlign> {
self.alloc_cache.get(&global_id).cloned()
}
pub fn cache(
&mut self,
global_id: interpret::GlobalId<'tcx>,
ptr: interpret::PtrAndAlign,
) {
if let Some(old) = self.alloc_cache.insert(global_id, ptr) {
bug!("tried to cache {:?}, but was already existing as {:#?}", global_id, old);
}
}
pub fn intern_at_reserved(
&mut self,
id: u64,
alloc: &'tcx interpret::Allocation,
) {
if let Some(old) = self.alloc_by_id.insert(id, alloc) {
bug!("tried to intern allocation at {}, but was already existing as {:#?}", id, old);
}
}
/// obtains a new allocation ID that can be referenced but does not
/// yet have an allocation backing it.
pub fn reserve(
&mut self,
) -> u64 {
let next = self.next_id;
self.next_id = self.next_id
.checked_add(1)
.expect("You overflowed a u64 by incrementing by 1... \
You've just earned yourself a free drink if we ever meet. \
Seriously, how did you do that?!");
next
}
}
impl<'tcx> GlobalCtxt<'tcx> {
/// Get the global TyCtxt.
pub fn global_tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx> {
@ -933,6 +1037,41 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
}
}
pub fn intern_const_alloc(
self,
alloc: interpret::Allocation,
) -> &'gcx interpret::Allocation {
if let Some(alloc) = self.interpret_interner.borrow().allocs.get(&alloc) {
return alloc;
}
let interned = self.global_arenas.const_allocs.alloc(alloc);
if let Some(prev) = self.interpret_interner.borrow_mut().allocs.replace(interned) {
bug!("Tried to overwrite interned Allocation: {:#?}", prev)
}
interned
}
/// Allocates a byte or string literal for `mir::interpret`
pub fn allocate_cached(self, bytes: &[u8]) -> u64 {
// check whether we already allocated this literal or a constant with the same memory
if let Some(&alloc_id) = self.interpret_interner.borrow().literal_alloc_cache.get(bytes) {
return alloc_id;
}
// create an allocation that just contains these bytes
let alloc = interpret::Allocation::from_bytes(bytes);
let alloc = self.intern_const_alloc(alloc);
let mut int = self.interpret_interner.borrow_mut();
// the next unique id
let id = int.reserve();
// make the allocation identifiable
int.alloc_by_id.insert(id, alloc);
// cache it for the future
int.literal_alloc_cache.insert(bytes.to_owned(), id);
id
}
pub fn intern_stability(self, stab: attr::Stability) -> &'gcx attr::Stability {
if let Some(st) = self.stability_interner.borrow().get(&stab) {
return st;
@ -1079,6 +1218,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
layout_depth: Cell::new(0),
derive_macros: RefCell::new(NodeMap()),
stability_interner: RefCell::new(FxHashSet()),
interpret_interner: Default::default(),
all_traits: RefCell::new(None),
tx_to_llvm_workers: tx,
output_filenames: Arc::new(output_filenames.clone()),
@ -1525,6 +1665,7 @@ impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> {
println!("Substs interner: #{}", self.interners.substs.borrow().len());
println!("Region interner: #{}", self.interners.region.borrow().len());
println!("Stability interner: #{}", self.stability_interner.borrow().len());
println!("Interpret interner: #{}", self.interpret_interner.borrow().allocs.len());
println!("Layout interner: #{}", self.layout_interner.borrow().len());
}
}
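A hypothetical use of the new interner from code that has a `tcx` in scope: byte and string literals are deduplicated through `allocate_cached`, and the returned id resolves back to the interned `Allocation`:

let id = tcx.allocate_cached(b"hello");
// Interning the same bytes again yields the same id.
assert_eq!(id, tcx.allocate_cached(b"hello"));
let alloc = tcx.interpret_interner.borrow().get_alloc(id).expect("just interned");
assert_eq!(&alloc.bytes[..], &b"hello"[..]);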


@ -180,20 +180,20 @@ impl<'a, 'b, 'tcx> Instance<'tcx> {
debug!("resolve(def_id={:?}, substs={:?}) = {:?}", def_id, substs, result);
result
}
}
fn resolve_closure<'a, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId,
substs: ty::ClosureSubsts<'tcx>,
requested_kind: ty::ClosureKind)
-> Instance<'tcx>
{
let actual_kind = substs.closure_kind(def_id, tcx);
pub fn resolve_closure(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId,
substs: ty::ClosureSubsts<'tcx>,
requested_kind: ty::ClosureKind)
-> Instance<'tcx>
{
let actual_kind = substs.closure_kind(def_id, tcx);
match needs_fn_once_adapter_shim(actual_kind, requested_kind) {
Ok(true) => fn_once_adapter_instance(tcx, def_id, substs),
_ => Instance::new(def_id, substs.substs)
match needs_fn_once_adapter_shim(actual_kind, requested_kind) {
Ok(true) => fn_once_adapter_instance(tcx, def_id, substs),
_ => Instance::new(def_id, substs.substs)
}
}
}
@ -202,8 +202,8 @@ fn resolve_associated_item<'a, 'tcx>(
trait_item: &ty::AssociatedItem,
param_env: ty::ParamEnv<'tcx>,
trait_id: DefId,
rcvr_substs: &'tcx Substs<'tcx>
) -> Option<Instance<'tcx>> {
rcvr_substs: &'tcx Substs<'tcx>,
) -> Option<Instance<'tcx>> {
let def_id = trait_item.def_id;
debug!("resolve_associated_item(trait_item={:?}, \
trait_id={:?}, \
@ -230,7 +230,7 @@ fn resolve_associated_item<'a, 'tcx>(
}
traits::VtableClosure(closure_data) => {
let trait_closure_kind = tcx.lang_items().fn_trait_kind(trait_id).unwrap();
Some(resolve_closure(tcx, closure_data.closure_def_id, closure_data.substs,
Some(Instance::resolve_closure(tcx, closure_data.closure_def_id, closure_data.substs,
trait_closure_kind))
}
traits::VtableFnPointer(ref data) => {


@ -682,35 +682,3 @@ impl<'a, 'tcx> ConstContext<'a, 'tcx> {
compare_const_vals(tcx, span, &a.val, &b.val)
}
}
pub(crate) fn const_eval<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
key: ty::ParamEnvAnd<'tcx, (DefId, &'tcx Substs<'tcx>)>)
-> EvalResult<'tcx> {
let (def_id, substs) = if let Some(resolved) = lookup_const_by_id(tcx, key) {
resolved
} else {
return Err(ConstEvalErr {
span: tcx.def_span(key.value.0),
kind: TypeckError
});
};
let tables = tcx.typeck_tables_of(def_id);
let body = if let Some(id) = tcx.hir.as_local_node_id(def_id) {
let body_id = tcx.hir.body_owned_by(id);
// Do match-check before building MIR
if tcx.check_match(def_id).is_err() {
return Err(ConstEvalErr {
span: tcx.def_span(key.value.0),
kind: CheckMatchError,
});
}
tcx.mir_const_qualif(def_id);
tcx.hir.body(body_id)
} else {
tcx.extern_const_body(def_id).body
};
ConstContext::new(tcx, key.param_env.and(substs), tables).eval(&body.value)
}


@ -50,7 +50,6 @@ use rustc::ty::maps::Providers;
pub fn provide(providers: &mut Providers) {
*providers = Providers {
const_eval: eval::const_eval,
check_match: check_match::check_match,
..*providers
};


@ -12,6 +12,7 @@ crate-type = ["dylib"]
bitflags = "1.0"
graphviz = { path = "../libgraphviz" }
log = "0.3"
log_settings = "0.1.1"
rustc = { path = "../librustc" }
rustc_const_eval = { path = "../librustc_const_eval" }
rustc_const_math = { path = "../librustc_const_math" }
@ -20,3 +21,5 @@ rustc_errors = { path = "../librustc_errors" }
serialize = { path = "../libserialize" }
syntax = { path = "../libsyntax" }
syntax_pos = { path = "../libsyntax_pos" }
byteorder = { version = "1.1", features = ["i128"] }
rustc_apfloat = { path = "../librustc_apfloat" }


@ -0,0 +1,133 @@
use rustc::ty::Ty;
use syntax::ast::{FloatTy, IntTy, UintTy};
use rustc_const_math::ConstFloat;
use super::{EvalContext, Machine};
use rustc::mir::interpret::{PrimVal, EvalResult, MemoryPointer, PointerArithmetic};
use rustc_apfloat::ieee::{Single, Double};
use rustc_apfloat::Float;
impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
pub(super) fn cast_primval(
&self,
val: PrimVal,
src_ty: Ty<'tcx>,
dest_ty: Ty<'tcx>,
) -> EvalResult<'tcx, PrimVal> {
trace!("Casting {:?}: {:?} to {:?}", val, src_ty, dest_ty);
let src_kind = self.ty_to_primval_kind(src_ty)?;
match val {
PrimVal::Undef => Ok(PrimVal::Undef),
PrimVal::Ptr(ptr) => self.cast_from_ptr(ptr, dest_ty),
val @ PrimVal::Bytes(_) => {
use rustc::mir::interpret::PrimValKind::*;
match src_kind {
F32 => self.cast_from_float(val.to_f32()?, dest_ty),
F64 => self.cast_from_float(val.to_f64()?, dest_ty),
I8 | I16 | I32 | I64 | I128 => {
self.cast_from_signed_int(val.to_i128()?, dest_ty)
}
Bool | Char | U8 | U16 | U32 | U64 | U128 | FnPtr | Ptr => {
self.cast_from_int(val.to_u128()?, dest_ty, false)
}
}
}
}
}
fn cast_from_signed_int(&self, val: i128, ty: Ty<'tcx>) -> EvalResult<'tcx, PrimVal> {
self.cast_from_int(val as u128, ty, val < 0)
}
fn int_to_int(&self, v: i128, ty: IntTy) -> u128 {
match ty {
IntTy::I8 => v as i8 as u128,
IntTy::I16 => v as i16 as u128,
IntTy::I32 => v as i32 as u128,
IntTy::I64 => v as i64 as u128,
IntTy::I128 => v as u128,
IntTy::Is => {
let ty = self.tcx.sess.target.isize_ty;
self.int_to_int(v, ty)
}
}
}
fn int_to_uint(&self, v: u128, ty: UintTy) -> u128 {
match ty {
UintTy::U8 => v as u8 as u128,
UintTy::U16 => v as u16 as u128,
UintTy::U32 => v as u32 as u128,
UintTy::U64 => v as u64 as u128,
UintTy::U128 => v,
UintTy::Us => {
let ty = self.tcx.sess.target.usize_ty;
self.int_to_uint(v, ty)
}
}
}
fn cast_from_int(
&self,
v: u128,
ty: Ty<'tcx>,
negative: bool,
) -> EvalResult<'tcx, PrimVal> {
trace!("cast_from_int: {}, {}, {}", v, ty, negative);
use rustc::ty::TypeVariants::*;
match ty.sty {
// Casts to bool are not permitted by rustc, no need to handle them here.
TyInt(ty) => Ok(PrimVal::Bytes(self.int_to_int(v as i128, ty))),
TyUint(ty) => Ok(PrimVal::Bytes(self.int_to_uint(v, ty))),
TyFloat(fty) if negative => Ok(PrimVal::Bytes(ConstFloat::from_i128(v as i128, fty).bits)),
TyFloat(fty) => Ok(PrimVal::Bytes(ConstFloat::from_u128(v, fty).bits)),
TyChar if v as u8 as u128 == v => Ok(PrimVal::Bytes(v)),
TyChar => err!(InvalidChar(v)),
// No alignment check needed for raw pointers. But we have to truncate to target ptr size.
TyRawPtr(_) => Ok(PrimVal::Bytes(self.memory.truncate_to_ptr(v).0 as u128)),
_ => err!(Unimplemented(format!("int to {:?} cast", ty))),
}
}
fn cast_from_float(&self, val: ConstFloat, ty: Ty<'tcx>) -> EvalResult<'tcx, PrimVal> {
use rustc::ty::TypeVariants::*;
match ty.sty {
TyUint(t) => {
let width = t.bit_width().unwrap_or(self.memory.pointer_size() as usize * 8);
match val.ty {
FloatTy::F32 => Ok(PrimVal::Bytes(Single::from_bits(val.bits).to_u128(width).value)),
FloatTy::F64 => Ok(PrimVal::Bytes(Double::from_bits(val.bits).to_u128(width).value)),
}
},
TyInt(t) => {
let width = t.bit_width().unwrap_or(self.memory.pointer_size() as usize * 8);
match val.ty {
FloatTy::F32 => Ok(PrimVal::from_i128(Single::from_bits(val.bits).to_i128(width).value)),
FloatTy::F64 => Ok(PrimVal::from_i128(Double::from_bits(val.bits).to_i128(width).value)),
}
},
TyFloat(fty) => Ok(PrimVal::from_float(val.convert(fty))),
_ => err!(Unimplemented(format!("float to {:?} cast", ty))),
}
}
fn cast_from_ptr(&self, ptr: MemoryPointer, ty: Ty<'tcx>) -> EvalResult<'tcx, PrimVal> {
use rustc::ty::TypeVariants::*;
match ty.sty {
// Casting to a reference or fn pointer is not permitted by rustc, no need to support it here.
TyRawPtr(_) |
TyInt(IntTy::Is) |
TyUint(UintTy::Us) => Ok(PrimVal::Ptr(ptr)),
TyInt(_) | TyUint(_) => err!(ReadPointerAsBytes),
_ => err!(Unimplemented(format!("ptr to {:?} cast", ty))),
}
}
}


@ -0,0 +1,587 @@
use rustc::ty::{self, TyCtxt, Ty, Instance};
use rustc::ty::layout::{self, LayoutOf};
use rustc::ty::subst::Substs;
use rustc::hir::def_id::DefId;
use rustc::mir;
use rustc::middle::const_val::ErrKind::{CheckMatchError, TypeckError};
use rustc::middle::const_val::{ConstEvalErr, ConstVal};
use rustc_const_eval::{lookup_const_by_id, ConstContext};
use rustc::mir::Field;
use rustc_data_structures::indexed_vec::Idx;
use syntax::ast::Mutability;
use syntax::codemap::Span;
use rustc::mir::interpret::{EvalResult, EvalError, EvalErrorKind, GlobalId, Value, PrimVal, PtrAndAlign};
use super::{Place, PlaceExtra, EvalContext, StackPopCleanup, ValTy, HasMemory};
use rustc_const_math::ConstInt;
use std::fmt;
use std::error::Error;
pub fn eval_body<'a, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
instance: Instance<'tcx>,
param_env: ty::ParamEnv<'tcx>,
) -> (EvalResult<'tcx, (PtrAndAlign, Ty<'tcx>)>, EvalContext<'a, 'tcx, CompileTimeEvaluator>) {
debug!("eval_body: {:?}, {:?}", instance, param_env);
let limits = super::ResourceLimits::default();
let mut ecx = EvalContext::new(tcx, param_env, limits, CompileTimeEvaluator, ());
let cid = GlobalId {
instance,
promoted: None,
};
let try = (|| {
if ecx.tcx.has_attr(instance.def_id(), "linkage") {
return Err(ConstEvalError::NotConst("extern global".to_string()).into());
}
// FIXME(eddyb) use `Instance::ty` when it becomes available.
let instance_ty =
ecx.monomorphize(instance.def.def_ty(tcx), instance.substs);
if tcx.interpret_interner.borrow().get_cached(cid).is_none() {
let mir = ecx.load_mir(instance.def)?;
let layout = ecx.layout_of(instance_ty)?;
assert!(!layout.is_unsized());
let ptr = ecx.memory.allocate(
layout.size.bytes(),
layout.align.abi(),
None,
)?;
tcx.interpret_interner.borrow_mut().cache(
cid,
PtrAndAlign {
ptr: ptr.into(),
aligned: !layout.is_packed(),
},
);
let cleanup = StackPopCleanup::MarkStatic(Mutability::Immutable);
let name = ty::tls::with(|tcx| tcx.item_path_str(instance.def_id()));
trace!("const_eval: pushing stack frame for global: {}", name);
ecx.push_stack_frame(
instance,
mir.span,
mir,
Place::from_ptr(ptr),
cleanup.clone(),
)?;
while ecx.step()? {}
// reinsert the stack frame so any future queries have the correct substs
ecx.push_stack_frame(
instance,
mir.span,
mir,
Place::from_ptr(ptr),
cleanup,
)?;
}
let value = tcx.interpret_interner.borrow().get_cached(cid).expect("global not cached");
Ok((value, instance_ty))
})();
(try, ecx)
}
pub fn eval_body_as_integer<'a, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
param_env: ty::ParamEnv<'tcx>,
instance: Instance<'tcx>,
) -> EvalResult<'tcx, ConstInt> {
let (ptr_ty, ecx) = eval_body(tcx, instance, param_env);
let (ptr, ty) = ptr_ty?;
let prim = match ecx.read_maybe_aligned(ptr.aligned, |ectx| ectx.try_read_value(ptr.ptr, ty))? {
Some(Value::ByVal(prim)) => prim.to_bytes()?,
_ => return err!(TypeNotPrimitive(ty)),
};
use syntax::ast::{IntTy, UintTy};
use rustc::ty::TypeVariants::*;
use rustc_const_math::{ConstIsize, ConstUsize};
Ok(match ty.sty {
TyInt(IntTy::I8) => ConstInt::I8(prim as i128 as i8),
TyInt(IntTy::I16) => ConstInt::I16(prim as i128 as i16),
TyInt(IntTy::I32) => ConstInt::I32(prim as i128 as i32),
TyInt(IntTy::I64) => ConstInt::I64(prim as i128 as i64),
TyInt(IntTy::I128) => ConstInt::I128(prim as i128),
TyInt(IntTy::Is) => ConstInt::Isize(
ConstIsize::new(prim as i128 as i64, tcx.sess.target.isize_ty)
.expect("miri should already have errored"),
),
TyUint(UintTy::U8) => ConstInt::U8(prim as u8),
TyUint(UintTy::U16) => ConstInt::U16(prim as u16),
TyUint(UintTy::U32) => ConstInt::U32(prim as u32),
TyUint(UintTy::U64) => ConstInt::U64(prim as u64),
TyUint(UintTy::U128) => ConstInt::U128(prim),
TyUint(UintTy::Us) => ConstInt::Usize(
ConstUsize::new(prim as u64, tcx.sess.target.usize_ty)
.expect("miri should already have errored"),
),
_ => {
return Err(
ConstEvalError::NeedsRfc(
"evaluating anything other than isize/usize during typeck".to_string(),
).into(),
)
}
})
}
pub struct CompileTimeEvaluator;
impl<'tcx> Into<EvalError<'tcx>> for ConstEvalError {
fn into(self) -> EvalError<'tcx> {
EvalErrorKind::MachineError(Box::new(self)).into()
}
}
#[derive(Clone, Debug)]
enum ConstEvalError {
NeedsRfc(String),
NotConst(String),
}
impl fmt::Display for ConstEvalError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::ConstEvalError::*;
match *self {
NeedsRfc(ref msg) => {
write!(
f,
"\"{}\" needs an rfc before being allowed inside constants",
msg
)
}
NotConst(ref msg) => write!(f, "Cannot evaluate within constants: \"{}\"", msg),
}
}
}
impl Error for ConstEvalError {
fn description(&self) -> &str {
use self::ConstEvalError::*;
match *self {
NeedsRfc(_) => "this feature needs an rfc before being allowed inside constants",
NotConst(_) => "this feature is not compatible with constant evaluation",
}
}
fn cause(&self) -> Option<&Error> {
None
}
}
impl<'tcx> super::Machine<'tcx> for CompileTimeEvaluator {
type MemoryData = ();
type MemoryKinds = !;
fn eval_fn_call<'a>(
ecx: &mut EvalContext<'a, 'tcx, Self>,
instance: ty::Instance<'tcx>,
destination: Option<(Place, mir::BasicBlock)>,
_args: &[ValTy<'tcx>],
span: Span,
_sig: ty::FnSig<'tcx>,
) -> EvalResult<'tcx, bool> {
debug!("eval_fn_call: {:?}", instance);
if !ecx.tcx.is_const_fn(instance.def_id()) {
return Err(
ConstEvalError::NotConst(format!("calling non-const fn `{}`", instance)).into(),
);
}
let mir = match ecx.load_mir(instance.def) {
Ok(mir) => mir,
Err(EvalError { kind: EvalErrorKind::NoMirFor(path), .. }) => {
// some simple things like `malloc` might get accepted in the future
return Err(
ConstEvalError::NeedsRfc(format!("calling extern function `{}`", path))
.into(),
);
}
Err(other) => return Err(other),
};
let (return_place, return_to_block) = match destination {
Some((place, block)) => (place, StackPopCleanup::Goto(block)),
None => (Place::undef(), StackPopCleanup::None),
};
ecx.push_stack_frame(
instance,
span,
mir,
return_place,
return_to_block,
)?;
Ok(false)
}
fn call_intrinsic<'a>(
ecx: &mut EvalContext<'a, 'tcx, Self>,
instance: ty::Instance<'tcx>,
_args: &[ValTy<'tcx>],
dest: Place,
dest_layout: layout::TyLayout<'tcx>,
target: mir::BasicBlock,
) -> EvalResult<'tcx> {
let substs = instance.substs;
let intrinsic_name = &ecx.tcx.item_name(instance.def_id())[..];
match intrinsic_name {
"min_align_of" => {
let elem_ty = substs.type_at(0);
let elem_align = ecx.layout_of(elem_ty)?.align.abi();
let align_val = PrimVal::from_u128(elem_align as u128);
ecx.write_primval(dest, align_val, dest_layout.ty)?;
}
"size_of" => {
let ty = substs.type_at(0);
let size = ecx.layout_of(ty)?.size.bytes() as u128;
ecx.write_primval(dest, PrimVal::from_u128(size), dest_layout.ty)?;
}
name => return Err(ConstEvalError::NeedsRfc(format!("calling intrinsic `{}`", name)).into()),
}
ecx.goto_block(target);
// Since we pushed no stack frame, the main loop will act
// as if the call just completed and it's returning to the
// current frame.
Ok(())
}
fn try_ptr_op<'a>(
_ecx: &EvalContext<'a, 'tcx, Self>,
_bin_op: mir::BinOp,
left: PrimVal,
_left_ty: Ty<'tcx>,
right: PrimVal,
_right_ty: Ty<'tcx>,
) -> EvalResult<'tcx, Option<(PrimVal, bool)>> {
if left.is_bytes() && right.is_bytes() {
Ok(None)
} else {
Err(
ConstEvalError::NeedsRfc("Pointer arithmetic or comparison".to_string()).into(),
)
}
}
fn mark_static_initialized(m: !) -> EvalResult<'tcx> {
m
}
fn box_alloc<'a>(
_ecx: &mut EvalContext<'a, 'tcx, Self>,
_ty: Ty<'tcx>,
_dest: Place,
) -> EvalResult<'tcx> {
Err(
ConstEvalError::NeedsRfc("Heap allocations via `box` keyword".to_string()).into(),
)
}
fn global_item_with_linkage<'a>(
_ecx: &mut EvalContext<'a, 'tcx, Self>,
_instance: ty::Instance<'tcx>,
_mutability: Mutability,
) -> EvalResult<'tcx> {
Err(
ConstEvalError::NotConst("statics with `linkage` attribute".to_string()).into(),
)
}
}
pub fn const_eval_provider<'a, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
key: ty::ParamEnvAnd<'tcx, (DefId, &'tcx Substs<'tcx>)>,
) -> ::rustc::middle::const_val::EvalResult<'tcx> {
trace!("const eval: {:?}", key);
let (def_id, substs) = if let Some(resolved) = lookup_const_by_id(tcx, key) {
resolved
} else {
return Err(ConstEvalErr {
span: tcx.def_span(key.value.0),
kind: TypeckError
});
};
let tables = tcx.typeck_tables_of(def_id);
let body = if let Some(id) = tcx.hir.as_local_node_id(def_id) {
let body_id = tcx.hir.body_owned_by(id);
// Do match-check before building MIR
if tcx.check_match(def_id).is_err() {
return Err(ConstEvalErr {
span: tcx.def_span(key.value.0),
kind: CheckMatchError,
});
}
tcx.mir_const_qualif(def_id);
tcx.hir.body(body_id)
} else {
tcx.extern_const_body(def_id).body
};
// do not continue into miri if typeck errors occurred
// it will fail horribly
if tables.tainted_by_errors {
return Err(ConstEvalErr { span: body.value.span, kind: TypeckError })
}
trace!("running old const eval");
let old_result = ConstContext::new(tcx, key.param_env.and(substs), tables).eval(&body.value);
trace!("old const eval produced {:?}", old_result);
if tcx.sess.opts.debugging_opts.miri {
let instance = ty::Instance::new(def_id, substs);
trace!("const eval instance: {:?}, {:?}", instance, key.param_env);
let miri_result = ::interpret::eval_body(tcx, instance, key.param_env);
match (miri_result, old_result) {
((Err(err), ecx), Ok(ok)) => {
trace!("miri failed, ctfe returned {:?}", ok);
tcx.sess.span_warn(
tcx.def_span(key.value.0),
"miri failed to eval, while ctfe succeeded",
);
let () = unwrap_miri(&ecx, Err(err));
Ok(ok)
},
((Ok(_), _), Err(err)) => {
Err(err)
},
((Err(_), _), Err(err)) => Err(err),
((Ok((miri_val, miri_ty)), mut ecx), Ok(ctfe)) => {
check_ctfe_against_miri(&mut ecx, miri_val, miri_ty, ctfe.val);
Ok(ctfe)
}
}
} else {
old_result
}
}
fn check_ctfe_against_miri<'a, 'tcx>(
ecx: &mut EvalContext<'a, 'tcx, CompileTimeEvaluator>,
miri_val: PtrAndAlign,
miri_ty: Ty<'tcx>,
ctfe: ConstVal<'tcx>,
) {
use rustc::middle::const_val::ConstAggregate::*;
use rustc_const_math::ConstFloat;
use rustc::ty::TypeVariants::*;
match miri_ty.sty {
TyInt(int_ty) => {
let value = ecx.read_maybe_aligned(miri_val.aligned, |ectx| {
ectx.try_read_value(miri_val.ptr, miri_ty)
});
let prim = get_prim(ecx, value);
let c = ConstInt::new_signed_truncating(prim as i128,
int_ty,
ecx.tcx.sess.target.isize_ty);
let c = ConstVal::Integral(c);
assert_eq!(c, ctfe, "miri evaluated to {:?}, but ctfe yielded {:?}", c, ctfe);
},
TyUint(uint_ty) => {
let value = ecx.read_maybe_aligned(miri_val.aligned, |ectx| {
ectx.try_read_value(miri_val.ptr, miri_ty)
});
let prim = get_prim(ecx, value);
let c = ConstInt::new_unsigned_truncating(prim,
uint_ty,
ecx.tcx.sess.target.usize_ty);
let c = ConstVal::Integral(c);
assert_eq!(c, ctfe, "miri evaluated to {:?}, but ctfe yielded {:?}", c, ctfe);
},
TyFloat(ty) => {
let value = ecx.read_maybe_aligned(miri_val.aligned, |ectx| {
ectx.try_read_value(miri_val.ptr, miri_ty)
});
let prim = get_prim(ecx, value);
let f = ConstVal::Float(ConstFloat { bits: prim, ty });
assert_eq!(f, ctfe, "miri evaluated to {:?}, but ctfe yielded {:?}", f, ctfe);
},
TyBool => {
let value = ecx.read_maybe_aligned(miri_val.aligned, |ectx| {
ectx.try_read_value(miri_val.ptr, miri_ty)
});
let bits = get_prim(ecx, value);
if bits > 1 {
bug!("miri evaluated to {}, but expected a bool {:?}", bits, ctfe);
}
let b = ConstVal::Bool(bits == 1);
assert_eq!(b, ctfe, "miri evaluated to {:?}, but ctfe yielded {:?}", b, ctfe);
},
TyChar => {
let value = ecx.read_maybe_aligned(miri_val.aligned, |ectx| {
ectx.try_read_value(miri_val.ptr, miri_ty)
});
let bits = get_prim(ecx, value);
if let Some(cm) = ::std::char::from_u32(bits as u32) {
assert_eq!(
ConstVal::Char(cm), ctfe,
"miri evaluated to {:?}, but expected {:?}", cm, ctfe,
);
} else {
bug!("miri evaluated to {}, but expected a char {:?}", bits, ctfe);
}
},
TyStr => {
let value = ecx.read_maybe_aligned(miri_val.aligned, |ectx| {
ectx.try_read_value(miri_val.ptr, miri_ty)
});
if let Ok(Some(Value::ByValPair(PrimVal::Ptr(ptr), PrimVal::Bytes(len)))) = value {
let bytes = ecx
.memory
.read_bytes(ptr.into(), len as u64)
.expect("bad miri memory for str");
if let Ok(s) = ::std::str::from_utf8(bytes) {
if let ConstVal::Str(s2) = ctfe {
assert_eq!(s, s2, "miri produced {:?}, but expected {:?}", s, s2);
} else {
bug!("miri produced {:?}, but expected {:?}", s, ctfe);
}
} else {
bug!(
"miri failed to produce valid utf8 {:?}, while ctfe produced {:?}",
bytes,
ctfe,
);
}
} else {
bug!("miri evaluated to {:?}, but expected a str {:?}", value, ctfe);
}
},
TyArray(elem_ty, n) => {
let n = n.val.to_const_int().unwrap().to_u64().unwrap();
let size = ecx.layout_of(elem_ty).unwrap().size.bytes();
let vec: Vec<(ConstVal, Ty<'tcx>)> = match ctfe {
ConstVal::ByteStr(arr) => arr.data.iter().map(|&b| {
(ConstVal::Integral(ConstInt::U8(b)), ecx.tcx.types.u8)
}).collect(),
ConstVal::Aggregate(Array(v)) => {
v.iter().map(|c| (c.val, c.ty)).collect()
},
ConstVal::Aggregate(Repeat(v, n)) => {
vec![(v.val, v.ty); n as usize]
},
_ => bug!("miri produced {:?}, but ctfe yielded {:?}", miri_ty, ctfe),
};
for (i, elem) in vec.into_iter().enumerate() {
assert!((i as u64) < n);
let ptr = miri_val.offset(size * i as u64, &ecx).unwrap();
check_ctfe_against_miri(ecx, ptr, elem_ty, elem.0);
}
},
TyTuple(..) => {
let vec = match ctfe {
ConstVal::Aggregate(Tuple(v)) => v,
_ => bug!("miri produced {:?}, but ctfe yielded {:?}", miri_ty, ctfe),
};
let layout = ecx.layout_of(miri_ty).unwrap();
for (i, elem) in vec.into_iter().enumerate() {
let offset = layout.fields.offset(i);
let ptr = miri_val.offset(offset.bytes(), &ecx).unwrap();
check_ctfe_against_miri(ecx, ptr, elem.ty, elem.val);
}
},
TyAdt(def, _) => {
let (struct_variant, extra) = if def.is_enum() {
let discr = ecx.read_discriminant_value(
Place::Ptr { ptr: miri_val, extra: PlaceExtra::None },
miri_ty).unwrap();
let variant = def.discriminants(ecx.tcx).position(|variant_discr| {
variant_discr.to_u128_unchecked() == discr
}).expect("miri produced invalid enum discriminant");
(&def.variants[variant], PlaceExtra::DowncastVariant(variant))
} else {
(def.struct_variant(), PlaceExtra::None)
};
let vec = match ctfe {
ConstVal::Aggregate(Struct(v)) => v,
ConstVal::Variant(did) => {
assert_eq!(struct_variant.fields.len(), 0);
assert_eq!(did, struct_variant.did);
return;
},
ctfe => bug!("miri produced {:?}, but ctfe yielded {:?}", miri_ty, ctfe),
};
let layout = ecx.layout_of(miri_ty).unwrap();
for &(name, elem) in vec.into_iter() {
let field = struct_variant.fields.iter().position(|f| f.name == name).unwrap();
let (place, _) = ecx.place_field(
Place::Ptr { ptr: miri_val, extra },
Field::new(field),
layout,
).unwrap();
let ptr = place.to_ptr_extra_aligned().0;
check_ctfe_against_miri(ecx, ptr, elem.ty, elem.val);
}
},
TySlice(_) => bug!("miri produced a slice?"),
// not supported by ctfe
TyRawPtr(_) |
TyRef(..) => {}
TyDynamic(..) => bug!("miri produced a trait object"),
TyClosure(..) => bug!("miri produced a closure"),
TyGenerator(..) => bug!("miri produced a generator"),
TyNever => bug!("miri produced a value of the never type"),
TyProjection(_) => bug!("miri produced a projection"),
TyAnon(..) => bug!("miri produced an impl Trait type"),
TyParam(_) => bug!("miri produced an unmonomorphized type"),
TyInfer(_) => bug!("miri produced an uninferred type"),
TyError => bug!("miri produced a type error"),
TyForeign(_) => bug!("miri produced an extern type"),
// should be fine
TyFnDef(..) => {}
TyFnPtr(_) => {
let value = ecx.read_maybe_aligned(miri_val.aligned, |ectx| {
ectx.try_read_value(miri_val.ptr, miri_ty)
});
let ptr = match value {
Ok(Some(Value::ByVal(PrimVal::Ptr(ptr)))) => ptr,
value => bug!("expected fn ptr, got {:?}", value),
};
let inst = ecx.memory.get_fn(ptr).unwrap();
match ctfe {
ConstVal::Function(did, substs) => {
let ctfe = ty::Instance::resolve(
ecx.tcx,
ecx.param_env,
did,
substs,
).unwrap();
assert_eq!(inst, ctfe, "expected fn ptr {:?}, but got {:?}", ctfe, inst);
},
_ => bug!("ctfe produced {:?}, but miri produced function {:?}", ctfe, inst),
}
},
}
}
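// Illustrative sketch (not part of the original change): the `TyChar` arm above
// leans on `char::from_u32` to reject bit patterns that are not valid scalar
// values; the helper name is hypothetical.
fn _sketch_char_validity() {
    assert_eq!(::std::char::from_u32(0x61), Some('a'));
    assert_eq!(::std::char::from_u32(0xD800), None); // surrogate
    assert_eq!(::std::char::from_u32(0x11_0000), None); // beyond the char range
}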
fn get_prim<'a, 'tcx>(
ecx: &mut EvalContext<'a, 'tcx, CompileTimeEvaluator>,
res: Result<Option<Value>, EvalError<'tcx>>,
) -> u128 {
match res {
Ok(Some(Value::ByVal(prim))) => unwrap_miri(ecx, prim.to_bytes()),
Err(err) => unwrap_miri(ecx, Err(err)),
val => bug!("got {:?}", val),
}
}
fn unwrap_miri<'a, 'tcx, T>(
ecx: &EvalContext<'a, 'tcx, CompileTimeEvaluator>,
res: Result<T, EvalError<'tcx>>,
) -> T {
match res {
Ok(val) => val,
Err(mut err) => {
ecx.report(&mut err);
ecx.tcx.sess.abort_if_errors();
bug!("{:#?}", err);
}
}
}
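// Illustrative sketch (not part of the original change): `const_eval_provider`
// above runs both evaluators and compares their results only when both succeed,
// while the reference result is what actually gets returned. A std-only analogue
// of that shape; the names and error type are hypothetical.
fn _sketch_cross_check<T: PartialEq + ::std::fmt::Debug>(
    reference: Result<T, String>,
    experimental: Result<T, String>,
) -> Result<T, String> {
    match (experimental, reference) {
        // both succeeded: the results must agree, and the reference one is returned
        (Ok(new), Ok(old)) => {
            assert_eq!(new, old, "experimental evaluator disagrees with the reference");
            Ok(old)
        }
        // only the experimental evaluator failed: keep the reference result
        // (the code above additionally emits a warning in this case)
        (Err(_), Ok(old)) => Ok(old),
        // the reference evaluator failed: its error wins either way
        (_, Err(e)) => Err(e),
    }
}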

File diff suppressed because it is too large

View File

@ -0,0 +1,117 @@
//! This module contains everything needed to instantiate an interpreter.
//! This separation exists to ensure that no fancy miri features like
//! interpreting common C functions leak into CTFE.
use rustc::mir::interpret::{EvalResult, PrimVal, MemoryPointer, AccessKind};
use super::{EvalContext, Place, ValTy, Memory};
use rustc::mir;
use rustc::ty::{self, Ty};
use syntax::codemap::Span;
use syntax::ast::Mutability;
/// Each method of this trait signifies a point where CTFE evaluation would fail
/// and some use-case-dependent behaviour can instead be applied.
pub trait Machine<'tcx>: Sized {
/// Additional data that can be accessed via the Memory
type MemoryData;
/// Additional memory kinds a machine wishes to distinguish from the builtin ones
type MemoryKinds: ::std::fmt::Debug + PartialEq + Copy + Clone;
/// Entry point to all function calls.
///
/// Returns Ok(true) when the function was handled completely,
/// e.g. due to missing MIR
///
/// Returns Ok(false) if a new stack frame was pushed
fn eval_fn_call<'a>(
ecx: &mut EvalContext<'a, 'tcx, Self>,
instance: ty::Instance<'tcx>,
destination: Option<(Place, mir::BasicBlock)>,
args: &[ValTy<'tcx>],
span: Span,
sig: ty::FnSig<'tcx>,
) -> EvalResult<'tcx, bool>;
/// Directly process an intrinsic without pushing a stack frame.
fn call_intrinsic<'a>(
ecx: &mut EvalContext<'a, 'tcx, Self>,
instance: ty::Instance<'tcx>,
args: &[ValTy<'tcx>],
dest: Place,
dest_layout: ty::layout::TyLayout<'tcx>,
target: mir::BasicBlock,
) -> EvalResult<'tcx>;
/// Called for all binary operations except on float types.
///
/// Returns `None` if the operation should be handled by the integer
/// op code in order to share more code between machines
///
/// Returns a (value, overflowed) pair if the operation succeeded
fn try_ptr_op<'a>(
ecx: &EvalContext<'a, 'tcx, Self>,
bin_op: mir::BinOp,
left: PrimVal,
left_ty: Ty<'tcx>,
right: PrimVal,
right_ty: Ty<'tcx>,
) -> EvalResult<'tcx, Option<(PrimVal, bool)>>;
/// Called when trying to mark machine-defined `MemoryKinds` as static
fn mark_static_initialized(m: Self::MemoryKinds) -> EvalResult<'tcx>;
/// Heap allocations via the `box` keyword
///
/// Returns a pointer to the allocated memory
fn box_alloc<'a>(
ecx: &mut EvalContext<'a, 'tcx, Self>,
ty: Ty<'tcx>,
dest: Place,
) -> EvalResult<'tcx>;
/// Called when trying to access a global declared with a `linkage` attribute
fn global_item_with_linkage<'a>(
ecx: &mut EvalContext<'a, 'tcx, Self>,
instance: ty::Instance<'tcx>,
mutability: Mutability,
) -> EvalResult<'tcx>;
fn check_locks<'a>(
_mem: &Memory<'a, 'tcx, Self>,
_ptr: MemoryPointer,
_size: u64,
_access: AccessKind,
) -> EvalResult<'tcx> {
Ok(())
}
fn add_lock<'a>(
_mem: &mut Memory<'a, 'tcx, Self>,
_id: u64,
) {}
fn free_lock<'a>(
_mem: &mut Memory<'a, 'tcx, Self>,
_id: u64,
_len: u64,
) -> EvalResult<'tcx> {
Ok(())
}
fn end_region<'a>(
_ecx: &mut EvalContext<'a, 'tcx, Self>,
_reg: Option<::rustc::middle::region::Scope>,
) -> EvalResult<'tcx> {
Ok(())
}
fn validation_op<'a>(
_ecx: &mut EvalContext<'a, 'tcx, Self>,
_op: ::rustc::mir::ValidationOp,
_operand: &::rustc::mir::ValidationOperand<'tcx, ::rustc::mir::Place<'tcx>>,
) -> EvalResult<'tcx> {
Ok(())
}
}
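// Illustrative sketch (not part of the original change): the trait above lets one
// interpreter core defer policy decisions to a "machine". A minimal std-only analogue
// of that hook pattern; all names below are hypothetical.
trait _SketchMachine {
    /// Returns Ok(true) if the call was fully handled, Ok(false) to use the shared path.
    fn hook_call(&mut self, name: &str) -> Result<bool, String>;
}
struct _StrictCtfe;
impl _SketchMachine for _StrictCtfe {
    fn hook_call(&mut self, name: &str) -> Result<bool, String> {
        Err(format!("calling `{}` is not allowed in constants", name))
    }
}
struct _PermissiveMiri;
impl _SketchMachine for _PermissiveMiri {
    fn hook_call(&mut self, _name: &str) -> Result<bool, String> {
        Ok(true) // pretend the call was intercepted and emulated
    }
}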

File diff suppressed because it is too large

View File

@ -0,0 +1,23 @@
//! An interpreter for MIR used in CTFE and by miri
mod cast;
mod const_eval;
mod eval_context;
mod place;
mod machine;
mod memory;
mod operator;
mod step;
mod terminator;
mod traits;
pub use self::eval_context::{EvalContext, Frame, ResourceLimits, StackPopCleanup,
TyAndPacked, ValTy};
pub use self::place::{Place, PlaceExtra};
pub use self::memory::{Memory, MemoryKind, HasMemory};
pub use self::const_eval::{eval_body_as_integer, eval_body, CompileTimeEvaluator, const_eval_provider};
pub use self::machine::Machine;

View File

@ -0,0 +1,267 @@
use rustc::mir;
use rustc::ty::Ty;
use rustc_const_math::ConstFloat;
use syntax::ast::FloatTy;
use std::cmp::Ordering;
use super::{EvalContext, Place, Machine, ValTy};
use rustc::mir::interpret::{EvalResult, PrimVal, PrimValKind, Value, bytes_to_f32, bytes_to_f64};
impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
fn binop_with_overflow(
&mut self,
op: mir::BinOp,
left: ValTy<'tcx>,
right: ValTy<'tcx>,
) -> EvalResult<'tcx, (PrimVal, bool)> {
let left_val = self.value_to_primval(left)?;
let right_val = self.value_to_primval(right)?;
self.binary_op(op, left_val, left.ty, right_val, right.ty)
}
/// Applies the binary operation `op` to the two operands and writes a tuple of the result
/// and a boolean signifying the potential overflow to the destination.
pub fn intrinsic_with_overflow(
&mut self,
op: mir::BinOp,
left: ValTy<'tcx>,
right: ValTy<'tcx>,
dest: Place,
dest_ty: Ty<'tcx>,
) -> EvalResult<'tcx> {
let (val, overflowed) = self.binop_with_overflow(op, left, right)?;
let val = Value::ByValPair(val, PrimVal::from_bool(overflowed));
let valty = ValTy {
value: val,
ty: dest_ty,
};
self.write_value(valty, dest)
}
/// Applies the binary operation `op` to the arguments and writes the result to the
/// destination. Returns `true` if the operation overflowed.
pub fn intrinsic_overflowing(
&mut self,
op: mir::BinOp,
left: ValTy<'tcx>,
right: ValTy<'tcx>,
dest: Place,
dest_ty: Ty<'tcx>,
) -> EvalResult<'tcx, bool> {
let (val, overflowed) = self.binop_with_overflow(op, left, right)?;
self.write_primval(dest, val, dest_ty)?;
Ok(overflowed)
}
}
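// Illustrative sketch (not part of the original change): the helpers above follow the
// std `overflowing_*` convention of returning a (value, overflowed) pair; the function
// name is hypothetical.
fn _sketch_overflow_pair() {
    assert_eq!(255u8.overflowing_add(1), (0, true));
    assert_eq!(100u8.overflowing_add(27), (127, false));
    assert_eq!(i8::min_value().overflowing_neg(), (-128, true));
}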
macro_rules! overflow {
($op:ident, $l:expr, $r:expr) => ({
let (val, overflowed) = $l.$op($r);
let primval = PrimVal::Bytes(val as u128);
Ok((primval, overflowed))
})
}
macro_rules! int_arithmetic {
($kind:expr, $int_op:ident, $l:expr, $r:expr) => ({
let l = $l;
let r = $r;
use rustc::mir::interpret::PrimValKind::*;
match $kind {
I8 => overflow!($int_op, l as i8, r as i8),
I16 => overflow!($int_op, l as i16, r as i16),
I32 => overflow!($int_op, l as i32, r as i32),
I64 => overflow!($int_op, l as i64, r as i64),
I128 => overflow!($int_op, l as i128, r as i128),
U8 => overflow!($int_op, l as u8, r as u8),
U16 => overflow!($int_op, l as u16, r as u16),
U32 => overflow!($int_op, l as u32, r as u32),
U64 => overflow!($int_op, l as u64, r as u64),
U128 => overflow!($int_op, l as u128, r as u128),
_ => bug!("int_arithmetic should only be called on int primvals"),
}
})
}
macro_rules! int_shift {
($kind:expr, $int_op:ident, $l:expr, $r:expr) => ({
let l = $l;
let r = $r;
let r_wrapped = r as u32;
match $kind {
I8 => overflow!($int_op, l as i8, r_wrapped),
I16 => overflow!($int_op, l as i16, r_wrapped),
I32 => overflow!($int_op, l as i32, r_wrapped),
I64 => overflow!($int_op, l as i64, r_wrapped),
I128 => overflow!($int_op, l as i128, r_wrapped),
U8 => overflow!($int_op, l as u8, r_wrapped),
U16 => overflow!($int_op, l as u16, r_wrapped),
U32 => overflow!($int_op, l as u32, r_wrapped),
U64 => overflow!($int_op, l as u64, r_wrapped),
U128 => overflow!($int_op, l as u128, r_wrapped),
_ => bug!("int_shift should only be called on int primvals"),
}.map(|(val, over)| (val, over || r != r_wrapped as u128))
})
}
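// Illustrative sketch (not part of the original change): `int_shift!` truncates the
// right-hand side to u32 and relies on the std `overflowing_sh*` semantics, which mask
// the shift amount and report whether it was out of range; the function name is hypothetical.
fn _sketch_shift_overflow() {
    assert_eq!(1u8.overflowing_shl(3), (8, false)); // in range
    assert_eq!(1u8.overflowing_shl(9), (2, true)); // 9 is masked to 9 % 8 == 1
}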
impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
/// Returns the result of the specified operation and whether it overflowed.
pub fn binary_op(
&self,
bin_op: mir::BinOp,
left: PrimVal,
left_ty: Ty<'tcx>,
right: PrimVal,
right_ty: Ty<'tcx>,
) -> EvalResult<'tcx, (PrimVal, bool)> {
use rustc::mir::BinOp::*;
use rustc::mir::interpret::PrimValKind::*;
let left_kind = self.ty_to_primval_kind(left_ty)?;
let right_kind = self.ty_to_primval_kind(right_ty)?;
//trace!("Running binary op {:?}: {:?} ({:?}), {:?} ({:?})", bin_op, left, left_kind, right, right_kind);
// I: Handle operations that support pointers
if !left_kind.is_float() && !right_kind.is_float() {
if let Some(handled) = M::try_ptr_op(self, bin_op, left, left_ty, right, right_ty)? {
return Ok(handled);
}
}
// II: From now on, everything must be bytes, no pointers
let l = left.to_bytes()?;
let r = right.to_bytes()?;
// These ops can have an RHS with a different numeric type.
if right_kind.is_int() && (bin_op == Shl || bin_op == Shr) {
return match bin_op {
Shl => int_shift!(left_kind, overflowing_shl, l, r),
Shr => int_shift!(left_kind, overflowing_shr, l, r),
_ => bug!("it has already been checked that this is a shift op"),
};
}
if left_kind != right_kind {
let msg = format!(
"unimplemented binary op {:?}: {:?} ({:?}), {:?} ({:?})",
bin_op,
left,
left_kind,
right,
right_kind
);
return err!(Unimplemented(msg));
}
let float_op = |op, l, r, ty| {
let l = ConstFloat {
bits: l,
ty,
};
let r = ConstFloat {
bits: r,
ty,
};
match op {
Eq => PrimVal::from_bool(l.try_cmp(r).unwrap() == Ordering::Equal),
Ne => PrimVal::from_bool(l.try_cmp(r).unwrap() != Ordering::Equal),
Lt => PrimVal::from_bool(l.try_cmp(r).unwrap() == Ordering::Less),
Le => PrimVal::from_bool(l.try_cmp(r).unwrap() != Ordering::Greater),
Gt => PrimVal::from_bool(l.try_cmp(r).unwrap() == Ordering::Greater),
Ge => PrimVal::from_bool(l.try_cmp(r).unwrap() != Ordering::Less),
Add => PrimVal::Bytes((l + r).unwrap().bits),
Sub => PrimVal::Bytes((l - r).unwrap().bits),
Mul => PrimVal::Bytes((l * r).unwrap().bits),
Div => PrimVal::Bytes((l / r).unwrap().bits),
Rem => PrimVal::Bytes((l % r).unwrap().bits),
_ => bug!("invalid float op: `{:?}`", op),
}
};
let val = match (bin_op, left_kind) {
(_, F32) => float_op(bin_op, l, r, FloatTy::F32),
(_, F64) => float_op(bin_op, l, r, FloatTy::F64),
(Eq, _) => PrimVal::from_bool(l == r),
(Ne, _) => PrimVal::from_bool(l != r),
(Lt, k) if k.is_signed_int() => PrimVal::from_bool((l as i128) < (r as i128)),
(Lt, _) => PrimVal::from_bool(l < r),
(Le, k) if k.is_signed_int() => PrimVal::from_bool((l as i128) <= (r as i128)),
(Le, _) => PrimVal::from_bool(l <= r),
(Gt, k) if k.is_signed_int() => PrimVal::from_bool((l as i128) > (r as i128)),
(Gt, _) => PrimVal::from_bool(l > r),
(Ge, k) if k.is_signed_int() => PrimVal::from_bool((l as i128) >= (r as i128)),
(Ge, _) => PrimVal::from_bool(l >= r),
(BitOr, _) => PrimVal::Bytes(l | r),
(BitAnd, _) => PrimVal::Bytes(l & r),
(BitXor, _) => PrimVal::Bytes(l ^ r),
(Add, k) if k.is_int() => return int_arithmetic!(k, overflowing_add, l, r),
(Sub, k) if k.is_int() => return int_arithmetic!(k, overflowing_sub, l, r),
(Mul, k) if k.is_int() => return int_arithmetic!(k, overflowing_mul, l, r),
(Div, k) if k.is_int() => return int_arithmetic!(k, overflowing_div, l, r),
(Rem, k) if k.is_int() => return int_arithmetic!(k, overflowing_rem, l, r),
_ => {
let msg = format!(
"unimplemented binary op {:?}: {:?} ({:?}), {:?} ({:?})",
bin_op,
left,
left_kind,
right,
right_kind
);
return err!(Unimplemented(msg));
}
};
Ok((val, false))
}
}
pub fn unary_op<'tcx>(
un_op: mir::UnOp,
val: PrimVal,
val_kind: PrimValKind,
) -> EvalResult<'tcx, PrimVal> {
use rustc::mir::UnOp::*;
use rustc::mir::interpret::PrimValKind::*;
let bytes = val.to_bytes()?;
let result_bytes = match (un_op, val_kind) {
(Not, Bool) => !val.to_bool()? as u128,
(Not, U8) => !(bytes as u8) as u128,
(Not, U16) => !(bytes as u16) as u128,
(Not, U32) => !(bytes as u32) as u128,
(Not, U64) => !(bytes as u64) as u128,
(Not, U128) => !bytes,
(Not, I8) => !(bytes as i8) as u128,
(Not, I16) => !(bytes as i16) as u128,
(Not, I32) => !(bytes as i32) as u128,
(Not, I64) => !(bytes as i64) as u128,
(Not, I128) => !(bytes as i128) as u128,
(Neg, I8) => -(bytes as i8) as u128,
(Neg, I16) => -(bytes as i16) as u128,
(Neg, I32) => -(bytes as i32) as u128,
(Neg, I64) => -(bytes as i64) as u128,
(Neg, I128) => -(bytes as i128) as u128,
(Neg, F32) => (-bytes_to_f32(bytes)).bits,
(Neg, F64) => (-bytes_to_f64(bytes)).bits,
_ => {
let msg = format!("unimplemented unary op: {:?}, {:?}", un_op, val);
return err!(Unimplemented(msg));
}
};
Ok(PrimVal::Bytes(result_bytes))
}
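// Illustrative sketch (not part of the original change): `unary_op` operates on the
// value truncated to its kind's width and then widens the result back to u128, so sign
// bits are re-extended by the final `as u128` cast; the function name is hypothetical.
fn _sketch_neg_widening() {
    let bytes: u128 = 1;
    let negated = -(bytes as i8) as u128;
    assert_eq!(negated, u128::max_value()); // -1i8 sign-extends to all ones
}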

View File

@ -0,0 +1,428 @@
use rustc::mir;
use rustc::ty::{self, Ty};
use rustc::ty::layout::{LayoutOf, TyLayout};
use rustc_data_structures::indexed_vec::Idx;
use rustc::mir::interpret::{GlobalId, PtrAndAlign};
use rustc::mir::interpret::{Value, PrimVal, EvalResult, Pointer, MemoryPointer};
use super::{EvalContext, Machine, ValTy};
use interpret::memory::HasMemory;
#[derive(Copy, Clone, Debug)]
pub enum Place {
/// A place referring to a value allocated in the `Memory` system.
Ptr {
/// A place may have an invalid (integral or undef) pointer,
/// since it might be turned back into a reference
/// before ever being dereferenced.
ptr: PtrAndAlign,
extra: PlaceExtra,
},
/// A place referring to a value on the stack. Represented by a stack frame index paired with
/// a Mir local index.
Local { frame: usize, local: mir::Local },
}
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum PlaceExtra {
None,
Length(u64),
Vtable(MemoryPointer),
DowncastVariant(usize),
}
impl<'tcx> Place {
/// Produces a `Place` that will error if it is ever read from
pub fn undef() -> Self {
Self::from_primval_ptr(PrimVal::Undef.into())
}
pub fn from_primval_ptr(ptr: Pointer) -> Self {
Place::Ptr {
ptr: PtrAndAlign { ptr, aligned: true },
extra: PlaceExtra::None,
}
}
pub fn from_ptr(ptr: MemoryPointer) -> Self {
Self::from_primval_ptr(ptr.into())
}
pub fn to_ptr_extra_aligned(self) -> (PtrAndAlign, PlaceExtra) {
match self {
Place::Ptr { ptr, extra } => (ptr, extra),
_ => bug!("to_ptr_and_extra: expected Place::Ptr, got {:?}", self),
}
}
pub fn to_ptr(self) -> EvalResult<'tcx, MemoryPointer> {
let (ptr, extra) = self.to_ptr_extra_aligned();
// At this point, we forget about the alignment information -- the place has been turned into a reference,
// and no matter where it came from, it now must be aligned.
assert_eq!(extra, PlaceExtra::None);
ptr.to_ptr()
}
pub(super) fn elem_ty_and_len(self, ty: Ty<'tcx>) -> (Ty<'tcx>, u64) {
match ty.sty {
ty::TyArray(elem, n) => (elem, n.val.to_const_int().unwrap().to_u64().unwrap() as u64),
ty::TySlice(elem) => {
match self {
Place::Ptr { extra: PlaceExtra::Length(len), .. } => (elem, len),
_ => {
bug!(
"elem_ty_and_len of a TySlice given non-slice place: {:?}",
self
)
}
}
}
_ => bug!("elem_ty_and_len expected array or slice, got {:?}", ty),
}
}
}
impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
/// Reads a value from the place without going through the intermediate step of obtaining
/// a `miri::Place`
pub fn try_read_place(
&mut self,
place: &mir::Place<'tcx>,
) -> EvalResult<'tcx, Option<Value>> {
use rustc::mir::Place::*;
match *place {
// Might allow this in the future, right now there's no way to do this from Rust code anyway
Local(mir::RETURN_PLACE) => err!(ReadFromReturnPointer),
// Directly reading a local will always succeed
Local(local) => self.frame().get_local(local).map(Some),
// Directly reading a static will always succeed
Static(ref static_) => {
let instance = ty::Instance::mono(self.tcx, static_.def_id);
let cid = GlobalId {
instance,
promoted: None,
};
Ok(Some(Value::ByRef(
self.tcx.interpret_interner.borrow().get_cached(cid).expect("global not cached"),
)))
}
Projection(ref proj) => self.try_read_place_projection(proj),
}
}
fn try_read_place_projection(
&mut self,
proj: &mir::PlaceProjection<'tcx>,
) -> EvalResult<'tcx, Option<Value>> {
use rustc::mir::ProjectionElem::*;
let base = match self.try_read_place(&proj.base)? {
Some(base) => base,
None => return Ok(None),
};
let base_ty = self.place_ty(&proj.base);
match proj.elem {
Field(field, _) => {
let base_layout = self.layout_of(base_ty)?;
let field_index = field.index();
let field = base_layout.field(&self, field_index)?;
let offset = base_layout.fields.offset(field_index);
match base {
// the field covers the entire type
Value::ByValPair(..) |
Value::ByVal(_) if offset.bytes() == 0 && field.size == base_layout.size => Ok(Some(base)),
// split fat pointers, 2 element tuples, ...
Value::ByValPair(a, b) if base_layout.fields.count() == 2 => {
let val = [a, b][field_index];
Ok(Some(Value::ByVal(val)))
},
_ => Ok(None),
}
},
// The NullablePointer cases should work fine; we need to take care with normal enums
Downcast(..) |
Subslice { .. } |
// reading index 0 or index 1 from a `ByVal` or `ByValPair` could be optimized
ConstantIndex { .. } | Index(_) |
// No way to optimize this projection any better than the normal place path
Deref => Ok(None),
}
}
/// Returns a value and, in the case of a `ByRef`, whether we are supposed to use aligned accesses.
pub(super) fn eval_and_read_place(
&mut self,
place: &mir::Place<'tcx>,
) -> EvalResult<'tcx, Value> {
// Shortcut for things like accessing a fat pointer's field,
// which would otherwise (in the `eval_place` path) require moving a `ByValPair` to memory
// and returning a `Place::Ptr` to it
if let Some(val) = self.try_read_place(place)? {
return Ok(val);
}
let place = self.eval_place(place)?;
self.read_place(place)
}
pub fn read_place(&self, place: Place) -> EvalResult<'tcx, Value> {
match place {
Place::Ptr { ptr, extra } => {
assert_eq!(extra, PlaceExtra::None);
Ok(Value::ByRef(ptr))
}
Place::Local { frame, local } => self.stack[frame].get_local(local),
}
}
pub fn eval_place(&mut self, mir_place: &mir::Place<'tcx>) -> EvalResult<'tcx, Place> {
use rustc::mir::Place::*;
let place = match *mir_place {
Local(mir::RETURN_PLACE) => self.frame().return_place,
Local(local) => Place::Local {
frame: self.cur_frame(),
local,
},
Static(ref static_) => {
let instance = ty::Instance::mono(self.tcx, static_.def_id);
let gid = GlobalId {
instance,
promoted: None,
};
Place::Ptr {
ptr: self.tcx.interpret_interner.borrow().get_cached(gid).expect("uncached global"),
extra: PlaceExtra::None,
}
}
Projection(ref proj) => {
let ty = self.place_ty(&proj.base);
let place = self.eval_place(&proj.base)?;
return self.eval_place_projection(place, ty, &proj.elem);
}
};
if log_enabled!(::log::LogLevel::Trace) {
self.dump_local(place);
}
Ok(place)
}
pub fn place_field(
&mut self,
base: Place,
field: mir::Field,
mut base_layout: TyLayout<'tcx>,
) -> EvalResult<'tcx, (Place, TyLayout<'tcx>)> {
match base {
Place::Ptr { extra: PlaceExtra::DowncastVariant(variant_index), .. } => {
base_layout = base_layout.for_variant(&self, variant_index);
}
_ => {}
}
let field_index = field.index();
let field = base_layout.field(&self, field_index)?;
let offset = base_layout.fields.offset(field_index);
// Do not allocate in trivial cases
let (base_ptr, base_extra) = match base {
Place::Ptr { ptr, extra } => (ptr, extra),
Place::Local { frame, local } => {
match self.stack[frame].get_local(local)? {
// in case the field covers the entire type, just return the value
Value::ByVal(_) if offset.bytes() == 0 &&
field.size == base_layout.size => {
return Ok((base, field));
}
Value::ByRef { .. } |
Value::ByValPair(..) |
Value::ByVal(_) => self.force_allocation(base)?.to_ptr_extra_aligned(),
}
}
};
let offset = match base_extra {
PlaceExtra::Vtable(tab) => {
let (_, align) = self.size_and_align_of_dst(
base_layout.ty,
base_ptr.ptr.to_value_with_vtable(tab),
)?;
offset.abi_align(align).bytes()
}
_ => offset.bytes(),
};
let mut ptr = base_ptr.offset(offset, &self)?;
// If we were unaligned, stay unaligned; and no matter what we were before,
// a packed layout means we may no longer assume alignment.
ptr.aligned &= !base_layout.is_packed();
let extra = if !field.is_unsized() {
PlaceExtra::None
} else {
match base_extra {
PlaceExtra::None => bug!("expected fat pointer"),
PlaceExtra::DowncastVariant(..) => {
bug!("Rust doesn't support unsized fields in enum variants")
}
PlaceExtra::Vtable(_) |
PlaceExtra::Length(_) => {}
}
base_extra
};
Ok((Place::Ptr { ptr, extra }, field))
}
pub fn val_to_place(&self, val: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, Place> {
Ok(match self.tcx.struct_tail(ty).sty {
ty::TyDynamic(..) => {
let (ptr, vtable) = self.into_ptr_vtable_pair(val)?;
Place::Ptr {
ptr: PtrAndAlign { ptr, aligned: true },
extra: PlaceExtra::Vtable(vtable),
}
}
ty::TyStr | ty::TySlice(_) => {
let (ptr, len) = self.into_slice(val)?;
Place::Ptr {
ptr: PtrAndAlign { ptr, aligned: true },
extra: PlaceExtra::Length(len),
}
}
_ => Place::from_primval_ptr(self.into_ptr(val)?),
})
}
pub fn place_index(
&mut self,
base: Place,
outer_ty: Ty<'tcx>,
n: u64,
) -> EvalResult<'tcx, Place> {
// Taking the outer type here may seem odd; it's needed because for array types, the outer type gives away the length.
let base = self.force_allocation(base)?;
let (base_ptr, _) = base.to_ptr_extra_aligned();
let (elem_ty, len) = base.elem_ty_and_len(outer_ty);
let elem_size = self.layout_of(elem_ty)?.size.bytes();
assert!(
n < len,
"Tried to access element {} of array/slice with length {}",
n,
len
);
let ptr = base_ptr.offset(n * elem_size, &*self)?;
Ok(Place::Ptr {
ptr,
extra: PlaceExtra::None,
})
}
pub(super) fn place_downcast(
&mut self,
base: Place,
variant: usize,
) -> EvalResult<'tcx, Place> {
// FIXME(solson)
let base = self.force_allocation(base)?;
let (ptr, _) = base.to_ptr_extra_aligned();
let extra = PlaceExtra::DowncastVariant(variant);
Ok(Place::Ptr { ptr, extra })
}
pub fn eval_place_projection(
&mut self,
base: Place,
base_ty: Ty<'tcx>,
proj_elem: &mir::ProjectionElem<'tcx, mir::Local, Ty<'tcx>>,
) -> EvalResult<'tcx, Place> {
use rustc::mir::ProjectionElem::*;
let (ptr, extra) = match *proj_elem {
Field(field, _) => {
let layout = self.layout_of(base_ty)?;
return Ok(self.place_field(base, field, layout)?.0);
}
Downcast(_, variant) => {
return self.place_downcast(base, variant);
}
Deref => {
let val = self.read_place(base)?;
let pointee_type = match base_ty.sty {
ty::TyRawPtr(ref tam) |
ty::TyRef(_, ref tam) => tam.ty,
ty::TyAdt(def, _) if def.is_box() => base_ty.boxed_ty(),
_ => bug!("can only deref pointer types"),
};
trace!("deref to {} on {:?}", pointee_type, val);
return self.val_to_place(val, pointee_type);
}
Index(local) => {
let value = self.frame().get_local(local)?;
let ty = self.tcx.types.usize;
let n = self.value_to_primval(ValTy { value, ty })?.to_u64()?;
return self.place_index(base, base_ty, n);
}
ConstantIndex {
offset,
min_length,
from_end,
} => {
// FIXME(solson)
let base = self.force_allocation(base)?;
let (base_ptr, _) = base.to_ptr_extra_aligned();
let (elem_ty, n) = base.elem_ty_and_len(base_ty);
let elem_size = self.layout_of(elem_ty)?.size.bytes();
assert!(n >= min_length as u64);
let index = if from_end {
n - u64::from(offset)
} else {
u64::from(offset)
};
let ptr = base_ptr.offset(index * elem_size, &self)?;
(ptr, PlaceExtra::None)
}
Subslice { from, to } => {
// FIXME(solson)
let base = self.force_allocation(base)?;
let (base_ptr, _) = base.to_ptr_extra_aligned();
let (elem_ty, n) = base.elem_ty_and_len(base_ty);
let elem_size = self.layout_of(elem_ty)?.size.bytes();
assert!(u64::from(from) <= n - u64::from(to));
let ptr = base_ptr.offset(u64::from(from) * elem_size, &self)?;
// subslicing arrays produces arrays
let extra = if self.type_is_sized(base_ty) {
PlaceExtra::None
} else {
PlaceExtra::Length(n - u64::from(to) - u64::from(from))
};
(ptr, extra)
}
};
Ok(Place::Ptr { ptr, extra })
}
pub fn place_ty(&self, place: &mir::Place<'tcx>) -> Ty<'tcx> {
self.monomorphize(
place.ty(self.mir(), self.tcx).to_ty(self.tcx),
self.substs(),
)
}
}

View File

@ -0,0 +1,349 @@
//! This module contains the `EvalContext` methods for executing a single step of the interpreter.
//!
//! The main entry point is the `step` method.
use rustc::hir;
use rustc::mir::visit::{Visitor, PlaceContext};
use rustc::mir;
use rustc::ty::{self, Instance};
use rustc::ty::layout::LayoutOf;
use rustc::middle::const_val::ConstVal;
use rustc::mir::interpret::{PtrAndAlign, GlobalId};
use rustc::mir::interpret::{EvalResult, EvalErrorKind};
use super::{EvalContext, StackPopCleanup, Place, Machine};
use syntax::codemap::Span;
use syntax::ast::Mutability;
impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
pub fn inc_step_counter_and_check_limit(&mut self, n: u64) -> EvalResult<'tcx> {
self.steps_remaining = self.steps_remaining.saturating_sub(n);
if self.steps_remaining > 0 {
Ok(())
} else {
err!(ExecutionTimeLimitReached)
}
}
/// Returns true as long as there are more things to do.
pub fn step(&mut self) -> EvalResult<'tcx, bool> {
self.inc_step_counter_and_check_limit(1)?;
if self.stack.is_empty() {
return Ok(false);
}
let block = self.frame().block;
let stmt_id = self.frame().stmt;
let mir = self.mir();
let basic_block = &mir.basic_blocks()[block];
let old_frames = self.cur_frame();
if let Some(stmt) = basic_block.statements.get(stmt_id) {
let mut new = Ok(false);
ConstantExtractor {
span: stmt.source_info.span,
instance: self.frame().instance,
ecx: self,
mir,
new_constant: &mut new,
}.visit_statement(
block,
stmt,
mir::Location {
block,
statement_index: stmt_id,
},
);
// if ConstantExtractor added a new frame, we don't execute anything here
// but await the next call to step
if !new? {
assert_eq!(old_frames, self.cur_frame());
self.statement(stmt)?;
}
return Ok(true);
}
let terminator = basic_block.terminator();
let mut new = Ok(false);
ConstantExtractor {
span: terminator.source_info.span,
instance: self.frame().instance,
ecx: self,
mir,
new_constant: &mut new,
}.visit_terminator(
block,
terminator,
mir::Location {
block,
statement_index: stmt_id,
},
);
// if ConstantExtractor added a new frame, we don't execute anything here
// but await the next call to step
if !new? {
assert_eq!(old_frames, self.cur_frame());
self.terminator(terminator)?;
}
Ok(true)
}
fn statement(&mut self, stmt: &mir::Statement<'tcx>) -> EvalResult<'tcx> {
trace!("{:?}", stmt);
use rustc::mir::StatementKind::*;
// Some statements (e.g. box) push new stack frames. We have to record the stack frame number
// *before* executing the statement.
let frame_idx = self.cur_frame();
match stmt.kind {
Assign(ref place, ref rvalue) => self.eval_rvalue_into_place(rvalue, place)?,
SetDiscriminant {
ref place,
variant_index,
} => {
let dest = self.eval_place(place)?;
let dest_ty = self.place_ty(place);
self.write_discriminant_value(dest_ty, dest, variant_index)?;
}
// Mark locals as alive
StorageLive(local) => {
let old_val = self.frame_mut().storage_live(local)?;
self.deallocate_local(old_val)?;
}
// Mark locals as dead
StorageDead(local) => {
let old_val = self.frame_mut().storage_dead(local)?;
self.deallocate_local(old_val)?;
}
// Validity checks.
Validate(op, ref places) => {
for operand in places {
M::validation_op(self, op, operand)?;
}
}
EndRegion(ce) => {
M::end_region(self, Some(ce))?;
}
// Defined to do nothing. These are added by optimization passes, to avoid changing the
// size of MIR constantly.
Nop => {}
InlineAsm { .. } => return err!(InlineAsm),
}
self.stack[frame_idx].stmt += 1;
Ok(())
}
fn terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> EvalResult<'tcx> {
trace!("{:?}", terminator.kind);
self.eval_terminator(terminator)?;
if !self.stack.is_empty() {
trace!("// {:?}", self.frame().block);
}
Ok(())
}
/// Returns `true` if a stack frame was pushed
fn global_item(
&mut self,
instance: Instance<'tcx>,
span: Span,
mutability: Mutability,
) -> EvalResult<'tcx, bool> {
debug!("global_item: {:?}", instance);
let cid = GlobalId {
instance,
promoted: None,
};
if self.tcx.interpret_interner.borrow().get_cached(cid).is_some() {
return Ok(false);
}
if self.tcx.has_attr(instance.def_id(), "linkage") {
M::global_item_with_linkage(self, cid.instance, mutability)?;
return Ok(false);
}
// FIXME(eddyb) use `Instance::ty` when it becomes available.
let instance_ty =
self.monomorphize(instance.def.def_ty(self.tcx), instance.substs);
let layout = self.layout_of(instance_ty)?;
assert!(!layout.is_unsized());
let ptr = self.memory.allocate(
layout.size.bytes(),
layout.align.abi(),
None,
)?;
self.tcx.interpret_interner.borrow_mut().cache(
cid,
PtrAndAlign {
ptr: ptr.into(),
aligned: !layout.is_packed(),
},
);
let internally_mutable = !layout.ty.is_freeze(self.tcx, self.param_env, span);
let mutability = if mutability == Mutability::Mutable || internally_mutable {
Mutability::Mutable
} else {
Mutability::Immutable
};
let cleanup = StackPopCleanup::MarkStatic(mutability);
let name = ty::tls::with(|tcx| tcx.item_path_str(instance.def_id()));
trace!("pushing stack frame for global: {}", name);
let mir = self.load_mir(instance.def)?;
self.push_stack_frame(
instance,
span,
mir,
Place::from_ptr(ptr),
cleanup,
)?;
Ok(true)
}
}
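// Illustrative sketch (not part of the original change): callers drive the interpreter
// by calling `step` in a loop, and the step budget above turns runaway evaluation into
// a hard error. A std-only analogue of that driver; the names are hypothetical.
fn _sketch_drive<F: FnMut() -> bool>(mut steps_remaining: u64, mut has_work: F) -> Result<(), String> {
    while has_work() {
        steps_remaining = steps_remaining.saturating_sub(1);
        if steps_remaining == 0 {
            return Err("execution time limit reached".to_string());
        }
    }
    Ok(())
}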
struct ConstantExtractor<'a, 'b: 'a, 'tcx: 'b, M: Machine<'tcx> + 'a> {
span: Span,
ecx: &'a mut EvalContext<'b, 'tcx, M>,
mir: &'tcx mir::Mir<'tcx>,
instance: ty::Instance<'tcx>,
// Whether a stack frame for a new constant has been pushed
new_constant: &'a mut EvalResult<'tcx, bool>,
}
impl<'a, 'b, 'tcx, M: Machine<'tcx>> ConstantExtractor<'a, 'b, 'tcx, M> {
fn try<F: FnOnce(&mut Self) -> EvalResult<'tcx, bool>>(&mut self, f: F) {
match *self.new_constant {
// already computed a constant, don't do more than one per iteration
Ok(true) => {},
// no constants computed yet
Ok(false) => *self.new_constant = f(self),
// an error happened, abort the visitor traversal
Err(_) => {},
}
}
}
impl<'a, 'b, 'tcx, M: Machine<'tcx>> Visitor<'tcx> for ConstantExtractor<'a, 'b, 'tcx, M> {
fn visit_constant(&mut self, constant: &mir::Constant<'tcx>, location: mir::Location) {
self.super_constant(constant, location);
self.try(|this| {
match constant.literal {
// already computed by rustc
mir::Literal::Value { value: &ty::Const { val: ConstVal::Unevaluated(def_id, substs), .. } } => {
debug!("global_item: {:?}, {:#?}", def_id, substs);
let substs = this.ecx.tcx.trans_apply_param_substs(this.instance.substs, &substs);
debug!("global_item_new_substs: {:#?}", substs);
debug!("global_item_param_env: {:#?}", this.ecx.param_env);
let instance = Instance::resolve(
this.ecx.tcx,
this.ecx.param_env,
def_id,
substs,
).ok_or(EvalErrorKind::TypeckError)?; // turn error prop into a panic to expose associated type in const issue
this.ecx.global_item(
instance,
constant.span,
Mutability::Immutable,
)
}
mir::Literal::Value { .. } => Ok(false),
mir::Literal::Promoted { index } => {
let cid = GlobalId {
instance: this.instance,
promoted: Some(index),
};
if this.ecx.tcx.interpret_interner.borrow().get_cached(cid).is_some() {
return Ok(false);
}
let mir = &this.mir.promoted[index];
let ty = this.ecx.monomorphize(mir.return_ty(), this.instance.substs);
let layout = this.ecx.layout_of(ty)?;
assert!(!layout.is_unsized());
let ptr = this.ecx.memory.allocate(
layout.size.bytes(),
layout.align.abi(),
None,
)?;
this.ecx.tcx.interpret_interner.borrow_mut().cache(
cid,
PtrAndAlign {
ptr: ptr.into(),
aligned: !layout.is_packed(),
},
);
trace!("pushing stack frame for {:?}", index);
this.ecx.push_stack_frame(
this.instance,
constant.span,
mir,
Place::from_ptr(ptr),
StackPopCleanup::MarkStatic(Mutability::Immutable),
)?;
Ok(true)
}
}
});
}
fn visit_place(
&mut self,
place: &mir::Place<'tcx>,
context: PlaceContext<'tcx>,
location: mir::Location,
) {
self.super_place(place, context, location);
self.try(|this| {
if let mir::Place::Static(ref static_) = *place {
let def_id = static_.def_id;
let span = this.span;
if let Some(node_item) = this.ecx.tcx.hir.get_if_local(def_id) {
if let hir::map::Node::NodeItem(&hir::Item { ref node, .. }) = node_item {
if let hir::ItemStatic(_, m, _) = *node {
let instance = Instance::mono(this.ecx.tcx, def_id);
this.ecx.global_item(
instance,
span,
if m == hir::MutMutable {
Mutability::Mutable
} else {
Mutability::Immutable
},
)
} else {
bug!("static def id doesn't point to static");
}
} else {
bug!("static def id doesn't point to item");
}
} else {
let def = this.ecx.tcx.describe_def(def_id).expect("static not found");
if let hir::def::Def::Static(_, mutable) = def {
let instance = Instance::mono(this.ecx.tcx, def_id);
this.ecx.global_item(
instance,
span,
if mutable {
Mutability::Mutable
} else {
Mutability::Immutable
},
)
} else {
bug!("static found but isn't a static: {:?}", def);
}
}
} else {
Ok(false)
}
});
}
}

View File

@ -0,0 +1,83 @@
use rustc::mir::BasicBlock;
use rustc::ty::{self, Ty};
use syntax::codemap::Span;
use rustc::mir::interpret::{EvalResult, PrimVal, Value};
use interpret::{Machine, ValTy, EvalContext, Place, PlaceExtra};
impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
pub(crate) fn drop_place(
&mut self,
place: Place,
instance: ty::Instance<'tcx>,
ty: Ty<'tcx>,
span: Span,
target: BasicBlock,
) -> EvalResult<'tcx> {
trace!("drop_place: {:#?}", place);
// We take the address of the object. This may well be unaligned, which is fine for us here.
// However, unaligned accesses will probably make the actual drop implementation fail -- a problem shared
// by rustc.
let val = match self.force_allocation(place)? {
Place::Ptr {
ptr,
extra: PlaceExtra::Vtable(vtable),
} => ptr.ptr.to_value_with_vtable(vtable),
Place::Ptr {
ptr,
extra: PlaceExtra::Length(len),
} => ptr.ptr.to_value_with_len(len),
Place::Ptr {
ptr,
extra: PlaceExtra::None,
} => ptr.ptr.to_value(),
_ => bug!("force_allocation broken"),
};
self.drop(val, instance, ty, span, target)
}
fn drop(
&mut self,
arg: Value,
instance: ty::Instance<'tcx>,
ty: Ty<'tcx>,
span: Span,
target: BasicBlock,
) -> EvalResult<'tcx> {
trace!("drop: {:#?}, {:?}, {:?}", arg, ty.sty, instance.def);
let instance = match ty.sty {
ty::TyDynamic(..) => {
let vtable = match arg {
Value::ByValPair(_, PrimVal::Ptr(vtable)) => vtable,
_ => bug!("expected fat ptr, got {:?}", arg),
};
match self.read_drop_type_from_vtable(vtable)? {
Some(func) => func,
// no drop fn -> bail out
None => {
self.goto_block(target);
return Ok(())
},
}
}
_ => instance,
};
// the drop function expects a reference to the value
let valty = ValTy {
value: arg,
ty: self.tcx.mk_mut_ptr(ty),
};
let fn_sig = self.tcx.fn_sig(instance.def_id()).skip_binder().clone();
self.eval_fn_call(
instance,
Some((Place::undef(), target)),
&vec![valty],
span,
fn_sig,
)
}
}

View File

@ -0,0 +1,420 @@
use rustc::mir;
use rustc::ty::{self, Ty};
use rustc::ty::layout::LayoutOf;
use syntax::codemap::Span;
use syntax::abi::Abi;
use rustc::mir::interpret::{PtrAndAlign, EvalResult, PrimVal, Value};
use super::{EvalContext, eval_context,
Place, Machine, ValTy};
use rustc_data_structures::indexed_vec::Idx;
use interpret::memory::HasMemory;
mod drop;
impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
pub fn goto_block(&mut self, target: mir::BasicBlock) {
self.frame_mut().block = target;
self.frame_mut().stmt = 0;
}
pub(super) fn eval_terminator(
&mut self,
terminator: &mir::Terminator<'tcx>,
) -> EvalResult<'tcx> {
use rustc::mir::TerminatorKind::*;
match terminator.kind {
Return => {
self.dump_local(self.frame().return_place);
self.pop_stack_frame()?
}
Goto { target } => self.goto_block(target),
SwitchInt {
ref discr,
ref values,
ref targets,
..
} => {
// FIXME(CTFE): forbid branching
let discr_val = self.eval_operand(discr)?;
let discr_prim = self.value_to_primval(discr_val)?;
// Branch to the `otherwise` case by default, if no match is found.
let mut target_block = targets[targets.len() - 1];
for (index, const_int) in values.iter().enumerate() {
let prim = PrimVal::Bytes(const_int.to_u128_unchecked());
if discr_prim.to_bytes()? == prim.to_bytes()? {
target_block = targets[index];
break;
}
}
self.goto_block(target_block);
}
Call {
ref func,
ref args,
ref destination,
..
} => {
let destination = match *destination {
Some((ref lv, target)) => Some((self.eval_place(lv)?, target)),
None => None,
};
let func = self.eval_operand(func)?;
let (fn_def, sig) = match func.ty.sty {
ty::TyFnPtr(sig) => {
let fn_ptr = self.value_to_primval(func)?.to_ptr()?;
let instance = self.memory.get_fn(fn_ptr)?;
// FIXME(eddyb) use `Instance::ty` when it becomes available.
let instance_ty =
self.monomorphize(instance.def.def_ty(self.tcx), instance.substs);
match instance_ty.sty {
ty::TyFnDef(..) => {
let real_sig = instance_ty.fn_sig(self.tcx);
let sig = self.tcx.erase_late_bound_regions_and_normalize(&sig);
let real_sig = self.tcx.erase_late_bound_regions_and_normalize(&real_sig);
if !self.check_sig_compat(sig, real_sig)? {
return err!(FunctionPointerTyMismatch(real_sig, sig));
}
}
ref other => bug!("instance def ty: {:?}", other),
}
(instance, sig)
}
ty::TyFnDef(def_id, substs) => (
self.resolve(def_id, substs)?,
func.ty.fn_sig(self.tcx),
),
_ => {
let msg = format!("can't handle callee of type {:?}", func.ty);
return err!(Unimplemented(msg));
}
};
let args = self.operands_to_args(args)?;
let sig = self.tcx.erase_late_bound_regions_and_normalize(&sig);
self.eval_fn_call(
fn_def,
destination,
&args,
terminator.source_info.span,
sig,
)?;
}
Drop {
ref location,
target,
..
} => {
// FIXME(CTFE): forbid drop in const eval
let place = self.eval_place(location)?;
let ty = self.place_ty(location);
let ty = self.tcx.trans_apply_param_substs(self.substs(), &ty);
trace!("TerminatorKind::drop: {:?}, type {}", location, ty);
let instance = eval_context::resolve_drop_in_place(self.tcx, ty);
self.drop_place(
place,
instance,
ty,
terminator.source_info.span,
target,
)?;
}
Assert {
ref cond,
expected,
ref msg,
target,
..
} => {
let cond_val = self.eval_operand_to_primval(cond)?.to_bool()?;
if expected == cond_val {
self.goto_block(target);
} else {
use rustc::mir::AssertMessage::*;
return match *msg {
BoundsCheck { ref len, ref index } => {
let span = terminator.source_info.span;
let len = self.eval_operand_to_primval(len)
.expect("can't eval len")
.to_u64()?;
let index = self.eval_operand_to_primval(index)
.expect("can't eval index")
.to_u64()?;
err!(ArrayIndexOutOfBounds(span, len, index))
}
Math(ref err) => {
err!(Math(terminator.source_info.span, err.clone()))
}
GeneratorResumedAfterReturn |
GeneratorResumedAfterPanic => unimplemented!(),
};
}
}
Yield { .. } => unimplemented!("{:#?}", terminator.kind),
GeneratorDrop => unimplemented!(),
DropAndReplace { .. } => unimplemented!(),
Resume => unimplemented!(),
FalseEdges { .. } => bug!("should have been eliminated by `simplify_branches` mir pass"),
Unreachable => return err!(Unreachable),
}
Ok(())
}
/// Decides whether it is okay to call the method with signature `real_sig` using signature `sig`.
/// FIXME: This should take into account the platform-dependent ABI description.
fn check_sig_compat(
&mut self,
sig: ty::FnSig<'tcx>,
real_sig: ty::FnSig<'tcx>,
) -> EvalResult<'tcx, bool> {
fn check_ty_compat<'tcx>(ty: Ty<'tcx>, real_ty: Ty<'tcx>) -> bool {
if ty == real_ty {
return true;
} // This is actually a fast pointer comparison
return match (&ty.sty, &real_ty.sty) {
// Permit changing the pointer type of raw pointers and references as well as
// mutability of raw pointers.
// TODO: Should not be allowed when fat pointers are involved.
(&ty::TyRawPtr(_), &ty::TyRawPtr(_)) => true,
(&ty::TyRef(_, _), &ty::TyRef(_, _)) => {
ty.is_mutable_pointer() == real_ty.is_mutable_pointer()
}
// rule out everything else
_ => false,
};
}
if sig.abi == real_sig.abi && sig.variadic == real_sig.variadic &&
sig.inputs_and_output.len() == real_sig.inputs_and_output.len() &&
sig.inputs_and_output
.iter()
.zip(real_sig.inputs_and_output)
.all(|(ty, real_ty)| check_ty_compat(ty, real_ty))
{
// Definitely good.
return Ok(true);
}
if sig.variadic || real_sig.variadic {
// We're not touching this
return Ok(false);
}
// We need to allow what comes up when a non-capturing closure is cast to a fn().
match (sig.abi, real_sig.abi) {
(Abi::Rust, Abi::RustCall) // check the ABIs. This makes the test here non-symmetric.
if check_ty_compat(sig.output(), real_sig.output()) && real_sig.inputs_and_output.len() == 3 => {
// First argument of real_sig must be a ZST
let fst_ty = real_sig.inputs_and_output[0];
if self.layout_of(fst_ty)?.is_zst() {
// Second argument must be a tuple matching the argument list of sig
let snd_ty = real_sig.inputs_and_output[1];
match snd_ty.sty {
ty::TyTuple(tys, _) if sig.inputs().len() == tys.len() =>
if sig.inputs().iter().zip(tys).all(|(ty, real_ty)| check_ty_compat(ty, real_ty)) {
return Ok(true)
},
_ => {}
}
}
}
_ => {}
};
// Nope, this doesn't work.
return Ok(false);
}
fn eval_fn_call(
&mut self,
instance: ty::Instance<'tcx>,
destination: Option<(Place, mir::BasicBlock)>,
args: &[ValTy<'tcx>],
span: Span,
sig: ty::FnSig<'tcx>,
) -> EvalResult<'tcx> {
trace!("eval_fn_call: {:#?}", instance);
match instance.def {
ty::InstanceDef::Intrinsic(..) => {
let (ret, target) = match destination {
Some(dest) => dest,
_ => return err!(Unreachable),
};
let ty = sig.output();
let layout = self.layout_of(ty)?;
M::call_intrinsic(self, instance, args, ret, layout, target)?;
self.dump_local(ret);
Ok(())
}
// FIXME: figure out why we can't just go through the shim
ty::InstanceDef::ClosureOnceShim { .. } => {
if M::eval_fn_call(self, instance, destination, args, span, sig)? {
return Ok(());
}
let mut arg_locals = self.frame().mir.args_iter();
match sig.abi {
// closure as closure once
Abi::RustCall => {
for (arg_local, &valty) in arg_locals.zip(args) {
let dest = self.eval_place(&mir::Place::Local(arg_local))?;
self.write_value(valty, dest)?;
}
}
// non capture closure as fn ptr
// need to inject zst ptr for closure object (aka do nothing)
// and need to pack arguments
Abi::Rust => {
trace!(
"arg_locals: {:?}",
self.frame().mir.args_iter().collect::<Vec<_>>()
);
trace!("args: {:?}", args);
let local = arg_locals.nth(1).unwrap();
for (i, &valty) in args.into_iter().enumerate() {
let dest = self.eval_place(&mir::Place::Local(local).field(
mir::Field::new(i),
valty.ty,
))?;
self.write_value(valty, dest)?;
}
}
_ => bug!("bad ABI for ClosureOnceShim: {:?}", sig.abi),
}
Ok(())
}
ty::InstanceDef::FnPtrShim(..) |
ty::InstanceDef::DropGlue(..) |
ty::InstanceDef::CloneShim(..) |
ty::InstanceDef::Item(_) => {
// Push the stack frame, and potentially be entirely done if the call got hooked
if M::eval_fn_call(self, instance, destination, args, span, sig)? {
return Ok(());
}
// Pass the arguments
let mut arg_locals = self.frame().mir.args_iter();
trace!("ABI: {:?}", sig.abi);
trace!(
"arg_locals: {:?}",
self.frame().mir.args_iter().collect::<Vec<_>>()
);
trace!("args: {:?}", args);
match sig.abi {
Abi::RustCall => {
assert_eq!(args.len(), 2);
{
// write first argument
let first_local = arg_locals.next().unwrap();
let dest = self.eval_place(&mir::Place::Local(first_local))?;
self.write_value(args[0], dest)?;
}
// unpack and write all other args
let layout = self.layout_of(args[1].ty)?;
if let ty::TyTuple(..) = args[1].ty.sty {
if self.frame().mir.args_iter().count() == layout.fields.count() + 1 {
match args[1].value {
Value::ByRef(PtrAndAlign { ptr, aligned }) => {
assert!(
aligned,
"Unaligned ByRef-values cannot occur as function arguments"
);
for (i, arg_local) in arg_locals.enumerate() {
let field = layout.field(&self, i)?;
let offset = layout.fields.offset(i).bytes();
let arg = Value::by_ref(ptr.offset(offset, &self)?);
let dest =
self.eval_place(&mir::Place::Local(arg_local))?;
trace!(
"writing arg {:?} to {:?} (type: {})",
arg,
dest,
field.ty
);
let valty = ValTy {
value: arg,
ty: field.ty,
};
self.write_value(valty, dest)?;
}
}
Value::ByVal(PrimVal::Undef) => {}
other => {
trace!("{:#?}, {:#?}", other, layout);
let mut layout = layout;
'outer: loop {
for i in 0..layout.fields.count() {
let field = layout.field(&self, i)?;
if layout.fields.offset(i).bytes() == 0 && layout.size == field.size {
layout = field;
continue 'outer;
}
}
break;
}
let dest = self.eval_place(&mir::Place::Local(
arg_locals.next().unwrap(),
))?;
let valty = ValTy {
value: other,
ty: layout.ty,
};
self.write_value(valty, dest)?;
}
}
} else {
trace!("manual impl of rust-call ABI");
// called a manual impl of a rust-call function
let dest = self.eval_place(
&mir::Place::Local(arg_locals.next().unwrap()),
)?;
self.write_value(args[1], dest)?;
}
} else {
bug!(
"rust-call ABI tuple argument was {:#?}, {:#?}",
args[1].ty,
layout
);
}
}
_ => {
for (arg_local, &valty) in arg_locals.zip(args) {
let dest = self.eval_place(&mir::Place::Local(arg_local))?;
self.write_value(valty, dest)?;
}
}
}
Ok(())
}
// cannot use the shim here, because that will only result in infinite recursion
ty::InstanceDef::Virtual(_, idx) => {
let ptr_size = self.memory.pointer_size();
let (ptr, vtable) = self.into_ptr_vtable_pair(args[0].value)?;
let fn_ptr = self.memory.read_ptr_sized_unsigned(
vtable.offset(ptr_size * (idx as u64 + 3), &self)?
)?.to_ptr()?;
let instance = self.memory.get_fn(fn_ptr)?;
let mut args = args.to_vec();
let ty = self.layout_of(args[0].ty)?.field(&self, 0)?.ty;
args[0].ty = ty;
args[0].value = ptr.to_value();
// recurse with concrete function
self.eval_fn_call(instance, destination, &args, span, sig)
}
}
}
}
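// Illustrative sketch (not part of the original change): `check_sig_compat` above
// accepts the `Rust`/`RustCall` mismatch that arises when a non-capturing closure is
// used as a plain function pointer, which is ordinary surface Rust; the function name
// is hypothetical.
fn _sketch_closure_as_fn_ptr() {
    let f: fn(u32) -> u32 = |x| x + 1; // non-capturing closures coerce to fn pointers
    assert_eq!(f(41), 42);
}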

View File

@ -0,0 +1,86 @@
use rustc::ty::{self, Ty};
use rustc::ty::layout::{Size, Align, LayoutOf};
use syntax::ast::Mutability;
use rustc::mir::interpret::{PrimVal, Value, MemoryPointer, EvalResult};
use super::{EvalContext, eval_context,
Machine};
impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
/// Creates a dynamic vtable for the given type and vtable origin. This is used only for
/// trait objects.
///
/// The `trait_ref` encodes the erased self type. Hence if we are
/// making an object `Foo<Trait>` from a value of type `Foo<T>`, then
/// `trait_ref` would map `T:Trait`.
pub fn get_vtable(
&mut self,
ty: Ty<'tcx>,
trait_ref: ty::PolyTraitRef<'tcx>,
) -> EvalResult<'tcx, MemoryPointer> {
debug!("get_vtable(trait_ref={:?})", trait_ref);
let layout = self.layout_of(trait_ref.self_ty())?;
assert!(!layout.is_unsized(), "can't create a vtable for an unsized type");
let size = layout.size.bytes();
let align = layout.align.abi();
let ptr_size = self.memory.pointer_size();
let methods = self.tcx.vtable_methods(trait_ref);
let vtable = self.memory.allocate(
ptr_size * (3 + methods.len() as u64),
ptr_size,
None,
)?;
let drop = eval_context::resolve_drop_in_place(self.tcx, ty);
let drop = self.memory.create_fn_alloc(drop);
self.memory.write_ptr_sized_unsigned(vtable, PrimVal::Ptr(drop))?;
let size_ptr = vtable.offset(ptr_size, &self)?;
self.memory.write_ptr_sized_unsigned(size_ptr, PrimVal::Bytes(size as u128))?;
let align_ptr = vtable.offset(ptr_size * 2, &self)?;
self.memory.write_ptr_sized_unsigned(align_ptr, PrimVal::Bytes(align as u128))?;
for (i, method) in methods.iter().enumerate() {
if let Some((def_id, substs)) = *method {
let instance = self.resolve(def_id, substs)?;
let fn_ptr = self.memory.create_fn_alloc(instance);
let method_ptr = vtable.offset(ptr_size * (3 + i as u64), &self)?;
self.memory.write_ptr_sized_unsigned(method_ptr, PrimVal::Ptr(fn_ptr))?;
}
}
self.memory.mark_static_initalized(
vtable.alloc_id,
Mutability::Mutable,
)?;
Ok(vtable)
}
pub fn read_drop_type_from_vtable(
&self,
vtable: MemoryPointer,
) -> EvalResult<'tcx, Option<ty::Instance<'tcx>>> {
// we don't care about the pointee type, we just want a pointer
match self.read_ptr(vtable, self.tcx.mk_nil_ptr())? {
// some values don't need to call a drop impl, so the value is null
Value::ByVal(PrimVal::Bytes(0)) => Ok(None),
Value::ByVal(PrimVal::Ptr(drop_fn)) => self.memory.get_fn(drop_fn).map(Some),
_ => err!(ReadBytesAsPointer),
}
}
pub fn read_size_and_align_from_vtable(
&self,
vtable: MemoryPointer,
) -> EvalResult<'tcx, (Size, Align)> {
let pointer_size = self.memory.pointer_size();
let size = self.memory.read_ptr_sized_unsigned(vtable.offset(pointer_size, self)?)?.to_bytes()? as u64;
let align = self.memory.read_ptr_sized_unsigned(
vtable.offset(pointer_size * 2, self)?
)?.to_bytes()? as u64;
Ok((Size::from_bytes(size), Align::from_bytes(align, align).unwrap()))
}
}
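// Illustrative sketch (not part of the original change): the vtable built above stores,
// in pointer-sized slots, the drop function, the size, and the alignment, followed by
// one slot per method, so slot `3 + i` holds the i-th method; the helper name is hypothetical.
fn _sketch_vtable_method_offset(method_index: u64, ptr_size: u64) -> u64 {
    // byte offset of the method pointer inside the vtable allocation
    ptr_size * (3 + method_index)
}
// e.g. on a 64-bit target the first method pointer lives at byte offset 24.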

View File

@ -25,7 +25,10 @@ Rust MIR: a lowered representation of Rust. Also: an experiment!
#![feature(decl_macro)]
#![feature(i128_type)]
#![feature(inclusive_range_syntax)]
#![feature(inclusive_range)]
#![feature(macro_vis_matcher)]
#![feature(match_default_bindings)]
#![feature(never_type)]
#![feature(range_contains)]
#![feature(rustc_diagnostic_macros)]
#![feature(placement_in_syntax)]
@ -48,6 +51,9 @@ extern crate syntax_pos;
extern crate rustc_const_math;
extern crate rustc_const_eval;
extern crate core; // for NonZero
extern crate log_settings;
extern crate rustc_apfloat;
extern crate byteorder;
mod diagnostics;
@ -58,6 +64,7 @@ mod hair;
mod shim;
pub mod transform;
pub mod util;
pub mod interpret;
use rustc::ty::maps::Providers;
@ -65,6 +72,7 @@ pub fn provide(providers: &mut Providers) {
borrow_check::provide(providers);
shim::provide(providers);
transform::provide(providers);
providers.const_eval = interpret::const_eval_provider;
}
__build_diagnostic_array! { librustc_mir, DIAGNOSTICS }

View File

@ -81,7 +81,7 @@ pub unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern fn(*mut u8)) {
unsafe extern fn run_dtors(mut ptr: *mut u8) {
while !ptr.is_null() {
let list: Box<List> = Box::from_raw(ptr as *mut List);
for &(ptr, dtor) in list.iter() {
for (ptr, dtor) in list.into_iter() {
dtor(ptr);
}
ptr = DTORS.get();

View File

@ -262,7 +262,7 @@ pub unsafe fn register_dtor_fallback(t: *mut u8,
unsafe extern fn run_dtors(mut ptr: *mut u8) {
while !ptr.is_null() {
let list: Box<List> = Box::from_raw(ptr as *mut List);
for &(ptr, dtor) in list.iter() {
for (ptr, dtor) in list.into_iter() {
dtor(ptr);
}
ptr = DTORS.get();

View File

@ -10,7 +10,9 @@
enum Enum {
X = (1 << 500), //~ ERROR E0080
//~| WARNING shift left with overflow
Y = (1 / 0) //~ ERROR E0080
//~| WARNING divide by zero
}
fn main() {

View File

@ -23,6 +23,8 @@ const A_I8_T
: [u32; (i8::MAX as i8 + 1i8) as usize]
//~^ ERROR constant evaluation error
//~^^ NOTE attempt to add with overflow
//~| WARNING constant evaluation error
//~| NOTE on by default
= [0; (i8::MAX as usize) + 1];
fn main() {

View File

@ -13,8 +13,9 @@
const X : usize = 2;
const fn f(x: usize) -> usize {
let mut sum = 0;
for i in 0..x {
let mut sum = 0; //~ ERROR blocks in constant functions are limited
for i in 0..x { //~ ERROR calls in constant functions
//~| ERROR constant function contains unimplemented
sum += i;
}
sum //~ ERROR E0080
@ -24,4 +25,6 @@ const fn f(x: usize) -> usize {
#[allow(unused_variables)]
fn main() {
let a : [i32; f(X)]; //~ NOTE for constant expression here
//~| WARNING constant evaluation error: non-constant path
//~| on by default
}

View File

@ -17,6 +17,8 @@ const TWO: usize = 2;
const LEN: usize = ONE - TWO;
//~^ ERROR E0080
//~| attempt to subtract with overflow
//~| NOTE attempt to subtract with overflow
//~| NOTE on by default
fn main() {
let a: [i8; LEN] = unimplemented!();

View File

@ -20,5 +20,7 @@ fn main() {
let a: [u8; C.a]; // OK
let b: [u8; C.b]; //~ ERROR constant evaluation error
//~^ NOTE nonexistent struct field
//~| WARNING constant evaluation error
//~| NOTE on by default
}
}

@ -1 +1 @@
Subproject commit 6dbfe23c4d1af109c894ff9d7d5da97c025584e5
Subproject commit bde093fa140cbf95023482a94b92b0b16af4b521

View File

@ -66,6 +66,8 @@ fn filter_dirs(path: &Path) -> bool {
"src/tools/rust-installer",
"src/tools/rustfmt",
"src/tools/miri",
"src/librustc/mir/interpret",
"src/librustc_mir/interpret",
];
skip.iter().any(|p| path.ends_with(p))
}

View File

@ -23,7 +23,7 @@
# Each tool has a list of people to ping
# ping @oli-obk @RalfJung @eddyb
miri = "Broken"
miri = "Testing"
# ping @Manishearth @llogiq @mcarton @oli-obk
clippy = "Testing"