implement valtree -> constvalue conversion

This commit is contained in:
b-naber 2022-04-05 16:33:42 +02:00
parent eaf8cdaa0b
commit 1157dc7167
9 changed files with 546 additions and 151 deletions

View File

@ -106,6 +106,7 @@ pub(super) fn mk_eval_cx<'mir, 'tcx>(
/// This function converts an interpreter value into a constant that is meant for use in the /// This function converts an interpreter value into a constant that is meant for use in the
/// type system. /// type system.
#[instrument(skip(ecx), level = "debug")]
pub(super) fn op_to_const<'tcx>( pub(super) fn op_to_const<'tcx>(
ecx: &CompileTimeEvalContext<'_, 'tcx>, ecx: &CompileTimeEvalContext<'_, 'tcx>,
op: &OpTy<'tcx>, op: &OpTy<'tcx>,
@ -140,8 +141,12 @@ pub(super) fn op_to_const<'tcx>(
op.try_as_mplace() op.try_as_mplace()
}; };
debug!(?immediate);
// We know `offset` is relative to the allocation, so we can use `into_parts`. // We know `offset` is relative to the allocation, so we can use `into_parts`.
let to_const_value = |mplace: &MPlaceTy<'_>| match mplace.ptr.into_parts() { let to_const_value = |mplace: &MPlaceTy<'_>| {
debug!("to_const_value(mplace: {:?})", mplace);
match mplace.ptr.into_parts() {
(Some(alloc_id), offset) => { (Some(alloc_id), offset) => {
let alloc = ecx.tcx.global_alloc(alloc_id).unwrap_memory(); let alloc = ecx.tcx.global_alloc(alloc_id).unwrap_memory();
ConstValue::ByRef { alloc, offset } ConstValue::ByRef { alloc, offset }
@ -156,6 +161,7 @@ pub(super) fn op_to_const<'tcx>(
); );
ConstValue::Scalar(Scalar::ZST) ConstValue::Scalar(Scalar::ZST)
} }
}
}; };
match immediate { match immediate {
Ok(ref mplace) => to_const_value(mplace), Ok(ref mplace) => to_const_value(mplace),
@ -166,6 +172,7 @@ pub(super) fn op_to_const<'tcx>(
ScalarMaybeUninit::Uninit => to_const_value(&op.assert_mem_place()), ScalarMaybeUninit::Uninit => to_const_value(&op.assert_mem_place()),
}, },
Immediate::ScalarPair(a, b) => { Immediate::ScalarPair(a, b) => {
debug!("ScalarPair(a: {:?}, b: {:?})", a, b);
// We know `offset` is relative to the allocation, so we can use `into_parts`. // We know `offset` is relative to the allocation, so we can use `into_parts`.
let (data, start) = let (data, start) =
match ecx.scalar_to_ptr(a.check_init().unwrap()).unwrap().into_parts() { match ecx.scalar_to_ptr(a.check_init().unwrap()).unwrap().into_parts() {
@ -209,7 +216,10 @@ fn turn_into_const_value<'tcx>(
); );
// Turn this into a proper constant. // Turn this into a proper constant.
op_to_const(&ecx, &mplace.into()) let const_val = op_to_const(&ecx, &mplace.into());
debug!(?const_val);
const_val
} }
pub fn eval_to_const_value_raw_provider<'tcx>( pub fn eval_to_const_value_raw_provider<'tcx>(

View File

@ -3,29 +3,26 @@
use std::convert::TryFrom; use std::convert::TryFrom;
use rustc_hir::Mutability; use rustc_hir::Mutability;
use rustc_middle::ty::layout::HasTyCtxt; use rustc_middle::mir;
use rustc_middle::ty::{self, TyCtxt}; use rustc_middle::ty::{self, TyCtxt};
use rustc_middle::{
mir::{self, interpret::ConstAlloc},
ty::ScalarInt,
};
use rustc_span::{source_map::DUMMY_SP, symbol::Symbol}; use rustc_span::{source_map::DUMMY_SP, symbol::Symbol};
use rustc_target::abi::VariantIdx;
use crate::interpret::{ use crate::interpret::{
intern_const_alloc_recursive, ConstValue, InternKind, InterpCx, InterpResult, MPlaceTy, intern_const_alloc_recursive, ConstValue, InternKind, InterpCx, InterpResult, MemPlaceMeta,
MemPlaceMeta, Scalar, Scalar,
}; };
mod error; mod error;
mod eval_queries; mod eval_queries;
mod fn_queries; mod fn_queries;
mod machine; mod machine;
mod valtrees;
pub use error::*; pub use error::*;
pub use eval_queries::*; pub use eval_queries::*;
pub use fn_queries::*; pub use fn_queries::*;
pub use machine::*; pub use machine::*;
pub(crate) use valtrees::{const_to_valtree, valtree_to_const_value};
pub(crate) fn const_caller_location( pub(crate) fn const_caller_location(
tcx: TyCtxt<'_>, tcx: TyCtxt<'_>,
@ -41,128 +38,6 @@ pub(crate) fn const_caller_location(
ConstValue::Scalar(Scalar::from_maybe_pointer(loc_place.ptr, &tcx)) ConstValue::Scalar(Scalar::from_maybe_pointer(loc_place.ptr, &tcx))
} }
/// Convert an evaluated constant to a type level constant
pub(crate) fn const_to_valtree<'tcx>(
tcx: TyCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
raw: ConstAlloc<'tcx>,
) -> Option<ty::ValTree<'tcx>> {
let ecx = mk_eval_cx(
tcx, DUMMY_SP, param_env,
// It is absolutely crucial for soundness that
// we do not read from static items or other mutable memory.
false,
);
let place = ecx.raw_const_to_mplace(raw).unwrap();
const_to_valtree_inner(&ecx, &place)
}
#[instrument(skip(ecx), level = "debug")]
fn branches<'tcx>(
ecx: &CompileTimeEvalContext<'tcx, 'tcx>,
place: &MPlaceTy<'tcx>,
n: usize,
variant: Option<VariantIdx>,
) -> Option<ty::ValTree<'tcx>> {
let place = match variant {
Some(variant) => ecx.mplace_downcast(&place, variant).unwrap(),
None => *place,
};
let variant = variant.map(|variant| Some(ty::ValTree::Leaf(ScalarInt::from(variant.as_u32()))));
debug!(?place, ?variant);
let fields = (0..n).map(|i| {
let field = ecx.mplace_field(&place, i).unwrap();
const_to_valtree_inner(ecx, &field)
});
// For enums, we prepend their variant index before the variant's fields so we can figure out
// the variant again when just seeing a valtree.
let branches = variant.into_iter().chain(fields);
Some(ty::ValTree::Branch(ecx.tcx.arena.alloc_from_iter(branches.collect::<Option<Vec<_>>>()?)))
}
fn slice_branches<'tcx>(
ecx: &CompileTimeEvalContext<'tcx, 'tcx>,
place: &MPlaceTy<'tcx>,
) -> Option<ty::ValTree<'tcx>> {
let n = place.len(&ecx.tcx()).expect(&format!("expected to use len of place {:?}", place));
let branches = (0..n).map(|i| {
let place_elem = ecx.mplace_index(place, i).unwrap();
const_to_valtree_inner(ecx, &place_elem)
});
Some(ty::ValTree::Branch(ecx.tcx.arena.alloc_from_iter(branches.collect::<Option<Vec<_>>>()?)))
}
#[instrument(skip(ecx), level = "debug")]
fn const_to_valtree_inner<'tcx>(
ecx: &CompileTimeEvalContext<'tcx, 'tcx>,
place: &MPlaceTy<'tcx>,
) -> Option<ty::ValTree<'tcx>> {
match place.layout.ty.kind() {
ty::FnDef(..) => Some(ty::ValTree::zst()),
ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => {
let val = ecx.read_immediate(&place.into()).unwrap();
let val = val.to_scalar().unwrap();
Some(ty::ValTree::Leaf(val.assert_int()))
}
// Raw pointers are not allowed in type level constants, as we cannot properly test them for
// equality at compile-time (see `ptr_guaranteed_eq`/`_ne`).
// Technically we could allow function pointers (represented as `ty::Instance`), but this is not guaranteed to
// agree with runtime equality tests.
ty::FnPtr(_) | ty::RawPtr(_) => None,
ty::Ref(_, _, _) => {
let derefd_place = ecx.deref_operand(&place.into()).unwrap_or_else(|e| bug!("couldn't deref {:?}, error: {:?}", place, e));
debug!(?derefd_place);
const_to_valtree_inner(ecx, &derefd_place)
}
ty::Str | ty::Slice(_) | ty::Array(_, _) => {
let valtree = slice_branches(ecx, place);
debug!(?valtree);
valtree
}
// Trait objects are not allowed in type level constants, as we have no concept for
// resolving their backing type, even if we can do that at const eval time. We may
// hypothetically be able to allow `dyn StructuralEq` trait objects in the future,
// but it is unclear if this is useful.
ty::Dynamic(..) => None,
ty::Tuple(substs) => branches(ecx, place, substs.len(), None),
ty::Adt(def, _) => {
if def.variants().is_empty() {
bug!("uninhabited types should have errored and never gotten converted to valtree")
}
let variant = ecx.read_discriminant(&place.into()).unwrap().1;
branches(ecx, place, def.variant(variant).fields.len(), def.is_enum().then_some(variant))
}
ty::Never
| ty::Error(_)
| ty::Foreign(..)
| ty::Infer(ty::FreshIntTy(_))
| ty::Infer(ty::FreshFloatTy(_))
| ty::Projection(..)
| ty::Param(_)
| ty::Bound(..)
| ty::Placeholder(..)
// FIXME(oli-obk): we could look behind opaque types
| ty::Opaque(..)
| ty::Infer(_)
// FIXME(oli-obk): we can probably encode closures just like structs
| ty::Closure(..)
| ty::Generator(..)
| ty::GeneratorWitness(..) => None,
}
}
/// This function should never fail for validated constants. However, it is also invoked from the /// This function should never fail for validated constants. However, it is also invoked from the
/// pretty printer which might attempt to format invalid constants and in that case it might fail. /// pretty printer which might attempt to format invalid constants and in that case it might fail.
pub(crate) fn try_destructure_const<'tcx>( pub(crate) fn try_destructure_const<'tcx>(
@ -202,6 +77,7 @@ pub(crate) fn try_destructure_const<'tcx>(
Ok(mir::DestructuredConst { variant, fields }) Ok(mir::DestructuredConst { variant, fields })
} }
#[instrument(skip(tcx), level = "debug")]
pub(crate) fn deref_const<'tcx>( pub(crate) fn deref_const<'tcx>(
tcx: TyCtxt<'tcx>, tcx: TyCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>, param_env: ty::ParamEnv<'tcx>,

View File

@ -0,0 +1,479 @@
use super::eval_queries::{mk_eval_cx, op_to_const};
use super::machine::CompileTimeEvalContext;
use crate::interpret::{
intern_const_alloc_recursive, ConstValue, ImmTy, Immediate, InternKind, MemoryKind, PlaceTy,
Pointer, Scalar, ScalarMaybeUninit,
};
use rustc_middle::mir::interpret::{ConstAlloc, GlobalAlloc};
use rustc_middle::mir::{Field, ProjectionElem};
use rustc_middle::ty::{self, ScalarInt, Ty, TyCtxt};
use rustc_span::source_map::DUMMY_SP;
use rustc_target::abi::VariantIdx;
use crate::interpret::visitor::Value;
use crate::interpret::MPlaceTy;
/// Convert an evaluated constant to a type level constant (`ty::ValTree`).
///
/// Returns `None` if the constant's type cannot be represented as a valtree
/// (e.g. raw pointers or trait objects — see `const_to_valtree_inner`).
#[instrument(skip(tcx), level = "debug")]
pub(crate) fn const_to_valtree<'tcx>(
    tcx: TyCtxt<'tcx>,
    param_env: ty::ParamEnv<'tcx>,
    raw: ConstAlloc<'tcx>,
) -> Option<ty::ValTree<'tcx>> {
    let ecx = mk_eval_cx(
        tcx, DUMMY_SP, param_env,
        // It is absolutely crucial for soundness that
        // we do not read from static items or other mutable memory.
        false,
    );
    // Turn the raw allocation into a place we can walk field-by-field.
    let place = ecx.raw_const_to_mplace(raw).unwrap();
    const_to_valtree_inner(&ecx, &place)
}
/// Build a `ValTree::Branch` from the first `n` fields of `place`
/// (after downcasting to `variant`, if one is given).
///
/// For enums the variant index is prepended as an extra leaf so the variant
/// can be recovered from the valtree alone. Returns `None` if any field
/// cannot be represented as a valtree.
#[instrument(skip(ecx), level = "debug")]
fn branches<'tcx>(
    ecx: &CompileTimeEvalContext<'tcx, 'tcx>,
    place: &MPlaceTy<'tcx>,
    n: usize,
    variant: Option<VariantIdx>,
) -> Option<ty::ValTree<'tcx>> {
    let place = match variant {
        Some(variant) => ecx.mplace_downcast(&place, variant).unwrap(),
        None => *place,
    };
    // Double `Some` so the `chain` below yields `Option<ValTree>` items like `fields` does.
    let variant = variant.map(|variant| Some(ty::ValTree::Leaf(ScalarInt::from(variant.as_u32()))));
    debug!(?place, ?variant);

    let fields = (0..n).map(|i| {
        let field = ecx.mplace_field(&place, i).unwrap();
        const_to_valtree_inner(ecx, &field)
    });

    // For enums, we prepend their variant index before the variant's fields so we can figure out
    // the variant again when just seeing a valtree.
    let branches = variant.into_iter().chain(fields);

    // `collect::<Option<Vec<_>>>` short-circuits to `None` if any field was unrepresentable.
    Some(ty::ValTree::Branch(ecx.tcx.arena.alloc_from_iter(branches.collect::<Option<Vec<_>>>()?)))
}
fn slice_branches<'tcx>(
ecx: &CompileTimeEvalContext<'tcx, 'tcx>,
place: &MPlaceTy<'tcx>,
) -> Option<ty::ValTree<'tcx>> {
let n = place.len(&ecx.tcx.tcx).expect(&format!("expected to use len of place {:?}", place));
let branches = (0..n).map(|i| {
let place_elem = ecx.mplace_index(place, i).unwrap();
const_to_valtree_inner(ecx, &place_elem)
});
Some(ty::ValTree::Branch(ecx.tcx.arena.alloc_from_iter(branches.collect::<Option<Vec<_>>>()?)))
}
/// Recursive worker for `const_to_valtree`: converts the value at `place`
/// into a `ValTree`. Returns `None` for types that are not allowed in
/// type-level constants (raw/fn pointers, trait objects, closures, ...).
#[instrument(skip(ecx), level = "debug")]
fn const_to_valtree_inner<'tcx>(
    ecx: &CompileTimeEvalContext<'tcx, 'tcx>,
    place: &MPlaceTy<'tcx>,
) -> Option<ty::ValTree<'tcx>> {
    match place.layout.ty.kind() {
        // Function definitions carry no data; represent them as a ZST valtree.
        ty::FnDef(..) => Some(ty::ValTree::zst()),
        ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => {
            // Primitive: read the scalar out of memory and store it as a leaf.
            let val = ecx.read_immediate(&place.into()).unwrap();
            let val = val.to_scalar().unwrap();
            Some(ty::ValTree::Leaf(val.assert_int()))
        }

        // Raw pointers are not allowed in type level constants, as we cannot properly test them for
        // equality at compile-time (see `ptr_guaranteed_eq`/`_ne`).
        // Technically we could allow function pointers (represented as `ty::Instance`), but this is not guaranteed to
        // agree with runtime equality tests.
        ty::FnPtr(_) | ty::RawPtr(_) => None,
        ty::Ref(_, _, _) => {
            // References are transparent for valtrees: recurse into the pointee.
            let derefd_place = ecx.deref_operand(&place.into()).unwrap_or_else(|e| bug!("couldn't deref {:?}, error: {:?}", place, e));
            debug!(?derefd_place);

            const_to_valtree_inner(ecx, &derefd_place)
        }
        ty::Str | ty::Slice(_) | ty::Array(_, _) => {
            // One branch per element; `str` is treated as a byte sequence.
            let valtree = slice_branches(ecx, place);
            debug!(?valtree);

            valtree
        }
        // Trait objects are not allowed in type level constants, as we have no concept for
        // resolving their backing type, even if we can do that at const eval time. We may
        // hypothetically be able to allow `dyn StructuralEq` trait objects in the future,
        // but it is unclear if this is useful.
        ty::Dynamic(..) => None,

        ty::Tuple(substs) => branches(ecx, place, substs.len(), None),

        ty::Adt(def, _) => {
            if def.variants().is_empty() {
                bug!("uninhabited types should have errored and never gotten converted to valtree")
            }

            // For enums, record the active variant so it can be prepended to the fields.
            let variant = ecx.read_discriminant(&place.into()).unwrap().1;

            branches(ecx, place, def.variant(variant).fields.len(), def.is_enum().then_some(variant))
        }

        ty::Never
        | ty::Error(_)
        | ty::Foreign(..)
        | ty::Infer(ty::FreshIntTy(_))
        | ty::Infer(ty::FreshFloatTy(_))
        | ty::Projection(..)
        | ty::Param(_)
        | ty::Bound(..)
        | ty::Placeholder(..)
        // FIXME(oli-obk): we could look behind opaque types
        | ty::Opaque(..)
        | ty::Infer(_)
        // FIXME(oli-obk): we can probably encode closures just like structs
        | ty::Closure(..)
        | ty::Generator(..)
        | ty::GeneratorWitness(..) => None,
    }
}
/// Allocate a fresh stack-kind `MPlaceTy` sized for the layout of the given
/// (param-env, type) pair. Panics if the layout cannot be computed or the
/// allocation fails.
#[instrument(skip(ecx), level = "debug")]
fn create_mplace_from_layout<'tcx>(
    ecx: &mut CompileTimeEvalContext<'tcx, 'tcx>,
    param_env_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> MPlaceTy<'tcx> {
    // Compute the layout first; the immutable borrow of `ecx.tcx` ends before
    // the mutable borrow needed by `allocate`.
    let layout = ecx.tcx.layout_of(param_env_ty).unwrap();
    debug!(?layout);
    ecx.allocate(layout, MemoryKind::Stack).unwrap()
}
/// Converts a `ValTree` to a `ConstValue`, which is needed after mir
/// construction has finished.
///
/// Trivial valtrees (bool, int, uint, float, char) become `ConstValue::Scalar`
/// directly. For references and aggregates we allocate an `MPlace`, fill it by
/// walking the valtree (see `fill_place_recursively`), intern the allocation,
/// and read the result back out via `op_to_const`.
#[instrument(skip(tcx), level = "debug")]
pub fn valtree_to_const_value<'tcx>(
    tcx: TyCtxt<'tcx>,
    param_env_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
    valtree: ty::ValTree<'tcx>,
) -> ConstValue<'tcx> {
    // Basic idea: We directly construct `Scalar` values from trivial `ValTree`s
    // (those for constants with type bool, int, uint, float or char).
    // For all other types we create an `MPlace` and fill that by walking
    // the `ValTree` and using `place_projection` and `place_field` to
    // create inner `MPlace`s which are filled recursively.
    // FIXME Does this need an example?

    let (param_env, ty) = param_env_ty.into_parts();
    let mut ecx = mk_eval_cx(tcx, DUMMY_SP, param_env, false);

    match ty.kind() {
        ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => match valtree {
            ty::ValTree::Leaf(scalar_int) => ConstValue::Scalar(Scalar::Int(scalar_int)),
            ty::ValTree::Branch(_) => bug!(
                "ValTrees for Bool, Int, Uint, Float or Char should have the form ValTree::Leaf"
            ),
        },
        ty::Ref(_, inner_ty, _) => {
            match inner_ty.kind() {
                ty::Slice(_) | ty::Str => {
                    // `&[T]`/`&str` becomes `ConstValue::Slice` backed by an
                    // interned array allocation of the element type
                    // (`str` elements are `u8`).
                    let slice_ty = match inner_ty.kind() {
                        ty::Slice(slice_ty) => *slice_ty,
                        ty::Str => tcx.mk_ty(ty::Uint(ty::UintTy::U8)),
                        _ => bug!("expected ty::Slice | ty::Str"),
                    };
                    debug!(?slice_ty);
                    let valtrees = valtree.unwrap_branch();
                    // Create a place for the underlying array
                    let len = valtrees.len();
                    let arr_ty = tcx.mk_array(slice_ty, len as u64);
                    let mut place =
                        create_mplace_from_layout(&mut ecx, ty::ParamEnv::empty().and(arr_ty));
                    debug!(?place);
                    // Insert elements of `arr_valtree` into `place`
                    fill_place_recursively(&mut ecx, &mut place, valtree, arr_ty);
                    dump_place(&ecx, place.into());

                    // The allocation behind `place` is local, we need to intern it
                    intern_const_alloc_recursive(&mut ecx, InternKind::Constant, &place).unwrap();

                    // Now we need to get the Allocation
                    let alloc_id = place.mplace.ptr.provenance.unwrap();
                    debug!(?alloc_id);

                    let data = match tcx.get_global_alloc(alloc_id) {
                        Some(GlobalAlloc::Memory(const_alloc)) => const_alloc,
                        _ => bug!("expected memory allocation"),
                    };
                    debug!(?data);

                    return ConstValue::Slice { data, start: 0, end: len as usize };
                }
                _ => {
                    match valtree {
                        ty::ValTree::Branch(_) => {
                            // create a place for the pointee
                            let mut place = create_mplace_from_layout(
                                &mut ecx,
                                ty::ParamEnv::empty().and(*inner_ty),
                            );
                            debug!(?place);

                            // insert elements of valtree into `place`
                            fill_place_recursively(&mut ecx, &mut place, valtree, *inner_ty);
                            dump_place(&ecx, place.into());
                            intern_const_alloc_recursive(&mut ecx, InternKind::Constant, &place)
                                .unwrap();

                            // The constant is a reference: build an immediate
                            // pointing at the interned pointee, typed at the
                            // original reference type.
                            let ref_place = place.mplace.to_ref(&tcx);
                            let imm = ImmTy::from_immediate(
                                ref_place,
                                tcx.layout_of(param_env_ty).unwrap(),
                            );

                            let const_val = op_to_const(&ecx, &imm.into());
                            debug!(?const_val);

                            const_val
                        }
                        ty::ValTree::Leaf(_) => {
                            // NOTE(review): unlike the `Branch` arm above, this arm
                            // does not intern the pointee allocation — confirm this
                            // is intentional.
                            let mut place = create_mplace_from_layout(
                                &mut ecx,
                                ty::ParamEnv::empty().and(*inner_ty),
                            );

                            fill_place_recursively(&mut ecx, &mut place, valtree, *inner_ty);
                            dump_place(&ecx, place.into());

                            let ref_place = place.mplace.to_ref(&tcx);
                            let imm = ImmTy::from_immediate(
                                ref_place,
                                tcx.layout_of(param_env_ty).unwrap(),
                            );

                            op_to_const(&ecx, &imm.into())
                        }
                    }
                }
            }
        }
        ty::Tuple(_) | ty::Array(_, _) | ty::Adt(..) => {
            // Aggregates: allocate, fill field-by-field, intern, then convert
            // the whole place back into a `ConstValue`.
            let mut place = create_mplace_from_layout(&mut ecx, param_env_ty);
            debug!(?place);

            fill_place_recursively(&mut ecx, &mut place, valtree, ty);
            dump_place(&ecx, place.into());
            intern_const_alloc_recursive(&mut ecx, InternKind::Constant, &place).unwrap();

            let const_val = op_to_const(&ecx, &place.into());
            debug!(?const_val);

            const_val
        }
        // These types never get a valtree (see `const_to_valtree_inner`), so
        // reaching this arm is a compiler bug.
        ty::Never
        | ty::FnDef(..)
        | ty::Error(_)
        | ty::Foreign(..)
        | ty::Infer(ty::FreshIntTy(_))
        | ty::Infer(ty::FreshFloatTy(_))
        | ty::Projection(..)
        | ty::Param(_)
        | ty::Bound(..)
        | ty::Placeholder(..)
        | ty::Opaque(..)
        | ty::Infer(_)
        | ty::Closure(..)
        | ty::Generator(..)
        | ty::GeneratorWitness(..)
        | ty::FnPtr(_)
        | ty::RawPtr(_)
        | ty::Str
        | ty::Slice(_)
        | ty::Dynamic(..) => bug!("no ValTree should have been created for type {:?}", ty.kind()),
    }
}
// FIXME Needs a better/correct name
/// Writes the value(s) encoded by `valtree` into `place`, recursing into
/// fields/elements for aggregate types. `ty` is the type `valtree` is
/// interpreted at and is expected to match `place`'s layout.
#[instrument(skip(ecx), level = "debug")]
fn fill_place_recursively<'tcx>(
    ecx: &mut CompileTimeEvalContext<'tcx, 'tcx>,
    place: &mut MPlaceTy<'tcx>,
    valtree: ty::ValTree<'tcx>,
    ty: Ty<'tcx>,
) {
    // This will match on valtree and write the value(s) corresponding to the ValTree
    // inside the place recursively.
    let tcx = ecx.tcx.tcx;

    match ty.kind() {
        ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => {
            // Primitive leaf: write the scalar straight into the place.
            let scalar_int = valtree.unwrap_leaf();
            debug!("writing trivial valtree {:?} to place {:?}", scalar_int, place);
            ecx.write_immediate(
                Immediate::Scalar(ScalarMaybeUninit::Scalar(scalar_int.into())),
                &(*place).into(),
            )
            .unwrap();
        }
        ty::Ref(_, inner_ty, _) => {
            match inner_ty.kind() {
                ty::Slice(_) | ty::Str => {
                    // `&[T]`/`&str`: materialize the backing array in a fresh
                    // place, then write a (pointer, length) `ScalarPair` into
                    // `place` (`str` elements are `u8`).
                    let slice_ty = match inner_ty.kind() {
                        ty::Slice(slice_ty) => *slice_ty,
                        ty::Str => tcx.mk_ty(ty::Uint(ty::UintTy::U8)),
                        _ => bug!("expected ty::Slice | ty::Str"),
                    };
                    debug!(?slice_ty);
                    let valtrees = valtree.unwrap_branch();
                    debug!(?valtrees);
                    let len = valtrees.len();
                    debug!(?len);

                    // create a place for the underlying array
                    let arr_ty = tcx.mk_array(slice_ty, len as u64);
                    let mut arr_place =
                        create_mplace_from_layout(ecx, ty::ParamEnv::empty().and(arr_ty));
                    debug!(?arr_place);

                    // Insert elements of `arr_valtree` into `place`
                    fill_place_recursively(ecx, &mut arr_place, valtree, arr_ty);
                    dump_place(&ecx, arr_place.into());

                    // Now we need to create a `ScalarPair` from the filled `place`
                    // and write that into `place`
                    let (alloc_id, offset) = arr_place.mplace.ptr.into_parts();
                    debug!(?alloc_id, ?offset);
                    // NOTE(review): the array allocation is not interned here,
                    // unlike the sized-pointee arm below — confirm interning
                    // happens later for this path.
                    let unwrapped_ptr = Pointer { offset, provenance: alloc_id.unwrap() };
                    let len_scalar = ScalarMaybeUninit::Scalar(Scalar::from_u64(len as u64));
                    let imm = Immediate::ScalarPair(
                        ScalarMaybeUninit::from_pointer(unwrapped_ptr, &tcx),
                        len_scalar,
                    );
                    debug!(?imm);

                    // Now write the ScalarPair into the original place we wanted to fill
                    // in this call
                    let _ = ecx.write_immediate(imm, &(*place).into()).unwrap();
                    dump_place(&ecx, (*place).into());
                }
                _ => {
                    // Sized pointee: fill a fresh place with the pointee's
                    // value, intern it, then write a reference to it into `place`.
                    let mut pointee_place =
                        create_mplace_from_layout(ecx, ty::ParamEnv::empty().and(*inner_ty));
                    debug!(?pointee_place);

                    fill_place_recursively(ecx, &mut pointee_place, valtree, *inner_ty);
                    dump_place(ecx, pointee_place.into());
                    intern_const_alloc_recursive(ecx, InternKind::Constant, &pointee_place)
                        .unwrap();

                    let imm = pointee_place.mplace.to_ref(&tcx);
                    debug!(?imm);

                    ecx.write_immediate(imm, &(*place).into()).unwrap();
                }
            }
        }
        ty::Tuple(tuple_types) => {
            // One valtree branch per tuple element, written field-by-field.
            let branches = valtree.unwrap_branch();
            assert_eq!(tuple_types.len(), branches.len());

            for (i, inner_valtree) in branches.iter().enumerate() {
                debug!(?i, ?inner_valtree);
                let inner_ty = tuple_types.get(i).expect(&format!(
                    "expected to be able to index at position {} into {:?}",
                    i, tuple_types
                ));
                debug!(?inner_ty);

                // Create the mplace for the tuple element
                let mut place_inner = ecx.mplace_field(place, i).unwrap();
                debug!(?place_inner);

                // insert valtree corresponding to tuple element into place
                fill_place_recursively(ecx, &mut place_inner, *inner_valtree, *inner_ty);
            }
        }
        ty::Array(inner_ty, _) => {
            // One valtree branch per array element; all elements share `inner_ty`.
            let inner_valtrees = valtree.unwrap_branch();
            for (i, inner_valtree) in inner_valtrees.iter().enumerate() {
                debug!(?i, ?inner_valtree);

                let mut place_inner = ecx.mplace_field(place, i).unwrap();
                debug!(?place_inner);
                fill_place_recursively(ecx, &mut place_inner, *inner_valtree, *inner_ty)
            }
        }
        ty::Adt(def, substs) if def.is_enum() => {
            debug!("enum, substs: {:?}", substs);
            let inner_valtrees = valtree.unwrap_branch();

            // First element of valtree corresponds to variant
            let scalar_int = inner_valtrees[0].unwrap_leaf();
            let variant_idx = VariantIdx::from_u32(scalar_int.try_to_u32().unwrap());
            let variant = def.variant(variant_idx);
            debug!(?variant);

            // Need to downcast place
            let place_downcast = place.project_downcast(ecx, variant_idx).unwrap();
            debug!(?place_downcast);

            // fill `place_downcast` with the valtree elements corresponding to
            // the fields of the enum
            let fields = &variant.fields;
            // Skip the variant-index leaf; the rest are the field values.
            let inner_valtrees = &inner_valtrees[1..];
            for (i, field) in fields.iter().enumerate() {
                debug!(?i, ?field);

                let field_ty = field.ty(tcx, substs);
                debug!(?field_ty);

                let mut field_mplace = ecx.mplace_field(&place_downcast, i).unwrap();
                debug!(?field_mplace);
                let inner_valtree = inner_valtrees[i];

                fill_place_recursively(ecx, &mut field_mplace, inner_valtree, field_ty);
                dump_place(&ecx, field_mplace.into());
            }

            debug!("dump of place_downcast");
            dump_place(ecx, place_downcast.into());

            // don't forget filling the place with the discriminant of the enum
            ecx.write_discriminant(variant_idx, &(*place).into()).unwrap();
            dump_place(ecx, (*place).into());
        }
        ty::Adt(def, substs) => {
            // Non-enum ADT (struct/union-like): single variant at index 0,
            // one valtree branch per field.
            debug!("Adt def: {:?} with substs: {:?}", def, substs);
            let inner_valtrees = valtree.unwrap_branch();
            debug!(?inner_valtrees);
            let (fields, inner_valtrees) =
                (&def.variant(VariantIdx::from_usize(0)).fields[..], inner_valtrees);

            debug!("fields: {:?}", fields);
            for (i, field) in fields.iter().enumerate() {
                let field_ty = field.ty(tcx, substs);
                debug!(?field_ty);
                // `old_field_ty` is only logged; presumably kept for comparing the
                // substituted vs. declared field type while debugging — TODO confirm.
                let old_field_ty = tcx.type_of(field.did);
                debug!(?old_field_ty);
                let projection_elem = ProjectionElem::Field(Field::from_usize(i), field_ty);
                let mut field_place = ecx.mplace_projection(place, projection_elem).unwrap();
                let inner_valtree = inner_valtrees[i];

                fill_place_recursively(ecx, &mut field_place, inner_valtree, field_ty);
            }
        }
        // Remaining types never get a valtree (see `const_to_valtree_inner`),
        // so there is nothing to write for them.
        _ => {}
    }
}
/// Trace-level dump of `place`'s current contents, for debugging only.
fn dump_place<'tcx>(ecx: &CompileTimeEvalContext<'tcx, 'tcx>, place: PlaceTy<'tcx>) {
    let rendered = ecx.dump_place(place.place);
    trace!("{:?}", rendered);
}

View File

@ -14,7 +14,7 @@ mod terminator;
mod traits; mod traits;
mod util; mod util;
mod validity; mod validity;
mod visitor; pub(crate) mod visitor;
pub use rustc_middle::mir::interpret::*; // have all the `interpret` symbols in one place: here pub use rustc_middle::mir::interpret::*; // have all the `interpret` symbols in one place: here

View File

@ -98,7 +98,7 @@ impl<'tcx, Tag: Provenance> Immediate<Tag> {
// as input for binary and cast operations. // as input for binary and cast operations.
#[derive(Copy, Clone, Debug)] #[derive(Copy, Clone, Debug)]
pub struct ImmTy<'tcx, Tag: Provenance = AllocId> { pub struct ImmTy<'tcx, Tag: Provenance = AllocId> {
imm: Immediate<Tag>, pub imm: Immediate<Tag>,
pub layout: TyAndLayout<'tcx>, pub layout: TyAndLayout<'tcx>,
} }
@ -248,7 +248,7 @@ impl<'tcx, Tag: Provenance> ImmTy<'tcx, Tag> {
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// Try reading an immediate in memory; this is interesting particularly for `ScalarPair`. /// Try reading an immediate in memory; this is interesting particularly for `ScalarPair`.
/// Returns `None` if the layout does not permit loading this as a value. /// Returns `None` if the layout does not permit loading this as a value.
fn try_read_immediate_from_mplace( pub(crate) fn try_read_immediate_from_mplace(
&self, &self,
mplace: &MPlaceTy<'tcx, M::PointerTag>, mplace: &MPlaceTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, Option<ImmTy<'tcx, M::PointerTag>>> { ) -> InterpResult<'tcx, Option<ImmTy<'tcx, M::PointerTag>>> {
@ -424,6 +424,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}) })
} }
#[instrument(skip(self), level = "debug")]
pub fn operand_projection( pub fn operand_projection(
&self, &self,
base: &OpTy<'tcx, M::PointerTag>, base: &OpTy<'tcx, M::PointerTag>,

View File

@ -82,7 +82,7 @@ rustc_data_structures::static_assert_size!(Place, 56);
#[derive(Copy, Clone, Debug)] #[derive(Copy, Clone, Debug)]
pub struct PlaceTy<'tcx, Tag: Provenance = AllocId> { pub struct PlaceTy<'tcx, Tag: Provenance = AllocId> {
place: Place<Tag>, // Keep this private; it helps enforce invariants. pub(crate) place: Place<Tag>, // Keep this private; it helps enforce invariants.
pub layout: TyAndLayout<'tcx>, pub layout: TyAndLayout<'tcx>,
} }
@ -100,7 +100,7 @@ impl<'tcx, Tag: Provenance> std::ops::Deref for PlaceTy<'tcx, Tag> {
/// A MemPlace with its layout. Constructing it is only possible in this module. /// A MemPlace with its layout. Constructing it is only possible in this module.
#[derive(Copy, Clone, Hash, Eq, PartialEq, Debug)] #[derive(Copy, Clone, Hash, Eq, PartialEq, Debug)]
pub struct MPlaceTy<'tcx, Tag: Provenance = AllocId> { pub struct MPlaceTy<'tcx, Tag: Provenance = AllocId> {
mplace: MemPlace<Tag>, pub(crate) mplace: MemPlace<Tag>,
pub layout: TyAndLayout<'tcx>, pub layout: TyAndLayout<'tcx>,
} }
@ -294,6 +294,7 @@ where
/// Take an operand, representing a pointer, and dereference it to a place -- that /// Take an operand, representing a pointer, and dereference it to a place -- that
/// will always be a MemPlace. Lives in `place.rs` because it creates a place. /// will always be a MemPlace. Lives in `place.rs` because it creates a place.
#[instrument(skip(self), level = "debug")]
pub fn deref_operand( pub fn deref_operand(
&self, &self,
src: &OpTy<'tcx, M::PointerTag>, src: &OpTy<'tcx, M::PointerTag>,
@ -487,7 +488,8 @@ where
} }
/// Project into an mplace /// Project into an mplace
pub(super) fn mplace_projection( #[instrument(skip(self), level = "debug")]
pub(crate) fn mplace_projection(
&self, &self,
base: &MPlaceTy<'tcx, M::PointerTag>, base: &MPlaceTy<'tcx, M::PointerTag>,
proj_elem: mir::PlaceElem<'tcx>, proj_elem: mir::PlaceElem<'tcx>,
@ -548,6 +550,7 @@ where
/// Just a convenience function, but used quite a bit. /// Just a convenience function, but used quite a bit.
/// This is the only projection that might have a side-effect: We cannot project /// This is the only projection that might have a side-effect: We cannot project
/// into the field of a local `ScalarPair`, we have to first allocate it. /// into the field of a local `ScalarPair`, we have to first allocate it.
#[instrument(skip(self), level = "debug")]
pub fn place_field( pub fn place_field(
&mut self, &mut self,
base: &PlaceTy<'tcx, M::PointerTag>, base: &PlaceTy<'tcx, M::PointerTag>,
@ -586,6 +589,7 @@ where
} }
/// Projects into a place. /// Projects into a place.
#[instrument(skip(self), level = "debug")]
pub fn place_projection( pub fn place_projection(
&mut self, &mut self,
base: &PlaceTy<'tcx, M::PointerTag>, base: &PlaceTy<'tcx, M::PointerTag>,
@ -617,19 +621,23 @@ where
/// Computes a place. You should only use this if you intend to write into this /// Computes a place. You should only use this if you intend to write into this
/// place; for reading, a more efficient alternative is `eval_place_for_read`. /// place; for reading, a more efficient alternative is `eval_place_for_read`.
#[instrument(skip(self), level = "debug")]
pub fn eval_place( pub fn eval_place(
&mut self, &mut self,
place: mir::Place<'tcx>, place: mir::Place<'tcx>,
) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> { ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
debug!("projection: {:?}", place.projection);
let mut place_ty = PlaceTy { let mut place_ty = PlaceTy {
// This works even for dead/uninitialized locals; we check further when writing // This works even for dead/uninitialized locals; we check further when writing
place: Place::Local { frame: self.frame_idx(), local: place.local }, place: Place::Local { frame: self.frame_idx(), local: place.local },
layout: self.layout_of_local(self.frame(), place.local, None)?, layout: self.layout_of_local(self.frame(), place.local, None)?,
}; };
debug!(?place_ty);
for elem in place.projection.iter() { for elem in place.projection.iter() {
place_ty = self.place_projection(&place_ty, &elem)? place_ty = self.place_projection(&place_ty, &elem)?
} }
debug!("place after projections: {:?}", place_ty);
trace!("{:?}", self.dump_place(place_ty.place)); trace!("{:?}", self.dump_place(place_ty.place));
// Sanity-check the type we ended up with. // Sanity-check the type we ended up with.
@ -646,6 +654,7 @@ where
/// Write an immediate to a place /// Write an immediate to a place
#[inline(always)] #[inline(always)]
#[instrument(skip(self), level = "debug")]
pub fn write_immediate( pub fn write_immediate(
&mut self, &mut self,
src: Immediate<M::PointerTag>, src: Immediate<M::PointerTag>,
@ -684,6 +693,7 @@ where
/// Write an immediate to a place. /// Write an immediate to a place.
/// If you use this you are responsible for validating that things got copied at the /// If you use this you are responsible for validating that things got copied at the
/// right type. /// right type.
#[instrument(skip(self), level = "debug")]
fn write_immediate_no_validate( fn write_immediate_no_validate(
&mut self, &mut self,
src: Immediate<M::PointerTag>, src: Immediate<M::PointerTag>,
@ -736,6 +746,7 @@ where
/// Write an immediate to memory. /// Write an immediate to memory.
/// If you use this you are responsible for validating that things got copied at the /// If you use this you are responsible for validating that things got copied at the
/// right type. /// right type.
#[instrument(skip(self), level = "debug")]
fn write_immediate_to_mplace_no_validate( fn write_immediate_to_mplace_no_validate(
&mut self, &mut self,
value: Immediate<M::PointerTag>, value: Immediate<M::PointerTag>,
@ -758,6 +769,7 @@ where
// cover all the bytes! // cover all the bytes!
match value { match value {
Immediate::Scalar(scalar) => { Immediate::Scalar(scalar) => {
debug!(?scalar);
match dest.layout.abi { match dest.layout.abi {
Abi::Scalar(_) => {} // fine Abi::Scalar(_) => {} // fine
_ => span_bug!( _ => span_bug!(
@ -830,6 +842,7 @@ where
/// Copies the data from an operand to a place. This does not support transmuting! /// Copies the data from an operand to a place. This does not support transmuting!
/// Use `copy_op_transmute` if the layouts could disagree. /// Use `copy_op_transmute` if the layouts could disagree.
#[inline(always)] #[inline(always)]
#[instrument(skip(self), level = "debug")]
pub fn copy_op( pub fn copy_op(
&mut self, &mut self,
src: &OpTy<'tcx, M::PointerTag>, src: &OpTy<'tcx, M::PointerTag>,
@ -849,6 +862,7 @@ where
/// Use `copy_op_transmute` if the layouts could disagree. /// Use `copy_op_transmute` if the layouts could disagree.
/// Also, if you use this you are responsible for validating that things get copied at the /// Also, if you use this you are responsible for validating that things get copied at the
/// right type. /// right type.
#[instrument(skip(self), level = "debug")]
fn copy_op_no_validate( fn copy_op_no_validate(
&mut self, &mut self,
src: &OpTy<'tcx, M::PointerTag>, src: &OpTy<'tcx, M::PointerTag>,
@ -868,6 +882,7 @@ where
// Let us see if the layout is simple so we take a shortcut, avoid force_allocation. // Let us see if the layout is simple so we take a shortcut, avoid force_allocation.
let src = match self.try_read_immediate(src)? { let src = match self.try_read_immediate(src)? {
Ok(src_val) => { Ok(src_val) => {
debug!("immediate from src is {:?}", src_val);
assert!(!src.layout.is_unsized(), "cannot have unsized immediates"); assert!(!src.layout.is_unsized(), "cannot have unsized immediates");
// Yay, we got a value that we can write directly. // Yay, we got a value that we can write directly.
// FIXME: Add a check to make sure that if `src` is indirect, // FIXME: Add a check to make sure that if `src` is indirect,
@ -955,6 +970,7 @@ where
/// This supports unsized types and returns the computed size to avoid some /// This supports unsized types and returns the computed size to avoid some
/// redundant computation when copying; use `force_allocation` for a simpler, sized-only /// redundant computation when copying; use `force_allocation` for a simpler, sized-only
/// version. /// version.
#[instrument(skip(self), level = "debug")]
pub fn force_allocation_maybe_sized( pub fn force_allocation_maybe_sized(
&mut self, &mut self,
place: &PlaceTy<'tcx, M::PointerTag>, place: &PlaceTy<'tcx, M::PointerTag>,
@ -962,6 +978,7 @@ where
) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::PointerTag>, Option<Size>)> { ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::PointerTag>, Option<Size>)> {
let (mplace, size) = match place.place { let (mplace, size) = match place.place {
Place::Local { frame, local } => { Place::Local { frame, local } => {
debug!("LocalPlace");
match M::access_local_mut(self, frame, local)? { match M::access_local_mut(self, frame, local)? {
Ok(&mut local_val) => { Ok(&mut local_val) => {
// We need to make an allocation. // We need to make an allocation.
@ -975,9 +992,12 @@ where
let (size, align) = self let (size, align) = self
.size_and_align_of(&meta, &local_layout)? .size_and_align_of(&meta, &local_layout)?
.expect("Cannot allocate for non-dyn-sized type"); .expect("Cannot allocate for non-dyn-sized type");
debug!(?size, ?align);
let ptr = self.allocate_ptr(size, align, MemoryKind::Stack)?; let ptr = self.allocate_ptr(size, align, MemoryKind::Stack)?;
debug!("allocated ptr: {:?}", ptr);
let mplace = MemPlace { ptr: ptr.into(), align, meta }; let mplace = MemPlace { ptr: ptr.into(), align, meta };
if let LocalValue::Live(Operand::Immediate(value)) = local_val { if let LocalValue::Live(Operand::Immediate(value)) = local_val {
debug!("LocalValue::Live: immediate value {:?}", value);
// Preserve old value. // Preserve old value.
// We don't have to validate as we can assume the local // We don't have to validate as we can assume the local
// was already valid for its type. // was already valid for its type.
@ -1037,6 +1057,7 @@ where
} }
/// Writes the discriminant of the given variant. /// Writes the discriminant of the given variant.
#[instrument(skip(self), level = "debug")]
pub fn write_discriminant( pub fn write_discriminant(
&mut self, &mut self,
variant_index: VariantIdx, variant_index: VariantIdx,

View File

@ -418,6 +418,7 @@ impl<Tag: Provenance, Extra> Allocation<Tag, Extra> {
/// ///
/// It is the caller's responsibility to check bounds and alignment beforehand. /// It is the caller's responsibility to check bounds and alignment beforehand.
/// Most likely, you want to call `InterpCx::write_scalar` instead of this method. /// Most likely, you want to call `InterpCx::write_scalar` instead of this method.
#[instrument(skip(self, cx), level = "debug")]
pub fn write_scalar( pub fn write_scalar(
&mut self, &mut self,
cx: &impl HasDataLayout, cx: &impl HasDataLayout,
@ -432,6 +433,7 @@ impl<Tag: Provenance, Extra> Allocation<Tag, Extra> {
return self.write_uninit(cx, range); return self.write_uninit(cx, range);
} }
}; };
debug!(?val);
// `to_bits_or_ptr_internal` is the right method because we just want to store this data // `to_bits_or_ptr_internal` is the right method because we just want to store this data
// as-is into memory. // as-is into memory.
@ -442,13 +444,16 @@ impl<Tag: Provenance, Extra> Allocation<Tag, Extra> {
} }
Ok(data) => (data, None), Ok(data) => (data, None),
}; };
debug!(?bytes, ?provenance);
let endian = cx.data_layout().endian; let endian = cx.data_layout().endian;
let dst = self.get_bytes_mut(cx, range)?; let dst = self.get_bytes_mut(cx, range)?;
debug!(?dst);
write_target_uint(endian, dst, bytes).unwrap(); write_target_uint(endian, dst, bytes).unwrap();
// See if we have to also write a relocation. // See if we have to also write a relocation.
if let Some(provenance) = provenance { if let Some(provenance) = provenance {
debug!("insert relocation for {:?}", provenance);
self.relocations.0.insert(range.start, provenance); self.relocations.0.insert(range.start, provenance);
} }

View File

@ -158,7 +158,7 @@ impl Provenance for AllocId {
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, TyEncodable, TyDecodable, Hash)] #[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, TyEncodable, TyDecodable, Hash)]
#[derive(HashStable)] #[derive(HashStable)]
pub struct Pointer<Tag = AllocId> { pub struct Pointer<Tag = AllocId> {
pub(super) offset: Size, // kept private to avoid accidental misinterpretation (meaning depends on `Tag` type) pub offset: Size, // FIXME This should probably be private
pub provenance: Tag, pub provenance: Tag,
} }

View File

@ -20,6 +20,9 @@ pub enum ValTree<'tcx> {
/// See the `ScalarInt` documentation for how `ScalarInt` guarantees that equal values /// See the `ScalarInt` documentation for how `ScalarInt` guarantees that equal values
/// of these types have the same representation. /// of these types have the same representation.
Leaf(ScalarInt), Leaf(ScalarInt),
//SliceOrStr(ValSlice<'tcx>),
// dont use SliceOrStr for now
/// The fields of any kind of aggregate. Structs, tuples and arrays are represented by /// The fields of any kind of aggregate. Structs, tuples and arrays are represented by
/// listing their fields' values in order. /// listing their fields' values in order.
/// Enums are represented by storing their discriminant as a field, followed by all /// Enums are represented by storing their discriminant as a field, followed by all