Auto merge of #46436 - eddyb:unpacked, r=arielb1,oli-obk

Detect unaligned fields via `aggregate.align < field.align`, instead of a `packed` flag.

Closes #46423. cc @oli-obk
bors 2017-12-17 15:46:00 +00:00
commit 3cc68bac7c
26 changed files with 517 additions and 780 deletions
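
The idea in one self-contained sketch (the `Align` and `TyLayout` below are stand-ins for rustc's `ty::layout` types, reduced to the relevant field): rather than carrying a `packed` flag in `Abi::Aggregate`, a field is treated as potentially unaligned whenever its container guarantees less alignment than the field's type requires.

```rust
/// Stand-in for rustc's `ty::layout::Align`: an ABI alignment in bytes.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
struct Align {
    abi: u64,
}

/// Stand-in for the relevant part of `TyLayout`.
struct TyLayout {
    align: Align,
}

/// The check from the title: a field may be unaligned exactly when the
/// aggregate promises less alignment than the field's type wants, as
/// happens for fields of `#[repr(packed)]` structs.
fn field_may_be_unaligned(aggregate: &TyLayout, field: &TyLayout) -> bool {
    aggregate.align < field.align
}

fn main() {
    let packed_struct = TyLayout { align: Align { abi: 1 } };
    let u32_field = TyLayout { align: Align { abi: 4 } };
    assert!(field_may_be_unaligned(&packed_struct, &u32_field));
}
```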

View File

@ -10,14 +10,13 @@ mod value;
pub use self::error::{EvalError, EvalResult, EvalErrorKind};
pub use self::value::{PrimVal, PrimValKind, Value, Pointer, PtrAndAlign, bytes_to_f32, bytes_to_f64};
pub use self::value::{PrimVal, PrimValKind, Value, Pointer, bytes_to_f32, bytes_to_f64};
use std::collections::BTreeMap;
use ty::layout::HasDataLayout;
use std::fmt;
use ty::layout;
use mir;
use ty;
use ty::layout::{self, Align, HasDataLayout};
use middle::region;
use std::iter;
@ -166,7 +165,7 @@ pub struct Allocation {
/// Denotes undefined memory. Reading from undefined memory is forbidden in miri
pub undef_mask: UndefMask,
/// The alignment of the allocation to detect unaligned reads.
pub align: u64,
pub align: Align,
}
impl Allocation {
@ -177,7 +176,7 @@ impl Allocation {
bytes: slice.to_owned(),
relocations: BTreeMap::new(),
undef_mask,
align: 1,
align: Align::from_bytes(1, 1).unwrap(),
}
}
}

View File

@ -1,30 +1,11 @@
#![allow(unknown_lints)]
use ty::layout::HasDataLayout;
use ty::layout::{Align, HasDataLayout};
use super::{EvalResult, MemoryPointer, PointerArithmetic};
use syntax::ast::FloatTy;
use rustc_const_math::ConstFloat;
#[derive(Copy, Clone, Debug)]
pub struct PtrAndAlign {
pub ptr: Pointer,
/// Remember whether this place is *supposed* to be aligned.
pub aligned: bool,
}
impl PtrAndAlign {
pub fn to_ptr<'tcx>(self) -> EvalResult<'tcx, MemoryPointer> {
self.ptr.to_ptr()
}
pub fn offset<'tcx, C: HasDataLayout>(self, i: u64, cx: C) -> EvalResult<'tcx, Self> {
Ok(PtrAndAlign {
ptr: self.ptr.offset(i, cx)?,
aligned: self.aligned,
})
}
}
pub fn bytes_to_f32(bits: u128) -> ConstFloat {
ConstFloat {
bits,
@ -50,7 +31,7 @@ pub fn bytes_to_f64(bits: u128) -> ConstFloat {
/// operations and fat pointers. This idea was taken from rustc's trans.
#[derive(Clone, Copy, Debug)]
pub enum Value {
ByRef(PtrAndAlign),
ByRef(Pointer, Align),
ByVal(PrimVal),
ByValPair(PrimVal, PrimVal),
}
@ -182,13 +163,6 @@ pub enum PrimValKind {
Char,
}
impl<'a, 'tcx: 'a> Value {
#[inline]
pub fn by_ref(ptr: Pointer) -> Self {
Value::ByRef(PtrAndAlign { ptr, aligned: true })
}
}
impl<'tcx> PrimVal {
pub fn from_u128(n: u128) -> Self {
PrimVal::Bytes(n)
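
A hedged before/after sketch of the `Value::ByRef` change in this file (plain `u64`s stand in for rustc's `Pointer`; `Align` is a stand-in newtype): the boolean `aligned` flag becomes the actual alignment, so each use site can compare it against what a given access requires.

```rust
/// Before this PR: a pointer plus a yes/no flag.
#[allow(dead_code)]
struct PtrAndAlign {
    ptr: u64,
    aligned: bool,
}

/// After: the alignment itself travels with the pointer.
#[derive(Clone, Copy, PartialEq, PartialOrd)]
struct Align {
    bytes: u64,
}

#[allow(dead_code)]
enum Value {
    ByRef(u64, Align),
    ByVal(u128),
    ByValPair(u128, u128),
}

fn main() {
    // An access that needs 8-byte alignment through a 4-byte-aligned
    // reference is now detected by comparison, not by a global flag.
    let v = Value::ByRef(0x1000, Align { bytes: 4 });
    if let Value::ByRef(_, align) = v {
        assert!(align < Align { bytes: 8 });
    }
}
```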

View File

@ -895,7 +895,7 @@ pub struct InterpretInterner<'tcx> {
/// Allows checking whether a constant already has an allocation
///
/// The pointers are to the beginning of an `alloc_by_id` allocation
alloc_cache: FxHashMap<interpret::GlobalId<'tcx>, interpret::PtrAndAlign>,
alloc_cache: FxHashMap<interpret::GlobalId<'tcx>, interpret::Pointer>,
/// A cache for basic byte allocations keyed by their contents. This is used to deduplicate
/// allocations for string and bytestring literals.
@ -931,14 +931,14 @@ impl<'tcx> InterpretInterner<'tcx> {
pub fn get_cached(
&self,
global_id: interpret::GlobalId<'tcx>,
) -> Option<interpret::PtrAndAlign> {
) -> Option<interpret::Pointer> {
self.alloc_cache.get(&global_id).cloned()
}
pub fn cache(
&mut self,
global_id: interpret::GlobalId<'tcx>,
ptr: interpret::PtrAndAlign,
ptr: interpret::Pointer,
) {
if let Some(old) = self.alloc_cache.insert(global_id, ptr) {
bug!("tried to cache {:?}, but was already existing as {:#?}", global_id, old);

View File

@ -778,7 +778,6 @@ pub enum Abi {
Aggregate {
/// If true, the size is exact, otherwise it's only a lower bound.
sized: bool,
packed: bool
}
}
@ -790,18 +789,7 @@ impl Abi {
Abi::Scalar(_) |
Abi::ScalarPair(..) |
Abi::Vector { .. } => false,
Abi::Aggregate { sized, .. } => !sized
}
}
/// Returns true if the fields of the layout are packed.
pub fn is_packed(&self) -> bool {
match *self {
Abi::Uninhabited |
Abi::Scalar(_) |
Abi::ScalarPair(..) |
Abi::Vector { .. } => false,
Abi::Aggregate { packed, .. } => packed
Abi::Aggregate { sized } => !sized
}
}
}
@ -1077,10 +1065,7 @@ impl<'a, 'tcx> LayoutDetails {
}
let size = min_size.abi_align(align);
let mut abi = Abi::Aggregate {
sized,
packed
};
let mut abi = Abi::Aggregate { sized };
// Unpack newtype ABIs and find scalar pairs.
if sized && size.bytes() > 0 {
@ -1254,10 +1239,7 @@ impl<'a, 'tcx> LayoutDetails {
stride: element.size,
count
},
abi: Abi::Aggregate {
sized: true,
packed: false
},
abi: Abi::Aggregate { sized: true },
align: element.align,
size
})
@ -1270,10 +1252,7 @@ impl<'a, 'tcx> LayoutDetails {
stride: element.size,
count: 0
},
abi: Abi::Aggregate {
sized: false,
packed: false
},
abi: Abi::Aggregate { sized: false },
align: element.align,
size: Size::from_bytes(0)
})
@ -1285,10 +1264,7 @@ impl<'a, 'tcx> LayoutDetails {
stride: Size::from_bytes(1),
count: 0
},
abi: Abi::Aggregate {
sized: false,
packed: false
},
abi: Abi::Aggregate { sized: false },
align: dl.i8_align,
size: Size::from_bytes(0)
})
@ -1302,7 +1278,7 @@ impl<'a, 'tcx> LayoutDetails {
let mut unit = univariant_uninterned(&[], &ReprOptions::default(),
StructKind::AlwaysSized)?;
match unit.abi {
Abi::Aggregate { ref mut sized, .. } => *sized = false,
Abi::Aggregate { ref mut sized } => *sized = false,
_ => bug!()
}
tcx.intern_layout(unit)
@ -1418,10 +1394,7 @@ impl<'a, 'tcx> LayoutDetails {
return Ok(tcx.intern_layout(LayoutDetails {
variants: Variants::Single { index: 0 },
fields: FieldPlacement::Union(variants[0].len()),
abi: Abi::Aggregate {
sized: true,
packed
},
abi: Abi::Aggregate { sized: true },
align,
size: size.abi_align(align)
}));
@ -1525,15 +1498,10 @@ impl<'a, 'tcx> LayoutDetails {
let abi = if offset.bytes() == 0 && niche.value.size(dl) == size {
Abi::Scalar(niche.clone())
} else {
let mut packed = st[i].abi.is_packed();
if offset.abi_align(niche_align) != offset {
packed = true;
niche_align = dl.i8_align;
}
Abi::Aggregate {
sized: true,
packed
}
Abi::Aggregate { sized: true }
};
align = align.max(niche_align);
@ -1681,10 +1649,7 @@ impl<'a, 'tcx> LayoutDetails {
let abi = if discr.value.size(dl) == size {
Abi::Scalar(discr.clone())
} else {
Abi::Aggregate {
sized: true,
packed: false
}
Abi::Aggregate { sized: true }
};
tcx.intern_layout(LayoutDetails {
variants: Variants::Tagged {
@ -2277,11 +2242,6 @@ impl<'a, 'tcx> TyLayout<'tcx> {
self.abi.is_unsized()
}
/// Returns true if the fields of the layout are packed.
pub fn is_packed(&self) -> bool {
self.abi.is_packed()
}
/// Returns true if the type is a ZST and not unsized.
pub fn is_zst(&self) -> bool {
match self.abi {
@ -2289,7 +2249,7 @@ impl<'a, 'tcx> TyLayout<'tcx> {
Abi::Scalar(_) |
Abi::ScalarPair(..) |
Abi::Vector { .. } => false,
Abi::Aggregate { sized, .. } => sized && self.size.bytes() == 0
Abi::Aggregate { sized } => sized && self.size.bytes() == 0
}
}
@ -2452,8 +2412,7 @@ impl<'gcx> HashStable<StableHashingContext<'gcx>> for Abi {
element.hash_stable(hcx, hasher);
count.hash_stable(hcx, hasher);
}
Aggregate { packed, sized } => {
packed.hash_stable(hcx, hasher);
Aggregate { sized } => {
sized.hash_stable(hcx, hasher);
}
}
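
With `packed` gone from `Abi::Aggregate`, consumers derive the effective alignment of a field access instead; the `place_field` hunk further down does exactly this. A minimal sketch, with plain byte counts standing in for `Align`:

```rust
/// Effective alignment of a field place: the minimum of the alignment
/// already known for the base place, the aggregate's alignment, and the
/// field's own alignment. For a `#[repr(packed)]` struct the aggregate
/// term is 1, which demotes the field automatically -- no flag needed.
fn field_place_align(base_place: u64, aggregate: u64, field: u64) -> u64 {
    base_place.min(aggregate).min(field)
}

fn main() {
    // A u32 field of a packed(1) struct: effective alignment drops to 1.
    assert_eq!(field_place_align(4, 1, 4), 1);
}
```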

View File

@ -12,8 +12,8 @@ use rustc_data_structures::indexed_vec::Idx;
use syntax::ast::Mutability;
use syntax::codemap::Span;
use rustc::mir::interpret::{EvalResult, EvalError, EvalErrorKind, GlobalId, Value, PrimVal, PtrAndAlign};
use super::{Place, PlaceExtra, EvalContext, StackPopCleanup, ValTy, HasMemory};
use rustc::mir::interpret::{EvalResult, EvalError, EvalErrorKind, GlobalId, Value, Pointer, PrimVal};
use super::{Place, EvalContext, StackPopCleanup, ValTy};
use rustc_const_math::ConstInt;
@ -45,7 +45,7 @@ pub fn eval_body<'a, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
instance: Instance<'tcx>,
param_env: ty::ParamEnv<'tcx>,
) -> EvalResult<'tcx, (PtrAndAlign, Ty<'tcx>)> {
) -> EvalResult<'tcx, (Pointer, Ty<'tcx>)> {
debug!("eval_body: {:?}, {:?}", instance, param_env);
let limits = super::ResourceLimits::default();
let mut ecx = EvalContext::new(tcx, param_env, limits, CompileTimeEvaluator, ());
@ -66,16 +66,10 @@ pub fn eval_body<'a, 'tcx>(
assert!(!layout.is_unsized());
let ptr = ecx.memory.allocate(
layout.size.bytes(),
layout.align.abi(),
layout.align,
None,
)?;
tcx.interpret_interner.borrow_mut().cache(
cid,
PtrAndAlign {
ptr: ptr.into(),
aligned: !layout.is_packed(),
},
);
tcx.interpret_interner.borrow_mut().cache(cid, ptr.into());
let cleanup = StackPopCleanup::MarkStatic(Mutability::Immutable);
let name = ty::tls::with(|tcx| tcx.item_path_str(instance.def_id()));
trace!("const_eval: pushing stack frame for global: {}", name);
@ -83,7 +77,7 @@ pub fn eval_body<'a, 'tcx>(
instance,
mir.span,
mir,
Place::from_ptr(ptr),
Place::from_ptr(ptr, layout.align),
cleanup.clone(),
)?;
@ -101,7 +95,7 @@ pub fn eval_body_as_integer<'a, 'tcx>(
let ptr_ty = eval_body(tcx, instance, param_env);
let (ptr, ty) = ptr_ty?;
let ecx = mk_eval_cx(tcx, instance, param_env)?;
let prim = match ecx.read_maybe_aligned(ptr.aligned, |ectx| ectx.try_read_value(ptr.ptr, ty))? {
let prim = match ecx.try_read_value(ptr, ecx.layout_of(ty)?.align, ty)? {
Some(Value::ByVal(prim)) => prim.to_bytes()?,
_ => return err!(TypeNotPrimitive(ty)),
};
@ -363,7 +357,9 @@ pub fn const_eval_provider<'a, 'tcx>(
(_, Err(err)) => Err(err),
(Ok((miri_val, miri_ty)), Ok(ctfe)) => {
let mut ecx = mk_eval_cx(tcx, instance, key.param_env).unwrap();
check_ctfe_against_miri(&mut ecx, miri_val, miri_ty, ctfe.val);
let layout = ecx.layout_of(miri_ty).unwrap();
let miri_place = Place::from_primval_ptr(miri_val, layout.align);
check_ctfe_against_miri(&mut ecx, miri_place, miri_ty, ctfe.val);
Ok(ctfe)
}
}
@ -374,19 +370,20 @@ pub fn const_eval_provider<'a, 'tcx>(
fn check_ctfe_against_miri<'a, 'tcx>(
ecx: &mut EvalContext<'a, 'tcx, CompileTimeEvaluator>,
miri_val: PtrAndAlign,
miri_place: Place,
miri_ty: Ty<'tcx>,
ctfe: ConstVal<'tcx>,
) {
use rustc::middle::const_val::ConstAggregate::*;
use rustc_const_math::ConstFloat;
use rustc::ty::TypeVariants::*;
let miri_val = ValTy {
value: ecx.read_place(miri_place).unwrap(),
ty: miri_ty
};
match miri_ty.sty {
TyInt(int_ty) => {
let value = ecx.read_maybe_aligned(miri_val.aligned, |ectx| {
ectx.try_read_value(miri_val.ptr, miri_ty)
});
let prim = get_prim(ecx, value);
let prim = get_prim(ecx, miri_val);
let c = ConstInt::new_signed_truncating(prim as i128,
int_ty,
ecx.tcx.sess.target.isize_ty);
@ -394,10 +391,7 @@ fn check_ctfe_against_miri<'a, 'tcx>(
assert_eq!(c, ctfe, "miri evaluated to {:?}, but ctfe yielded {:?}", c, ctfe);
},
TyUint(uint_ty) => {
let value = ecx.read_maybe_aligned(miri_val.aligned, |ectx| {
ectx.try_read_value(miri_val.ptr, miri_ty)
});
let prim = get_prim(ecx, value);
let prim = get_prim(ecx, miri_val);
let c = ConstInt::new_unsigned_truncating(prim,
uint_ty,
ecx.tcx.sess.target.usize_ty);
@ -405,18 +399,12 @@ fn check_ctfe_against_miri<'a, 'tcx>(
assert_eq!(c, ctfe, "miri evaluated to {:?}, but ctfe yielded {:?}", c, ctfe);
},
TyFloat(ty) => {
let value = ecx.read_maybe_aligned(miri_val.aligned, |ectx| {
ectx.try_read_value(miri_val.ptr, miri_ty)
});
let prim = get_prim(ecx, value);
let prim = get_prim(ecx, miri_val);
let f = ConstVal::Float(ConstFloat { bits: prim, ty });
assert_eq!(f, ctfe, "miri evaluated to {:?}, but ctfe yielded {:?}", f, ctfe);
},
TyBool => {
let value = ecx.read_maybe_aligned(miri_val.aligned, |ectx| {
ectx.try_read_value(miri_val.ptr, miri_ty)
});
let bits = get_prim(ecx, value);
let bits = get_prim(ecx, miri_val);
if bits > 1 {
bug!("miri evaluated to {}, but expected a bool {:?}", bits, ctfe);
}
@ -424,10 +412,7 @@ fn check_ctfe_against_miri<'a, 'tcx>(
assert_eq!(b, ctfe, "miri evaluated to {:?}, but ctfe yielded {:?}", b, ctfe);
},
TyChar => {
let value = ecx.read_maybe_aligned(miri_val.aligned, |ectx| {
ectx.try_read_value(miri_val.ptr, miri_ty)
});
let bits = get_prim(ecx, value);
let bits = get_prim(ecx, miri_val);
if let Some(cm) = ::std::char::from_u32(bits as u32) {
assert_eq!(
ConstVal::Char(cm), ctfe,
@ -438,10 +423,8 @@ fn check_ctfe_against_miri<'a, 'tcx>(
}
},
TyStr => {
let value = ecx.read_maybe_aligned(miri_val.aligned, |ectx| {
ectx.try_read_value(miri_val.ptr, miri_ty)
});
if let Ok(Some(Value::ByValPair(PrimVal::Ptr(ptr), PrimVal::Bytes(len)))) = value {
let value = ecx.follow_by_ref_value(miri_val.value, miri_val.ty);
if let Ok(Value::ByValPair(PrimVal::Ptr(ptr), PrimVal::Bytes(len))) = value {
let bytes = ecx
.memory
.read_bytes(ptr.into(), len as u64)
@ -465,7 +448,6 @@ fn check_ctfe_against_miri<'a, 'tcx>(
},
TyArray(elem_ty, n) => {
let n = n.val.to_const_int().unwrap().to_u64().unwrap();
let size = ecx.layout_of(elem_ty).unwrap().size.bytes();
let vec: Vec<(ConstVal, Ty<'tcx>)> = match ctfe {
ConstVal::ByteStr(arr) => arr.data.iter().map(|&b| {
(ConstVal::Integral(ConstInt::U8(b)), ecx.tcx.types.u8)
@ -478,10 +460,12 @@ fn check_ctfe_against_miri<'a, 'tcx>(
},
_ => bug!("miri produced {:?}, but ctfe yielded {:?}", miri_ty, ctfe),
};
let layout = ecx.layout_of(miri_ty).unwrap();
for (i, elem) in vec.into_iter().enumerate() {
assert!((i as u64) < n);
let ptr = miri_val.offset(size * i as u64, &ecx).unwrap();
check_ctfe_against_miri(ecx, ptr, elem_ty, elem.0);
let (field_place, _) =
ecx.place_field(miri_place, Field::new(i), layout).unwrap();
check_ctfe_against_miri(ecx, field_place, elem_ty, elem.0);
}
},
TyTuple(..) => {
@ -491,22 +475,22 @@ fn check_ctfe_against_miri<'a, 'tcx>(
};
let layout = ecx.layout_of(miri_ty).unwrap();
for (i, elem) in vec.into_iter().enumerate() {
let offset = layout.fields.offset(i);
let ptr = miri_val.offset(offset.bytes(), &ecx).unwrap();
check_ctfe_against_miri(ecx, ptr, elem.ty, elem.val);
let (field_place, _) =
ecx.place_field(miri_place, Field::new(i), layout).unwrap();
check_ctfe_against_miri(ecx, field_place, elem.ty, elem.val);
}
},
TyAdt(def, _) => {
let (struct_variant, extra) = if def.is_enum() {
let discr = ecx.read_discriminant_value(
Place::Ptr { ptr: miri_val, extra: PlaceExtra::None },
miri_ty).unwrap();
let mut miri_place = miri_place;
let struct_variant = if def.is_enum() {
let discr = ecx.read_discriminant_value(miri_place, miri_ty).unwrap();
let variant = def.discriminants(ecx.tcx).position(|variant_discr| {
variant_discr.to_u128_unchecked() == discr
}).expect("miri produced invalid enum discriminant");
(&def.variants[variant], PlaceExtra::DowncastVariant(variant))
miri_place = ecx.place_downcast(miri_place, variant).unwrap();
&def.variants[variant]
} else {
(def.struct_variant(), PlaceExtra::None)
def.struct_variant()
};
let vec = match ctfe {
ConstVal::Aggregate(Struct(v)) => v,
@ -520,13 +504,9 @@ fn check_ctfe_against_miri<'a, 'tcx>(
let layout = ecx.layout_of(miri_ty).unwrap();
for &(name, elem) in vec.into_iter() {
let field = struct_variant.fields.iter().position(|f| f.name == name).unwrap();
let (place, _) = ecx.place_field(
Place::Ptr { ptr: miri_val, extra },
Field::new(field),
layout,
).unwrap();
let ptr = place.to_ptr_extra_aligned().0;
check_ctfe_against_miri(ecx, ptr, elem.ty, elem.val);
let (field_place, _) =
ecx.place_field(miri_place, Field::new(field), layout).unwrap();
check_ctfe_against_miri(ecx, field_place, elem.ty, elem.val);
}
},
TySlice(_) => bug!("miri produced a slice?"),
@ -546,11 +526,9 @@ fn check_ctfe_against_miri<'a, 'tcx>(
// should be fine
TyFnDef(..) => {}
TyFnPtr(_) => {
let value = ecx.read_maybe_aligned(miri_val.aligned, |ectx| {
ectx.try_read_value(miri_val.ptr, miri_ty)
});
let value = ecx.value_to_primval(miri_val);
let ptr = match value {
Ok(Some(Value::ByVal(PrimVal::Ptr(ptr)))) => ptr,
Ok(PrimVal::Ptr(ptr)) => ptr,
value => bug!("expected fn ptr, got {:?}", value),
};
let inst = ecx.memory.get_fn(ptr).unwrap();
@ -572,13 +550,10 @@ fn check_ctfe_against_miri<'a, 'tcx>(
fn get_prim<'a, 'tcx>(
ecx: &mut EvalContext<'a, 'tcx, CompileTimeEvaluator>,
res: Result<Option<Value>, EvalError<'tcx>>,
val: ValTy<'tcx>,
) -> u128 {
match res {
Ok(Some(Value::ByVal(prim))) => unwrap_miri(ecx, prim.to_bytes()),
Err(err) => unwrap_miri(ecx, Err(err)),
val => bug!("got {:?}", val),
}
let res = ecx.value_to_primval(val).and_then(|prim| prim.to_bytes());
unwrap_miri(ecx, res)
}
fn unwrap_miri<'a, 'tcx, T>(

View File

@ -13,7 +13,7 @@ use rustc_data_structures::indexed_vec::Idx;
use syntax::codemap::{self, DUMMY_SP};
use syntax::ast::Mutability;
use rustc::mir::interpret::{
PtrAndAlign, GlobalId, Value, Pointer, PrimVal, PrimValKind,
GlobalId, Value, Pointer, PrimVal, PrimValKind,
EvalError, EvalResult, EvalErrorKind, MemoryPointer,
};
@ -211,8 +211,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
assert!(!layout.is_unsized(), "cannot alloc memory for unsized type");
let size = layout.size.bytes();
let align = layout.align.abi();
self.memory.allocate(size, align, Some(MemoryKind::Stack))
self.memory.allocate(size, layout.align, Some(MemoryKind::Stack))
}
pub fn memory(&self) -> &Memory<'a, 'tcx, M> {
@ -241,7 +240,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
))
}
pub(super) fn const_to_value(&mut self, const_val: &ConstVal<'tcx>) -> EvalResult<'tcx, Value> {
pub(super) fn const_to_value(&mut self, const_val: &ConstVal<'tcx>, ty: Ty<'tcx>) -> EvalResult<'tcx, Value> {
use rustc::middle::const_val::ConstVal::*;
let primval = match *const_val {
@ -261,11 +260,10 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
Unevaluated(def_id, substs) => {
let instance = self.resolve(def_id, substs)?;
let cid = GlobalId {
return Ok(self.read_global_as_value(GlobalId {
instance,
promoted: None,
};
return Ok(Value::ByRef(self.tcx.interpret_interner.borrow().get_cached(cid).expect("static/const not cached")));
}, self.layout_of(ty)?));
}
Aggregate(..) |
@ -499,7 +497,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
}
pub fn deallocate_local(&mut self, local: Option<Value>) -> EvalResult<'tcx> {
if let Some(Value::ByRef(ptr)) = local {
if let Some(Value::ByRef(ptr, _align)) = local {
trace!("deallocating local");
let ptr = ptr.to_ptr()?;
self.memory.dump_alloc(ptr.alloc_id);
@ -613,12 +611,12 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
let elem_size = self.layout_of(elem_ty)?.size.bytes();
let value = self.eval_operand(operand)?.value;
let dest = Pointer::from(self.force_allocation(dest)?.to_ptr()?);
let (dest, dest_align) = self.force_allocation(dest)?.to_ptr_align();
// FIXME: speed up repeat filling
for i in 0..length {
let elem_dest = dest.offset(i * elem_size, &self)?;
self.write_value_to_ptr(value, elem_dest, elem_ty)?;
self.write_value_to_ptr(value, elem_dest, dest_align, elem_ty)?;
}
}
@ -638,12 +636,12 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
let src = self.eval_place(place)?;
// We ignore the alignment of the place here -- special handling for packed structs ends
// at the `&` operator.
let (ptr, extra) = self.force_allocation(src)?.to_ptr_extra_aligned();
let (ptr, _align, extra) = self.force_allocation(src)?.to_ptr_align_extra();
let val = match extra {
PlaceExtra::None => ptr.ptr.to_value(),
PlaceExtra::Length(len) => ptr.ptr.to_value_with_len(len),
PlaceExtra::Vtable(vtable) => ptr.ptr.to_value_with_vtable(vtable),
PlaceExtra::None => ptr.to_value(),
PlaceExtra::Length(len) => ptr.to_value_with_len(len),
PlaceExtra::Vtable(vtable) => ptr.to_value_with_vtable(vtable),
PlaceExtra::DowncastVariant(..) => {
bug!("attempted to take a reference to an enum downcast place")
}
@ -678,7 +676,9 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
match kind {
Unsize => {
let src = self.eval_operand(operand)?;
self.unsize_into(src.value, src.ty, dest, dest_ty)?;
let src_layout = self.layout_of(src.ty)?;
let dst_layout = self.layout_of(dest_ty)?;
self.unsize_into(src.value, src_layout, dest, dst_layout)?;
}
Misc => {
@ -831,14 +831,13 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
use rustc::mir::Literal;
let mir::Constant { ref literal, .. } = **constant;
let value = match *literal {
Literal::Value { ref value } => self.const_to_value(&value.val)?,
Literal::Value { ref value } => self.const_to_value(&value.val, ty)?,
Literal::Promoted { index } => {
let cid = GlobalId {
self.read_global_as_value(GlobalId {
instance: self.frame().instance,
promoted: Some(index),
};
Value::ByRef(self.tcx.interpret_interner.borrow().get_cached(cid).expect("promoted not cached"))
}, self.layout_of(ty)?)
}
};
@ -950,17 +949,9 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
Ok(())
}
pub fn read_global_as_value(&self, gid: GlobalId) -> Value {
Value::ByRef(self.tcx.interpret_interner.borrow().get_cached(gid).expect("global not cached"))
}
fn copy(&mut self, src: Pointer, dest: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx> {
let layout = self.layout_of(ty)?;
assert!(!layout.is_unsized(), "cannot copy from an unsized type");
let size = layout.size.bytes();
let align = layout.align.abi();
self.memory.copy(src, dest, size, align, false)?;
Ok(())
pub fn read_global_as_value(&self, gid: GlobalId, layout: TyLayout) -> Value {
Value::ByRef(self.tcx.interpret_interner.borrow().get_cached(gid).expect("global not cached"),
layout.align)
}
pub fn force_allocation(&mut self, place: Place) -> EvalResult<'tcx, Place> {
@ -969,20 +960,23 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
// -1 since we don't store the return value
match self.stack[frame].locals[local.index() - 1] {
None => return err!(DeadLocal),
Some(Value::ByRef(ptr)) => {
Some(Value::ByRef(ptr, align)) => {
Place::Ptr {
ptr,
align,
extra: PlaceExtra::None,
}
}
Some(val) => {
let ty = self.stack[frame].mir.local_decls[local].ty;
let ty = self.monomorphize(ty, self.stack[frame].instance.substs);
let layout = self.layout_of(ty)?;
let ptr = self.alloc_ptr(ty)?;
self.stack[frame].locals[local.index() - 1] =
Some(Value::by_ref(ptr.into())); // it stays live
self.write_value_to_ptr(val, ptr.into(), ty)?;
Place::from_ptr(ptr)
Some(Value::ByRef(ptr.into(), layout.align)); // it stays live
let place = Place::from_ptr(ptr, layout.align);
self.write_value(ValTy { value: val, ty }, place)?;
place
}
}
}
@ -998,8 +992,8 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
ty: Ty<'tcx>,
) -> EvalResult<'tcx, Value> {
match value {
Value::ByRef(PtrAndAlign { ptr, aligned }) => {
self.read_maybe_aligned(aligned, |ectx| ectx.read_value(ptr, ty))
Value::ByRef(ptr, align) => {
self.read_value(ptr, align, ty)
}
other => Ok(other),
}
@ -1054,15 +1048,9 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
// correct if we never look at this data with the wrong type.
match dest {
Place::Ptr {
ptr: PtrAndAlign { ptr, aligned },
extra,
} => {
Place::Ptr { ptr, align, extra } => {
assert_eq!(extra, PlaceExtra::None);
self.write_maybe_aligned_mut(
aligned,
|ectx| ectx.write_value_to_ptr(src_val, ptr, dest_ty),
)
self.write_value_to_ptr(src_val, ptr, align, dest_ty)
}
Place::Local { frame, local } => {
@ -1085,11 +1073,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
old_dest_val: Value,
dest_ty: Ty<'tcx>,
) -> EvalResult<'tcx> {
if let Value::ByRef(PtrAndAlign {
ptr: dest_ptr,
aligned,
}) = old_dest_val
{
if let Value::ByRef(dest_ptr, align) = old_dest_val {
// If the value is already `ByRef` (that is, backed by an `Allocation`),
// then we must write the new value into this allocation, because there may be
// other pointers into the allocation. These other pointers are logically
@ -1097,15 +1081,8 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
//
// Thus, it would be an error to replace the `ByRef` with a `ByVal`, unless we
// knew for certain that there were no outstanding pointers to this allocation.
self.write_maybe_aligned_mut(aligned, |ectx| {
ectx.write_value_to_ptr(src_val, dest_ptr, dest_ty)
})?;
} else if let Value::ByRef(PtrAndAlign {
ptr: src_ptr,
aligned,
}) = src_val
{
self.write_value_to_ptr(src_val, dest_ptr, align, dest_ty)?;
} else if let Value::ByRef(src_ptr, align) = src_val {
// If the value is not `ByRef`, then we know there are no pointers to it
// and we can simply overwrite the `Value` in the locals array directly.
//
@ -1117,17 +1094,14 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
// It is a valid optimization to attempt reading a primitive value out of the
// source and write that into the destination without making an allocation, so
// we do so here.
self.read_maybe_aligned_mut(aligned, |ectx| {
if let Ok(Some(src_val)) = ectx.try_read_value(src_ptr, dest_ty) {
write_dest(ectx, src_val)?;
} else {
let dest_ptr = ectx.alloc_ptr(dest_ty)?.into();
ectx.copy(src_ptr, dest_ptr, dest_ty)?;
write_dest(ectx, Value::by_ref(dest_ptr))?;
}
Ok(())
})?;
if let Ok(Some(src_val)) = self.try_read_value(src_ptr, align, dest_ty) {
write_dest(self, src_val)?;
} else {
let dest_ptr = self.alloc_ptr(dest_ty)?.into();
let layout = self.layout_of(dest_ty)?;
self.memory.copy(src_ptr, align.min(layout.align), dest_ptr, layout.align, layout.size.bytes(), false)?;
write_dest(self, Value::ByRef(dest_ptr, layout.align))?;
}
} else {
// Finally, we have the simple case where neither source nor destination is
// `ByRef`. We may simply copy the source value over the destination.
@ -1140,61 +1114,38 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
&mut self,
value: Value,
dest: Pointer,
dest_align: Align,
dest_ty: Ty<'tcx>,
) -> EvalResult<'tcx> {
trace!("write_value_to_ptr: {:#?}", value);
let layout = self.layout_of(dest_ty)?;
match value {
Value::ByRef(PtrAndAlign { ptr, aligned }) => {
self.read_maybe_aligned_mut(aligned, |ectx| ectx.copy(ptr, dest, dest_ty))
Value::ByRef(ptr, align) => {
self.memory.copy(ptr, align.min(layout.align), dest, dest_align.min(layout.align), layout.size.bytes(), false)
}
Value::ByVal(primval) => {
let layout = self.layout_of(dest_ty)?;
if layout.is_zst() {
assert!(primval.is_undef());
Ok(())
} else {
// TODO: Do we need signedness?
self.memory.write_maybe_aligned_mut(!layout.is_packed(), |mem| {
mem.write_primval(dest.to_ptr()?, primval, layout.size.bytes(), false)
})
match layout.abi {
layout::Abi::Scalar(_) => {}
_ if primval.is_undef() => {}
_ => bug!("write_value_to_ptr: invalid ByVal layout: {:#?}", layout)
}
// TODO: Do we need signedness?
self.memory.write_primval(dest.to_ptr()?, dest_align, primval, layout.size.bytes(), false)
}
Value::ByValPair(a, b) => {
Value::ByValPair(a_val, b_val) => {
let ptr = dest.to_ptr()?;
let mut layout = self.layout_of(dest_ty)?;
trace!("write_value_to_ptr valpair: {:#?}", layout);
let mut packed = layout.is_packed();
'outer: loop {
for i in 0..layout.fields.count() {
let field = layout.field(&self, i)?;
if layout.fields.offset(i).bytes() == 0 && layout.size == field.size {
layout = field;
packed |= layout.is_packed();
continue 'outer;
}
}
break;
}
trace!("write_value_to_ptr valpair: {:#?}", layout);
assert_eq!(layout.fields.count(), 2);
let field_0 = layout.field(&self, 0)?;
let field_1 = layout.field(&self, 1)?;
trace!("write_value_to_ptr field 0: {:#?}", field_0);
trace!("write_value_to_ptr field 1: {:#?}", field_1);
assert_eq!(
field_0.is_packed(),
field_1.is_packed(),
"the two fields must agree on being packed"
);
packed |= field_0.is_packed();
let field_0_ptr = ptr.offset(layout.fields.offset(0).bytes(), &self)?.into();
let field_1_ptr = ptr.offset(layout.fields.offset(1).bytes(), &self)?.into();
let (a, b) = match layout.abi {
layout::Abi::ScalarPair(ref a, ref b) => (&a.value, &b.value),
_ => bug!("write_value_to_ptr: invalid ByValPair layout: {:#?}", layout)
};
let (a_size, b_size) = (a.size(&self), b.size(&self));
let a_ptr = ptr;
let b_offset = a_size.abi_align(b.align(&self));
let b_ptr = ptr.offset(b_offset.bytes(), &self)?.into();
// TODO: What about signedness?
self.memory.write_maybe_aligned_mut(!packed, |mem| {
mem.write_primval(field_0_ptr, a, field_0.size.bytes(), false)?;
mem.write_primval(field_1_ptr, b, field_1.size.bytes(), false)
})?;
Ok(())
self.memory.write_primval(a_ptr, dest_align, a_val, a_size.bytes(), false)?;
self.memory.write_primval(b_ptr, dest_align, b_val, b_size.bytes(), false)
}
}
}
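
The offset of the second half of a `ByValPair` above is `a_size.abi_align(b.align)`: the first scalar's size rounded up to the second scalar's alignment. A sketch of that arithmetic with plain integers (assuming `b_align` is a power of two, as ABI alignments are):

```rust
/// Round `a_size` up to the next multiple of `b_align` -- what
/// `Size::abi_align` does in the `b_offset` computation above.
fn b_offset(a_size: u64, b_align: u64) -> u64 {
    (a_size + b_align - 1) / b_align * b_align
}

fn main() {
    // A (u8, u32)-shaped pair: the u32 half starts at offset 4.
    assert_eq!(b_offset(1, 4), 4);
    // A (u64, u32)-shaped pair: the u32 half starts right after the u64.
    assert_eq!(b_offset(8, 4), 8);
}
```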
@ -1277,8 +1228,8 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
}
}
pub fn read_value(&self, ptr: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx, Value> {
if let Some(val) = self.try_read_value(ptr, ty)? {
pub fn read_value(&self, ptr: Pointer, align: Align, ty: Ty<'tcx>) -> EvalResult<'tcx, Value> {
if let Some(val) = self.try_read_value(ptr, align, ty)? {
Ok(val)
} else {
bug!("primitive read failed for type: {:?}", ty);
@ -1288,10 +1239,11 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
pub(crate) fn read_ptr(
&self,
ptr: MemoryPointer,
ptr_align: Align,
pointee_ty: Ty<'tcx>,
) -> EvalResult<'tcx, Value> {
let ptr_size = self.memory.pointer_size();
let p : Pointer = self.memory.read_ptr_sized_unsigned(ptr)?.into();
let p: Pointer = self.memory.read_ptr_sized_unsigned(ptr, ptr_align)?.into();
if self.type_is_sized(pointee_ty) {
Ok(p.to_value())
} else {
@ -1299,23 +1251,23 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
let extra = ptr.offset(ptr_size, self)?;
match self.tcx.struct_tail(pointee_ty).sty {
ty::TyDynamic(..) => Ok(p.to_value_with_vtable(
self.memory.read_ptr_sized_unsigned(extra)?.to_ptr()?,
self.memory.read_ptr_sized_unsigned(extra, ptr_align)?.to_ptr()?,
)),
ty::TySlice(..) | ty::TyStr => Ok(
p.to_value_with_len(self.memory.read_ptr_sized_unsigned(extra)?.to_bytes()? as u64),
p.to_value_with_len(self.memory.read_ptr_sized_unsigned(extra, ptr_align)?.to_bytes()? as u64),
),
_ => bug!("unsized primval ptr read from {:?}", pointee_ty),
}
}
}
pub fn try_read_value(&self, ptr: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx, Option<Value>> {
pub fn try_read_value(&self, ptr: Pointer, ptr_align: Align, ty: Ty<'tcx>) -> EvalResult<'tcx, Option<Value>> {
use syntax::ast::FloatTy;
let ptr = ptr.to_ptr()?;
let val = match ty.sty {
ty::TyBool => {
let val = self.memory.read_primval(ptr, 1, false)?;
let val = self.memory.read_primval(ptr, ptr_align, 1, false)?;
let val = match val {
PrimVal::Bytes(0) => false,
PrimVal::Bytes(1) => true,
@ -1325,7 +1277,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
PrimVal::from_bool(val)
}
ty::TyChar => {
let c = self.memory.read_primval(ptr, 4, false)?.to_bytes()? as u32;
let c = self.memory.read_primval(ptr, ptr_align, 4, false)?.to_bytes()? as u32;
match ::std::char::from_u32(c) {
Some(ch) => PrimVal::from_char(ch),
None => return err!(InvalidChar(c as u128)),
@ -1342,7 +1294,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
I128 => 16,
Is => self.memory.pointer_size(),
};
self.memory.read_primval(ptr, size, true)?
self.memory.read_primval(ptr, ptr_align, size, true)?
}
ty::TyUint(uint_ty) => {
@ -1355,19 +1307,23 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
U128 => 16,
Us => self.memory.pointer_size(),
};
self.memory.read_primval(ptr, size, false)?
self.memory.read_primval(ptr, ptr_align, size, false)?
}
ty::TyFloat(FloatTy::F32) => PrimVal::Bytes(self.memory.read_primval(ptr, 4, false)?.to_bytes()?),
ty::TyFloat(FloatTy::F64) => PrimVal::Bytes(self.memory.read_primval(ptr, 8, false)?.to_bytes()?),
ty::TyFloat(FloatTy::F32) => {
PrimVal::Bytes(self.memory.read_primval(ptr, ptr_align, 4, false)?.to_bytes()?)
}
ty::TyFloat(FloatTy::F64) => {
PrimVal::Bytes(self.memory.read_primval(ptr, ptr_align, 8, false)?.to_bytes()?)
}
ty::TyFnPtr(_) => self.memory.read_ptr_sized_unsigned(ptr)?,
ty::TyFnPtr(_) => self.memory.read_ptr_sized_unsigned(ptr, ptr_align)?,
ty::TyRef(_, ref tam) |
ty::TyRawPtr(ref tam) => return self.read_ptr(ptr, tam.ty).map(Some),
ty::TyRawPtr(ref tam) => return self.read_ptr(ptr, ptr_align, tam.ty).map(Some),
ty::TyAdt(def, _) => {
if def.is_box() {
return self.read_ptr(ptr, ty.boxed_ty()).map(Some);
return self.read_ptr(ptr, ptr_align, ty.boxed_ty()).map(Some);
}
if let layout::Abi::Scalar(ref scalar) = self.layout_of(ty)?.abi {
@ -1376,7 +1332,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
signed = s;
}
let size = scalar.value.size(self).bytes();
self.memory.read_primval(ptr, size, signed)?
self.memory.read_primval(ptr, ptr_align, size, signed)?
} else {
return Ok(None);
}
@ -1462,92 +1418,62 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
fn unsize_into(
&mut self,
src: Value,
src_ty: Ty<'tcx>,
src_layout: TyLayout<'tcx>,
dst: Place,
dst_ty: Ty<'tcx>,
dst_layout: TyLayout<'tcx>,
) -> EvalResult<'tcx> {
let src_layout = self.layout_of(src_ty)?;
let dst_layout = self.layout_of(dst_ty)?;
match (&src_ty.sty, &dst_ty.sty) {
match (&src_layout.ty.sty, &dst_layout.ty.sty) {
(&ty::TyRef(_, ref s), &ty::TyRef(_, ref d)) |
(&ty::TyRef(_, ref s), &ty::TyRawPtr(ref d)) |
(&ty::TyRawPtr(ref s), &ty::TyRawPtr(ref d)) => {
self.unsize_into_ptr(src, src_ty, dst, dst_ty, s.ty, d.ty)
self.unsize_into_ptr(src, src_layout.ty, dst, dst_layout.ty, s.ty, d.ty)
}
(&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) => {
assert_eq!(def_a, def_b);
if def_a.is_box() || def_b.is_box() {
if !def_a.is_box() || !def_b.is_box() {
panic!("invalid unsizing between {:?} -> {:?}", src_ty, dst_ty);
bug!("invalid unsizing between {:?} -> {:?}", src_layout, dst_layout);
}
return self.unsize_into_ptr(
src,
src_ty,
src_layout.ty,
dst,
dst_ty,
src_ty.boxed_ty(),
dst_ty.boxed_ty(),
dst_layout.ty,
src_layout.ty.boxed_ty(),
dst_layout.ty.boxed_ty(),
);
}
if self.ty_to_primval_kind(src_ty).is_ok() {
// TODO: We ignore the packed flag here
let sty = src_layout.field(&self, 0)?.ty;
let dty = dst_layout.field(&self, 0)?.ty;
return self.unsize_into(src, sty, dst, dty);
}
// unsizing of generic struct with pointer fields
// Example: `Arc<T>` -> `Arc<Trait>`
// here we need to increase the size of every &T thin ptr field to a fat ptr
assert_eq!(def_a, def_b);
let src_ptr = match src {
Value::ByRef(PtrAndAlign { ptr, aligned: true }) => ptr,
// the entire struct is just a pointer
Value::ByVal(_) => {
for i in 0..src_layout.fields.count() {
let src_field = src_layout.field(&self, i)?;
let dst_field = dst_layout.field(&self, i)?;
if dst_layout.is_zst() {
continue;
}
assert_eq!(src_layout.fields.offset(i).bytes(), 0);
assert_eq!(dst_layout.fields.offset(i).bytes(), 0);
assert_eq!(src_field.size, src_layout.size);
assert_eq!(dst_field.size, dst_layout.size);
return self.unsize_into(
src,
src_field.ty,
dst,
dst_field.ty,
);
}
bug!("by val unsize into where the value doesn't cover the entire type")
}
// TODO: Is it possible for unaligned pointers to occur here?
_ => bug!("expected aligned pointer, got {:?}", src),
};
// FIXME(solson)
let dst = self.force_allocation(dst)?.to_ptr()?;
for i in 0..src_layout.fields.count() {
let src_field = src_layout.field(&self, i)?;
let dst_field = dst_layout.field(&self, i)?;
let (dst_f_place, dst_field) =
self.place_field(dst, mir::Field::new(i), dst_layout)?;
if dst_field.is_zst() {
continue;
}
let src_field_offset = src_layout.fields.offset(i).bytes();
let dst_field_offset = dst_layout.fields.offset(i).bytes();
let src_f_ptr = src_ptr.offset(src_field_offset, &self)?;
let dst_f_ptr = dst.offset(dst_field_offset, &self)?;
let (src_f_value, src_field) = match src {
Value::ByRef(ptr, align) => {
let src_place = Place::from_primval_ptr(ptr, align);
let (src_f_place, src_field) =
self.place_field(src_place, mir::Field::new(i), src_layout)?;
(self.read_place(src_f_place)?, src_field)
}
Value::ByVal(_) | Value::ByValPair(..) => {
let src_field = src_layout.field(&self, i)?;
assert_eq!(src_layout.fields.offset(i).bytes(), 0);
assert_eq!(src_field.size, src_layout.size);
(src, src_field)
}
};
if src_field.ty == dst_field.ty {
self.copy(src_f_ptr, dst_f_ptr.into(), src_field.ty)?;
self.write_value(ValTy {
value: src_f_value,
ty: src_field.ty,
}, dst_f_place)?;
} else {
self.unsize_into(
Value::by_ref(src_f_ptr),
src_field.ty,
Place::from_ptr(dst_f_ptr),
dst_field.ty,
)?;
self.unsize_into(src_f_value, src_field, dst_f_place, dst_field)?;
}
}
Ok(())
@ -1555,8 +1481,8 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
_ => {
bug!(
"unsize_into: invalid conversion: {:?} -> {:?}",
src_ty,
dst_ty
src_layout,
dst_layout
)
}
}
@ -1580,11 +1506,10 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
Err(err) => {
panic!("Failed to access local: {:?}", err);
}
Ok(Value::ByRef(PtrAndAlign { ptr, aligned })) => {
Ok(Value::ByRef(ptr, align)) => {
match ptr.into_inner_primval() {
PrimVal::Ptr(ptr) => {
write!(msg, " by {}ref:", if aligned { "" } else { "unaligned " })
.unwrap();
write!(msg, " by align({}) ref:", align.abi()).unwrap();
allocs.push(ptr.alloc_id);
}
ptr => write!(msg, " integral by ref: {:?}", ptr).unwrap(),
@ -1610,10 +1535,10 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
trace!("{}", msg);
self.memory.dump_allocs(allocs);
}
Place::Ptr { ptr: PtrAndAlign { ptr, aligned }, .. } => {
Place::Ptr { ptr, align, .. } => {
match ptr.into_inner_primval() {
PrimVal::Ptr(ptr) => {
trace!("by {}ref:", if aligned { "" } else { "unaligned " });
trace!("by align({}) ref:", align.abi());
self.memory.dump_alloc(ptr.alloc_id);
}
ptr => trace!(" integral by ref: {:?}", ptr),

View File

@ -1,13 +1,12 @@
use byteorder::{ReadBytesExt, WriteBytesExt, LittleEndian, BigEndian};
use std::collections::{btree_map, BTreeMap, HashMap, HashSet, VecDeque};
use std::{ptr, mem, io};
use std::cell::Cell;
use rustc::ty::{Instance, TyCtxt};
use rustc::ty::layout::{self, TargetDataLayout};
use rustc::ty::layout::{self, Align, TargetDataLayout};
use syntax::ast::Mutability;
use rustc::mir::interpret::{MemoryPointer, AllocId, Allocation, AccessKind, UndefMask, PtrAndAlign, Value, Pointer,
use rustc::mir::interpret::{MemoryPointer, AllocId, Allocation, AccessKind, UndefMask, Value, Pointer,
EvalResult, PrimVal, EvalErrorKind};
use super::{EvalContext, Machine};
@ -51,11 +50,6 @@ pub struct Memory<'a, 'tcx: 'a, M: Machine<'tcx>> {
/// Maximum number of virtual bytes that may be allocated.
memory_size: u64,
/// To avoid having to pass flags to every single memory access, we have some global state saying whether
/// alignment checking is currently enforced for read and/or write accesses.
reads_are_aligned: Cell<bool>,
writes_are_aligned: Cell<bool>,
/// The current stack frame. Used to check accesses against locks.
pub cur_frame: usize,
@ -72,8 +66,6 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
tcx,
memory_size: max_memory,
memory_usage: 0,
reads_are_aligned: Cell::new(true),
writes_are_aligned: Cell::new(true),
cur_frame: usize::max_value(),
}
}
@ -98,12 +90,9 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
pub fn allocate(
&mut self,
size: u64,
align: u64,
align: Align,
kind: Option<MemoryKind<M::MemoryKinds>>,
) -> EvalResult<'tcx, MemoryPointer> {
assert_ne!(align, 0);
assert!(align.is_power_of_two());
if self.memory_size - self.memory_usage < size {
return err!(OutOfMemory {
allocation_size: size,
@ -139,13 +128,11 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
&mut self,
ptr: MemoryPointer,
old_size: u64,
old_align: u64,
old_align: Align,
new_size: u64,
new_align: u64,
new_align: Align,
kind: MemoryKind<M::MemoryKinds>,
) -> EvalResult<'tcx, MemoryPointer> {
use std::cmp::min;
if ptr.offset != 0 {
return err!(ReallocateNonBasePtr);
}
@ -163,9 +150,10 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
let new_ptr = self.allocate(new_size, new_align, Some(kind))?;
self.copy(
ptr.into(),
old_align,
new_ptr.into(),
min(old_size, new_size),
min(old_align, new_align),
new_align,
old_size.min(new_size),
/*nonoverlapping*/
true,
)?;
@ -190,7 +178,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
pub fn deallocate(
&mut self,
ptr: MemoryPointer,
size_and_align: Option<(u64, u64)>,
size_and_align: Option<(u64, Align)>,
kind: MemoryKind<M::MemoryKinds>,
) -> EvalResult<'tcx> {
if ptr.offset != 0 {
@ -236,7 +224,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
}
if let Some((size, align)) = size_and_align {
if size != alloc.bytes.len() as u64 || align != alloc.align {
return err!(IncorrectAllocationInformation(size, alloc.bytes.len(), align, alloc.align));
return err!(IncorrectAllocationInformation(size, alloc.bytes.len(), align.abi(), alloc.align.abi()));
}
}
@ -255,7 +243,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
}
/// Check that the pointer is aligned AND non-NULL.
pub fn check_align(&self, ptr: Pointer, align: u64, access: Option<AccessKind>) -> EvalResult<'tcx> {
pub fn check_align(&self, ptr: Pointer, required_align: Align) -> EvalResult<'tcx> {
// Check non-NULL/Undef, extract offset
let (offset, alloc_align) = match ptr.into_inner_primval() {
PrimVal::Ptr(ptr) => {
@ -267,32 +255,24 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
if v == 0 {
return err!(InvalidNullPointerUsage);
}
(v, align) // the base address of the "integer allocation" is 0 and hence always aligned
// the base address of the "integer allocation" is 0 and hence always aligned
(v, required_align)
}
PrimVal::Undef => return err!(ReadUndefBytes),
};
// See if alignment checking is disabled
let enforce_alignment = match access {
Some(AccessKind::Read) => self.reads_are_aligned.get(),
Some(AccessKind::Write) => self.writes_are_aligned.get(),
None => true,
};
if !enforce_alignment {
return Ok(());
}
// Check alignment
if alloc_align < align {
if alloc_align.abi() < required_align.abi() {
return err!(AlignmentCheckFailed {
has: alloc_align,
required: align,
has: alloc_align.abi(),
required: required_align.abi(),
});
}
if offset % align == 0 {
if offset % required_align.abi() == 0 {
Ok(())
} else {
err!(AlignmentCheckFailed {
has: offset % align,
required: align,
has: offset % required_align.abi(),
required: required_align.abi(),
})
}
}
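
The reworked `check_align` boils down to two comparisons, now against a real `Align` rather than a raw `u64` plus per-access enable flags. A self-contained sketch (plain integers stand in for `Align`, a `String` for the error type):

```rust
/// The two checks above: the allocation must be at least as aligned as
/// required, and the offset into it must be a multiple of the required
/// alignment.
fn check_align(offset: u64, alloc_align: u64, required_align: u64) -> Result<(), String> {
    if alloc_align < required_align {
        return Err(format!(
            "alignment check failed: has {}, required {}",
            alloc_align, required_align
        ));
    }
    if offset % required_align == 0 {
        Ok(())
    } else {
        Err(format!(
            "alignment check failed: has {}, required {}",
            offset % required_align, required_align
        ))
    }
}

fn main() {
    assert!(check_align(8, 8, 4).is_ok());
    assert!(check_align(2, 8, 4).is_err()); // misaligned offset
    assert!(check_align(0, 1, 4).is_err()); // under-aligned allocation
}
```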
@ -437,7 +417,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
"{}({} bytes, alignment {}){}",
msg,
alloc.bytes.len(),
alloc.align,
alloc.align.abi(),
immutable
);
@ -482,10 +462,10 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
&self,
ptr: MemoryPointer,
size: u64,
align: u64,
align: Align,
) -> EvalResult<'tcx, &[u8]> {
// Zero-sized accesses can use dangling pointers, but they still have to be aligned and non-NULL
self.check_align(ptr.into(), align, Some(AccessKind::Read))?;
self.check_align(ptr.into(), align)?;
if size == 0 {
return Ok(&[]);
}
@ -502,10 +482,10 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
&mut self,
ptr: MemoryPointer,
size: u64,
align: u64,
align: Align,
) -> EvalResult<'tcx, &mut [u8]> {
// Zero-sized accesses can use dangling pointers, but they still have to be aligned and non-NULL
self.check_align(ptr.into(), align, Some(AccessKind::Write))?;
self.check_align(ptr.into(), align)?;
if size == 0 {
return Ok(&mut []);
}
@ -518,7 +498,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
Ok(&mut alloc.bytes[offset..offset + size as usize])
}
fn get_bytes(&self, ptr: MemoryPointer, size: u64, align: u64) -> EvalResult<'tcx, &[u8]> {
fn get_bytes(&self, ptr: MemoryPointer, size: u64, align: Align) -> EvalResult<'tcx, &[u8]> {
assert_ne!(size, 0);
if self.relocations(ptr, size)?.count() != 0 {
return err!(ReadPointerAsBytes);
@ -531,7 +511,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
&mut self,
ptr: MemoryPointer,
size: u64,
align: u64,
align: Align,
) -> EvalResult<'tcx, &mut [u8]> {
assert_ne!(size, 0);
self.clear_relocations(ptr, size)?;
@ -629,14 +609,15 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
pub fn copy(
&mut self,
src: Pointer,
src_align: Align,
dest: Pointer,
dest_align: Align,
size: u64,
align: u64,
nonoverlapping: bool,
) -> EvalResult<'tcx> {
// Empty accesses don't need to be valid pointers, but they should still be aligned
self.check_align(src, align, Some(AccessKind::Read))?;
self.check_align(dest, align, Some(AccessKind::Write))?;
self.check_align(src, src_align)?;
self.check_align(dest, dest_align)?;
if size == 0 {
return Ok(());
}
@ -655,8 +636,8 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
})
.collect();
let src_bytes = self.get_bytes_unchecked(src, size, align)?.as_ptr();
let dest_bytes = self.get_bytes_mut(dest, size, align)?.as_mut_ptr();
let src_bytes = self.get_bytes_unchecked(src, size, src_align)?.as_ptr();
let dest_bytes = self.get_bytes_mut(dest, size, dest_align)?.as_mut_ptr();
// SAFE: The above indexing would have panicked if there weren't at least `size` bytes
// behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
@ -705,41 +686,44 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
pub fn read_bytes(&self, ptr: Pointer, size: u64) -> EvalResult<'tcx, &[u8]> {
// Empty accesses don't need to be valid pointers, but they should still be non-NULL
self.check_align(ptr, 1, Some(AccessKind::Read))?;
let align = Align::from_bytes(1, 1).unwrap();
self.check_align(ptr, align)?;
if size == 0 {
return Ok(&[]);
}
self.get_bytes(ptr.to_ptr()?, size, 1)
self.get_bytes(ptr.to_ptr()?, size, align)
}
pub fn write_bytes(&mut self, ptr: Pointer, src: &[u8]) -> EvalResult<'tcx> {
// Empty accesses don't need to be valid pointers, but they should still be non-NULL
self.check_align(ptr, 1, Some(AccessKind::Write))?;
let align = Align::from_bytes(1, 1).unwrap();
self.check_align(ptr, align)?;
if src.is_empty() {
return Ok(());
}
let bytes = self.get_bytes_mut(ptr.to_ptr()?, src.len() as u64, 1)?;
let bytes = self.get_bytes_mut(ptr.to_ptr()?, src.len() as u64, align)?;
bytes.clone_from_slice(src);
Ok(())
}
pub fn write_repeat(&mut self, ptr: Pointer, val: u8, count: u64) -> EvalResult<'tcx> {
// Empty accesses don't need to be valid pointers, but they should still be non-NULL
self.check_align(ptr, 1, Some(AccessKind::Write))?;
let align = Align::from_bytes(1, 1).unwrap();
self.check_align(ptr, align)?;
if count == 0 {
return Ok(());
}
let bytes = self.get_bytes_mut(ptr.to_ptr()?, count, 1)?;
let bytes = self.get_bytes_mut(ptr.to_ptr()?, count, align)?;
for b in bytes {
*b = val;
}
Ok(())
}
pub fn read_primval(&self, ptr: MemoryPointer, size: u64, signed: bool) -> EvalResult<'tcx, PrimVal> {
pub fn read_primval(&self, ptr: MemoryPointer, ptr_align: Align, size: u64, signed: bool) -> EvalResult<'tcx, PrimVal> {
self.check_relocation_edges(ptr, size)?; // Make sure we don't read part of a pointer as a pointer
let endianess = self.endianess();
let bytes = self.get_bytes_unchecked(ptr, size, self.int_align(size))?;
let bytes = self.get_bytes_unchecked(ptr, size, ptr_align.min(self.int_align(size)))?;
// Undef check happens *after* we established that the alignment is correct.
// We must not return Ok() for unaligned pointers!
if self.check_defined(ptr, size).is_err() {
@ -767,11 +751,11 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
Ok(PrimVal::Bytes(bytes))
}
pub fn read_ptr_sized_unsigned(&self, ptr: MemoryPointer) -> EvalResult<'tcx, PrimVal> {
self.read_primval(ptr, self.pointer_size(), false)
pub fn read_ptr_sized_unsigned(&self, ptr: MemoryPointer, ptr_align: Align) -> EvalResult<'tcx, PrimVal> {
self.read_primval(ptr, ptr_align, self.pointer_size(), false)
}
pub fn write_primval(&mut self, ptr: MemoryPointer, val: PrimVal, size: u64, signed: bool) -> EvalResult<'tcx> {
pub fn write_primval(&mut self, ptr: MemoryPointer, ptr_align: Align, val: PrimVal, size: u64, signed: bool) -> EvalResult<'tcx> {
let endianess = self.endianess();
let bytes = match val {
@ -802,7 +786,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
{
let align = self.int_align(size);
let dst = self.get_bytes_mut(ptr, size, align)?;
let dst = self.get_bytes_mut(ptr, size, ptr_align.min(align))?;
if signed {
write_target_int(endianess, dst, bytes as i128).unwrap();
} else {
@ -824,22 +808,23 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
Ok(())
}
pub fn write_ptr_sized_unsigned(&mut self, ptr: MemoryPointer, val: PrimVal) -> EvalResult<'tcx> {
pub fn write_ptr_sized_unsigned(&mut self, ptr: MemoryPointer, ptr_align: Align, val: PrimVal) -> EvalResult<'tcx> {
let ptr_size = self.pointer_size();
self.write_primval(ptr, val, ptr_size, false)
self.write_primval(ptr, ptr_align, val, ptr_size, false)
}
fn int_align(&self, size: u64) -> u64 {
fn int_align(&self, size: u64) -> Align {
// We assume pointer-sized integers have the same alignment as pointers.
// We also assume signed and unsigned integers of the same size have the same alignment.
match size {
1 => self.tcx.data_layout.i8_align.abi(),
2 => self.tcx.data_layout.i16_align.abi(),
4 => self.tcx.data_layout.i32_align.abi(),
8 => self.tcx.data_layout.i64_align.abi(),
16 => self.tcx.data_layout.i128_align.abi(),
let ity = match size {
1 => layout::I8,
2 => layout::I16,
4 => layout::I32,
8 => layout::I64,
16 => layout::I128,
_ => bug!("bad integer size: {}", size),
}
};
ity.align(self)
}
}
@ -1004,43 +989,6 @@ pub trait HasMemory<'a, 'tcx: 'a, M: Machine<'tcx>> {
fn memory_mut(&mut self) -> &mut Memory<'a, 'tcx, M>;
fn memory(&self) -> &Memory<'a, 'tcx, M>;
// These are not supposed to be overridden.
fn read_maybe_aligned<F, T>(&self, aligned: bool, f: F) -> EvalResult<'tcx, T>
where
F: FnOnce(&Self) -> EvalResult<'tcx, T>,
{
let old = self.memory().reads_are_aligned.get();
// Do alignment checking if *all* nested calls say it has to be aligned.
self.memory().reads_are_aligned.set(old && aligned);
let t = f(self);
self.memory().reads_are_aligned.set(old);
t
}
fn read_maybe_aligned_mut<F, T>(&mut self, aligned: bool, f: F) -> EvalResult<'tcx, T>
where
F: FnOnce(&mut Self) -> EvalResult<'tcx, T>,
{
let old = self.memory().reads_are_aligned.get();
// Do alignment checking if *all* nested calls say it has to be aligned.
self.memory().reads_are_aligned.set(old && aligned);
let t = f(self);
self.memory().reads_are_aligned.set(old);
t
}
fn write_maybe_aligned_mut<F, T>(&mut self, aligned: bool, f: F) -> EvalResult<'tcx, T>
where
F: FnOnce(&mut Self) -> EvalResult<'tcx, T>,
{
let old = self.memory().writes_are_aligned.get();
// Do alignment checking if *all* nested calls say it has to be aligned.
self.memory().writes_are_aligned.set(old && aligned);
let t = f(self);
self.memory().writes_are_aligned.set(old);
t
}
/// Convert the value into a pointer (or a pointer-sized integer). If the value is a ByRef,
/// this may have to perform a load.
fn into_ptr(
@ -1048,8 +996,8 @@ pub trait HasMemory<'a, 'tcx: 'a, M: Machine<'tcx>> {
value: Value,
) -> EvalResult<'tcx, Pointer> {
Ok(match value {
Value::ByRef(PtrAndAlign { ptr, aligned }) => {
self.memory().read_maybe_aligned(aligned, |mem| mem.read_ptr_sized_unsigned(ptr.to_ptr()?))?
Value::ByRef(ptr, align) => {
self.memory().read_ptr_sized_unsigned(ptr.to_ptr()?, align)?
}
Value::ByVal(ptr) |
Value::ByValPair(ptr, _) => ptr,
@ -1061,17 +1009,14 @@ pub trait HasMemory<'a, 'tcx: 'a, M: Machine<'tcx>> {
value: Value,
) -> EvalResult<'tcx, (Pointer, MemoryPointer)> {
match value {
Value::ByRef(PtrAndAlign {
ptr: ref_ptr,
aligned,
}) => {
self.memory().read_maybe_aligned(aligned, |mem| {
let ptr = mem.read_ptr_sized_unsigned(ref_ptr.to_ptr()?)?.into();
let vtable = mem.read_ptr_sized_unsigned(
ref_ptr.offset(mem.pointer_size(), &mem.tcx.data_layout)?.to_ptr()?,
)?.to_ptr()?;
Ok((ptr, vtable))
})
Value::ByRef(ref_ptr, align) => {
let mem = self.memory();
let ptr = mem.read_ptr_sized_unsigned(ref_ptr.to_ptr()?, align)?.into();
let vtable = mem.read_ptr_sized_unsigned(
ref_ptr.offset(mem.pointer_size(), &mem.tcx.data_layout)?.to_ptr()?,
align
)?.to_ptr()?;
Ok((ptr, vtable))
}
Value::ByValPair(ptr, vtable) => Ok((ptr.into(), vtable.to_ptr()?)),
@ -1086,17 +1031,14 @@ pub trait HasMemory<'a, 'tcx: 'a, M: Machine<'tcx>> {
value: Value,
) -> EvalResult<'tcx, (Pointer, u64)> {
match value {
Value::ByRef(PtrAndAlign {
ptr: ref_ptr,
aligned,
}) => {
self.memory().read_maybe_aligned(aligned, |mem| {
let ptr = mem.read_ptr_sized_unsigned(ref_ptr.to_ptr()?)?.into();
let len = mem.read_ptr_sized_unsigned(
ref_ptr.offset(mem.pointer_size(), &mem.tcx.data_layout)?.to_ptr()?,
)?.to_bytes()? as u64;
Ok((ptr, len))
})
Value::ByRef(ref_ptr, align) => {
let mem = self.memory();
let ptr = mem.read_ptr_sized_unsigned(ref_ptr.to_ptr()?, align)?.into();
let len = mem.read_ptr_sized_unsigned(
ref_ptr.offset(mem.pointer_size(), &mem.tcx.data_layout)?.to_ptr()?,
align
)?.to_bytes()? as u64;
Ok((ptr, len))
}
Value::ByValPair(ptr, val) => {
let len = val.to_u128()?;

View File

@ -1,10 +1,9 @@
use rustc::mir;
use rustc::ty::{self, Ty};
use rustc::ty::layout::{LayoutOf, TyLayout};
use rustc::ty::layout::{self, Align, LayoutOf, TyLayout};
use rustc_data_structures::indexed_vec::Idx;
use rustc::mir::interpret::{GlobalId, PtrAndAlign};
use rustc::mir::interpret::{Value, PrimVal, EvalResult, Pointer, MemoryPointer};
use rustc::mir::interpret::{GlobalId, Value, PrimVal, EvalResult, Pointer, MemoryPointer};
use super::{EvalContext, Machine, ValTy};
use interpret::memory::HasMemory;
@ -15,7 +14,8 @@ pub enum Place {
/// A place may have an invalid (integral or undef) pointer,
/// since it might be turned back into a reference
/// before ever being dereferenced.
ptr: PtrAndAlign,
ptr: Pointer,
align: Align,
extra: PlaceExtra,
},
@ -35,34 +35,38 @@ pub enum PlaceExtra {
impl<'tcx> Place {
/// Produces a Place that will error if attempted to be read from
pub fn undef() -> Self {
Self::from_primval_ptr(PrimVal::Undef.into())
Self::from_primval_ptr(PrimVal::Undef.into(), Align::from_bytes(1, 1).unwrap())
}
pub fn from_primval_ptr(ptr: Pointer) -> Self {
pub fn from_primval_ptr(ptr: Pointer, align: Align) -> Self {
Place::Ptr {
ptr: PtrAndAlign { ptr, aligned: true },
ptr,
align,
extra: PlaceExtra::None,
}
}
pub fn from_ptr(ptr: MemoryPointer) -> Self {
Self::from_primval_ptr(ptr.into())
pub fn from_ptr(ptr: MemoryPointer, align: Align) -> Self {
Self::from_primval_ptr(ptr.into(), align)
}
pub fn to_ptr_extra_aligned(self) -> (PtrAndAlign, PlaceExtra) {
pub fn to_ptr_align_extra(self) -> (Pointer, Align, PlaceExtra) {
match self {
Place::Ptr { ptr, extra } => (ptr, extra),
Place::Ptr { ptr, align, extra } => (ptr, align, extra),
_ => bug!("to_ptr_and_extra: expected Place::Ptr, got {:?}", self),
}
}
pub fn to_ptr_align(self) -> (Pointer, Align) {
let (ptr, align, _extra) = self.to_ptr_align_extra();
(ptr, align)
}
pub fn to_ptr(self) -> EvalResult<'tcx, MemoryPointer> {
let (ptr, extra) = self.to_ptr_extra_aligned();
// At this point, we forget about the alignment information -- the place has been turned into a reference,
// and no matter where it came from, it now must be aligned.
assert_eq!(extra, PlaceExtra::None);
ptr.to_ptr()
self.to_ptr_align().0.to_ptr()
}
pub(super) fn elem_ty_and_len(self, ty: Ty<'tcx>) -> (Ty<'tcx>, u64) {
@ -102,13 +106,10 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
// Directly reading a static will always succeed
Static(ref static_) => {
let instance = ty::Instance::mono(self.tcx, static_.def_id);
let cid = GlobalId {
Ok(Some(self.read_global_as_value(GlobalId {
instance,
promoted: None,
};
Ok(Some(Value::ByRef(
self.tcx.interpret_interner.borrow().get_cached(cid).expect("global not cached"),
)))
}, self.layout_of(self.place_ty(place))?)))
}
Projection(ref proj) => self.try_read_place_projection(proj),
}
@ -169,9 +170,9 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
pub fn read_place(&self, place: Place) -> EvalResult<'tcx, Value> {
match place {
Place::Ptr { ptr, extra } => {
Place::Ptr { ptr, align, extra } => {
assert_eq!(extra, PlaceExtra::None);
Ok(Value::ByRef(ptr))
Ok(Value::ByRef(ptr, align))
}
Place::Local { frame, local } => self.stack[frame].get_local(local),
}
@ -192,8 +193,10 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
instance,
promoted: None,
};
let layout = self.layout_of(self.place_ty(mir_place))?;
Place::Ptr {
ptr: self.tcx.interpret_interner.borrow().get_cached(gid).expect("uncached global"),
align: layout.align,
extra: PlaceExtra::None,
}
}
@ -229,18 +232,18 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
let offset = base_layout.fields.offset(field_index);
// Do not allocate in trivial cases
let (base_ptr, base_extra) = match base {
Place::Ptr { ptr, extra } => (ptr, extra),
let (base_ptr, base_align, base_extra) = match base {
Place::Ptr { ptr, align, extra } => (ptr, align, extra),
Place::Local { frame, local } => {
match self.stack[frame].get_local(local)? {
match (&self.stack[frame].get_local(local)?, &base_layout.abi) {
// in case the field covers the entire type, just return the value
Value::ByVal(_) if offset.bytes() == 0 &&
field.size == base_layout.size => {
(&Value::ByVal(_), &layout::Abi::Scalar(_)) |
(&Value::ByValPair(..), &layout::Abi::ScalarPair(..))
if offset.bytes() == 0 && field.size == base_layout.size =>
{
return Ok((base, field));
}
Value::ByRef { .. } |
Value::ByValPair(..) |
Value::ByVal(_) => self.force_allocation(base)?.to_ptr_extra_aligned(),
_ => self.force_allocation(base)?.to_ptr_align_extra(),
}
}
};
@ -249,18 +252,15 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
PlaceExtra::Vtable(tab) => {
let (_, align) = self.size_and_align_of_dst(
base_layout.ty,
base_ptr.ptr.to_value_with_vtable(tab),
base_ptr.to_value_with_vtable(tab),
)?;
offset.abi_align(align).bytes()
}
_ => offset.bytes(),
};
let mut ptr = base_ptr.offset(offset, &self)?;
// if we were unaligned, stay unaligned
// no matter what we were, if we are packed, we must not be aligned anymore
ptr.aligned &= !base_layout.is_packed();
let ptr = base_ptr.offset(offset, &self)?;
let align = base_align.min(base_layout.align).min(field.align);
let extra = if !field.is_unsized() {
PlaceExtra::None
} else {
@ -275,26 +275,29 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
base_extra
};
Ok((Place::Ptr { ptr, extra }, field))
Ok((Place::Ptr { ptr, align, extra }, field))
}
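// A minimal standalone sketch of the alignment rule used in `place_field` above,
// with plain `u64` values standing in for rustc's `Align` (the helper name is
// hypothetical, for illustration only): the alignment known for a field is the
// minimum of the alignment already known for the base place, the aggregate's own
// alignment, and the field's natural alignment. A #[repr(packed)] aggregate has
// alignment 1, which automatically demotes every field -- this is what replaces
// the old `ptr.aligned &= !base_layout.is_packed()` flag twiddling.
fn known_field_align(base_align: u64, aggregate_align: u64, field_align: u64) -> u64 {
    base_align.min(aggregate_align).min(field_align)
}

#[test]
fn packed_fields_lose_alignment() {
    // a u32 field (align 4) of a #[repr(packed)] struct (align 1):
    assert_eq!(known_field_align(8, 1, 4), 1);
    // the same field in an ordinarily aligned struct keeps its natural alignment:
    assert_eq!(known_field_align(8, 4, 4), 4);
}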
pub fn val_to_place(&self, val: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, Place> {
let layout = self.layout_of(ty)?;
Ok(match self.tcx.struct_tail(ty).sty {
ty::TyDynamic(..) => {
let (ptr, vtable) = self.into_ptr_vtable_pair(val)?;
Place::Ptr {
ptr: PtrAndAlign { ptr, aligned: true },
ptr,
align: layout.align,
extra: PlaceExtra::Vtable(vtable),
}
}
ty::TyStr | ty::TySlice(_) => {
let (ptr, len) = self.into_slice(val)?;
Place::Ptr {
ptr: PtrAndAlign { ptr, aligned: true },
ptr,
align: layout.align,
extra: PlaceExtra::Length(len),
}
}
_ => Place::from_primval_ptr(self.into_ptr(val)?),
_ => Place::from_primval_ptr(self.into_ptr(val)?, layout.align),
})
}
@ -306,7 +309,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
) -> EvalResult<'tcx, Place> {
// Taking the outer type here may seem odd; it's needed because for array types, the outer type gives away the length.
let base = self.force_allocation(base)?;
let (base_ptr, _) = base.to_ptr_extra_aligned();
let (base_ptr, align) = base.to_ptr_align();
let (elem_ty, len) = base.elem_ty_and_len(outer_ty);
let elem_size = self.layout_of(elem_ty)?.size.bytes();
@ -319,6 +322,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
let ptr = base_ptr.offset(n * elem_size, &*self)?;
Ok(Place::Ptr {
ptr,
align,
extra: PlaceExtra::None,
})
}
@ -330,9 +334,9 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
) -> EvalResult<'tcx, Place> {
// FIXME(solson)
let base = self.force_allocation(base)?;
let (ptr, _) = base.to_ptr_extra_aligned();
let (ptr, align) = base.to_ptr_align();
let extra = PlaceExtra::DowncastVariant(variant);
Ok(Place::Ptr { ptr, extra })
Ok(Place::Ptr { ptr, align, extra })
}
pub fn eval_place_projection(
@ -342,14 +346,14 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
proj_elem: &mir::ProjectionElem<'tcx, mir::Local, Ty<'tcx>>,
) -> EvalResult<'tcx, Place> {
use rustc::mir::ProjectionElem::*;
let (ptr, extra) = match *proj_elem {
match *proj_elem {
Field(field, _) => {
let layout = self.layout_of(base_ty)?;
return Ok(self.place_field(base, field, layout)?.0);
Ok(self.place_field(base, field, layout)?.0)
}
Downcast(_, variant) => {
return self.place_downcast(base, variant);
self.place_downcast(base, variant)
}
Deref => {
@ -364,14 +368,14 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
trace!("deref to {} on {:?}", pointee_type, val);
return self.val_to_place(val, pointee_type);
self.val_to_place(val, pointee_type)
}
Index(local) => {
let value = self.frame().get_local(local)?;
let ty = self.tcx.types.usize;
let n = self.value_to_primval(ValTy { value, ty })?.to_u64()?;
return self.place_index(base, base_ty, n);
self.place_index(base, base_ty, n)
}
ConstantIndex {
@ -381,7 +385,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
} => {
// FIXME(solson)
let base = self.force_allocation(base)?;
let (base_ptr, _) = base.to_ptr_extra_aligned();
let (base_ptr, align) = base.to_ptr_align();
let (elem_ty, n) = base.elem_ty_and_len(base_ty);
let elem_size = self.layout_of(elem_ty)?.size.bytes();
@ -394,13 +398,13 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
};
let ptr = base_ptr.offset(index * elem_size, &self)?;
(ptr, PlaceExtra::None)
Ok(Place::Ptr { ptr, align, extra: PlaceExtra::None })
}
Subslice { from, to } => {
// FIXME(solson)
let base = self.force_allocation(base)?;
let (base_ptr, _) = base.to_ptr_extra_aligned();
let (base_ptr, align) = base.to_ptr_align();
let (elem_ty, n) = base.elem_ty_and_len(base_ty);
let elem_size = self.layout_of(elem_ty)?.size.bytes();
@ -412,11 +416,9 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
} else {
PlaceExtra::Length(n - u64::from(to) - u64::from(from))
};
(ptr, extra)
Ok(Place::Ptr { ptr, align, extra })
}
};
Ok(Place::Ptr { ptr, extra })
}
}
pub fn place_ty(&self, place: &mir::Place<'tcx>) -> Ty<'tcx> {

View File

@ -8,7 +8,7 @@ use rustc::mir;
use rustc::ty::{self, Instance};
use rustc::ty::layout::LayoutOf;
use rustc::middle::const_val::ConstVal;
use rustc::mir::interpret::{PtrAndAlign, GlobalId};
use rustc::mir::interpret::GlobalId;
use rustc::mir::interpret::{EvalResult, EvalErrorKind};
use super::{EvalContext, StackPopCleanup, Place, Machine};
@ -179,16 +179,10 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
assert!(!layout.is_unsized());
let ptr = self.memory.allocate(
layout.size.bytes(),
layout.align.abi(),
layout.align,
None,
)?;
self.tcx.interpret_interner.borrow_mut().cache(
cid,
PtrAndAlign {
ptr: ptr.into(),
aligned: !layout.is_packed(),
},
);
self.tcx.interpret_interner.borrow_mut().cache(cid, ptr.into());
let internally_mutable = !layout.ty.is_freeze(self.tcx, self.param_env, span);
let mutability = if mutability == Mutability::Mutable || internally_mutable {
Mutability::Mutable
@ -203,7 +197,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
instance,
span,
mir,
Place::from_ptr(ptr),
Place::from_ptr(ptr, layout.align),
cleanup,
)?;
Ok(true)
@ -270,22 +264,16 @@ impl<'a, 'b, 'tcx, M: Machine<'tcx>> Visitor<'tcx> for ConstantExtractor<'a, 'b,
assert!(!layout.is_unsized());
let ptr = this.ecx.memory.allocate(
layout.size.bytes(),
layout.align.abi(),
layout.align,
None,
)?;
this.ecx.tcx.interpret_interner.borrow_mut().cache(
cid,
PtrAndAlign {
ptr: ptr.into(),
aligned: !layout.is_packed(),
},
);
this.ecx.tcx.interpret_interner.borrow_mut().cache(cid, ptr.into());
trace!("pushing stack frame for {:?}", index);
this.ecx.push_stack_frame(
this.instance,
constant.span,
mir,
Place::from_ptr(ptr),
Place::from_ptr(ptr, layout.align),
StackPopCleanup::MarkStatic(Mutability::Immutable),
)?;
Ok(true)

View File

@ -21,16 +21,19 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
let val = match self.force_allocation(place)? {
Place::Ptr {
ptr,
align: _,
extra: PlaceExtra::Vtable(vtable),
} => ptr.ptr.to_value_with_vtable(vtable),
} => ptr.to_value_with_vtable(vtable),
Place::Ptr {
ptr,
align: _,
extra: PlaceExtra::Length(len),
} => ptr.ptr.to_value_with_len(len),
} => ptr.to_value_with_len(len),
Place::Ptr {
ptr,
align: _,
extra: PlaceExtra::None,
} => ptr.ptr.to_value(),
} => ptr.to_value(),
_ => bug!("force_allocation broken"),
};
self.drop(val, instance, ty, span, target)

View File

@ -4,7 +4,7 @@ use rustc::ty::layout::LayoutOf;
use syntax::codemap::Span;
use syntax::abi::Abi;
use rustc::mir::interpret::{PtrAndAlign, EvalResult, PrimVal, Value};
use rustc::mir::interpret::{EvalResult, PrimVal, Value};
use super::{EvalContext, eval_context,
Place, Machine, ValTy};
@ -327,15 +327,12 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
if let ty::TyTuple(..) = args[1].ty.sty {
if self.frame().mir.args_iter().count() == layout.fields.count() + 1 {
match args[1].value {
Value::ByRef(PtrAndAlign { ptr, aligned }) => {
assert!(
aligned,
"Unaligned ByRef-values cannot occur as function arguments"
);
Value::ByRef(ptr, align) => {
for (i, arg_local) in arg_locals.enumerate() {
let field = layout.field(&self, i)?;
let offset = layout.fields.offset(i).bytes();
let arg = Value::by_ref(ptr.offset(offset, &self)?);
let arg = Value::ByRef(ptr.offset(offset, &self)?,
align.min(field.align));
let dest =
self.eval_place(&mir::Place::Local(arg_local))?;
trace!(
@ -403,9 +400,11 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
// cannot use the shim here, because that will only result in infinite recursion
ty::InstanceDef::Virtual(_, idx) => {
let ptr_size = self.memory.pointer_size();
let ptr_align = self.tcx.data_layout.pointer_align;
let (ptr, vtable) = self.into_ptr_vtable_pair(args[0].value)?;
let fn_ptr = self.memory.read_ptr_sized_unsigned(
vtable.offset(ptr_size * (idx as u64 + 3), &self)?
vtable.offset(ptr_size * (idx as u64 + 3), &self)?,
ptr_align
)?.to_ptr()?;
let instance = self.memory.get_fn(fn_ptr)?;
let mut args = args.to_vec();

View File

@ -26,28 +26,29 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
let align = layout.align.abi();
let ptr_size = self.memory.pointer_size();
let ptr_align = self.tcx.data_layout.pointer_align;
let methods = self.tcx.vtable_methods(trait_ref);
let vtable = self.memory.allocate(
ptr_size * (3 + methods.len() as u64),
ptr_size,
ptr_align,
None,
)?;
let drop = eval_context::resolve_drop_in_place(self.tcx, ty);
let drop = self.memory.create_fn_alloc(drop);
self.memory.write_ptr_sized_unsigned(vtable, PrimVal::Ptr(drop))?;
self.memory.write_ptr_sized_unsigned(vtable, ptr_align, PrimVal::Ptr(drop))?;
let size_ptr = vtable.offset(ptr_size, &self)?;
self.memory.write_ptr_sized_unsigned(size_ptr, PrimVal::Bytes(size as u128))?;
self.memory.write_ptr_sized_unsigned(size_ptr, ptr_align, PrimVal::Bytes(size as u128))?;
let align_ptr = vtable.offset(ptr_size * 2, &self)?;
self.memory.write_ptr_sized_unsigned(align_ptr, PrimVal::Bytes(align as u128))?;
self.memory.write_ptr_sized_unsigned(align_ptr, ptr_align, PrimVal::Bytes(align as u128))?;
for (i, method) in methods.iter().enumerate() {
if let Some((def_id, substs)) = *method {
let instance = self.resolve(def_id, substs)?;
let fn_ptr = self.memory.create_fn_alloc(instance);
let method_ptr = vtable.offset(ptr_size * (3 + i as u64), &self)?;
self.memory.write_ptr_sized_unsigned(method_ptr, PrimVal::Ptr(fn_ptr))?;
self.memory.write_ptr_sized_unsigned(method_ptr, ptr_align, PrimVal::Ptr(fn_ptr))?;
}
}
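// A sketch of the vtable layout constructed above, expressed as byte offsets of
// pointer-sized slots (the helper is illustrative, not part of the patch):
// slot 0 holds the `drop_in_place` fn pointer, slot 1 the size, slot 2 the
// alignment, and slots 3.. the trait methods -- matching the
// `ptr_size * (idx as u64 + 3)` read used for virtual calls.
fn vtable_method_offset(ptr_size: u64, method_index: u64) -> u64 {
    ptr_size * (3 + method_index)
}
// e.g. with 8-byte pointers, the first trait method lives at byte offset 24.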
@ -64,7 +65,8 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
vtable: MemoryPointer,
) -> EvalResult<'tcx, Option<ty::Instance<'tcx>>> {
// we don't care about the pointee type; we just want a pointer
match self.read_ptr(vtable, self.tcx.mk_nil_ptr())? {
let pointer_align = self.tcx.data_layout.pointer_align;
match self.read_ptr(vtable, pointer_align, self.tcx.mk_nil_ptr())? {
// some values don't need to call a drop impl, so the value is null
Value::ByVal(PrimVal::Bytes(0)) => Ok(None),
Value::ByVal(PrimVal::Ptr(drop_fn)) => self.memory.get_fn(drop_fn).map(Some),
@ -77,9 +79,11 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
vtable: MemoryPointer,
) -> EvalResult<'tcx, (Size, Align)> {
let pointer_size = self.memory.pointer_size();
let size = self.memory.read_ptr_sized_unsigned(vtable.offset(pointer_size, self)?)?.to_bytes()? as u64;
let pointer_align = self.tcx.data_layout.pointer_align;
let size = self.memory.read_ptr_sized_unsigned(vtable.offset(pointer_size, self)?, pointer_align)?.to_bytes()? as u64;
let align = self.memory.read_ptr_sized_unsigned(
vtable.offset(pointer_size * 2, self)?
vtable.offset(pointer_size * 2, self)?,
pointer_align
)?.to_bytes()? as u64;
Ok((Size::from_bytes(size), Align::from_bytes(align, align).unwrap()))
}

View File

@ -30,7 +30,7 @@ use cabi_sparc64;
use cabi_nvptx;
use cabi_nvptx64;
use cabi_hexagon;
use mir::place::{Alignment, PlaceRef};
use mir::place::PlaceRef;
use mir::operand::OperandValue;
use type_::Type;
use type_of::{LayoutLlvmExt, PointerKind};
@ -561,14 +561,14 @@ impl<'a, 'tcx> ArgType<'tcx> {
}
let ccx = bcx.ccx;
if self.is_indirect() {
OperandValue::Ref(val, Alignment::AbiAligned).store(bcx, dst)
OperandValue::Ref(val, self.layout.align).store(bcx, dst)
} else if let PassMode::Cast(cast) = self.mode {
// FIXME(eddyb): Figure out when the simpler Store is safe, clang
// uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
let can_store_through_cast_ptr = false;
if can_store_through_cast_ptr {
let cast_dst = bcx.pointercast(dst.llval, cast.llvm_type(ccx).ptr_to());
bcx.store(val, cast_dst, Some(self.layout.align));
bcx.store(val, cast_dst, self.layout.align);
} else {
// The actual return type is a struct, but the ABI
// adaptation code has cast it into some scalar type. The
@ -585,19 +585,20 @@ impl<'a, 'tcx> ArgType<'tcx> {
// bitcasting to the struct type yields invalid cast errors.
// We instead thus allocate some scratch space...
let llscratch = bcx.alloca(cast.llvm_type(ccx), "abi_cast", cast.align(ccx));
let scratch_size = cast.size(ccx);
let scratch_align = cast.align(ccx);
let llscratch = bcx.alloca(cast.llvm_type(ccx), "abi_cast", scratch_align);
bcx.lifetime_start(llscratch, scratch_size);
// ...where we first store the value...
bcx.store(val, llscratch, None);
bcx.store(val, llscratch, scratch_align);
// ...and then memcpy it to the intended destination.
base::call_memcpy(bcx,
bcx.pointercast(dst.llval, Type::i8p(ccx)),
bcx.pointercast(llscratch, Type::i8p(ccx)),
C_usize(ccx, self.layout.size.bytes()),
self.layout.align.min(cast.align(ccx)));
self.layout.align.min(scratch_align));
bcx.lifetime_end(llscratch, scratch_size);
}

View File

@ -316,7 +316,7 @@ pub fn coerce_unsized_into<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
if src_f.layout.ty == dst_f.layout.ty {
memcpy_ty(bcx, dst_f.llval, src_f.llval, src_f.layout,
(src_f.alignment | dst_f.alignment).non_abi());
src_f.align.min(dst_f.align));
} else {
coerce_unsized_into(bcx, src_f, dst_f);
}
@ -430,14 +430,13 @@ pub fn memcpy_ty<'a, 'tcx>(
dst: ValueRef,
src: ValueRef,
layout: TyLayout<'tcx>,
align: Option<Align>,
align: Align,
) {
let size = layout.size.bytes();
if size == 0 {
return;
}
let align = align.unwrap_or(layout.align);
call_memcpy(bcx, dst, src, C_usize(bcx.ccx, size), align);
}

View File

@ -518,13 +518,11 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
}
}
pub fn load(&self, ptr: ValueRef, align: Option<Align>) -> ValueRef {
pub fn load(&self, ptr: ValueRef, align: Align) -> ValueRef {
self.count_insn("load");
unsafe {
let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname());
if let Some(align) = align {
llvm::LLVMSetAlignment(load, align.abi() as c_uint);
}
llvm::LLVMSetAlignment(load, align.abi() as c_uint);
load
}
}
@ -573,16 +571,14 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
}
}
pub fn store(&self, val: ValueRef, ptr: ValueRef, align: Option<Align>) -> ValueRef {
pub fn store(&self, val: ValueRef, ptr: ValueRef, align: Align) -> ValueRef {
debug!("Store {:?} -> {:?}", Value(val), Value(ptr));
assert!(!self.llbuilder.is_null());
self.count_insn("store");
let ptr = self.check_store(val, ptr);
unsafe {
let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr);
if let Some(align) = align {
llvm::LLVMSetAlignment(store, align.abi() as c_uint);
}
llvm::LLVMSetAlignment(store, align.abi() as c_uint);
store
}
}

View File

@ -69,7 +69,7 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf
// Recurse to get the size of the dynamically sized field (must be
// the last field).
let field_ty = layout.field(ccx, i).ty;
let (unsized_size, unsized_align) = size_and_align_of_dst(bcx, field_ty, info);
let (unsized_size, mut unsized_align) = size_and_align_of_dst(bcx, field_ty, info);
// FIXME (#26403, #27023): We should be adding padding
// to `sized_size` (to accommodate the `unsized_align`
@ -81,6 +81,13 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf
// Return the sum of sizes and max of aligns.
let size = bcx.add(sized_size, unsized_size);
// Packed types ignore the alignment of their fields.
if let ty::TyAdt(def, _) = t.sty {
if def.repr.packed() {
unsized_align = sized_align;
}
}
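// A standalone sketch of the packed rule above, with plain `u64` in place of LLVM
// values (names are illustrative): for a #[repr(packed)] type the dynamically
// sized tail contributes its size but not its alignment, so the unsized alignment
// collapses to the (packed) sized alignment before the two are combined with max.
fn dst_align(sized_align: u64, unsized_align: u64, packed: bool) -> u64 {
    let unsized_align = if packed { sized_align } else { unsized_align };
    sized_align.max(unsized_align)
}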
// Choose max of two known alignments (combined value must
// be aligned according to more restrictive of the two).
let align = match (const_to_opt_u128(sized_align, false),

View File

@ -14,7 +14,7 @@ use intrinsics::{self, Intrinsic};
use llvm;
use llvm::{ValueRef};
use abi::{Abi, FnType, PassMode};
use mir::place::{PlaceRef, Alignment};
use mir::place::PlaceRef;
use mir::operand::{OperandRef, OperandValue};
use base::*;
use common::*;
@ -106,7 +106,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
let name = &*tcx.item_name(def_id);
let llret_ty = ccx.layout_of(ret_ty).llvm_type(ccx);
let result = PlaceRef::new_sized(llresult, fn_ty.ret.layout, Alignment::AbiAligned);
let result = PlaceRef::new_sized(llresult, fn_ty.ret.layout, fn_ty.ret.layout.align);
let simple = get_simple_intrinsic(ccx, name);
let llval = match name {
@ -254,7 +254,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
bcx.volatile_store(b, dst.project_field(bcx, 1).llval);
} else {
let val = if let OperandValue::Ref(ptr, align) = args[1].val {
bcx.load(ptr, align.non_abi())
bcx.load(ptr, align)
} else {
if dst.layout.is_zst() {
return;
@ -330,9 +330,9 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
let overflow = bcx.zext(bcx.extract_value(pair, 1), Type::bool(ccx));
let dest = result.project_field(bcx, 0);
bcx.store(val, dest.llval, dest.alignment.non_abi());
bcx.store(val, dest.llval, dest.align);
let dest = result.project_field(bcx, 1);
bcx.store(overflow, dest.llval, dest.alignment.non_abi());
bcx.store(overflow, dest.llval, dest.align);
return;
},
@ -473,9 +473,9 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
let success = bcx.zext(bcx.extract_value(pair, 1), Type::bool(bcx.ccx));
let dest = result.project_field(bcx, 0);
bcx.store(val, dest.llval, dest.alignment.non_abi());
bcx.store(val, dest.llval, dest.align);
let dest = result.project_field(bcx, 1);
bcx.store(success, dest.llval, dest.alignment.non_abi());
bcx.store(success, dest.llval, dest.align);
return;
} else {
return invalid_monomorphization(ty);
@ -544,7 +544,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
let tp_ty = substs.type_at(0);
let dst = args[0].deref(bcx.ccx);
let val = if let OperandValue::Ref(ptr, align) = args[1].val {
bcx.load(ptr, align.non_abi())
bcx.load(ptr, align)
} else {
from_immediate(bcx, args[1].immediate())
};
@ -677,7 +677,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
for i in 0..elems.len() {
let dest = result.project_field(bcx, i);
let val = bcx.extract_value(val, i as u64);
bcx.store(val, dest.llval, dest.alignment.non_abi());
bcx.store(val, dest.llval, dest.align);
}
return;
}
@ -688,8 +688,8 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
if !fn_ty.ret.is_ignore() {
if let PassMode::Cast(ty) = fn_ty.ret.mode {
let ptr = bcx.pointercast(llresult, ty.llvm_type(ccx).ptr_to());
bcx.store(llval, ptr, Some(ccx.align_of(ret_ty)));
let ptr = bcx.pointercast(result.llval, ty.llvm_type(ccx).ptr_to());
bcx.store(llval, ptr, result.align);
} else {
OperandRef::from_immediate_or_packed_pair(bcx, llval, result.layout)
.val.store(bcx, result);
@ -758,7 +758,8 @@ fn try_intrinsic<'a, 'tcx>(
) {
if bcx.sess().no_landing_pads() {
bcx.call(func, &[data], None);
bcx.store(C_null(Type::i8p(&bcx.ccx)), dest, None);
let ptr_align = bcx.tcx().data_layout.pointer_align;
bcx.store(C_null(Type::i8p(&bcx.ccx)), dest, ptr_align);
} else if wants_msvc_seh(bcx.sess()) {
trans_msvc_try(bcx, ccx, func, data, local_ptr, dest);
} else {
@ -833,7 +834,8 @@ fn trans_msvc_try<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
//
// More information can be found in libstd's seh.rs implementation.
let i64p = Type::i64(ccx).ptr_to();
let slot = bcx.alloca(i64p, "slot", ccx.data_layout().pointer_align);
let ptr_align = bcx.tcx().data_layout.pointer_align;
let slot = bcx.alloca(i64p, "slot", ptr_align);
bcx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(),
None);
@ -848,13 +850,15 @@ fn trans_msvc_try<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
None => bug!("msvc_try_filter not defined"),
};
let tok = catchpad.catch_pad(cs, &[tydesc, C_i32(ccx, 0), slot]);
let addr = catchpad.load(slot, None);
let arg1 = catchpad.load(addr, None);
let addr = catchpad.load(slot, ptr_align);
let i64_align = bcx.tcx().data_layout.i64_align;
let arg1 = catchpad.load(addr, i64_align);
let val1 = C_i32(ccx, 1);
let arg2 = catchpad.load(catchpad.inbounds_gep(addr, &[val1]), None);
let arg2 = catchpad.load(catchpad.inbounds_gep(addr, &[val1]), i64_align);
let local_ptr = catchpad.bitcast(local_ptr, i64p);
catchpad.store(arg1, local_ptr, None);
catchpad.store(arg2, catchpad.inbounds_gep(local_ptr, &[val1]), None);
catchpad.store(arg1, local_ptr, i64_align);
catchpad.store(arg2, catchpad.inbounds_gep(local_ptr, &[val1]), i64_align);
catchpad.catch_ret(tok, caught.llbb());
caught.ret(C_i32(ccx, 1));
@ -863,7 +867,8 @@ fn trans_msvc_try<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
// Note that no invoke is used here because by definition this function
// can't panic (that's what it's catching).
let ret = bcx.call(llfn, &[func, data, local_ptr], None);
bcx.store(ret, dest, None);
let i32_align = bcx.tcx().data_layout.i32_align;
bcx.store(ret, dest, i32_align);
}
// Definition of the standard "try" function for Rust using the GNU-like model
@ -923,14 +928,16 @@ fn trans_gnu_try<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
let vals = catch.landing_pad(lpad_ty, bcx.ccx.eh_personality(), 1, catch.llfn());
catch.add_clause(vals, C_null(Type::i8p(ccx)));
let ptr = catch.extract_value(vals, 0);
catch.store(ptr, catch.bitcast(local_ptr, Type::i8p(ccx).ptr_to()), None);
let ptr_align = bcx.tcx().data_layout.pointer_align;
catch.store(ptr, catch.bitcast(local_ptr, Type::i8p(ccx).ptr_to()), ptr_align);
catch.ret(C_i32(ccx, 1));
});
// Note that no invoke is used here because by definition this function
// can't panic (that's what it's catching).
let ret = bcx.call(llfn, &[func, data, local_ptr], None);
bcx.store(ret, dest, None);
let i32_align = bcx.tcx().data_layout.i32_align;
bcx.store(ret, dest, i32_align);
}
// Helper function to give a Block to a closure to translate a shim function.

View File

@ -40,7 +40,8 @@ impl<'a, 'tcx> VirtualIndex {
debug!("get_fn({:?}, {:?})", Value(llvtable), self);
let llvtable = bcx.pointercast(llvtable, fn_ty.llvm_type(bcx.ccx).ptr_to().ptr_to());
let ptr = bcx.load(bcx.inbounds_gep(llvtable, &[C_usize(bcx.ccx, self.0)]), None);
let ptr_align = bcx.tcx().data_layout.pointer_align;
let ptr = bcx.load(bcx.inbounds_gep(llvtable, &[C_usize(bcx.ccx, self.0)]), ptr_align);
bcx.nonnull_metadata(ptr);
// Vtable loads are invariant
bcx.set_invariant_load(ptr);
@ -52,7 +53,8 @@ impl<'a, 'tcx> VirtualIndex {
debug!("get_int({:?}, {:?})", Value(llvtable), self);
let llvtable = bcx.pointercast(llvtable, Type::isize(bcx.ccx).ptr_to());
let ptr = bcx.load(bcx.inbounds_gep(llvtable, &[C_usize(bcx.ccx, self.0)]), None);
let usize_align = bcx.tcx().data_layout.pointer_align;
let ptr = bcx.load(bcx.inbounds_gep(llvtable, &[C_usize(bcx.ccx, self.0)]), usize_align);
// Vtable loads are invariant
bcx.set_invariant_load(ptr);
ptr

View File

@ -31,7 +31,7 @@ use syntax_pos::Pos;
use super::{MirContext, LocalRef};
use super::constant::Const;
use super::place::{Alignment, PlaceRef};
use super::place::PlaceRef;
use super::operand::OperandRef;
use super::operand::OperandValue::{Pair, Ref, Immediate};
@ -216,7 +216,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
PassMode::Direct(_) | PassMode::Pair(..) => {
let op = self.trans_consume(&bcx, &mir::Place::Local(mir::RETURN_PLACE));
if let Ref(llval, align) = op.val {
bcx.load(llval, align.non_abi())
bcx.load(llval, align)
} else {
op.immediate_or_packed_pair(&bcx)
}
@ -228,7 +228,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
LocalRef::Operand(None) => bug!("use of return before def"),
LocalRef::Place(tr_place) => {
OperandRef {
val: Ref(tr_place.llval, tr_place.alignment),
val: Ref(tr_place.llval, tr_place.align),
layout: tr_place.layout
}
}
@ -240,14 +240,14 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
scratch.llval
}
Ref(llval, align) => {
assert_eq!(align, Alignment::AbiAligned,
assert_eq!(align.abi(), op.layout.align.abi(),
"return place is unaligned!");
llval
}
};
bcx.load(
bcx.pointercast(llslot, cast_ty.llvm_type(bcx.ccx).ptr_to()),
Some(self.fn_ty.ret.layout.align))
self.fn_ty.ret.layout.align)
}
};
bcx.ret(llval);
@ -579,7 +579,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
(&mir::Operand::Constant(_), Ref(..)) => {
let tmp = PlaceRef::alloca(&bcx, op.layout, "const");
op.val.store(&bcx, tmp);
op.val = Ref(tmp.llval, tmp.alignment);
op.val = Ref(tmp.llval, tmp.align);
}
_ => {}
}
@ -639,38 +639,40 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
PassMode::Indirect(_) | PassMode::Cast(_) => {
let scratch = PlaceRef::alloca(bcx, arg.layout, "arg");
op.val.store(bcx, scratch);
(scratch.llval, Alignment::AbiAligned, true)
(scratch.llval, scratch.align, true)
}
_ => {
(op.immediate_or_packed_pair(bcx), Alignment::AbiAligned, false)
(op.immediate_or_packed_pair(bcx), arg.layout.align, false)
}
}
}
Ref(llval, align @ Alignment::Packed(_)) if arg.is_indirect() => {
// `foo(packed.large_field)`. We can't pass the (unaligned) field directly. I
// think that ATM (Rust 1.16) we only pass temporaries, but we shouldn't
// have scary latent bugs around.
Ref(llval, align) => {
if arg.is_indirect() && align.abi() < arg.layout.align.abi() {
// `foo(packed.large_field)`. We can't pass the (unaligned) field directly. I
// think that ATM (Rust 1.16) we only pass temporaries, but we shouldn't
// have scary latent bugs around.
let scratch = PlaceRef::alloca(bcx, arg.layout, "arg");
base::memcpy_ty(bcx, scratch.llval, llval, op.layout, align.non_abi());
(scratch.llval, Alignment::AbiAligned, true)
let scratch = PlaceRef::alloca(bcx, arg.layout, "arg");
base::memcpy_ty(bcx, scratch.llval, llval, op.layout, align);
(scratch.llval, scratch.align, true)
} else {
(llval, align, true)
}
}
Ref(llval, align) => (llval, align, true)
};
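// A sketch of the decision encoded above (illustrative helper, not in the patch):
// a by-ref argument can be forwarded directly only when the pointer is known to be
// at least as aligned as the argument type's ABI requires; otherwise -- e.g. for
// `foo(packed.large_field)` -- it is first copied into a fully aligned temporary.
fn needs_aligned_copy(known_align: u64, abi_align: u64, is_indirect: bool) -> bool {
    is_indirect && known_align < abi_align
}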
if by_ref && !arg.is_indirect() {
// Have to load the argument, maybe while casting it.
if let PassMode::Cast(ty) = arg.mode {
llval = bcx.load(bcx.pointercast(llval, ty.llvm_type(bcx.ccx).ptr_to()),
(align | Alignment::Packed(arg.layout.align))
.non_abi());
align.min(arg.layout.align));
} else {
// We can't use `PlaceRef::load` here because the argument
// may have a type we don't treat as immediate, but the ABI
// used for this call is passing it by-value. In that case,
// the load would just produce `OperandValue::Ref` instead
// of the `OperandValue::Immediate` we need for the call.
llval = bcx.load(llval, align.non_abi());
llval = bcx.load(llval, align);
if let layout::Abi::Scalar(ref scalar) = arg.layout.abi {
if scalar.is_bool() {
bcx.range_metadata(llval, 0..2);
@ -820,21 +822,17 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
self.trans_place(bcx, dest)
};
if fn_ret.is_indirect() {
match dest.alignment {
Alignment::AbiAligned => {
llargs.push(dest.llval);
ReturnDest::Nothing
},
Alignment::Packed(_) => {
// Currently, MIR code generation does not create calls
// that store directly to fields of packed structs (in
// fact, the calls it creates write only to temps),
//
// If someone changes that, please update this code path
// to create a temporary.
span_bug!(self.mir.span, "can't directly store to unaligned value");
}
if dest.align.abi() < dest.layout.align.abi() {
// Currently, MIR code generation does not create calls
// that store directly to fields of packed structs (in
// fact, the calls it creates write only to temps),
//
// If someone changes that, please update this code path
// to create a temporary.
span_bug!(self.mir.span, "can't directly store to unaligned value");
}
llargs.push(dest.llval);
ReturnDest::Nothing
} else {
ReturnDest::Store(dest)
}
@ -874,8 +872,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let llty = src.layout.llvm_type(bcx.ccx);
let cast_ptr = bcx.pointercast(dst.llval, llty.ptr_to());
let align = src.layout.align.min(dst.layout.align);
src.val.store(bcx,
PlaceRef::new_sized(cast_ptr, src.layout, Alignment::Packed(align)));
src.val.store(bcx, PlaceRef::new_sized(cast_ptr, src.layout, align));
}

View File

@ -42,7 +42,6 @@ use syntax::ast;
use std::fmt;
use std::ptr;
use super::place::Alignment;
use super::operand::{OperandRef, OperandValue};
use super::MirContext;
@ -182,12 +181,12 @@ impl<'a, 'tcx> Const<'tcx> {
let align = ccx.align_of(self.ty);
let ptr = consts::addr_of(ccx, self.llval, align, "const");
OperandValue::Ref(consts::ptrcast(ptr, layout.llvm_type(ccx).ptr_to()),
Alignment::AbiAligned)
layout.align)
};
OperandRef {
val,
layout: ccx.layout_of(self.ty)
layout
}
}
}
@ -1134,12 +1133,14 @@ fn trans_const_adt<'a, 'tcx>(
if let layout::FieldPlacement::Union(_) = l.fields {
assert_eq!(variant_index, 0);
assert_eq!(vals.len(), 1);
let (field_size, field_align) = ccx.size_and_align_of(vals[0].ty);
let contents = [
vals[0].llval,
padding(ccx, l.size - ccx.size_of(vals[0].ty))
padding(ccx, l.size - field_size)
];
Const::new(C_struct(ccx, &contents, l.is_packed()), t)
let packed = l.align.abi() < field_align.abi();
Const::new(C_struct(ccx, &contents, packed), t)
} else {
if let layout::Abi::Vector { .. } = l.abi {
if let layout::FieldPlacement::Array { .. } = l.fields {
@ -1232,28 +1233,33 @@ fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
}
// offset of current value
let mut packed = false;
let mut offset = Size::from_bytes(0);
let mut cfields = Vec::new();
cfields.reserve(discr.is_some() as usize + 1 + layout.fields.count() * 2);
if let Some(discr) = discr {
let (field_size, field_align) = ccx.size_and_align_of(discr.ty);
packed |= layout.align.abi() < field_align.abi();
cfields.push(discr.llval);
offset = ccx.size_of(discr.ty);
offset = field_size;
}
let parts = layout.fields.index_by_increasing_offset().map(|i| {
(vals[i], layout.fields.offset(i))
});
for (val, target_offset) in parts {
let (field_size, field_align) = ccx.size_and_align_of(val.ty);
packed |= layout.align.abi() < field_align.abi();
cfields.push(padding(ccx, target_offset - offset));
cfields.push(val.llval);
offset = target_offset + ccx.size_of(val.ty);
offset = target_offset + field_size;
}
// Pad to the size of the whole type, not e.g. the variant.
cfields.push(padding(ccx, ccx.size_of(layout.ty) - offset));
Const::new(C_struct(ccx, &cfields, layout.is_packed()), layout.ty)
Const::new(C_struct(ccx, &cfields, packed), layout.ty)
}
fn padding(ccx: &CrateContext, size: Size) -> ValueRef {

View File

@ -35,7 +35,7 @@ use rustc_data_structures::indexed_vec::{IndexVec, Idx};
pub use self::constant::trans_static_initializer;
use self::analyze::CleanupKind;
use self::place::{Alignment, PlaceRef};
use self::place::PlaceRef;
use rustc::mir::traversal;
use self::operand::{OperandRef, OperandValue};
@ -279,9 +279,7 @@ pub fn trans_mir<'a, 'tcx: 'a>(
if local == mir::RETURN_PLACE && mircx.fn_ty.ret.is_indirect() {
debug!("alloc: {:?} (return place) -> place", local);
let llretptr = llvm::get_param(llfn, 0);
LocalRef::Place(PlaceRef::new_sized(llretptr,
layout,
Alignment::AbiAligned))
LocalRef::Place(PlaceRef::new_sized(llretptr, layout, layout.align))
} else if memory_locals.contains(local.index()) {
debug!("alloc: {:?} -> place", local);
LocalRef::Place(PlaceRef::alloca(&bcx, layout, &format!("{:?}", local)))
@ -474,7 +472,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
let llarg = llvm::get_param(bcx.llfn(), llarg_idx as c_uint);
bcx.set_value_name(llarg, &name);
llarg_idx += 1;
PlaceRef::new_sized(llarg, arg.layout, Alignment::AbiAligned)
PlaceRef::new_sized(llarg, arg.layout, arg.layout.align)
} else {
let tmp = PlaceRef::alloca(bcx, arg.layout, &name);
arg.store_fn_arg(bcx, &mut llarg_idx, tmp);
@ -532,11 +530,11 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
// doesn't actually strip the offset when splitting the closure
// environment into its components so it ends up out of bounds.
let env_ptr = if !env_ref {
let alloc = PlaceRef::alloca(bcx,
let scratch = PlaceRef::alloca(bcx,
bcx.ccx.layout_of(tcx.mk_mut_ptr(arg.layout.ty)),
"__debuginfo_env_ptr");
bcx.store(place.llval, alloc.llval, None);
alloc.llval
bcx.store(place.llval, scratch.llval, scratch.align);
scratch.llval
} else {
place.llval
};

View File

@ -10,7 +10,7 @@
use llvm::ValueRef;
use rustc::ty;
use rustc::ty::layout::{self, LayoutOf, TyLayout};
use rustc::ty::layout::{self, Align, LayoutOf, TyLayout};
use rustc::mir;
use rustc_data_structures::indexed_vec::Idx;
@ -25,7 +25,7 @@ use std::fmt;
use std::ptr;
use super::{MirContext, LocalRef};
use super::place::{Alignment, PlaceRef};
use super::place::PlaceRef;
/// The representation of a Rust value. The enum variant is in fact
/// uniquely determined by the value's type, but is kept as a
@ -34,7 +34,7 @@ use super::place::{Alignment, PlaceRef};
pub enum OperandValue {
/// A reference to the actual operand. The data is guaranteed
/// to be valid for the operand's lifetime.
Ref(ValueRef, Alignment),
Ref(ValueRef, Align),
/// A single LLVM value.
Immediate(ValueRef),
/// A pair of immediate LLVM values. Used by fat pointers too.
@ -107,11 +107,12 @@ impl<'a, 'tcx> OperandRef<'tcx> {
OperandValue::Pair(llptr, llextra) => (llptr, llextra),
OperandValue::Ref(..) => bug!("Deref of by-Ref operand {:?}", self)
};
let layout = ccx.layout_of(projected_ty);
PlaceRef {
llval: llptr,
llextra,
layout: ccx.layout_of(projected_ty),
alignment: Alignment::AbiAligned,
layout,
align: layout.align,
}
}
@ -222,9 +223,9 @@ impl<'a, 'tcx> OperandValue {
match self {
OperandValue::Ref(r, source_align) =>
base::memcpy_ty(bcx, dest.llval, r, dest.layout,
(source_align | dest.alignment).non_abi()),
source_align.min(dest.align)),
OperandValue::Immediate(s) => {
bcx.store(base::from_immediate(bcx, s), dest.llval, dest.alignment.non_abi());
bcx.store(base::from_immediate(bcx, s), dest.llval, dest.align);
}
OperandValue::Pair(a, b) => {
for (i, &x) in [a, b].iter().enumerate() {
@ -233,7 +234,7 @@ impl<'a, 'tcx> OperandValue {
if common::val_ty(x) == Type::i1(bcx.ccx) {
llptr = bcx.pointercast(llptr, Type::i8p(bcx.ccx));
}
bcx.store(base::from_immediate(bcx, x), llptr, dest.alignment.non_abi());
bcx.store(base::from_immediate(bcx, x), llptr, dest.align);
}
}
}

View File

@ -24,54 +24,10 @@ use value::Value;
use glue;
use std::ptr;
use std::ops;
use super::{MirContext, LocalRef};
use super::operand::{OperandRef, OperandValue};
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum Alignment {
Packed(Align),
AbiAligned,
}
impl ops::BitOr for Alignment {
type Output = Self;
fn bitor(self, rhs: Self) -> Self {
match (self, rhs) {
(Alignment::Packed(a), Alignment::Packed(b)) => {
Alignment::Packed(a.min(b))
}
(Alignment::Packed(x), _) | (_, Alignment::Packed(x)) => {
Alignment::Packed(x)
}
(Alignment::AbiAligned, Alignment::AbiAligned) => {
Alignment::AbiAligned
}
}
}
}
impl<'a> From<TyLayout<'a>> for Alignment {
fn from(layout: TyLayout) -> Self {
if layout.is_packed() {
Alignment::Packed(layout.align)
} else {
Alignment::AbiAligned
}
}
}
impl Alignment {
pub fn non_abi(self) -> Option<Align> {
match self {
Alignment::Packed(x) => Some(x),
Alignment::AbiAligned => None,
}
}
}
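// A sketch of why a plain `Align` plus `min` can replace the `Alignment` lattice
// removed above: reading `AbiAligned` as "the type's full ABI alignment A", and
// assuming packed alignments never exceed A, the old `BitOr` is exactly `min` on
// the underlying values (illustrative, with `u64` standing in for `Align`):
fn combine_align(a: u64, b: u64) -> u64 {
    a.min(b)
}
// e.g. Packed(2) | Packed(4) was Packed(2); with plain values, 2.min(4) == 2,
// and AbiAligned | AbiAligned stays A because A.min(A) == A.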
#[derive(Copy, Clone, Debug)]
pub struct PlaceRef<'tcx> {
/// Pointer to the contents of the place
@ -83,20 +39,20 @@ pub struct PlaceRef<'tcx> {
/// Monomorphized type of this place, including variant information
pub layout: TyLayout<'tcx>,
/// Whether this place is known to be aligned according to its layout
pub alignment: Alignment,
/// What alignment we know for this place
pub align: Align,
}
impl<'a, 'tcx> PlaceRef<'tcx> {
pub fn new_sized(llval: ValueRef,
layout: TyLayout<'tcx>,
alignment: Alignment)
align: Align)
-> PlaceRef<'tcx> {
PlaceRef {
llval,
llextra: ptr::null_mut(),
layout,
alignment
align
}
}
@ -104,7 +60,7 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
-> PlaceRef<'tcx> {
debug!("alloca({:?}: {:?})", name, layout);
let tmp = bcx.alloca(layout.llvm_type(bcx.ccx), name, layout.align);
Self::new_sized(tmp, layout, Alignment::AbiAligned)
Self::new_sized(tmp, layout, layout.align)
}
pub fn len(&self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef {
@ -171,7 +127,7 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
let llval = if !const_llval.is_null() {
const_llval
} else {
let load = bcx.load(self.llval, self.alignment.non_abi());
let load = bcx.load(self.llval, self.align);
if let layout::Abi::Scalar(ref scalar) = self.layout.abi {
scalar_load_metadata(load, scalar);
}
@ -185,7 +141,7 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
if scalar.is_bool() {
llptr = bcx.pointercast(llptr, Type::i8p(bcx.ccx));
}
let load = bcx.load(llptr, self.alignment.non_abi());
let load = bcx.load(llptr, self.align);
scalar_load_metadata(load, scalar);
if scalar.is_bool() {
bcx.trunc(load, Type::i1(bcx.ccx))
@ -195,7 +151,7 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
};
OperandValue::Pair(load(0, a), load(1, b))
} else {
OperandValue::Ref(self.llval, self.alignment)
OperandValue::Ref(self.llval, self.align)
};
OperandRef { val, layout: self.layout }
@ -206,7 +162,7 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
let ccx = bcx.ccx;
let field = self.layout.field(ccx, ix);
let offset = self.layout.fields.offset(ix);
let alignment = self.alignment | Alignment::from(self.layout);
let align = self.align.min(self.layout.align).min(field.align);
let simple = || {
// Unions and newtypes only use an offset of 0.
@ -228,29 +184,31 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
ptr::null_mut()
},
layout: field,
alignment,
align,
}
};
// Simple case - we can just GEP the field
// * Packed struct - There is no alignment padding
// * Field is sized - pointer is properly aligned already
if self.layout.is_packed() || !field.is_unsized() {
return simple();
}
// If the type of the last field is [T], str or a foreign type, then we don't need to do
// any adjustments
// Simple cases, which don't need DST adjustment:
// * no metadata available - just log the case
// * known alignment - sized types, [T], str or a foreign type
// * packed struct - there is no alignment padding
match field.ty.sty {
_ if !self.has_extra() => {
debug!("Unsized field `{}`, of `{:?}` has no metadata for adjustment",
ix, Value(self.llval));
return simple();
}
_ if !field.is_unsized() => return simple(),
ty::TySlice(..) | ty::TyStr | ty::TyForeign(..) => return simple(),
_ => ()
}
// There's no metadata available, log the case and just do the GEP.
if !self.has_extra() {
debug!("Unsized field `{}`, of `{:?}` has no metadata for adjustment",
ix, Value(self.llval));
return simple();
ty::TyAdt(def, _) => {
if def.repr.packed() {
// FIXME(eddyb) generalize the adjustment when we
// start supporting packing to larger alignments.
assert_eq!(self.layout.align.abi(), 1);
return simple();
}
}
_ => {}
}
// We need to get the pointer manually now.
@ -273,7 +231,7 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
let unaligned_offset = C_usize(ccx, offset.bytes());
// Get the alignment of the field
let (_, align) = glue::size_and_align_of_dst(bcx, field.ty, meta);
let (_, unsized_align) = glue::size_and_align_of_dst(bcx, field.ty, meta);
// Bump the unaligned offset up to the appropriate alignment using the
// following expression:
@ -281,9 +239,9 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
// (unaligned offset + (align - 1)) & -align
// Calculate offset
let align_sub_1 = bcx.sub(align, C_usize(ccx, 1u64));
let align_sub_1 = bcx.sub(unsized_align, C_usize(ccx, 1u64));
let offset = bcx.and(bcx.add(unaligned_offset, align_sub_1),
bcx.neg(align));
bcx.neg(unsized_align));
debug!("struct_field_ptr: DST field offset: {:?}", Value(offset));
@ -299,7 +257,7 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
llval: bcx.pointercast(byte_ptr, ll_fty.ptr_to()),
llextra: self.llextra,
layout: field,
alignment,
align,
}
}
@ -372,7 +330,7 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
.discriminant_for_variant(bcx.tcx(), variant_index)
.to_u128_unchecked() as u64;
bcx.store(C_int(ptr.layout.llvm_type(bcx.ccx), to as i64),
ptr.llval, ptr.alignment.non_abi());
ptr.llval, ptr.align);
}
layout::Variants::NicheFilling {
dataful_variant,
@ -416,7 +374,7 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
llval: bcx.inbounds_gep(self.llval, &[C_usize(bcx.ccx, 0), llindex]),
llextra: ptr::null_mut(),
layout: self.layout.field(bcx.ccx, 0),
alignment: self.alignment
align: self.align
}
}
@ -465,9 +423,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let result = match *place {
mir::Place::Local(_) => bug!(), // handled above
mir::Place::Static(box mir::Static { def_id, ty }) => {
PlaceRef::new_sized(consts::get_static(ccx, def_id),
ccx.layout_of(self.monomorphize(&ty)),
Alignment::AbiAligned)
let layout = ccx.layout_of(self.monomorphize(&ty));
PlaceRef::new_sized(consts::get_static(ccx, def_id), layout, layout.align)
},
mir::Place::Projection(box mir::Projection {
ref base,

View File

@ -104,9 +104,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let start = dest.project_index(&bcx, C_usize(bcx.ccx, 0)).llval;
if let OperandValue::Immediate(v) = tr_elem.val {
let align = dest.alignment.non_abi()
.unwrap_or(tr_elem.layout.align);
let align = C_i32(bcx.ccx, align.abi() as i32);
let align = C_i32(bcx.ccx, dest.align.abi() as i32);
let size = C_usize(bcx.ccx, dest.layout.size.bytes());
// Use llvm.memset.p0i8.* to initialize all zero arrays
@ -139,7 +137,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
header_bcx.cond_br(keep_going, body_bcx.llbb(), next_bcx.llbb());
tr_elem.val.store(&body_bcx,
PlaceRef::new_sized(current, tr_elem.layout, dest.alignment));
PlaceRef::new_sized(current, tr_elem.layout, dest.align));
let next = body_bcx.inbounds_gep(current, &[C_usize(bcx.ccx, 1)]);
body_bcx.br(header_bcx.llbb());

View File

@ -79,13 +79,14 @@ fn uncached_llvm_type<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
match layout.fields {
layout::FieldPlacement::Union(_) => {
let fill = Type::padding_filler(ccx, layout.size, layout.align);
let packed = false;
match name {
None => {
Type::struct_(ccx, &[fill], layout.is_packed())
Type::struct_(ccx, &[fill], packed)
}
Some(ref name) => {
let mut llty = Type::named_struct(ccx, name);
llty.set_struct_body(&[fill], layout.is_packed());
llty.set_struct_body(&[fill], packed);
llty
}
}
@ -96,7 +97,8 @@ fn uncached_llvm_type<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
layout::FieldPlacement::Arbitrary { .. } => {
match name {
None => {
Type::struct_(ccx, &struct_llfields(ccx, layout), layout.is_packed())
let (llfields, packed) = struct_llfields(ccx, layout);
Type::struct_(ccx, &llfields, packed)
}
Some(ref name) => {
let llty = Type::named_struct(ccx, name);
@ -109,15 +111,19 @@ fn uncached_llvm_type<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
}
fn struct_llfields<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
layout: TyLayout<'tcx>) -> Vec<Type> {
layout: TyLayout<'tcx>)
-> (Vec<Type>, bool) {
debug!("struct_llfields: {:#?}", layout);
let field_count = layout.fields.count();
let mut packed = false;
let mut offset = Size::from_bytes(0);
let mut prev_align = layout.align;
let mut result: Vec<Type> = Vec::with_capacity(1 + field_count * 2);
for i in layout.fields.index_by_increasing_offset() {
let field = layout.field(ccx, i);
packed |= layout.align.abi() < field.align.abi();
let target_offset = layout.fields.offset(i as usize);
debug!("struct_llfields: {}: {:?} offset: {:?} target_offset: {:?}",
i, field, offset, target_offset);
@ -129,15 +135,6 @@ fn struct_llfields<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
debug!(" padding before: {:?}", padding);
result.push(field.llvm_type(ccx));
if layout.is_packed() {
assert_eq!(padding.bytes(), 0);
} else {
assert!(field.align.abi() <= layout.align.abi(),
"non-packed type has field with larger align ({}): {:#?}",
field.align.abi(), layout);
}
offset = target_offset + field.size;
prev_align = field.align;
}
@ -158,7 +155,7 @@ fn struct_llfields<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
offset, layout.size);
}
result
(result, packed)
}
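// A sketch of the detection rule this change is built around: an aggregate is
// treated as packed exactly when its own alignment is lower than some field's
// alignment, instead of consulting a separate `packed` flag (illustrative helper,
// plain `u64` in place of `Align`):
fn is_packed(aggregate_align: u64, field_aligns: &[u64]) -> bool {
    field_aligns.iter().any(|&field| aggregate_align < field)
}
// e.g. is_packed(1, &[4, 2]) is true for a #[repr(packed)] struct, while an
// ordinary struct has aggregate_align >= every field's align, so it is false.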
impl<'a, 'tcx> CrateContext<'a, 'tcx> {
@ -301,7 +298,8 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> {
ccx.lltypes().borrow_mut().insert((self.ty, variant_index), llty);
if let Some((mut llty, layout)) = defer {
llty.set_struct_body(&struct_llfields(ccx, layout), layout.is_packed())
let (llfields, packed) = struct_llfields(ccx, layout);
llty.set_struct_body(&llfields, packed)
}
llty

View File

@ -23,7 +23,7 @@
# Each tool has a list of people to ping
# ping @oli-obk @RalfJung @eddyb
miri = "Testing"
miri = "Broken"
# ping @Manishearth @llogiq @mcarton @oli-obk
clippy = "Testing"