replace most uses of usize with u64 so the host architecture isn't exposed anymore

Oliver Schneider 2016-11-18 12:55:14 +01:00
parent e361b63fa0
commit 0039ebc940
10 changed files with 207 additions and 174 deletions
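
Why this matters: miri interprets code for the target, whose pointers and sizes can be 64-bit even when the host's `usize` is only 32-bit, so keeping target sizes and offsets in host `usize` could silently truncate them. A minimal illustration of the hazard (my own sketch, not code from the commit):

```rust
// On a 32-bit host, `usize` is 32 bits wide, so casting a 64-bit target
// offset to host `usize` silently truncates it.
fn main() {
    let target_offset: u64 = 1 << 40; // perfectly valid on a 64-bit target
    let on_host = target_offset as usize; // 0 on a 32-bit host, lossless on a 64-bit one
    println!("{} -> {}", target_offset, on_host);
}
```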


@ -53,7 +53,7 @@ impl<'a> CompilerCalls<'a> for MiriCompilerCalls {
NestedMetaItemKind::MetaItem(ref inner) => match inner.node {
MetaItemKind::NameValue(ref name, ref value) => {
match &**name {
"memory_size" => memory_size = extract_int(value) as usize,
"memory_size" => memory_size = extract_int(value),
"step_limit" => step_limit = extract_int(value),
"stack_limit" => stack_limit = extract_int(value) as usize,
_ => state.session.span_err(item.span, "unknown miri attribute"),


@ -18,8 +18,8 @@ pub enum EvalError<'tcx> {
InvalidDiscriminant,
PointerOutOfBounds {
ptr: Pointer,
size: usize,
allocation_size: usize,
size: u64,
allocation_size: u64,
},
ReadPointerAsBytes,
InvalidPointerMath,
@ -32,15 +32,15 @@ pub enum EvalError<'tcx> {
Math(Span, ConstMathErr),
InvalidChar(u64),
OutOfMemory {
allocation_size: usize,
memory_size: usize,
memory_usage: usize,
allocation_size: u64,
memory_size: u64,
memory_usage: u64,
},
ExecutionTimeLimitReached,
StackFrameLimitReached,
AlignmentCheckFailed {
required: usize,
has: usize,
required: u64,
has: u64,
},
CalledClosureAsFunction,
VtableForArgumentlessMethod,
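
For context, a sketch of the bounds check that produces these fields, now done entirely in target-sized u64 arithmetic (an assumed standalone form, not the commit's exact code):

```rust
// Hypothetical standalone form of the check; `checked_add` also guards the
// offset + size addition itself against u64 overflow.
fn check_bounds(offset: u64, size: u64, allocation_size: u64) -> Result<(), String> {
    match offset.checked_add(size) {
        Some(end) if end <= allocation_size => Ok(()),
        _ => Err(format!(
            "pointer out of bounds: {} + {} exceeds allocation of {} bytes",
            offset, size, allocation_size
        )),
    }
}
```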


@ -67,7 +67,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
TyChar if v as u8 as u64 == v => Ok(PrimVal::new(v, Char)),
TyChar => Err(EvalError::InvalidChar(v)),
TyRawPtr(_) => Ok(PrimVal::from_ptr(Pointer::from_int(v as usize))),
TyRawPtr(_) => Ok(PrimVal::from_ptr(Pointer::from_int(v))),
_ => Err(EvalError::Unimplemented(format!("int to {:?} cast", ty))),
}


@ -167,7 +167,7 @@ pub enum StackPopCleanup {
}
impl<'a, 'tcx> EvalContext<'a, 'tcx> {
pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>, memory_size: usize, stack_limit: usize, step_limit: u64) -> Self {
pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>, memory_size: u64, stack_limit: usize, step_limit: u64) -> Self {
EvalContext {
tcx: tcx,
memory: Memory::new(&tcx.data_layout, memory_size),
@ -211,7 +211,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
fn str_to_value(&mut self, s: &str) -> EvalResult<'tcx, Value> {
// FIXME: cache these allocs
let ptr = self.memory.allocate(s.len(), 1)?;
let ptr = self.memory.allocate(s.len() as u64, 1)?;
self.memory.write_bytes(ptr, s.as_bytes())?;
self.memory.freeze(ptr.alloc_id)?;
Ok(Value::ByValPair(PrimVal::from_ptr(ptr), self.usize_primval(s.len() as u64)))
@ -255,7 +255,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
Str(ref s) => return self.str_to_value(s),
ByteStr(ref bs) => {
let ptr = self.memory.allocate(bs.len(), 1)?;
let ptr = self.memory.allocate(bs.len() as u64, 1)?;
self.memory.write_bytes(ptr, bs)?;
self.memory.freeze(ptr.alloc_id)?;
PrimVal::from_ptr(ptr)
@ -292,25 +292,25 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
self.tcx.normalize_associated_type(&substituted)
}
fn type_size(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, Option<usize>> {
fn type_size(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, Option<u64>> {
self.type_size_with_substs(ty, self.substs())
}
fn type_align(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, usize> {
fn type_align(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, u64> {
self.type_align_with_substs(ty, self.substs())
}
fn type_size_with_substs(&self, ty: Ty<'tcx>, substs: &'tcx Substs<'tcx>) -> EvalResult<'tcx, Option<usize>> {
fn type_size_with_substs(&self, ty: Ty<'tcx>, substs: &'tcx Substs<'tcx>) -> EvalResult<'tcx, Option<u64>> {
let layout = self.type_layout_with_substs(ty, substs)?;
if layout.is_unsized() {
Ok(None)
} else {
Ok(Some(layout.size(&self.tcx.data_layout).bytes() as usize))
Ok(Some(layout.size(&self.tcx.data_layout).bytes()))
}
}
fn type_align_with_substs(&self, ty: Ty<'tcx>, substs: &'tcx Substs<'tcx>) -> EvalResult<'tcx, usize> {
self.type_layout_with_substs(ty, substs).map(|layout| layout.align(&self.tcx.data_layout).abi() as usize)
fn type_align_with_substs(&self, ty: Ty<'tcx>, substs: &'tcx Substs<'tcx>) -> EvalResult<'tcx, u64> {
self.type_layout_with_substs(ty, substs).map(|layout| layout.align(&self.tcx.data_layout).abi())
}
fn type_layout(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, &'tcx Layout> {
@ -464,7 +464,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
for (offset, operand) in offsets.into_iter().zip(operands) {
let value = self.eval_operand(operand)?;
let value_ty = self.operand_ty(operand);
let field_dest = dest.offset(offset as isize);
let field_dest = dest.offset(offset);
self.write_value_to_ptr(value, field_dest, value_ty)?;
}
Ok(())
@ -525,8 +525,8 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
General { discr, ref variants, .. } => {
if let mir::AggregateKind::Adt(adt_def, variant, _, _) = *kind {
let discr_val = adt_def.variants[variant].disr_val.to_u64_unchecked();
let discr_size = discr.size().bytes() as usize;
let discr_offset = variants[variant].offsets[0].bytes() as isize;
let discr_size = discr.size().bytes();
let discr_offset = variants[variant].offsets[0].bytes();
// FIXME(solson)
let dest = self.force_allocation(dest)?;
@ -581,7 +581,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
// FIXME(solson)
let dest = self.force_allocation(dest)?.to_ptr();
let dest = dest.offset(offset.bytes() as isize);
let dest = dest.offset(offset.bytes());
let dest_size = self.type_size(ty)?.expect("bad StructWrappedNullablePointer discrfield");
try!(self.memory.write_int(dest, 0, dest_size));
}
@ -594,7 +594,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
assert_eq!(operands.len(), 0);
if let mir::AggregateKind::Adt(adt_def, variant, _, _) = *kind {
let n = adt_def.variants[variant].disr_val.to_u64_unchecked();
let size = discr.size().bytes() as usize;
let size = discr.size().bytes();
let val = if signed {
PrimVal::from_int_with_size(n as i64, size)
@ -621,10 +621,10 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
Repeat(ref operand, _) => {
let (elem_ty, length) = match dest_ty.sty {
ty::TyArray(elem_ty, n) => (elem_ty, n),
ty::TyArray(elem_ty, n) => (elem_ty, n as u64),
_ => bug!("tried to assign array-repeat to non-array type {:?}", dest_ty),
};
self.inc_step_counter_and_check_limit(length as u64)?;
self.inc_step_counter_and_check_limit(length)?;
let elem_size = self.type_size(elem_ty)?.expect("repeat element type must be sized");
let value = self.eval_operand(operand)?;
@ -632,7 +632,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
let dest = self.force_allocation(dest)?.to_ptr();
for i in 0..length {
let elem_dest = dest.offset((i * elem_size) as isize);
let elem_dest = dest.offset(i * elem_size);
self.write_value_to_ptr(value, elem_dest, elem_ty)?;
}
}
@ -741,15 +741,15 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
}
fn nonnull_offset_and_ty(&self, ty: Ty<'tcx>, nndiscr: u64, discrfield: &[u32]) -> EvalResult<'tcx, (Size, Ty<'tcx>)> {
// Skip the constant 0 at the start meant for LLVM GEP.
let mut path = discrfield.iter().skip(1).map(|&i| i as usize);
// Skip the constant 0 at the start meant for LLVM GEP and the outer non-null variant
let path = discrfield.iter().skip(2).map(|&i| i as usize);
// Handle the field index for the outer non-null variant.
let inner_ty = match ty.sty {
ty::TyAdt(adt_def, substs) => {
let variant = &adt_def.variants[nndiscr as usize];
let index = path.next().unwrap();
let field = &variant.fields[index];
let index = discrfield[1];
let field = &variant.fields[index as usize];
field.ty(self.tcx, substs)
}
_ => bug!("non-enum for StructWrappedNullablePointer: {}", ty),
@ -804,8 +804,8 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
Ok(variant.offsets[field_index])
}
FatPointer { .. } => {
let bytes = field_index * self.memory.pointer_size();
Ok(Size::from_bytes(bytes as u64))
let bytes = field_index as u64 * self.memory.pointer_size();
Ok(Size::from_bytes(bytes))
}
_ => {
let msg = format!("can't handle type: {:?}, with layout: {:?}", ty, layout);
@ -980,7 +980,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
_ => bug!("field access on non-product type: {:?}", base_layout),
};
let ptr = base_ptr.offset(offset.bytes() as isize);
let ptr = base_ptr.offset(offset.bytes());
let extra = if self.type_is_sized(field_ty) {
LvalueExtra::None
} else {
@ -1048,7 +1048,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
let n = self.value_to_primval(n_ptr, usize)?
.expect_uint("Projection::Index expected usize");
assert!(n < len);
let ptr = base_ptr.offset(n as isize * elem_size as isize);
let ptr = base_ptr.offset(n * elem_size);
(ptr, LvalueExtra::None)
}
@ -1062,12 +1062,12 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
assert!(n >= min_length as u64);
let index = if from_end {
n as isize - offset as isize
n - u64::from(offset)
} else {
offset as isize
u64::from(offset)
};
let ptr = base_ptr.offset(index * elem_size as isize);
let ptr = base_ptr.offset(index * elem_size);
(ptr, LvalueExtra::None)
}
@ -1078,9 +1078,9 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
let (elem_ty, n) = base.elem_ty_and_len(base_ty);
let elem_size = self.type_size(elem_ty)?.expect("slice element must be sized");
assert!((from as u64) <= n - (to as u64));
let ptr = base_ptr.offset(from as isize * elem_size as isize);
let extra = LvalueExtra::Length(n - to as u64 - from as u64);
assert!(u64::from(from) <= n - u64::from(to));
let ptr = base_ptr.offset(u64::from(from) * elem_size);
let extra = LvalueExtra::Length(n - u64::from(to) - u64::from(from));
(ptr, extra)
}
};
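
A standalone sketch of the subslice arithmetic above, using the same u64::from widenings for the u32 field indices (function shape assumed for illustration):

```rust
// Subslice [from .. n - to] of a slice holding n elements of elem_size bytes:
// it starts `from` elements in and keeps n - to - from elements.
fn subslice(base: u64, n: u64, elem_size: u64, from: u32, to: u32) -> (u64, u64) {
    assert!(u64::from(from) <= n - u64::from(to));
    let ptr = base + u64::from(from) * elem_size;
    let len = n - u64::from(to) - u64::from(from);
    (ptr, len)
}

fn main() {
    // a [u32; 10] at address 1000, dropping 2 elements from the front, 1 from the back
    assert_eq!(subslice(1000, 10, 4, 2, 1), (1008, 7));
}
```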
@ -1318,8 +1318,8 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
ty: Ty<'tcx>
) -> EvalResult<'tcx, ()> {
assert_eq!(self.get_field_count(ty)?, 2);
let field_0 = self.get_field_offset(ty, 0)?.bytes() as isize;
let field_1 = self.get_field_offset(ty, 1)?.bytes() as isize;
let field_0 = self.get_field_offset(ty, 0)?.bytes();
let field_1 = self.get_field_offset(ty, 1)?.bytes();
self.memory.write_primval(ptr.offset(field_0), a)?;
self.memory.write_primval(ptr.offset(field_1), b)?;
Ok(())
@ -1368,7 +1368,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
ty::TyAdt(..) => {
use rustc::ty::layout::Layout::*;
if let CEnum { discr, signed, .. } = *self.type_layout(ty)? {
let size = discr.size().bytes() as usize;
let size = discr.size().bytes();
if signed {
PrimValKind::from_int_size(size)
} else {
@ -1450,7 +1450,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
PrimVal::from_ptr(p)
} else {
trace!("reading fat pointer extra of type {}", ty);
let extra = ptr.offset(self.memory.pointer_size() as isize);
let extra = ptr.offset(self.memory.pointer_size());
let extra = match self.tcx.struct_tail(ty).sty {
ty::TyTrait(..) => PrimVal::from_ptr(self.memory.read_ptr(extra)?),
ty::TySlice(..) |
@ -1464,7 +1464,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
ty::TyAdt(..) => {
use rustc::ty::layout::Layout::*;
if let CEnum { discr, signed, .. } = *self.type_layout(ty)? {
let size = discr.size().bytes() as usize;
let size = discr.size().bytes();
if signed {
let n = self.memory.read_int(ptr, size)?;
PrimVal::from_int_with_size(n, size)
@ -1566,8 +1566,8 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
if self.type_size(dst_fty)? == Some(0) {
continue;
}
let src_field_offset = self.get_field_offset(src_ty, i)?.bytes() as isize;
let dst_field_offset = self.get_field_offset(dest_ty, i)?.bytes() as isize;
let src_field_offset = self.get_field_offset(src_ty, i)?.bytes();
let dst_field_offset = self.get_field_offset(dest_ty, i)?.bytes();
let src_f_ptr = src_ptr.offset(src_field_offset);
let dst_f_ptr = dest.offset(dst_field_offset);
if src_fty == dst_fty {
@ -1699,7 +1699,7 @@ impl<'tcx> Lvalue<'tcx> {
pub fn eval_main<'a, 'tcx: 'a>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId,
memory_size: usize,
memory_size: u64,
step_limit: u64,
stack_limit: usize,
) {
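
Note the asymmetry the new signature keeps: memory_size becomes a target-sized u64, while stack_limit stays a host usize because it counts interpreter stack frames rather than bytes of emulated memory. Sketched as a hypothetical grouping (this struct is not in the codebase):

```rust
// Which limits are target-sized after this commit, and which are host-sized.
#[allow(dead_code)]
struct Limits {
    memory_size: u64,   // bytes of emulated target memory
    step_limit: u64,    // MIR steps to execute before giving up
    stack_limit: usize, // host-side count of interpreter stack frames
}
```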


@ -46,7 +46,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
let ptr = arg_vals[0].read_ptr(&self.memory)?;
let offset = self.value_to_primval(arg_vals[1], isize)?
.expect_int("arith_offset second arg not isize");
let new_ptr = ptr.offset(offset as isize);
let new_ptr = ptr.signed_offset(offset);
self.write_primval(dest, PrimVal::from_ptr(new_ptr))?;
}
@ -150,7 +150,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
let dest = arg_vals[1].read_ptr(&self.memory)?;
let count = self.value_to_primval(arg_vals[2], usize)?
.expect_uint("arith_offset second arg not isize");
self.memory.copy(src, dest, count as usize * elem_size, elem_align)?;
self.memory.copy(src, dest, count * elem_size, elem_align)?;
}
"ctpop" |
@ -220,7 +220,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
"forget" => {}
"init" => {
let size = dest_layout.size(&self.tcx.data_layout).bytes() as usize;
let size = dest_layout.size(&self.tcx.data_layout).bytes();
let init = |this: &mut Self, val: Option<Value>| {
match val {
Some(Value::ByRef(ptr)) => {
@ -280,12 +280,13 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
"offset" => {
let pointee_ty = substs.type_at(0);
let pointee_size = self.type_size(pointee_ty)?.expect("cannot offset a pointer to an unsized type") as isize;
// FIXME: assuming here that type size is < i64::max_value()
let pointee_size = self.type_size(pointee_ty)?.expect("cannot offset a pointer to an unsized type") as i64;
let offset = self.value_to_primval(arg_vals[1], isize)?
.expect_int("offset second arg not isize");
let ptr = arg_vals[0].read_ptr(&self.memory)?;
let result_ptr = ptr.offset(offset as isize * pointee_size);
let result_ptr = ptr.signed_offset(offset * pointee_size);
self.write_primval(dest, PrimVal::from_ptr(result_ptr))?;
}
@ -378,7 +379,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
}
"uninit" => {
let size = dest_layout.size(&self.tcx.data_layout).bytes() as usize;
let size = dest_layout.size(&self.tcx.data_layout).bytes();
let uninit = |this: &mut Self, val: Option<Value>| {
match val {
Some(Value::ByRef(ptr)) => {
@ -482,8 +483,8 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
ty::TyTrait(..) => {
let (_, vtable) = value.expect_ptr_vtable_pair(&self.memory)?;
// the second entry in the vtable is the dynamic size of the object.
let size = self.memory.read_usize(vtable.offset(pointer_size as isize))?;
let align = self.memory.read_usize(vtable.offset(pointer_size as isize * 2))?;
let size = self.memory.read_usize(vtable.offset(pointer_size))?;
let align = self.memory.read_usize(vtable.offset(pointer_size * 2))?;
Ok((size, align))
}


@ -254,23 +254,23 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
let discr_val = match *adt_layout {
General { discr, .. } | CEnum { discr, signed: false, .. } => {
let discr_size = discr.size().bytes();
self.memory.read_uint(adt_ptr, discr_size as usize)?
self.memory.read_uint(adt_ptr, discr_size)?
}
CEnum { discr, signed: true, .. } => {
let discr_size = discr.size().bytes();
self.memory.read_int(adt_ptr, discr_size as usize)? as u64
self.memory.read_int(adt_ptr, discr_size)? as u64
}
RawNullablePointer { nndiscr, value } => {
let discr_size = value.size(&self.tcx.data_layout).bytes() as usize;
let discr_size = value.size(&self.tcx.data_layout).bytes();
trace!("rawnullablepointer with size {}", discr_size);
self.read_nonnull_discriminant_value(adt_ptr, nndiscr, discr_size)?
}
StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => {
let (offset, ty) = self.nonnull_offset_and_ty(adt_ty, nndiscr, discrfield)?;
let nonnull = adt_ptr.offset(offset.bytes() as isize);
let nonnull = adt_ptr.offset(offset.bytes());
trace!("struct wrapped nullable pointer type: {}", ty);
// only the pointer part of a fat pointer is used for this space optimization
let discr_size = self.type_size(ty)?.expect("bad StructWrappedNullablePointer discrfield");
@ -285,7 +285,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
Ok(discr_val)
}
fn read_nonnull_discriminant_value(&self, ptr: Pointer, nndiscr: u64, discr_size: usize) -> EvalResult<'tcx, u64> {
fn read_nonnull_discriminant_value(&self, ptr: Pointer, nndiscr: u64, discr_size: u64) -> EvalResult<'tcx, u64> {
let not_null = match self.memory.read_uint(ptr, discr_size) {
Ok(0) => false,
Ok(_) | Err(EvalError::ReadPointerAsBytes) => true,
@ -300,7 +300,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
def_id: DefId,
args: &[mir::Operand<'tcx>],
dest: Lvalue<'tcx>,
dest_size: usize,
dest_size: u64,
) -> EvalResult<'tcx, ()> {
let name = self.tcx.item_name(def_id);
let attrs = self.tcx.get_attrs(def_id);
@ -327,7 +327,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
.expect_uint("__rust_allocate first arg not usize");
let align = self.value_to_primval(args[1], usize)?
.expect_uint("__rust_allocate second arg not usize");
let ptr = self.memory.allocate(size as usize, align as usize)?;
let ptr = self.memory.allocate(size, align)?;
self.write_primval(dest, PrimVal::from_ptr(ptr))?;
}
@ -345,14 +345,14 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
let ptr = args[0].read_ptr(&self.memory)?;
let size = self.value_to_primval(args[2], usize)?.expect_uint("__rust_reallocate third arg not usize");
let align = self.value_to_primval(args[3], usize)?.expect_uint("__rust_reallocate fourth arg not usize");
let new_ptr = self.memory.reallocate(ptr, size as usize, align as usize)?;
let new_ptr = self.memory.reallocate(ptr, size, align)?;
self.write_primval(dest, PrimVal::from_ptr(new_ptr))?;
}
"memcmp" => {
let left = args[0].read_ptr(&self.memory)?;
let right = args[1].read_ptr(&self.memory)?;
let n = self.value_to_primval(args[2], usize)?.expect_uint("__rust_reallocate first arg not usize") as usize;
let n = self.value_to_primval(args[2], usize)?.expect_uint("__rust_reallocate first arg not usize");
let result = {
let left_bytes = self.memory.read_bytes(left, n)?;
@ -414,7 +414,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
_ => bug!("rust-call ABI tuple argument wasn't Value::ByRef"),
};
for (offset, ty) in offsets.zip(fields) {
let arg = Value::ByRef(last_ptr.offset(offset as isize));
let arg = Value::ByRef(last_ptr.offset(offset));
args.push((arg, ty));
}
}
@ -496,13 +496,13 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
}
traits::VtableObject(ref data) => {
let idx = self.tcx.get_vtable_index_of_object_method(data, def_id);
let idx = self.tcx.get_vtable_index_of_object_method(data, def_id) as u64;
if let Some(&mut(ref mut first_arg, ref mut first_ty)) = args.get_mut(0) {
let (self_ptr, vtable) = first_arg.expect_ptr_vtable_pair(&self.memory)?;
*first_arg = Value::ByVal(PrimVal::from_ptr(self_ptr));
let idx = idx + 3;
let offset = idx * self.memory.pointer_size();
let fn_ptr = self.memory.read_ptr(vtable.offset(offset as isize))?;
let fn_ptr = self.memory.read_ptr(vtable.offset(offset))?;
let (def_id, substs, _abi, sig) = self.memory.get_fn(fn_ptr.alloc_id)?;
*first_ty = sig.inputs[0];
Ok((def_id, substs))
@ -600,6 +600,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
Layout::StructWrappedNullablePointer { nndiscr, ref nonnull, .. } => {
let discr = self.read_discriminant_value(adt_ptr, ty)?;
if discr == nndiscr {
assert_eq!(discr as usize as u64, discr);
adt_def.variants[discr as usize].fields.iter().zip(&nonnull.offsets)
} else {
// FIXME: the zst variant might contain zst types that impl Drop
@ -609,6 +610,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
Layout::RawNullablePointer { nndiscr, .. } => {
let discr = self.read_discriminant_value(adt_ptr, ty)?;
if discr == nndiscr {
assert_eq!(discr as usize as u64, discr);
assert_eq!(adt_def.variants[discr as usize].fields.len(), 1);
let field_ty = &adt_def.variants[discr as usize].fields[0];
let field_ty = monomorphize_field_ty(self.tcx, field_ty, substs);
@ -656,10 +658,10 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
},
ty::TySlice(elem_ty) => {
let (ptr, len) = match lval {
Lvalue::Ptr { ptr, extra: LvalueExtra::Length(len) } => (ptr, len as isize),
Lvalue::Ptr { ptr, extra: LvalueExtra::Length(len) } => (ptr, len),
_ => bug!("expected an lvalue with a length"),
};
let size = self.type_size(elem_ty)?.expect("slice element must be sized") as isize;
let size = self.type_size(elem_ty)?.expect("slice element must be sized");
// FIXME: this creates a lot of stack frames if the element type has
// a drop impl
for i in 0..len {
@ -672,11 +674,11 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
Lvalue::Ptr { ptr, extra } => (ptr, extra),
_ => bug!("expected an lvalue with optional extra data"),
};
let size = self.type_size(elem_ty)?.expect("array element cannot be unsized") as isize;
let size = self.type_size(elem_ty)?.expect("array element cannot be unsized");
// FIXME: this creates a lot of stack frames if the element type has
// a drop impl
for i in 0..len {
self.drop(Lvalue::Ptr { ptr: ptr.offset(i as isize * size), extra: extra }, elem_ty, drop)?;
for i in 0..(len as u64) {
self.drop(Lvalue::Ptr { ptr: ptr.offset(i * size), extra: extra }, elem_ty, drop)?;
}
},
// FIXME: what about TyClosure and TyAnon?
@ -699,7 +701,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
let (adt_ptr, extra) = self.force_allocation(lval)?.to_ptr_and_extra();
// manual iteration, because we need to be careful about the last field if it is unsized
while let Some((field_ty, offset)) = fields.next() {
let ptr = adt_ptr.offset(offset.bytes() as isize);
let ptr = adt_ptr.offset(offset.bytes());
if self.type_is_sized(field_ty) {
self.drop(Lvalue::from_ptr(ptr), field_ty, drop)?;
} else {


@ -34,7 +34,7 @@ impl<'a, 'tcx: 'a> Value {
match *self {
ByRef(ref_ptr) => {
let ptr = mem.read_ptr(ref_ptr)?;
let vtable = mem.read_ptr(ref_ptr.offset(mem.pointer_size() as isize))?;
let vtable = mem.read_ptr(ref_ptr.offset(mem.pointer_size()))?;
Ok((ptr, vtable))
}
@ -49,7 +49,7 @@ impl<'a, 'tcx: 'a> Value {
match *self {
ByRef(ref_ptr) => {
let ptr = mem.read_ptr(ref_ptr)?;
let len = mem.read_usize(ref_ptr.offset(mem.pointer_size() as isize))?;
let len = mem.read_usize(ref_ptr.offset(mem.pointer_size()))?;
Ok((ptr, len))
},
ByValPair(ptr, val) => {


@ -84,7 +84,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
let align = self.type_align(trait_ref.self_ty())?;
let ptr_size = self.memory.pointer_size();
let vtable = self.memory.allocate(ptr_size * (3 + methods.len()), ptr_size)?;
let vtable = self.memory.allocate(ptr_size * (3 + methods.len() as u64), ptr_size)?;
// in case there is no drop function to be called, this still needs to be initialized
self.memory.write_usize(vtable, 0)?;
@ -99,12 +99,12 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
}
}
self.memory.write_usize(vtable.offset(ptr_size as isize), size as u64)?;
self.memory.write_usize(vtable.offset((ptr_size * 2) as isize), align as u64)?;
self.memory.write_usize(vtable.offset(ptr_size), size)?;
self.memory.write_usize(vtable.offset(ptr_size * 2), align)?;
for (i, method) in methods.into_iter().enumerate() {
if let Some(method) = method {
self.memory.write_ptr(vtable.offset(ptr_size as isize * (3 + i as isize)), method)?;
self.memory.write_ptr(vtable.offset(ptr_size * (3 + i as u64)), method)?;
}
}
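
The writes above, together with the `idx + 3` in the dispatch code earlier, imply the following vtable layout (slot numbering read off this file, not any stable ABI): slot 0 holds the drop glue (written as 0 when there is none), slot 1 the object size, slot 2 its alignment, and slots 3 onward one function pointer per trait method. A small sketch of the offset arithmetic:

```rust
// Byte offset of a vtable slot, all in u64 now.
fn vtable_slot(ptr_size: u64, slot: u64) -> u64 {
    ptr_size * slot
}

fn main() {
    assert_eq!(vtable_slot(8, 2), 16); // alignment slot on a 64-bit target
    assert_eq!(vtable_slot(8, 3), 24); // first trait method
}
```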


@ -33,11 +33,11 @@ pub struct Allocation {
pub bytes: Vec<u8>,
/// Maps from byte addresses to allocations.
/// Only the first byte of a pointer is inserted into the map.
pub relocations: BTreeMap<usize, AllocId>,
pub relocations: BTreeMap<u64, AllocId>,
/// Denotes undefined memory. Reading from undefined memory is forbidden in miri
pub undef_mask: UndefMask,
/// The alignment of the allocation to detect unaligned reads.
pub align: usize,
pub align: u64,
/// Whether the allocation may be modified.
/// Use the `freeze` method of `Memory` to ensure that an error occurs, if the memory of this
/// allocation is modified in the future.
@ -47,24 +47,35 @@ pub struct Allocation {
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct Pointer {
pub alloc_id: AllocId,
pub offset: usize,
pub offset: u64,
}
impl Pointer {
pub fn new(alloc_id: AllocId, offset: usize) -> Self {
pub fn new(alloc_id: AllocId, offset: u64) -> Self {
Pointer { alloc_id: alloc_id, offset: offset }
}
pub fn offset(self, i: isize) -> Self {
let new_offset = (self.offset as isize + i) as usize;
Pointer::new(self.alloc_id, new_offset)
pub fn signed_offset(self, i: i64) -> Self {
// FIXME: is it possible to over/underflow here?
if i < 0 {
// trickery to ensure that i64::min_value() works fine
// this formula only works for true negative values, it panics for zero!
let n = u64::max_value() - (i as u64) + 1;
Pointer::new(self.alloc_id, self.offset - n)
} else {
self.offset(i as u64)
}
}
pub fn offset(self, i: u64) -> Self {
Pointer::new(self.alloc_id, self.offset + i)
}
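
The trick in `signed_offset` deserves a closer look: for negative `i`, `i as u64` is the two's-complement bit pattern `2^64 + i`, so `u64::max_value() - (i as u64) + 1` works out to exactly `-i`, and unlike a plain negation it also handles `i64::min_value()`. A standalone check, independent of miri's types:

```rust
// For i < 0: u64::MAX - (i as u64) + 1 = (2^64 - 1) - (2^64 + i) + 1 = -i.
fn magnitude_of_negative(i: i64) -> u64 {
    assert!(i < 0);
    u64::max_value() - (i as u64) + 1
}

fn main() {
    assert_eq!(magnitude_of_negative(-1), 1);
    assert_eq!(magnitude_of_negative(-42), 42);
    assert_eq!(magnitude_of_negative(i64::min_value()), 1u64 << 63);
}
```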
pub fn points_to_zst(&self) -> bool {
self.alloc_id == ZST_ALLOC_ID
}
pub fn to_int<'tcx>(&self) -> EvalResult<'tcx, usize> {
pub fn to_int<'tcx>(&self) -> EvalResult<'tcx, u64> {
match self.alloc_id {
NEVER_ALLOC_ID |
ZST_ALLOC_ID => Ok(self.offset),
@ -72,9 +83,7 @@ impl Pointer {
}
}
// FIXME(solson): Integer pointers should use u64, not usize. Target pointers can be larger
// than host usize.
pub fn from_int(i: usize) -> Self {
pub fn from_int(i: u64) -> Self {
Pointer::new(NEVER_ALLOC_ID, i)
}
@ -103,9 +112,9 @@ pub struct Memory<'a, 'tcx> {
/// Actual memory allocations (arbitrary bytes, may contain pointers into other allocations)
alloc_map: HashMap<AllocId, Allocation>,
/// Number of virtual bytes allocated
memory_usage: usize,
memory_usage: u64,
/// Maximum number of virtual bytes that may be allocated
memory_size: usize,
memory_size: u64,
/// Function "allocations". They exist solely so pointers have something to point to, and
/// we can figure out what they point to.
functions: HashMap<AllocId, FunctionDefinition<'tcx>>,
@ -119,7 +128,7 @@ const ZST_ALLOC_ID: AllocId = AllocId(0);
const NEVER_ALLOC_ID: AllocId = AllocId(1);
impl<'a, 'tcx> Memory<'a, 'tcx> {
pub fn new(layout: &'a TargetDataLayout, max_memory: usize) -> Self {
pub fn new(layout: &'a TargetDataLayout, max_memory: u64) -> Self {
Memory {
alloc_map: HashMap::new(),
functions: HashMap::new(),
@ -175,7 +184,7 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
Pointer::new(id, 0)
}
pub fn allocate(&mut self, size: usize, align: usize) -> EvalResult<'tcx, Pointer> {
pub fn allocate(&mut self, size: u64, align: u64) -> EvalResult<'tcx, Pointer> {
if size == 0 {
return Ok(Pointer::zst_ptr());
}
@ -189,8 +198,9 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
});
}
self.memory_usage += size;
assert_eq!(size as usize as u64, size);
let alloc = Allocation {
bytes: vec![0; size],
bytes: vec![0; size as usize],
relocations: BTreeMap::new(),
undef_mask: UndefMask::new(size),
align: align,
@ -204,7 +214,7 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
// TODO(solson): Track which allocations were returned from __rust_allocate and report an error
// when reallocating/deallocating any others.
pub fn reallocate(&mut self, ptr: Pointer, new_size: usize, align: usize) -> EvalResult<'tcx, Pointer> {
pub fn reallocate(&mut self, ptr: Pointer, new_size: u64, align: u64) -> EvalResult<'tcx, Pointer> {
// TODO(solson): Report error about non-__rust_allocate'd pointer.
if ptr.offset != 0 {
return Err(EvalError::Unimplemented(format!("bad pointer offset: {}", ptr.offset)));
@ -216,19 +226,21 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
return Err(EvalError::ReallocatedFrozenMemory);
}
let size = self.get(ptr.alloc_id)?.bytes.len();
let size = self.get(ptr.alloc_id)?.bytes.len() as u64;
if new_size > size {
let amount = new_size - size;
self.memory_usage += amount;
let alloc = self.get_mut(ptr.alloc_id)?;
alloc.bytes.extend(iter::repeat(0).take(amount));
assert_eq!(amount as usize as u64, amount);
alloc.bytes.extend(iter::repeat(0).take(amount as usize));
alloc.undef_mask.grow(amount, false);
} else if size > new_size {
self.memory_usage -= size - new_size;
self.clear_relocations(ptr.offset(new_size as isize), size - new_size)?;
self.clear_relocations(ptr.offset(new_size), size - new_size)?;
let alloc = self.get_mut(ptr.alloc_id)?;
alloc.bytes.truncate(new_size);
// `as usize` is fine here, since it is smaller than `size`, which came from a usize
alloc.bytes.truncate(new_size as usize);
alloc.bytes.shrink_to_fit();
alloc.undef_mask.truncate(new_size);
}
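
The usage bookkeeping in `reallocate` reduces to adding or subtracting the size delta; a minimal standalone form (shape assumed for illustration):

```rust
// Growing adds the delta to the virtual-memory counter, shrinking removes it.
fn update_usage(usage: u64, old_size: u64, new_size: u64) -> u64 {
    if new_size > old_size {
        usage + (new_size - old_size)
    } else {
        usage - (old_size - new_size)
    }
}

fn main() {
    assert_eq!(update_usage(100, 10, 30), 120); // grew by 20 bytes
    assert_eq!(update_usage(100, 30, 10), 80);  // shrank by 20 bytes
}
```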
@ -250,7 +262,7 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
}
if let Some(alloc) = self.alloc_map.remove(&ptr.alloc_id) {
self.memory_usage -= alloc.bytes.len();
self.memory_usage -= alloc.bytes.len() as u64;
} else {
debug!("deallocated a pointer twice: {}", ptr.alloc_id);
// TODO(solson): Report error about erroneous free. This is blocked on properly tracking
@ -262,15 +274,15 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
Ok(())
}
pub fn pointer_size(&self) -> usize {
self.layout.pointer_size.bytes() as usize
pub fn pointer_size(&self) -> u64 {
self.layout.pointer_size.bytes()
}
pub fn endianess(&self) -> layout::Endian {
self.layout.endian
}
pub fn check_align(&self, ptr: Pointer, align: usize) -> EvalResult<'tcx, ()> {
pub fn check_align(&self, ptr: Pointer, align: u64) -> EvalResult<'tcx, ()> {
let alloc = self.get(ptr.alloc_id)?;
if alloc.align < align {
return Err(EvalError::AlignmentCheckFailed {
@ -358,7 +370,7 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
(Some(_), Some(_)) => bug!("miri invariant broken: an allocation id exists that points to both a function and a memory location"),
};
for i in 0..alloc.bytes.len() {
for i in 0..(alloc.bytes.len() as u64) {
if let Some(&target_id) = alloc.relocations.get(&i) {
if !allocs_seen.contains(&target_id) {
allocs_to_print.push_back(target_id);
@ -366,7 +378,8 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
relocations.push((i, target_id));
}
if alloc.undef_mask.is_range_defined(i, i + 1) {
write!(msg, "{:02x} ", alloc.bytes[i]).unwrap();
// this `as usize` is fine, since `i` came from a `usize`
write!(msg, "{:02x} ", alloc.bytes[i as usize]).unwrap();
} else {
msg.push_str("__ ");
}
@ -381,13 +394,15 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
let mut pos = 0;
let relocation_width = (self.pointer_size() - 1) * 3;
for (i, target_id) in relocations {
write!(msg, "{:1$}", "", (i - pos) * 3).unwrap();
// this `as usize` is fine, since we can't print more chars than `usize::MAX`
write!(msg, "{:1$}", "", ((i - pos) * 3) as usize).unwrap();
let target = match target_id {
ZST_ALLOC_ID => String::from("zst"),
NEVER_ALLOC_ID => String::from("int ptr"),
_ => format!("({})", target_id),
};
write!(msg, "└{0:─^1$}┘ ", target, relocation_width).unwrap();
// this `as usize` is fine, since we can't print more chars than `usize::MAX`
write!(msg, "└{0:─^1$}┘ ", target, relocation_width as usize).unwrap();
pos = i + self.pointer_size();
}
trace!("{}", msg);
@ -398,37 +413,43 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
/// Byte accessors
impl<'a, 'tcx> Memory<'a, 'tcx> {
fn get_bytes_unchecked(&self, ptr: Pointer, size: usize) -> EvalResult<'tcx, &[u8]> {
fn get_bytes_unchecked(&self, ptr: Pointer, size: u64) -> EvalResult<'tcx, &[u8]> {
if size == 0 {
return Ok(&[]);
}
let alloc = self.get(ptr.alloc_id)?;
if ptr.offset + size > alloc.bytes.len() {
if ptr.offset + size > alloc.bytes.len() as u64 {
return Err(EvalError::PointerOutOfBounds {
ptr: ptr,
size: size,
allocation_size: alloc.bytes.len(),
allocation_size: alloc.bytes.len() as u64,
});
}
Ok(&alloc.bytes[ptr.offset..ptr.offset + size])
assert_eq!(ptr.offset as usize as u64, ptr.offset);
assert_eq!(size as usize as u64, size);
let offset = ptr.offset as usize;
Ok(&alloc.bytes[offset..offset + size as usize])
}
fn get_bytes_unchecked_mut(&mut self, ptr: Pointer, size: usize) -> EvalResult<'tcx, &mut [u8]> {
fn get_bytes_unchecked_mut(&mut self, ptr: Pointer, size: u64) -> EvalResult<'tcx, &mut [u8]> {
if size == 0 {
return Ok(&mut []);
}
let alloc = self.get_mut(ptr.alloc_id)?;
if ptr.offset + size > alloc.bytes.len() {
if ptr.offset + size > alloc.bytes.len() as u64 {
return Err(EvalError::PointerOutOfBounds {
ptr: ptr,
size: size,
allocation_size: alloc.bytes.len(),
allocation_size: alloc.bytes.len() as u64,
});
}
Ok(&mut alloc.bytes[ptr.offset..ptr.offset + size])
assert_eq!(ptr.offset as usize as u64, ptr.offset);
assert_eq!(size as usize as u64, size);
let offset = ptr.offset as usize;
Ok(&mut alloc.bytes[offset..offset + size as usize])
}
fn get_bytes(&self, ptr: Pointer, size: usize, align: usize) -> EvalResult<'tcx, &[u8]> {
fn get_bytes(&self, ptr: Pointer, size: u64, align: u64) -> EvalResult<'tcx, &[u8]> {
if size == 0 {
return Ok(&[]);
}
@ -440,7 +461,7 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
self.get_bytes_unchecked(ptr, size)
}
fn get_bytes_mut(&mut self, ptr: Pointer, size: usize, align: usize) -> EvalResult<'tcx, &mut [u8]> {
fn get_bytes_mut(&mut self, ptr: Pointer, size: u64, align: u64) -> EvalResult<'tcx, &mut [u8]> {
if size == 0 {
return Ok(&mut []);
}
@ -476,7 +497,7 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
Ok(())
}
pub fn copy(&mut self, src: Pointer, dest: Pointer, size: usize, align: usize) -> EvalResult<'tcx, ()> {
pub fn copy(&mut self, src: Pointer, dest: Pointer, size: u64, align: u64) -> EvalResult<'tcx, ()> {
if size == 0 {
return Ok(());
}
@ -489,10 +510,11 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
// behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
// `dest` could possibly overlap.
unsafe {
assert_eq!(size as usize as u64, size);
if src.alloc_id == dest.alloc_id {
ptr::copy(src_bytes, dest_bytes, size);
ptr::copy(src_bytes, dest_bytes, size as usize);
} else {
ptr::copy_nonoverlapping(src_bytes, dest_bytes, size);
ptr::copy_nonoverlapping(src_bytes, dest_bytes, size as usize);
}
}
@ -502,17 +524,17 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
Ok(())
}
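
The branch in `copy` picks between memmove and memcpy semantics: two byte ranges can only overlap when they live in the same allocation. A standalone sketch of that rule (the u64 length has already been asserted to fit in usize at this point):

```rust
use std::ptr;

// Same allocation: ranges may overlap, so use the memmove-like `copy`.
// Distinct allocations: overlap is impossible, `copy_nonoverlapping` is fine.
unsafe fn copy_bytes(src: *const u8, dst: *mut u8, len: usize, same_alloc: bool) {
    if same_alloc {
        ptr::copy(src, dst, len);
    } else {
        ptr::copy_nonoverlapping(src, dst, len);
    }
}
```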
pub fn read_bytes(&self, ptr: Pointer, size: usize) -> EvalResult<'tcx, &[u8]> {
pub fn read_bytes(&self, ptr: Pointer, size: u64) -> EvalResult<'tcx, &[u8]> {
self.get_bytes(ptr, size, 1)
}
pub fn write_bytes(&mut self, ptr: Pointer, src: &[u8]) -> EvalResult<'tcx, ()> {
let bytes = self.get_bytes_mut(ptr, src.len(), 1)?;
let bytes = self.get_bytes_mut(ptr, src.len() as u64, 1)?;
bytes.clone_from_slice(src);
Ok(())
}
pub fn write_repeat(&mut self, ptr: Pointer, val: u8, count: usize) -> EvalResult<'tcx, ()> {
pub fn write_repeat(&mut self, ptr: Pointer, val: u8, count: u64) -> EvalResult<'tcx, ()> {
let bytes = self.get_bytes_mut(ptr, count, 1)?;
for b in bytes { *b = val; }
Ok(())
@ -523,7 +545,7 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
self.check_defined(ptr, size)?;
let endianess = self.endianess();
let bytes = self.get_bytes_unchecked(ptr, size)?;
let offset = read_target_uint(endianess, bytes).unwrap() as usize;
let offset = read_target_uint(endianess, bytes).unwrap();
let alloc = self.get(ptr.alloc_id)?;
match alloc.relocations.get(&ptr.offset) {
Some(&alloc_id) => Ok(Pointer::new(alloc_id, offset)),
@ -539,7 +561,7 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
pub fn write_primval(&mut self, dest: Pointer, val: PrimVal) -> EvalResult<'tcx, ()> {
if let Some(alloc_id) = val.relocation {
return self.write_ptr(dest, Pointer::new(alloc_id, val.bits as usize));
return self.write_ptr(dest, Pointer::new(alloc_id, val.bits));
}
use primval::PrimValKind::*;
@ -556,7 +578,7 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
}
pub fn read_bool(&self, ptr: Pointer) -> EvalResult<'tcx, bool> {
let bytes = self.get_bytes(ptr, 1, self.layout.i1_align.abi() as usize)?;
let bytes = self.get_bytes(ptr, 1, self.layout.i1_align.abi())?;
match bytes[0] {
0 => Ok(false),
1 => Ok(true),
@ -565,27 +587,27 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
}
pub fn write_bool(&mut self, ptr: Pointer, b: bool) -> EvalResult<'tcx, ()> {
let align = self.layout.i1_align.abi() as usize;
let align = self.layout.i1_align.abi();
self.get_bytes_mut(ptr, 1, align)
.map(|bytes| bytes[0] = b as u8)
}
fn int_align(&self, size: usize) -> EvalResult<'tcx, usize> {
fn int_align(&self, size: u64) -> EvalResult<'tcx, u64> {
match size {
1 => Ok(self.layout.i8_align.abi() as usize),
2 => Ok(self.layout.i16_align.abi() as usize),
4 => Ok(self.layout.i32_align.abi() as usize),
8 => Ok(self.layout.i64_align.abi() as usize),
1 => Ok(self.layout.i8_align.abi()),
2 => Ok(self.layout.i16_align.abi()),
4 => Ok(self.layout.i32_align.abi()),
8 => Ok(self.layout.i64_align.abi()),
_ => bug!("bad integer size: {}", size),
}
}
pub fn read_int(&self, ptr: Pointer, size: usize) -> EvalResult<'tcx, i64> {
pub fn read_int(&self, ptr: Pointer, size: u64) -> EvalResult<'tcx, i64> {
let align = self.int_align(size)?;
self.get_bytes(ptr, size, align).map(|b| read_target_int(self.endianess(), b).unwrap())
}
pub fn write_int(&mut self, ptr: Pointer, n: i64, size: usize) -> EvalResult<'tcx, ()> {
pub fn write_int(&mut self, ptr: Pointer, n: i64, size: u64) -> EvalResult<'tcx, ()> {
let align = self.int_align(size)?;
let endianess = self.endianess();
let b = self.get_bytes_mut(ptr, size, align)?;
@ -593,12 +615,12 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
Ok(())
}
pub fn read_uint(&self, ptr: Pointer, size: usize) -> EvalResult<'tcx, u64> {
pub fn read_uint(&self, ptr: Pointer, size: u64) -> EvalResult<'tcx, u64> {
let align = self.int_align(size)?;
self.get_bytes(ptr, size, align).map(|b| read_target_uint(self.endianess(), b).unwrap())
}
pub fn write_uint(&mut self, ptr: Pointer, n: u64, size: usize) -> EvalResult<'tcx, ()> {
pub fn write_uint(&mut self, ptr: Pointer, n: u64, size: u64) -> EvalResult<'tcx, ()> {
let align = self.int_align(size)?;
let endianess = self.endianess();
let b = self.get_bytes_mut(ptr, size, align)?;
@ -626,7 +648,7 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
pub fn write_f32(&mut self, ptr: Pointer, f: f32) -> EvalResult<'tcx, ()> {
let endianess = self.endianess();
let align = self.layout.f32_align.abi() as usize;
let align = self.layout.f32_align.abi();
let b = self.get_bytes_mut(ptr, 4, align)?;
write_target_f32(endianess, b, f).unwrap();
Ok(())
@ -634,34 +656,34 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
pub fn write_f64(&mut self, ptr: Pointer, f: f64) -> EvalResult<'tcx, ()> {
let endianess = self.endianess();
let align = self.layout.f64_align.abi() as usize;
let align = self.layout.f64_align.abi();
let b = self.get_bytes_mut(ptr, 8, align)?;
write_target_f64(endianess, b, f).unwrap();
Ok(())
}
pub fn read_f32(&self, ptr: Pointer) -> EvalResult<'tcx, f32> {
self.get_bytes(ptr, 4, self.layout.f32_align.abi() as usize)
self.get_bytes(ptr, 4, self.layout.f32_align.abi())
.map(|b| read_target_f32(self.endianess(), b).unwrap())
}
pub fn read_f64(&self, ptr: Pointer) -> EvalResult<'tcx, f64> {
self.get_bytes(ptr, 8, self.layout.f64_align.abi() as usize)
self.get_bytes(ptr, 8, self.layout.f64_align.abi())
.map(|b| read_target_f64(self.endianess(), b).unwrap())
}
}
/// Relocations
impl<'a, 'tcx> Memory<'a, 'tcx> {
fn relocations(&self, ptr: Pointer, size: usize)
-> EvalResult<'tcx, btree_map::Range<usize, AllocId>>
fn relocations(&self, ptr: Pointer, size: u64)
-> EvalResult<'tcx, btree_map::Range<u64, AllocId>>
{
let start = ptr.offset.saturating_sub(self.pointer_size() - 1);
let end = ptr.offset + size;
Ok(self.get(ptr.alloc_id)?.relocations.range(Included(&start), Excluded(&end)))
}
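
Why the range starts at `offset - (pointer_size - 1)`: relocations are keyed by the first byte of a stored pointer, so a pointer beginning up to pointer_size - 1 bytes before the queried offset still overlaps it, and saturating_sub clamps the window at the start of the allocation. A standalone sketch:

```rust
// Half-open key range [start, end) of relocations that could overlap
// `size` bytes at `offset`.
fn relocation_window(offset: u64, size: u64, ptr_size: u64) -> (u64, u64) {
    (offset.saturating_sub(ptr_size - 1), offset + size)
}

fn main() {
    // With 8-byte pointers, a read of bytes 10..14 can hit a pointer
    // whose first byte lies anywhere in 3..14.
    assert_eq!(relocation_window(10, 4, 8), (3, 14));
}
```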
fn clear_relocations(&mut self, ptr: Pointer, size: usize) -> EvalResult<'tcx, ()> {
fn clear_relocations(&mut self, ptr: Pointer, size: u64) -> EvalResult<'tcx, ()> {
// Find all relocations overlapping the given range.
let keys: Vec<_> = self.relocations(ptr, size)?.map(|(&k, _)| k).collect();
if keys.is_empty() { return Ok(()); }
@ -685,16 +707,16 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
Ok(())
}
fn check_relocation_edges(&self, ptr: Pointer, size: usize) -> EvalResult<'tcx, ()> {
fn check_relocation_edges(&self, ptr: Pointer, size: u64) -> EvalResult<'tcx, ()> {
let overlapping_start = self.relocations(ptr, 0)?.count();
let overlapping_end = self.relocations(ptr.offset(size as isize), 0)?.count();
let overlapping_end = self.relocations(ptr.offset(size), 0)?.count();
if overlapping_start + overlapping_end != 0 {
return Err(EvalError::ReadPointerAsBytes);
}
Ok(())
}
fn copy_relocations(&mut self, src: Pointer, dest: Pointer, size: usize) -> EvalResult<'tcx, ()> {
fn copy_relocations(&mut self, src: Pointer, dest: Pointer, size: u64) -> EvalResult<'tcx, ()> {
let relocations: Vec<_> = self.relocations(src, size)?
.map(|(&offset, &alloc_id)| {
// Update relocation offsets for the new positions in the destination allocation.
@ -709,20 +731,21 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
/// Undefined bytes
impl<'a, 'tcx> Memory<'a, 'tcx> {
// FIXME(solson): This is a very naive, slow version.
fn copy_undef_mask(&mut self, src: Pointer, dest: Pointer, size: usize) -> EvalResult<'tcx, ()> {
fn copy_undef_mask(&mut self, src: Pointer, dest: Pointer, size: u64) -> EvalResult<'tcx, ()> {
// The bits have to be saved locally before writing to dest in case src and dest overlap.
let mut v = Vec::with_capacity(size);
assert_eq!(size as usize as u64, size);
let mut v = Vec::with_capacity(size as usize);
for i in 0..size {
let defined = self.get(src.alloc_id)?.undef_mask.get(src.offset + i);
v.push(defined);
}
for (i, defined) in v.into_iter().enumerate() {
self.get_mut(dest.alloc_id)?.undef_mask.set(dest.offset + i, defined);
self.get_mut(dest.alloc_id)?.undef_mask.set(dest.offset + i as u64, defined);
}
Ok(())
}
fn check_defined(&self, ptr: Pointer, size: usize) -> EvalResult<'tcx, ()> {
fn check_defined(&self, ptr: Pointer, size: u64) -> EvalResult<'tcx, ()> {
let alloc = self.get(ptr.alloc_id)?;
if !alloc.undef_mask.is_range_defined(ptr.offset, ptr.offset + size) {
return Err(EvalError::ReadUndefBytes);
@ -730,7 +753,7 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
Ok(())
}
pub fn mark_definedness(&mut self, ptr: Pointer, size: usize, new_state: bool)
pub fn mark_definedness(&mut self, ptr: Pointer, size: u64, new_state: bool)
-> EvalResult<'tcx, ()>
{
if size == 0 {
@ -809,16 +832,16 @@ fn read_target_f64(endianess: layout::Endian, mut source: &[u8]) -> Result<f64,
////////////////////////////////////////////////////////////////////////////////
type Block = u64;
const BLOCK_SIZE: usize = 64;
const BLOCK_SIZE: u64 = 64;
#[derive(Clone, Debug)]
pub struct UndefMask {
blocks: Vec<Block>,
len: usize,
len: u64,
}
impl UndefMask {
fn new(size: usize) -> Self {
fn new(size: u64) -> Self {
let mut m = UndefMask {
blocks: vec![],
len: 0,
@ -828,7 +851,7 @@ impl UndefMask {
}
/// Check whether the range `start..end` (end-exclusive) is entirely defined.
pub fn is_range_defined(&self, start: usize, end: usize) -> bool {
pub fn is_range_defined(&self, start: u64, end: u64) -> bool {
if end > self.len { return false; }
for i in start..end {
if !self.get(i) { return false; }
@ -836,22 +859,22 @@ impl UndefMask {
true
}
fn set_range(&mut self, start: usize, end: usize, new_state: bool) {
fn set_range(&mut self, start: u64, end: u64, new_state: bool) {
let len = self.len;
if end > len { self.grow(end - len, new_state); }
self.set_range_inbounds(start, end, new_state);
}
fn set_range_inbounds(&mut self, start: usize, end: usize, new_state: bool) {
fn set_range_inbounds(&mut self, start: u64, end: u64, new_state: bool) {
for i in start..end { self.set(i, new_state); }
}
fn get(&self, i: usize) -> bool {
fn get(&self, i: u64) -> bool {
let (block, bit) = bit_index(i);
(self.blocks[block] & 1 << bit) != 0
}
fn set(&mut self, i: usize, new_state: bool) {
fn set(&mut self, i: u64, new_state: bool) {
let (block, bit) = bit_index(i);
if new_state {
self.blocks[block] |= 1 << bit;
@ -860,24 +883,31 @@ impl UndefMask {
}
}
fn grow(&mut self, amount: usize, new_state: bool) {
let unused_trailing_bits = self.blocks.len() * BLOCK_SIZE - self.len;
fn grow(&mut self, amount: u64, new_state: bool) {
let unused_trailing_bits = self.blocks.len() as u64 * BLOCK_SIZE - self.len;
if amount > unused_trailing_bits {
let additional_blocks = amount / BLOCK_SIZE + 1;
self.blocks.extend(iter::repeat(0).take(additional_blocks));
assert_eq!(additional_blocks as usize as u64, additional_blocks);
self.blocks.extend(iter::repeat(0).take(additional_blocks as usize));
}
let start = self.len;
self.len += amount;
self.set_range_inbounds(start, start + amount, new_state);
}
fn truncate(&mut self, length: usize) {
fn truncate(&mut self, length: u64) {
self.len = length;
self.blocks.truncate(self.len / BLOCK_SIZE + 1);
let truncate = self.len / BLOCK_SIZE + 1;
assert_eq!(truncate as usize as u64, truncate);
self.blocks.truncate(truncate as usize);
self.blocks.shrink_to_fit();
}
}
fn bit_index(bits: usize) -> (usize, usize) {
(bits / BLOCK_SIZE, bits % BLOCK_SIZE)
fn bit_index(bits: u64) -> (usize, usize) {
let a = bits / BLOCK_SIZE;
let b = bits % BLOCK_SIZE;
assert_eq!(a as usize as u64, a);
assert_eq!(b as usize as u64, b);
(a as usize, b as usize)
}
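
The `assert_eq!(x as usize as u64, x)` pattern used here and throughout the commit is a lossless-narrowing check: the cast to host usize is accepted only if casting back reproduces the original value. As a hypothetical helper (the commit inlines the assertion instead):

```rust
// Panics when a u64 value does not fit in the host's usize, e.g. a > 4 GiB
// size on a 32-bit host; otherwise the narrowing is lossless.
fn to_host_usize(x: u64) -> usize {
    assert_eq!(x as usize as u64, x, "{} does not fit in host usize", x);
    x as usize
}

fn main() {
    assert_eq!(to_host_usize(42), 42usize);
}
```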


@ -65,7 +65,7 @@ impl PrimValKind {
}
}
pub fn from_uint_size(size: usize) -> Self {
pub fn from_uint_size(size: u64) -> Self {
match size {
1 => PrimValKind::U8,
2 => PrimValKind::U16,
@ -75,7 +75,7 @@ impl PrimValKind {
}
}
pub fn from_int_size(size: usize) -> Self {
pub fn from_int_size(size: u64) -> Self {
match size {
1 => PrimValKind::I8,
2 => PrimValKind::I16,
@ -119,11 +119,11 @@ impl PrimVal {
PrimVal::new(f64_to_bits(f), PrimValKind::F64)
}
pub fn from_uint_with_size(n: u64, size: usize) -> Self {
pub fn from_uint_with_size(n: u64, size: u64) -> Self {
PrimVal::new(n, PrimValKind::from_uint_size(size))
}
pub fn from_int_with_size(n: i64, size: usize) -> Self {
pub fn from_int_with_size(n: i64, size: u64) -> Self {
PrimVal::new(n as u64, PrimValKind::from_int_size(size))
}
@ -139,8 +139,8 @@ impl PrimVal {
pub fn to_ptr(self) -> Pointer {
self.relocation.map(|alloc_id| {
Pointer::new(alloc_id, self.bits as usize)
}).unwrap_or_else(|| Pointer::from_int(self.bits as usize))
Pointer::new(alloc_id, self.bits)
}).unwrap_or_else(|| Pointer::from_int(self.bits))
}
pub fn try_as_uint<'tcx>(self) -> EvalResult<'tcx, u64> {