use byteorder::{ReadBytesExt, WriteBytesExt, LittleEndian, BigEndian};

use std::collections::{btree_map, BTreeMap, HashMap, HashSet, VecDeque, BTreeSet};
use std::{fmt, iter, ptr, mem, io};

use rustc::ty;
use rustc::ty::layout::{self, TargetDataLayout};

use error::{EvalError, EvalResult};
use value::PrimVal;

////////////////////////////////////////////////////////////////////////////////
// Allocations and pointers
////////////////////////////////////////////////////////////////////////////////

#[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct AllocId(pub u64);

impl fmt::Display for AllocId {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

#[derive(Debug)]
pub struct Allocation {
    /// The actual bytes of the allocation.
    /// Note that the bytes of a pointer represent the offset of the pointer.
    pub bytes: Vec<u8>,
    /// Maps from byte addresses to allocations.
    /// Only the first byte of a pointer is inserted into the map.
    pub relocations: BTreeMap<u64, AllocId>,
    /// Denotes undefined memory. Reading from undefined memory is forbidden in miri.
    pub undef_mask: UndefMask,
    /// The alignment of the allocation, used to detect unaligned reads.
    pub align: u64,
    /// Whether the allocation may be modified.
    /// Use the `mark_static_initalized` method of `Memory` to ensure that an error occurs if the
    /// memory of this allocation is modified or deallocated in the future.
    pub static_kind: StaticKind,
}

#[derive(Debug, PartialEq, Copy, Clone)]
pub enum StaticKind {
    /// May be deallocated without breaking miri's invariants.
    NotStatic,
    /// May be modified, but never deallocated.
    Mutable,
    /// May neither be modified nor deallocated.
    Immutable,
}

#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct Pointer {
    pub alloc_id: AllocId,
    pub offset: u64,
}

impl Pointer {
    pub fn new(alloc_id: AllocId, offset: u64) -> Self {
        Pointer { alloc_id, offset }
    }

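    /// Offsets the pointer by a signed amount, wrapping negative offsets via two's complement.
    ///
    /// An illustrative sketch of the intended behaviour (not a doctest, since `Pointer` is an
    /// interpreter-internal type):
    ///
    /// ```ignore
    /// let p = Pointer::from_int(16);
    /// assert_eq!(p.signed_offset(-5).offset, 11);
    /// assert_eq!(p.signed_offset(5).offset, 21);
    /// ```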
    pub fn signed_offset(self, i: i64) -> Self {
        // FIXME: is it possible to over/underflow here?
        if i < 0 {
            // Trickery to ensure that `i64::min_value()` works fine: compute the magnitude of `i`
            // via two's complement. This formula only works for truly negative values; it panics
            // for zero!
            let n = u64::max_value() - (i as u64) + 1;
            Pointer::new(self.alloc_id, self.offset - n)
        } else {
            self.offset(i as u64)
        }
    }

    pub fn offset(self, i: u64) -> Self {
        Pointer::new(self.alloc_id, self.offset + i)
    }

    pub fn points_to_zst(&self) -> bool {
        self.alloc_id == ZST_ALLOC_ID
    }

    pub fn to_int<'tcx>(&self) -> EvalResult<'tcx, u64> {
        match self.alloc_id {
            NEVER_ALLOC_ID => Ok(self.offset),
            _ => Err(EvalError::ReadPointerAsBytes),
        }
    }

    pub fn from_int(i: u64) -> Self {
        Pointer::new(NEVER_ALLOC_ID, i)
    }

    pub fn zst_ptr() -> Self {
        Pointer::new(ZST_ALLOC_ID, 0)
    }

    pub fn never_ptr() -> Self {
        Pointer::new(NEVER_ALLOC_ID, 0)
    }

    pub fn is_null_ptr(&self) -> bool {
        *self == Pointer::from_int(0)
    }
}

pub type TlsKey = usize;

#[derive(Copy, Clone, Debug)]
pub struct TlsEntry<'tcx> {
    data: Pointer, // will eventually become a map from thread IDs to pointers
    dtor: Option<ty::Instance<'tcx>>,
}

////////////////////////////////////////////////////////////////////////////////
// Top-level interpreter memory
////////////////////////////////////////////////////////////////////////////////

pub struct Memory<'a, 'tcx> {
    /// Actual memory allocations (arbitrary bytes, may contain pointers into other allocations).
    alloc_map: HashMap<AllocId, Allocation>,

    /// The AllocId to assign to the next new allocation. Always incremented, never gets smaller.
    next_id: AllocId,

    /// Set of statics, constants, promoteds, vtables, ... to prevent `mark_static_initalized` from
    /// stepping out of its own allocations. This set only contains statics backed by an
    /// allocation. If they are ByVal or ByValPair they are not here, but will be inserted once
    /// they become ByRef.
    static_alloc: HashSet<AllocId>,

    /// Number of virtual bytes allocated.
    memory_usage: u64,

    /// Maximum number of virtual bytes that may be allocated.
    memory_size: u64,

    /// Function "allocations". They exist solely so pointers have something to point to, and
    /// we can figure out what they point to.
    functions: HashMap<AllocId, ty::Instance<'tcx>>,

    /// Inverse map of `functions` so we don't allocate a new pointer every time we need one.
    function_alloc_cache: HashMap<ty::Instance<'tcx>, AllocId>,

    /// Target machine data layout to emulate.
    pub layout: &'a TargetDataLayout,

    /// List of memory regions containing packed structures.
    ///
    /// We mark memory as "packed" or "unaligned" for a single statement, and clear the marking
    /// afterwards. In the case where no packed structs are present, this amounts to a single
    /// emptiness check of a set instead of heavily influencing all memory access code, as other
    /// solutions would.
    ///
    /// One disadvantage of this solution is the fact that you can cast a pointer to a packed
    /// struct to a pointer to a normal struct, and if you access a field of both in the same MIR
    /// statement, the normal struct access will succeed even though it shouldn't. But even with
    /// MIR optimizations, that situation is hard/impossible to produce.
    packed: BTreeSet<Entry>,

    /// A cache for basic byte allocations keyed by their contents. This is used to deduplicate
    /// allocations for string and bytestring literals.
    literal_alloc_cache: HashMap<Vec<u8>, AllocId>,

    /// pthreads-style thread-local storage. We only have one thread, so this is just a map from
    /// TLS keys to the pointer stored there.
    thread_local: HashMap<TlsKey, TlsEntry<'tcx>>,

    /// The key to use for the next thread-local allocation.
    next_thread_local: TlsKey,
}

const ZST_ALLOC_ID: AllocId = AllocId(0);
const NEVER_ALLOC_ID: AllocId = AllocId(1);

impl<'a, 'tcx> Memory<'a, 'tcx> {
    pub fn new(layout: &'a TargetDataLayout, max_memory: u64) -> Self {
        Memory {
            alloc_map: HashMap::new(),
            functions: HashMap::new(),
            function_alloc_cache: HashMap::new(),
            next_id: AllocId(2),
            layout,
            memory_size: max_memory,
            memory_usage: 0,
            packed: BTreeSet::new(),
            static_alloc: HashSet::new(),
            literal_alloc_cache: HashMap::new(),
            thread_local: HashMap::new(),
            next_thread_local: 0,
        }
    }

    pub fn allocations(&self) -> ::std::collections::hash_map::Iter<AllocId, Allocation> {
        self.alloc_map.iter()
    }

    pub fn create_fn_alloc(&mut self, instance: ty::Instance<'tcx>) -> Pointer {
        if let Some(&alloc_id) = self.function_alloc_cache.get(&instance) {
            return Pointer::new(alloc_id, 0);
        }
        let id = self.next_id;
        debug!("creating fn ptr: {}", id);
        self.next_id.0 += 1;
        self.functions.insert(id, instance);
        self.function_alloc_cache.insert(instance, id);
        Pointer::new(id, 0)
    }

    pub fn allocate_cached(&mut self, bytes: &[u8]) -> EvalResult<'tcx, Pointer> {
        if let Some(&alloc_id) = self.literal_alloc_cache.get(bytes) {
            return Ok(Pointer::new(alloc_id, 0));
        }

        let ptr = self.allocate(bytes.len() as u64, 1)?;
        self.write_bytes(ptr, bytes)?;
        self.mark_static_initalized(ptr.alloc_id, false)?;
        self.literal_alloc_cache.insert(bytes.to_vec(), ptr.alloc_id);
        Ok(ptr)
    }

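    /// Allocates `size` zeroed (but still marked undefined) bytes with the given alignment and
    /// returns a pointer to offset 0 of the fresh allocation.
    ///
    /// An illustrative sketch of a round trip through the byte accessors (not a doctest, since
    /// `Memory` is interpreter-internal):
    ///
    /// ```ignore
    /// let ptr = memory.allocate(4, 1)?;
    /// memory.write_bytes(ptr, &[1, 2, 3, 4])?;
    /// assert_eq!(memory.read_bytes(ptr, 4)?, &[1, 2, 3, 4]);
    /// ```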
    pub fn allocate(&mut self, size: u64, align: u64) -> EvalResult<'tcx, Pointer> {
        if size == 0 {
            return Ok(Pointer::zst_ptr());
        }
        assert_ne!(align, 0);

        if self.memory_size - self.memory_usage < size {
            return Err(EvalError::OutOfMemory {
                allocation_size: size,
                memory_size: self.memory_size,
                memory_usage: self.memory_usage,
            });
        }
        self.memory_usage += size;
        assert_eq!(size as usize as u64, size);
        let alloc = Allocation {
            bytes: vec![0; size as usize],
            relocations: BTreeMap::new(),
            undef_mask: UndefMask::new(size),
            align,
            static_kind: StaticKind::NotStatic,
        };
        let id = self.next_id;
        self.next_id.0 += 1;
        self.alloc_map.insert(id, alloc);
        Ok(Pointer::new(id, 0))
    }

    // TODO(solson): Track which allocations were returned from __rust_allocate and report an error
    // when reallocating/deallocating any others.
    pub fn reallocate(&mut self, ptr: Pointer, new_size: u64, align: u64) -> EvalResult<'tcx, Pointer> {
        // TODO(solson): Report error about non-__rust_allocate'd pointer.
        if ptr.offset != 0 {
            return Err(EvalError::Unimplemented(format!("bad pointer offset: {}", ptr.offset)));
        }
        if ptr.points_to_zst() {
            return self.allocate(new_size, align);
        }
        if self.get(ptr.alloc_id).ok().map_or(false, |alloc| alloc.static_kind != StaticKind::NotStatic) {
            return Err(EvalError::ReallocatedStaticMemory);
        }

        let size = self.get(ptr.alloc_id)?.bytes.len() as u64;

        if new_size > size {
            let amount = new_size - size;
            self.memory_usage += amount;
            let alloc = self.get_mut(ptr.alloc_id)?;
            assert_eq!(amount as usize as u64, amount);
            alloc.bytes.extend(iter::repeat(0).take(amount as usize));
            alloc.undef_mask.grow(amount, false);
        } else if size > new_size {
            self.memory_usage -= size - new_size;
            self.clear_relocations(ptr.offset(new_size), size - new_size)?;
            let alloc = self.get_mut(ptr.alloc_id)?;
            // `as usize` is fine here, since it is smaller than `size`, which came from a usize
            alloc.bytes.truncate(new_size as usize);
            alloc.bytes.shrink_to_fit();
            alloc.undef_mask.truncate(new_size);
        }

        Ok(Pointer::new(ptr.alloc_id, 0))
    }

    // TODO(solson): See comment on `reallocate`.
    pub fn deallocate(&mut self, ptr: Pointer) -> EvalResult<'tcx> {
        if ptr.points_to_zst() {
            return Ok(());
        }
        if ptr.offset != 0 {
            // TODO(solson): Report error about non-__rust_allocate'd pointer.
            return Err(EvalError::Unimplemented(format!("bad pointer offset: {}", ptr.offset)));
        }
        if self.get(ptr.alloc_id).ok().map_or(false, |alloc| alloc.static_kind != StaticKind::NotStatic) {
            return Err(EvalError::DeallocatedStaticMemory);
        }

        if let Some(alloc) = self.alloc_map.remove(&ptr.alloc_id) {
            self.memory_usage -= alloc.bytes.len() as u64;
        } else {
            debug!("deallocated a pointer twice: {}", ptr.alloc_id);
            // TODO(solson): Report error about erroneous free. This is blocked on properly tracking
            // already-dropped state since this if-statement is entered even in safe code without
            // it.
        }
        debug!("deallocated : {}", ptr.alloc_id);

        Ok(())
    }

    pub fn pointer_size(&self) -> u64 {
        self.layout.pointer_size.bytes()
    }

    pub fn endianess(&self) -> layout::Endian {
        self.layout.endian
    }

    pub fn check_align(&self, ptr: Pointer, align: u64, len: u64) -> EvalResult<'tcx> {
        let alloc = self.get(ptr.alloc_id)?;
        // Check whether the memory was marked as packed.
        // We select all elements that have the correct alloc_id and are within
        // the range given by the offset into the allocation and the length.
        let start = Entry {
            alloc_id: ptr.alloc_id,
            packed_start: 0,
            packed_end: ptr.offset + len,
        };
        let end = Entry {
            alloc_id: ptr.alloc_id,
            packed_start: ptr.offset + len,
            packed_end: 0,
        };
        for &Entry { packed_start, packed_end, .. } in self.packed.range(start..end) {
            // If the region we are checking is covered by a region in `packed`,
            // ignore the actual alignment.
            if packed_start <= ptr.offset && (ptr.offset + len) <= packed_end {
                return Ok(());
            }
        }
        if alloc.align < align {
            return Err(EvalError::AlignmentCheckFailed {
                has: alloc.align,
                required: align,
            });
        }
        if ptr.offset % align == 0 {
            Ok(())
        } else {
            Err(EvalError::AlignmentCheckFailed {
                has: ptr.offset % align,
                required: align,
            })
        }
    }

    pub(crate) fn mark_packed(&mut self, ptr: Pointer, len: u64) {
        self.packed.insert(Entry {
            alloc_id: ptr.alloc_id,
            packed_start: ptr.offset,
            packed_end: ptr.offset + len,
        });
    }

    pub(crate) fn clear_packed(&mut self) {
        self.packed.clear();
    }

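    /// Creates a new TLS key with an optional destructor and returns it.
    ///
    /// An illustrative sketch of the TLS API offered by `Memory` (not a doctest, since `Memory`
    /// is interpreter-internal; `some_ptr` stands for any `Pointer`, e.g. one from `allocate`):
    ///
    /// ```ignore
    /// let key = memory.create_tls_key(None);
    /// memory.store_tls(key, some_ptr)?;
    /// assert_eq!(memory.load_tls(key)?, some_ptr);
    /// memory.delete_tls_key(key)?;
    /// ```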
    pub(crate) fn create_tls_key(&mut self, dtor: Option<ty::Instance<'tcx>>) -> TlsKey {
        let new_key = self.next_thread_local;
        self.next_thread_local += 1;
        self.thread_local.insert(new_key, TlsEntry { data: Pointer::from_int(0), dtor });
        trace!("New TLS key allocated: {} with dtor {:?}", new_key, dtor);
        new_key
    }

    pub(crate) fn delete_tls_key(&mut self, key: TlsKey) -> EvalResult<'tcx> {
        match self.thread_local.remove(&key) {
            Some(_) => {
                trace!("TLS key {} removed", key);
                Ok(())
            },
            None => Err(EvalError::TlsOutOfBounds),
        }
    }

    pub(crate) fn load_tls(&mut self, key: TlsKey) -> EvalResult<'tcx, Pointer> {
        match self.thread_local.get(&key) {
            Some(&TlsEntry { data, .. }) => {
                trace!("TLS key {} loaded: {:?}", key, data);
                Ok(data)
            },
            None => Err(EvalError::TlsOutOfBounds),
        }
    }

    pub(crate) fn store_tls(&mut self, key: TlsKey, new_data: Pointer) -> EvalResult<'tcx> {
        match self.thread_local.get_mut(&key) {
            Some(&mut TlsEntry { ref mut data, .. }) => {
                trace!("TLS key {} stored: {:?}", key, new_data);
                *data = new_data;
                Ok(())
            },
            None => Err(EvalError::TlsOutOfBounds),
        }
    }

    // Returns a dtor and its argument, if one is supposed to run.
    pub(crate) fn fetch_tls_dtor(&mut self) -> Option<(ty::Instance<'tcx>, Pointer)> {
        for (_, &mut TlsEntry { ref mut data, dtor }) in self.thread_local.iter_mut() {
            if !data.is_null_ptr() {
                if let Some(dtor) = dtor {
                    let old_data = *data;
                    *data = Pointer::from_int(0);
                    return Some((dtor, old_data));
                }
            }
        }
        None
    }
}

// The derived `Ord` impl sorts first by the first field, then, if the first fields are equal,
// by the second field, and if those are equal too, by the third field.
// This is exactly what we need for our purposes: a range query within an allocation
// will give us all `Entry`s that have that `AllocId` and whose `packed_start` is <=
// the one we're looking for, but not > the end of the range we're checking.
// At the same time, `packed_end` is irrelevant for the sorting and range searching, but is used
// for the check.
// This kind of search breaks if `packed_end < packed_start`, so don't do that!
#[derive(Eq, PartialEq, Ord, PartialOrd)]
struct Entry {
    alloc_id: AllocId,
    packed_start: u64,
    packed_end: u64,
}
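
// A hypothetical worked example of the query performed in `check_align` (numbers chosen purely
// for illustration): suppose `mark_packed` recorded `Entry { alloc_id: AllocId(3), packed_start: 2,
// packed_end: 10 }` and we now check an access to offsets 4..8 of allocation 3. `check_align`
// builds `start = Entry { AllocId(3), 0, 8 }` and `end = Entry { AllocId(3), 8, 0 }`; the recorded
// entry sorts between them (same alloc_id, 0 <= 2 < 8), so it is visited, and since 2 <= 4 and
// 8 <= 10 the access is treated as packed and the alignment check is skipped.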

/// Allocation accessors
impl<'a, 'tcx> Memory<'a, 'tcx> {
    pub fn get(&self, id: AllocId) -> EvalResult<'tcx, &Allocation> {
        match self.alloc_map.get(&id) {
            Some(alloc) => Ok(alloc),
            None => match self.functions.get(&id) {
                Some(_) => Err(EvalError::DerefFunctionPointer),
                None if id == NEVER_ALLOC_ID || id == ZST_ALLOC_ID => Err(EvalError::InvalidMemoryAccess),
                None => Err(EvalError::DanglingPointerDeref),
            }
        }
    }

    pub fn get_mut(&mut self, id: AllocId) -> EvalResult<'tcx, &mut Allocation> {
        match self.alloc_map.get_mut(&id) {
            Some(alloc) => match alloc.static_kind {
                StaticKind::Mutable |
                StaticKind::NotStatic => Ok(alloc),
                StaticKind::Immutable => Err(EvalError::ModifiedConstantMemory),
            },
            None => match self.functions.get(&id) {
                Some(_) => Err(EvalError::DerefFunctionPointer),
                None if id == NEVER_ALLOC_ID || id == ZST_ALLOC_ID => Err(EvalError::InvalidMemoryAccess),
                None => Err(EvalError::DanglingPointerDeref),
            }
        }
    }

    pub fn get_fn(&self, id: AllocId) -> EvalResult<'tcx, ty::Instance<'tcx>> {
        debug!("reading fn ptr: {}", id);
        match self.functions.get(&id) {
            Some(&fndef) => Ok(fndef),
            None => match self.alloc_map.get(&id) {
                Some(_) => Err(EvalError::ExecuteMemory),
                None => Err(EvalError::InvalidFunctionPointer),
            }
        }
    }

    /// For debugging, print an allocation and all allocations it points to, recursively.
    pub fn dump_alloc(&self, id: AllocId) {
        self.dump_allocs(vec![id]);
    }

    /// For debugging, print a list of allocations and all allocations they point to, recursively.
    pub fn dump_allocs(&self, mut allocs: Vec<AllocId>) {
        use std::fmt::Write;
        allocs.sort();
        allocs.dedup();
        let mut allocs_to_print = VecDeque::from(allocs);
        let mut allocs_seen = HashSet::new();

        while let Some(id) = allocs_to_print.pop_front() {
            if id == ZST_ALLOC_ID || id == NEVER_ALLOC_ID { continue; }
            let mut msg = format!("Alloc {:<5} ", format!("{}:", id));
            let prefix_len = msg.len();
            let mut relocations = vec![];

            let alloc = match (self.alloc_map.get(&id), self.functions.get(&id)) {
                (Some(a), None) => a,
                (None, Some(instance)) => {
                    trace!("{} {}", msg, instance);
                    continue;
                },
                (None, None) => {
                    trace!("{} (deallocated)", msg);
                    continue;
                },
                (Some(_), Some(_)) => bug!("miri invariant broken: an allocation id exists that points to both a function and a memory location"),
            };

            for i in 0..(alloc.bytes.len() as u64) {
                if let Some(&target_id) = alloc.relocations.get(&i) {
                    if allocs_seen.insert(target_id) {
                        allocs_to_print.push_back(target_id);
                    }
                    relocations.push((i, target_id));
                }
                if alloc.undef_mask.is_range_defined(i, i + 1) {
                    // this `as usize` is fine, since `i` came from a `usize`
                    write!(msg, "{:02x} ", alloc.bytes[i as usize]).unwrap();
                } else {
                    msg.push_str("__ ");
                }
            }

            let immutable = match alloc.static_kind {
                StaticKind::Mutable => " (static mut)",
                StaticKind::Immutable => " (immutable)",
                StaticKind::NotStatic => "",
            };
            trace!("{}({} bytes){}", msg, alloc.bytes.len(), immutable);

            if !relocations.is_empty() {
                msg.clear();
                write!(msg, "{:1$}", "", prefix_len).unwrap(); // Print spaces.
                let mut pos = 0;
                let relocation_width = (self.pointer_size() - 1) * 3;
                for (i, target_id) in relocations {
                    // this `as usize` is fine, since we can't print more chars than `usize::MAX`
                    write!(msg, "{:1$}", "", ((i - pos) * 3) as usize).unwrap();
                    let target = match target_id {
                        ZST_ALLOC_ID => String::from("zst"),
                        NEVER_ALLOC_ID => String::from("int ptr"),
                        _ => format!("({})", target_id),
                    };
                    // this `as usize` is fine, since we can't print more chars than `usize::MAX`
                    write!(msg, "└{0:─^1$}┘ ", target, relocation_width as usize).unwrap();
                    pos = i + self.pointer_size();
                }
                trace!("{}", msg);
            }
        }
    }

    pub fn leak_report(&self) -> usize {
        trace!("### LEAK REPORT ###");
        let leaks: Vec<_> = self.alloc_map
            .iter()
            .filter_map(|(&key, val)| {
                if val.static_kind == StaticKind::NotStatic {
                    Some(key)
                } else {
                    None
                }
            })
            .collect();
        let n = leaks.len();
        self.dump_allocs(leaks);
        n
    }
}

/// Byte accessors
impl<'a, 'tcx> Memory<'a, 'tcx> {
    fn get_bytes_unchecked(&self, ptr: Pointer, size: u64, align: u64) -> EvalResult<'tcx, &[u8]> {
        if size == 0 {
            return Ok(&[]);
        }
        self.check_align(ptr, align, size)?;
        let alloc = self.get(ptr.alloc_id)?;
        let allocation_size = alloc.bytes.len() as u64;
        if ptr.offset + size > allocation_size {
            return Err(EvalError::PointerOutOfBounds { ptr, size, allocation_size });
        }
        assert_eq!(ptr.offset as usize as u64, ptr.offset);
        assert_eq!(size as usize as u64, size);
        let offset = ptr.offset as usize;
        Ok(&alloc.bytes[offset..offset + size as usize])
    }

    fn get_bytes_unchecked_mut(&mut self, ptr: Pointer, size: u64, align: u64) -> EvalResult<'tcx, &mut [u8]> {
        if size == 0 {
            return Ok(&mut []);
        }
        self.check_align(ptr, align, size)?;
        let alloc = self.get_mut(ptr.alloc_id)?;
        let allocation_size = alloc.bytes.len() as u64;
        if ptr.offset + size > allocation_size {
            return Err(EvalError::PointerOutOfBounds { ptr, size, allocation_size });
        }
        assert_eq!(ptr.offset as usize as u64, ptr.offset);
        assert_eq!(size as usize as u64, size);
        let offset = ptr.offset as usize;
        Ok(&mut alloc.bytes[offset..offset + size as usize])
    }

    fn get_bytes(&self, ptr: Pointer, size: u64, align: u64) -> EvalResult<'tcx, &[u8]> {
        if size == 0 {
            return Ok(&[]);
        }
        if self.has_non_int_relocations(ptr, size)? {
            return Err(EvalError::ReadPointerAsBytes);
        }
        self.check_defined(ptr, size)?;
        self.get_bytes_unchecked(ptr, size, align)
    }

    fn get_bytes_mut(&mut self, ptr: Pointer, size: u64, align: u64) -> EvalResult<'tcx, &mut [u8]> {
        if size == 0 {
            return Ok(&mut []);
        }
        self.clear_relocations(ptr, size)?;
        self.mark_definedness(ptr, size, true)?;
        self.get_bytes_unchecked_mut(ptr, size, align)
    }
}

/// Reading and writing
impl<'a, 'tcx> Memory<'a, 'tcx> {
    /// Mark an allocation as being the entry point to a static (see `static_alloc` field).
    pub fn mark_static(&mut self, alloc_id: AllocId) {
        trace!("mark_static: {:?}", alloc_id);
        if alloc_id != NEVER_ALLOC_ID && alloc_id != ZST_ALLOC_ID && !self.static_alloc.insert(alloc_id) {
            bug!("tried to mark an allocation ({:?}) as static twice", alloc_id);
        }
    }

    /// Mark an allocation pointed to by a static as static and initialized.
    pub fn mark_inner_allocation(&mut self, alloc: AllocId, mutable: bool) -> EvalResult<'tcx> {
        // relocations into other statics are not "inner allocations"
        if !self.static_alloc.contains(&alloc) {
            self.mark_static_initalized(alloc, mutable)?;
        }
        Ok(())
    }

    /// Mark an allocation as static and initialized, either mutable or not.
    pub fn mark_static_initalized(&mut self, alloc_id: AllocId, mutable: bool) -> EvalResult<'tcx> {
        trace!("mark_static_initialized {:?}, mutable: {:?}", alloc_id, mutable);
        // Do not use `self.get_mut(alloc_id)` here, because we might have already marked a
        // sub-element or have circular pointers (e.g. `Rc`-cycles).
        let relocations = match self.alloc_map.get_mut(&alloc_id) {
            Some(&mut Allocation { ref mut relocations, static_kind: ref mut kind @ StaticKind::NotStatic, .. }) => {
                *kind = if mutable {
                    StaticKind::Mutable
                } else {
                    StaticKind::Immutable
                };
                // Take out the relocations vector to free the borrow on self, so we can call
                // mark recursively.
                mem::replace(relocations, Default::default())
            },
            None if alloc_id == NEVER_ALLOC_ID || alloc_id == ZST_ALLOC_ID => return Ok(()),
            None if !self.functions.contains_key(&alloc_id) => return Err(EvalError::DanglingPointerDeref),
            _ => return Ok(()),
        };
        // recurse into inner allocations
        for &alloc in relocations.values() {
            self.mark_inner_allocation(alloc, mutable)?;
        }
        // put back the relocations
        self.alloc_map.get_mut(&alloc_id).expect("checked above").relocations = relocations;
        Ok(())
    }

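    /// Copies `size` bytes from `src` to `dest`, together with the corresponding undef mask and
    /// relocations. An illustrative sketch (not a doctest, since `Memory` is interpreter-internal):
    ///
    /// ```ignore
    /// let src = memory.allocate(2, 1)?;
    /// let dest = memory.allocate(2, 1)?;
    /// memory.write_bytes(src, &[0xaa, 0xbb])?;
    /// memory.copy(src, dest, 2, 1)?;
    /// assert_eq!(memory.read_bytes(dest, 2)?, &[0xaa, 0xbb]);
    /// ```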
    pub fn copy(&mut self, src: Pointer, dest: Pointer, size: u64, align: u64) -> EvalResult<'tcx> {
        if size == 0 {
            return Ok(());
        }
        self.check_relocation_edges(src, size)?;

        let src_bytes = self.get_bytes_unchecked(src, size, align)?.as_ptr();
        let dest_bytes = self.get_bytes_mut(dest, size, align)?.as_mut_ptr();

        // SAFE: The above indexing would have panicked if there weren't at least `size` bytes
        // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
        // `dest` could possibly overlap.
        unsafe {
            assert_eq!(size as usize as u64, size);
            if src.alloc_id == dest.alloc_id {
                ptr::copy(src_bytes, dest_bytes, size as usize);
            } else {
                ptr::copy_nonoverlapping(src_bytes, dest_bytes, size as usize);
            }
        }

        self.copy_undef_mask(src, dest, size)?;
        self.copy_relocations(src, dest, size)?;

        Ok(())
    }

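    /// Reads bytes until a NUL terminator is found and returns them, excluding the NUL.
    /// A minimal sketch of the expected behaviour (not a doctest):
    ///
    /// ```ignore
    /// let ptr = memory.allocate(3, 1)?;
    /// memory.write_bytes(ptr, b"hi\0")?;
    /// assert_eq!(memory.read_c_str(ptr)?, b"hi");
    /// ```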
    pub fn read_c_str(&self, ptr: Pointer) -> EvalResult<'tcx, &[u8]> {
        let alloc = self.get(ptr.alloc_id)?;
        assert_eq!(ptr.offset as usize as u64, ptr.offset);
        let offset = ptr.offset as usize;
        match alloc.bytes[offset..].iter().position(|&c| c == 0) {
            Some(size) => {
                if self.has_non_int_relocations(ptr, (size + 1) as u64)? {
                    return Err(EvalError::ReadPointerAsBytes);
                }
                self.check_defined(ptr, (size + 1) as u64)?;
                Ok(&alloc.bytes[offset..offset + size])
            },
            None => Err(EvalError::UnterminatedCString(ptr)),
        }
    }

    pub fn read_bytes(&self, ptr: Pointer, size: u64) -> EvalResult<'tcx, &[u8]> {
        self.get_bytes(ptr, size, 1)
    }

    pub fn write_bytes(&mut self, ptr: Pointer, src: &[u8]) -> EvalResult<'tcx> {
        let bytes = self.get_bytes_mut(ptr, src.len() as u64, 1)?;
        bytes.clone_from_slice(src);
        Ok(())
    }

    pub fn write_repeat(&mut self, ptr: Pointer, val: u8, count: u64) -> EvalResult<'tcx> {
        let bytes = self.get_bytes_mut(ptr, count, 1)?;
        for b in bytes { *b = val; }
        Ok(())
    }

    pub fn read_ptr(&self, ptr: Pointer) -> EvalResult<'tcx, Pointer> {
        let size = self.pointer_size();
        self.check_defined(ptr, size)?;
        let endianess = self.endianess();
        let bytes = self.get_bytes_unchecked(ptr, size, size)?;
        let offset = read_target_uint(endianess, bytes).unwrap();
        assert_eq!(offset as u64 as u128, offset);
        let offset = offset as u64;
        let alloc = self.get(ptr.alloc_id)?;
        match alloc.relocations.get(&ptr.offset) {
            Some(&alloc_id) => Ok(Pointer::new(alloc_id, offset)),
            None => Ok(Pointer::from_int(offset)),
        }
    }

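    /// Writes a pointer by storing its offset as a target-usize and recording the allocation it
    /// points into as a relocation at `dest.offset`. An illustrative sketch (not a doctest):
    ///
    /// ```ignore
    /// let ptr_size = memory.pointer_size();
    /// let dest = memory.allocate(ptr_size, ptr_size)?;
    /// let value = memory.allocate(1, 1)?;
    /// memory.write_ptr(dest, value)?;
    /// assert_eq!(memory.read_ptr(dest)?, value);
    /// ```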
    pub fn write_ptr(&mut self, dest: Pointer, ptr: Pointer) -> EvalResult<'tcx> {
        self.write_usize(dest, ptr.offset as u64)?;
        self.get_mut(dest.alloc_id)?.relocations.insert(dest.offset, ptr.alloc_id);
        Ok(())
    }

    pub fn write_primval(
        &mut self,
        dest: Pointer,
        val: PrimVal,
        size: u64,
    ) -> EvalResult<'tcx> {
        match val {
            PrimVal::Ptr(ptr) => {
                assert_eq!(size, self.pointer_size());
                self.write_ptr(dest, ptr)
            }

            PrimVal::Bytes(bytes) => {
                // We need to mask here, or the byteorder crate can die when given a u64 larger
                // than fits in an integer of the requested size.
                let mask = match size {
                    1 => !0u8 as u128,
                    2 => !0u16 as u128,
                    4 => !0u32 as u128,
                    8 => !0u64 as u128,
                    16 => !0,
                    _ => bug!("unexpected PrimVal::Bytes size"),
                };
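                // For illustration: with `size == 2`, `mask` is `0xffff`, so a value such as
                // `0x1_2345` is truncated to `0x2345` before being written below.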
                self.write_uint(dest, bytes & mask, size)
            }

            PrimVal::Undef => self.mark_definedness(dest, size, false),
        }
    }

    pub fn read_bool(&self, ptr: Pointer) -> EvalResult<'tcx, bool> {
        let bytes = self.get_bytes(ptr, 1, self.layout.i1_align.abi())?;
        match bytes[0] {
            0 => Ok(false),
            1 => Ok(true),
            _ => Err(EvalError::InvalidBool),
        }
    }

    pub fn write_bool(&mut self, ptr: Pointer, b: bool) -> EvalResult<'tcx> {
        let align = self.layout.i1_align.abi();
        self.get_bytes_mut(ptr, 1, align)
            .map(|bytes| bytes[0] = b as u8)
    }

2016-11-18 11:55:14 +00:00
fn int_align ( & self , size : u64 ) -> EvalResult < ' tcx , u64 > {
2016-07-06 09:12:44 +00:00
match size {
2016-11-18 11:55:14 +00:00
1 = > Ok ( self . layout . i8_align . abi ( ) ) ,
2 = > Ok ( self . layout . i16_align . abi ( ) ) ,
4 = > Ok ( self . layout . i32_align . abi ( ) ) ,
8 = > Ok ( self . layout . i64_align . abi ( ) ) ,
2017-01-12 07:28:42 +00:00
16 = > Ok ( self . layout . i128_align . abi ( ) ) ,
2016-11-11 12:08:14 +00:00
_ = > bug! ( " bad integer size: {} " , size ) ,
2016-07-06 09:12:44 +00:00
}
}
2017-01-12 07:28:42 +00:00
pub fn read_int ( & self , ptr : Pointer , size : u64 ) -> EvalResult < ' tcx , i128 > {
2016-07-22 14:35:39 +00:00
let align = self . int_align ( size ) ? ;
self . get_bytes ( ptr , size , align ) . map ( | b | read_target_int ( self . endianess ( ) , b ) . unwrap ( ) )
2016-03-17 09:12:15 +00:00
}
2017-02-04 21:09:10 +00:00
pub fn write_int ( & mut self , ptr : Pointer , n : i128 , size : u64 ) -> EvalResult < ' tcx > {
2016-07-22 14:35:39 +00:00
let align = self . int_align ( size ) ? ;
2016-06-23 13:16:25 +00:00
let endianess = self . endianess ( ) ;
2016-07-22 14:35:39 +00:00
let b = self . get_bytes_mut ( ptr , size , align ) ? ;
2016-06-25 14:50:33 +00:00
write_target_int ( endianess , b , n ) . unwrap ( ) ;
Ok ( ( ) )
2016-03-13 07:14:20 +00:00
}
2016-03-15 05:03:31 +00:00
2017-01-12 07:28:42 +00:00
pub fn read_uint ( & self , ptr : Pointer , size : u64 ) -> EvalResult < ' tcx , u128 > {
2016-07-22 14:35:39 +00:00
let align = self . int_align ( size ) ? ;
self . get_bytes ( ptr , size , align ) . map ( | b | read_target_uint ( self . endianess ( ) , b ) . unwrap ( ) )
2016-03-17 08:53:03 +00:00
}
2017-02-04 21:09:10 +00:00
pub fn write_uint ( & mut self , ptr : Pointer , n : u128 , size : u64 ) -> EvalResult < ' tcx > {
2016-07-22 14:35:39 +00:00
let align = self . int_align ( size ) ? ;
2016-06-23 13:16:25 +00:00
let endianess = self . endianess ( ) ;
2016-07-22 14:35:39 +00:00
let b = self . get_bytes_mut ( ptr , size , align ) ? ;
2016-06-25 14:50:33 +00:00
write_target_uint ( endianess , b , n ) . unwrap ( ) ;
Ok ( ( ) )
2016-03-17 08:53:03 +00:00
}
    pub fn read_isize(&self, ptr: Pointer) -> EvalResult<'tcx, i64> {
        self.read_int(ptr, self.pointer_size()).map(|i| i as i64)
    }

    pub fn write_isize(&mut self, ptr: Pointer, n: i64) -> EvalResult<'tcx> {
        let size = self.pointer_size();
        self.write_int(ptr, n as i128, size)
    }

    pub fn read_usize(&self, ptr: Pointer) -> EvalResult<'tcx, u64> {
        self.read_uint(ptr, self.pointer_size()).map(|i| i as u64)
    }

    pub fn write_usize(&mut self, ptr: Pointer, n: u64) -> EvalResult<'tcx> {
        let size = self.pointer_size();
        self.write_uint(ptr, n as u128, size)
    }

    pub fn write_f32(&mut self, ptr: Pointer, f: f32) -> EvalResult<'tcx> {
        let endianess = self.endianess();
        let align = self.layout.f32_align.abi();
        let b = self.get_bytes_mut(ptr, 4, align)?;
        write_target_f32(endianess, b, f).unwrap();
        Ok(())
    }

    pub fn write_f64(&mut self, ptr: Pointer, f: f64) -> EvalResult<'tcx> {
        let endianess = self.endianess();
        let align = self.layout.f64_align.abi();
        let b = self.get_bytes_mut(ptr, 8, align)?;
        write_target_f64(endianess, b, f).unwrap();
        Ok(())
    }

    pub fn read_f32(&self, ptr: Pointer) -> EvalResult<'tcx, f32> {
        self.get_bytes(ptr, 4, self.layout.f32_align.abi())
            .map(|b| read_target_f32(self.endianess(), b).unwrap())
    }

    pub fn read_f64(&self, ptr: Pointer) -> EvalResult<'tcx, f64> {
        self.get_bytes(ptr, 8, self.layout.f64_align.abi())
            .map(|b| read_target_f64(self.endianess(), b).unwrap())
    }
}

/// Relocations
impl<'a, 'tcx> Memory<'a, 'tcx> {
    fn relocations(&self, ptr: Pointer, size: u64)
        -> EvalResult<'tcx, btree_map::Range<u64, AllocId>>
    {
        let start = ptr.offset.saturating_sub(self.pointer_size() - 1);
        let end = ptr.offset + size;
        Ok(self.get(ptr.alloc_id)?.relocations.range(start..end))
    }

    fn has_non_int_relocations(&self, ptr: Pointer, size: u64)
        -> EvalResult<'tcx, bool>
    {
        Ok(self.relocations(ptr, size)?.any(|(_, &alloc_id)| alloc_id != NEVER_ALLOC_ID))
    }

    fn clear_relocations(&mut self, ptr: Pointer, size: u64) -> EvalResult<'tcx> {
        // Find all relocations overlapping the given range.
        let keys: Vec<_> = self.relocations(ptr, size)?.map(|(&k, _)| k).collect();
        if keys.is_empty() { return Ok(()); }

        // Find the start and end of the given range and its outermost relocations.
        let start = ptr.offset;
        let end = start + size;
        let first = *keys.first().unwrap();
        let last = *keys.last().unwrap() + self.pointer_size();

        let alloc = self.get_mut(ptr.alloc_id)?;

        // Mark parts of the outermost relocations as undefined if they partially fall outside the
        // given range.
        if first < start { alloc.undef_mask.set_range(first, start, false); }
        if last > end { alloc.undef_mask.set_range(end, last, false); }

        // Forget all the relocations.
        for k in keys { alloc.relocations.remove(&k); }

        Ok(())
    }

    fn check_relocation_edges(&self, ptr: Pointer, size: u64) -> EvalResult<'tcx> {
        let overlapping_start = self.relocations(ptr, 0)?.count();
        let overlapping_end = self.relocations(ptr.offset(size), 0)?.count();
        if overlapping_start + overlapping_end != 0 {
            return Err(EvalError::ReadPointerAsBytes);
        }
        Ok(())
    }

    fn copy_relocations(&mut self, src: Pointer, dest: Pointer, size: u64) -> EvalResult<'tcx> {
        let relocations: Vec<_> = self.relocations(src, size)?
            .map(|(&offset, &alloc_id)| {
                // Update relocation offsets for the new positions in the destination allocation.
                (offset + dest.offset - src.offset, alloc_id)
            })
            .collect();
        self.get_mut(dest.alloc_id)?.relocations.extend(relocations);
        Ok(())
    }
}

/// Undefined bytes
impl<'a, 'tcx> Memory<'a, 'tcx> {
    // FIXME(solson): This is a very naive, slow version.
    fn copy_undef_mask(&mut self, src: Pointer, dest: Pointer, size: u64) -> EvalResult<'tcx> {
        // The bits have to be saved locally before writing to dest in case src and dest overlap.
        assert_eq!(size as usize as u64, size);
        let mut v = Vec::with_capacity(size as usize);
        for i in 0..size {
            let defined = self.get(src.alloc_id)?.undef_mask.get(src.offset + i);
            v.push(defined);
        }
        for (i, defined) in v.into_iter().enumerate() {
            self.get_mut(dest.alloc_id)?.undef_mask.set(dest.offset + i as u64, defined);
        }
        Ok(())
    }

    fn check_defined(&self, ptr: Pointer, size: u64) -> EvalResult<'tcx> {
        let alloc = self.get(ptr.alloc_id)?;
        if !alloc.undef_mask.is_range_defined(ptr.offset, ptr.offset + size) {
            return Err(EvalError::ReadUndefBytes);
        }
        Ok(())
    }

    pub fn mark_definedness(
        &mut self,
        ptr: Pointer,
        size: u64,
        new_state: bool
    ) -> EvalResult<'tcx> {
        if size == 0 {
            return Ok(())
        }
        let mut alloc = self.get_mut(ptr.alloc_id)?;
        alloc.undef_mask.set_range(ptr.offset, ptr.offset + size, new_state);
        Ok(())
    }
}

////////////////////////////////////////////////////////////////////////////////
// Methods to access integers in the target endianness
////////////////////////////////////////////////////////////////////////////////

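/// Writes an unsigned integer into the `target` buffer (whose length determines the number of
/// bytes written) using the given endianness. A minimal sketch of how these helpers round-trip
/// (not a doctest):
///
/// ```ignore
/// let mut buf = [0u8; 2];
/// write_target_uint(layout::Endian::Little, &mut buf[..], 0x1234).unwrap();
/// assert_eq!(buf, [0x34, 0x12]);
/// assert_eq!(read_target_uint(layout::Endian::Little, &buf[..]).unwrap(), 0x1234);
/// ```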
fn write_target_uint(endianess: layout::Endian, mut target: &mut [u8], data: u128) -> Result<(), io::Error> {
    let len = target.len();
    match endianess {
        layout::Endian::Little => target.write_uint128::<LittleEndian>(data, len),
        layout::Endian::Big => target.write_uint128::<BigEndian>(data, len),
    }
}

fn write_target_int(endianess: layout::Endian, mut target: &mut [u8], data: i128) -> Result<(), io::Error> {
    let len = target.len();
    match endianess {
        layout::Endian::Little => target.write_int128::<LittleEndian>(data, len),
        layout::Endian::Big => target.write_int128::<BigEndian>(data, len),
    }
}

fn read_target_uint(endianess: layout::Endian, mut source: &[u8]) -> Result<u128, io::Error> {
    match endianess {
        layout::Endian::Little => source.read_uint128::<LittleEndian>(source.len()),
        layout::Endian::Big => source.read_uint128::<BigEndian>(source.len()),
    }
}

fn read_target_int(endianess: layout::Endian, mut source: &[u8]) -> Result<i128, io::Error> {
    match endianess {
        layout::Endian::Little => source.read_int128::<LittleEndian>(source.len()),
        layout::Endian::Big => source.read_int128::<BigEndian>(source.len()),
    }
}

////////////////////////////////////////////////////////////////////////////////
// Methods to access floats in the target endianness
////////////////////////////////////////////////////////////////////////////////

fn write_target_f32(endianess: layout::Endian, mut target: &mut [u8], data: f32) -> Result<(), io::Error> {
    match endianess {
        layout::Endian::Little => target.write_f32::<LittleEndian>(data),
        layout::Endian::Big => target.write_f32::<BigEndian>(data),
    }
}

fn write_target_f64(endianess: layout::Endian, mut target: &mut [u8], data: f64) -> Result<(), io::Error> {
    match endianess {
        layout::Endian::Little => target.write_f64::<LittleEndian>(data),
        layout::Endian::Big => target.write_f64::<BigEndian>(data),
    }
}

fn read_target_f32(endianess: layout::Endian, mut source: &[u8]) -> Result<f32, io::Error> {
    match endianess {
        layout::Endian::Little => source.read_f32::<LittleEndian>(),
        layout::Endian::Big => source.read_f32::<BigEndian>(),
    }
}

fn read_target_f64(endianess: layout::Endian, mut source: &[u8]) -> Result<f64, io::Error> {
    match endianess {
        layout::Endian::Little => source.read_f64::<LittleEndian>(),
        layout::Endian::Big => source.read_f64::<BigEndian>(),
    }
}

////////////////////////////////////////////////////////////////////////////////
// Undefined byte tracking
////////////////////////////////////////////////////////////////////////////////

type Block = u64;
const BLOCK_SIZE: u64 = 64;

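/// A bitmask recording which bytes of an allocation hold defined data, one bit per byte.
/// A minimal sketch of its behaviour (not a doctest, since `UndefMask` is only constructed by
/// `Memory` internally):
///
/// ```ignore
/// let mut mask = UndefMask::new(4);   // 4 bytes, all undefined
/// assert!(!mask.is_range_defined(0, 4));
/// mask.set_range(0, 2, true);         // mark the first two bytes as defined
/// assert!(mask.is_range_defined(0, 2));
/// assert!(!mask.is_range_defined(0, 3));
/// ```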
#[derive(Clone, Debug)]
pub struct UndefMask {
    blocks: Vec<Block>,
    len: u64,
}

impl UndefMask {
    fn new(size: u64) -> Self {
        let mut m = UndefMask {
            blocks: vec![],
            len: 0,
        };
        m.grow(size, false);
        m
    }

    /// Check whether the range `start..end` (end-exclusive) is entirely defined.
    pub fn is_range_defined(&self, start: u64, end: u64) -> bool {
        if end > self.len { return false; }
        for i in start..end {
            if !self.get(i) { return false; }
        }
        true
    }

    fn set_range(&mut self, start: u64, end: u64, new_state: bool) {
        let len = self.len;
        if end > len { self.grow(end - len, new_state); }
        self.set_range_inbounds(start, end, new_state);
    }

    fn set_range_inbounds(&mut self, start: u64, end: u64, new_state: bool) {
        for i in start..end { self.set(i, new_state); }
    }

    fn get(&self, i: u64) -> bool {
        let (block, bit) = bit_index(i);
        (self.blocks[block] & 1 << bit) != 0
    }

    fn set(&mut self, i: u64, new_state: bool) {
        let (block, bit) = bit_index(i);
        if new_state {
            self.blocks[block] |= 1 << bit;
        } else {
            self.blocks[block] &= !(1 << bit);
        }
    }

    fn grow(&mut self, amount: u64, new_state: bool) {
        let unused_trailing_bits = self.blocks.len() as u64 * BLOCK_SIZE - self.len;
        if amount > unused_trailing_bits {
            let additional_blocks = amount / BLOCK_SIZE + 1;
            assert_eq!(additional_blocks as usize as u64, additional_blocks);
            self.blocks.extend(iter::repeat(0).take(additional_blocks as usize));
        }
        let start = self.len;
        self.len += amount;
        self.set_range_inbounds(start, start + amount, new_state);
    }

    fn truncate(&mut self, length: u64) {
        self.len = length;
        let truncate = self.len / BLOCK_SIZE + 1;
        assert_eq!(truncate as usize as u64, truncate);
        self.blocks.truncate(truncate as usize);
        self.blocks.shrink_to_fit();
    }
}

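/// Maps a bit index into the mask to a `(block, bit-within-block)` pair.
/// For example, with `BLOCK_SIZE == 64`, bit 130 lives in block 2 at bit 2
/// (since 130 / 64 == 2 and 130 % 64 == 2).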
fn bit_index(bits: u64) -> (usize, usize) {
    let a = bits / BLOCK_SIZE;
    let b = bits % BLOCK_SIZE;
    assert_eq!(a as usize as u64, a);
    assert_eq!(b as usize as u64, b);
    (a as usize, b as usize)
}