2020-03-21 16:17:01 +00:00
|
|
|
use std::convert::TryFrom;
|
2019-05-17 01:20:14 +00:00
|
|
|
|
2020-03-29 14:41:09 +00:00
|
|
|
use rustc_middle::mir::interpret::{InterpResult, Pointer, PointerArithmetic, Scalar};
|
2020-07-24 12:16:54 +00:00
|
|
|
use rustc_middle::ty::{self, Instance, Ty};
|
2020-06-01 08:15:17 +00:00
|
|
|
use rustc_target::abi::{Align, LayoutOf, Size};
|
2018-06-08 02:47:26 +00:00
|
|
|
|
2020-07-24 12:16:54 +00:00
|
|
|
use super::util::ensure_monomorphic_enough;
|
2020-03-21 12:49:02 +00:00
|
|
|
use super::{FnVal, InterpCx, Machine, MemoryKind};
|
|
|
|
|
2020-03-16 22:12:42 +00:00
|
|
|
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
|
2016-09-11 09:06:44 +00:00
|
|
|
    /// Creates a dynamic vtable for the given type and vtable origin. This is used only for
    /// objects.
    ///
    /// The `trait_ref` encodes the erased self type. Hence, if we are
    /// making an object `Foo<Trait>` from a value of type `Foo<T>`, then
    /// `trait_ref` would map `T: Trait`.
    ///
    /// If `poly_trait_ref` is `None`, no trait methods are resolved and the vtable
    /// contains only the three header slots (drop fn, size, align) written below.
    ///
    /// Vtables are cached in `self.vtables`, keyed on the region-erased
    /// `(ty, poly_trait_ref)` pair, so repeated requests return the same pointer.
    pub fn get_vtable(
        &mut self,
        ty: Ty<'tcx>,
        poly_trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
    ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
        trace!("get_vtable(trait_ref={:?})", poly_trait_ref);

        // Erase regions first: they are irrelevant for the vtable contents and this
        // makes the cache key below canonical.
        let (ty, poly_trait_ref) = self.tcx.erase_regions((ty, poly_trait_ref));

        // All vtables must be monomorphic, bail out otherwise.
        ensure_monomorphic_enough(*self.tcx, ty)?;
        ensure_monomorphic_enough(*self.tcx, poly_trait_ref)?;

        if let Some(&vtable) = self.vtables.get(&(ty, poly_trait_ref)) {
            // This means we guarantee that there are no duplicate vtables, we will
            // always use the same vtable for the same (Type, Trait) combination.
            // That's not what happens in rustc, but emulating per-crate deduplication
            // does not sound like it actually makes anything any better.
            return Ok(vtable);
        }

        // Resolve the method list for the trait (if any); these fill slots 3.. below.
        let methods = if let Some(poly_trait_ref) = poly_trait_ref {
            let trait_ref = poly_trait_ref.with_self_ty(*self.tcx, ty);
            let trait_ref = self.tcx.erase_regions(trait_ref);
            self.tcx.vtable_methods(trait_ref)
        } else {
            &[]
        };

        // Size and align of the concrete type go into header slots 1 and 2.
        let layout = self.layout_of(ty)?;
        assert!(!layout.is_unsized(), "can't create a vtable for an unsized type");
        let size = layout.size.bytes();
        let align = layout.align.abi.bytes();

        let tcx = *self.tcx;
        let ptr_size = self.pointer_size();
        let ptr_align = tcx.data_layout.pointer_align.abi;
        // /////////////////////////////////////////////////////////////////////////////////////////
        // If you touch this code, be sure to also make the corresponding changes to
        // `get_vtable` in `rust_codegen_llvm/meth.rs`.
        // /////////////////////////////////////////////////////////////////////////////////////////
        // Layout: 3 pointer-sized header slots (drop fn, size, align) followed by one
        // slot per method; `checked_add` guards against overflow of the slot count.
        let vtable_size = ptr_size * u64::try_from(methods.len()).unwrap().checked_add(3).unwrap();
        let vtable = self.memory.allocate(vtable_size, ptr_align, MemoryKind::Vtable);

        // Header slot 0: the drop-in-place function for `ty`.
        let drop = Instance::resolve_drop_in_place(tcx, ty);
        let drop = self.memory.create_fn_alloc(FnVal::Instance(drop));

        // Prepare the fn ptrs we will write into the vtable later.
        // `None` entries in `methods` are skipped, but `enumerate` runs before the
        // `filter_map`, so each surviving method keeps its original slot index `i`.
        let fn_ptrs = methods
            .iter()
            .enumerate() // remember the original position
            .filter_map(|(i, method)| {
                if let Some((def_id, substs)) = method { Some((i, def_id, substs)) } else { None }
            })
            .map(|(i, def_id, substs)| {
                let instance =
                    ty::Instance::resolve_for_vtable(tcx, self.param_env, *def_id, substs)
                        .ok_or_else(|| err_inval!(TooGeneric))?;
                Ok((i, self.memory.create_fn_alloc(FnVal::Instance(instance))))
            })
            .collect::<InterpResult<'tcx, Vec<(usize, Pointer<M::PointerTag>)>>>()?;

        // No need to do any alignment checks on the memory accesses below, because we know the
        // allocation is correctly aligned as we created it above. Also we're only offsetting by
        // multiples of `ptr_align`, which means that it will stay aligned to `ptr_align`.
        let mut vtable_alloc =
            self.memory.get_mut(vtable.into(), vtable_size, ptr_align)?.expect("not a ZST");
        // Header: slot 0 = drop fn, slot 1 = size, slot 2 = align.
        vtable_alloc.write_ptr_sized(ptr_size * 0, drop.into())?;
        vtable_alloc.write_ptr_sized(ptr_size * 1, Scalar::from_uint(size, ptr_size).into())?;
        vtable_alloc.write_ptr_sized(ptr_size * 2, Scalar::from_uint(align, ptr_size).into())?;

        // Method slots start at offset 3; `i` is the trait-declaration position preserved above.
        for (i, fn_ptr) in fn_ptrs.into_iter() {
            vtable_alloc.write_ptr_sized(ptr_size * (3 + i as u64), fn_ptr.into())?;
        }

        M::after_static_mem_initialized(self, vtable, vtable_size)?;

        // Vtables are never mutated after creation.
        self.memory.mark_immutable(vtable.alloc_id)?;
        // Freshly created, so it cannot already be in the cache.
        assert!(self.vtables.insert((ty, poly_trait_ref), vtable).is_none());

        Ok(vtable)
    }
|
|
|
|
|
2019-05-17 01:20:14 +00:00
|
|
|
/// Resolves the function at the specified slot in the provided
|
2019-10-28 23:09:54 +00:00
|
|
|
/// vtable. An index of '0' corresponds to the first method
|
2019-05-17 01:20:14 +00:00
|
|
|
/// declared in the trait of the provided vtable.
|
2019-10-28 23:09:54 +00:00
|
|
|
pub fn get_vtable_slot(
|
|
|
|
&self,
|
|
|
|
vtable: Scalar<M::PointerTag>,
|
2020-03-21 12:49:02 +00:00
|
|
|
idx: u64,
|
2019-10-28 23:09:54 +00:00
|
|
|
) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
|
|
|
|
let ptr_size = self.pointer_size();
|
2019-05-17 01:20:14 +00:00
|
|
|
// Skip over the 'drop_ptr', 'size', and 'align' fields.
|
2020-03-24 15:43:50 +00:00
|
|
|
let vtable_slot = vtable.ptr_offset(ptr_size * idx.checked_add(3).unwrap(), self)?;
|
2019-12-22 22:42:04 +00:00
|
|
|
let vtable_slot = self
|
|
|
|
.memory
|
2021-05-16 16:53:20 +00:00
|
|
|
.get(vtable_slot, ptr_size, self.tcx.data_layout.pointer_align.abi)?
|
2019-12-22 22:42:04 +00:00
|
|
|
.expect("cannot be a ZST");
|
2021-05-16 16:53:20 +00:00
|
|
|
let fn_ptr = vtable_slot.read_ptr_sized(Size::ZERO)?.check_init()?;
|
2021-02-17 22:23:57 +00:00
|
|
|
self.memory.get_fn(fn_ptr)
|
2019-10-28 23:09:54 +00:00
|
|
|
}
|
|
|
|
|
2019-05-17 01:20:14 +00:00
|
|
|
/// Returns the drop fn instance as well as the actual dynamic type.
|
2017-08-10 15:48:38 +00:00
|
|
|
pub fn read_drop_type_from_vtable(
|
|
|
|
&self,
|
2019-06-23 12:26:36 +00:00
|
|
|
vtable: Scalar<M::PointerTag>,
|
2019-06-07 16:56:27 +00:00
|
|
|
) -> InterpResult<'tcx, (ty::Instance<'tcx>, Ty<'tcx>)> {
|
2019-05-17 01:20:14 +00:00
|
|
|
// We don't care about the pointee type; we just want a pointer.
|
2019-12-22 22:42:04 +00:00
|
|
|
let vtable = self
|
|
|
|
.memory
|
2021-05-16 16:53:20 +00:00
|
|
|
.get(vtable, self.tcx.data_layout.pointer_size, self.tcx.data_layout.pointer_align.abi)?
|
2019-12-22 22:42:04 +00:00
|
|
|
.expect("cannot be a ZST");
|
2021-05-16 16:53:20 +00:00
|
|
|
let drop_fn = vtable.read_ptr_sized(Size::ZERO)?.check_init()?;
|
2019-06-30 11:51:18 +00:00
|
|
|
// We *need* an instance here, no other kind of function value, to be able
|
|
|
|
// to determine the type.
|
|
|
|
let drop_instance = self.memory.get_fn(drop_fn)?.as_instance()?;
|
2018-08-25 12:36:24 +00:00
|
|
|
trace!("Found drop fn: {:?}", drop_instance);
|
2020-06-22 12:57:03 +00:00
|
|
|
let fn_sig = drop_instance.ty(*self.tcx, self.param_env).fn_sig(*self.tcx);
|
2020-10-24 00:21:18 +00:00
|
|
|
let fn_sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, fn_sig);
|
2019-06-23 12:26:36 +00:00
|
|
|
// The drop function takes `*mut T` where `T` is the type being dropped, so get that.
|
2019-12-12 14:23:27 +00:00
|
|
|
let args = fn_sig.inputs();
|
|
|
|
if args.len() != 1 {
|
2020-05-06 09:31:05 +00:00
|
|
|
throw_ub!(InvalidDropFn(fn_sig));
|
2019-12-12 14:23:27 +00:00
|
|
|
}
|
2020-05-06 11:26:24 +00:00
|
|
|
let ty = args[0].builtin_deref(true).ok_or_else(|| err_ub!(InvalidDropFn(fn_sig)))?.ty;
|
2018-08-25 12:36:24 +00:00
|
|
|
Ok((drop_instance, ty))
|
2017-03-22 16:48:16 +00:00
|
|
|
}
|
|
|
|
|
2017-08-10 15:48:38 +00:00
|
|
|
pub fn read_size_and_align_from_vtable(
|
|
|
|
&self,
|
2019-06-23 12:26:36 +00:00
|
|
|
vtable: Scalar<M::PointerTag>,
|
2019-06-07 16:56:27 +00:00
|
|
|
) -> InterpResult<'tcx, (Size, Align)> {
|
2018-08-26 18:42:52 +00:00
|
|
|
let pointer_size = self.pointer_size();
|
2019-05-17 01:20:14 +00:00
|
|
|
// We check for `size = 3 * ptr_size`, which covers the drop fn (unused here),
|
2019-06-23 16:06:11 +00:00
|
|
|
// the size, and the align (which we read below).
|
2019-12-22 22:42:04 +00:00
|
|
|
let vtable = self
|
|
|
|
.memory
|
2021-05-16 16:53:20 +00:00
|
|
|
.get(vtable, 3 * pointer_size, self.tcx.data_layout.pointer_align.abi)?
|
2019-12-22 22:42:04 +00:00
|
|
|
.expect("cannot be a ZST");
|
2021-05-16 16:53:20 +00:00
|
|
|
let size = vtable.read_ptr_sized(pointer_size)?.check_init()?;
|
2020-03-21 16:17:01 +00:00
|
|
|
let size = u64::try_from(self.force_bits(size, pointer_size)?).unwrap();
|
2021-05-16 16:53:20 +00:00
|
|
|
let align = vtable.read_ptr_sized(pointer_size * 2)?.check_init()?;
|
2020-03-21 16:17:01 +00:00
|
|
|
let align = u64::try_from(self.force_bits(align, pointer_size)?).unwrap();
|
2019-08-27 10:54:46 +00:00
|
|
|
|
2020-06-01 08:15:17 +00:00
|
|
|
if size >= self.tcx.data_layout.obj_size_bound() {
|
2019-12-22 22:42:04 +00:00
|
|
|
throw_ub_format!(
|
|
|
|
"invalid vtable: \
|
|
|
|
size is bigger than largest supported object"
|
|
|
|
);
|
2019-08-27 10:54:46 +00:00
|
|
|
}
|
2018-09-08 22:16:45 +00:00
|
|
|
Ok((Size::from_bytes(size), Align::from_bytes(align).unwrap()))
|
2017-02-10 11:12:33 +00:00
|
|
|
}
|
2016-09-09 10:51:14 +00:00
|
|
|
}
|