rust/compiler/rustc_const_eval/src/interpret/traits.rs

use std::convert::TryFrom;

use rustc_middle::mir::interpret::{InterpResult, Pointer, PointerArithmetic};
use rustc_middle::ty::{
    self, common_vtable_entries, Ty, COMMON_VTABLE_ENTRIES_ALIGN,
    COMMON_VTABLE_ENTRIES_DROPINPLACE, COMMON_VTABLE_ENTRIES_SIZE,
};
use rustc_target::abi::{Align, Size};

use super::util::ensure_monomorphic_enough;
use super::{FnVal, InterpCx, Machine};

impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    /// Creates a dynamic vtable for the given type and vtable origin. This is used only for
    /// trait objects.
    ///
    /// The `trait_ref` encodes the erased self type. Hence, if we are
    /// making an object `Foo<Trait>` from a value of type `Foo<T>`, then
    /// `trait_ref` would map `T: Trait`.
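    ///
    /// For example (an illustrative sketch, not code from this module): unsizing a
    /// `Foo<u32>` into a `Foo<Trait>` needs the vtable witnessing `u32: Trait`, so the
    /// caller would pass `ty = u32` and `poly_trait_ref = Some(Trait)`. A `poly_trait_ref`
    /// of `None` corresponds to a trait object without a principal trait (e.g. `dyn Send`),
    /// whose vtable presumably carries only the common entries.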
    pub fn get_vtable(
        &mut self,
        ty: Ty<'tcx>,
        poly_trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
    ) -> InterpResult<'tcx, Pointer<Option<M::PointerTag>>> {
        trace!("get_vtable(trait_ref={:?})", poly_trait_ref);
        let (ty, poly_trait_ref) = self.tcx.erase_regions((ty, poly_trait_ref));

        // All vtables must be monomorphic, bail out otherwise.
        ensure_monomorphic_enough(*self.tcx, ty)?;
        ensure_monomorphic_enough(*self.tcx, poly_trait_ref)?;

        let vtable_allocation = self.tcx.vtable_allocation((ty, poly_trait_ref));

        let vtable_ptr = self.global_base_pointer(Pointer::from(vtable_allocation))?;
        Ok(vtable_ptr.into())
    }

    /// Resolves the function at the specified slot in the provided
    /// vtable. Currently an index of '3' (`common_vtable_entries().len()`)
    /// corresponds to the first method declared in the trait of the provided vtable.
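    ///
    /// A sketch of the slot layout this module relies on (each slot is pointer-sized;
    /// the exact indices come from the `COMMON_VTABLE_ENTRIES_*` constants, so treat the
    /// picture as illustrative rather than normative):
    ///
    /// ```text
    /// vtable + COMMON_VTABLE_ENTRIES_DROPINPLACE * ptr_size   drop-in-place fn
    /// vtable + COMMON_VTABLE_ENTRIES_SIZE        * ptr_size   size of the concrete type
    /// vtable + COMMON_VTABLE_ENTRIES_ALIGN       * ptr_size   align of the concrete type
    /// vtable + 3                                 * ptr_size   first trait method
    /// ```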
    pub fn get_vtable_slot(
        &self,
        vtable: Pointer<Option<M::PointerTag>>,
        idx: u64,
    ) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
        let ptr_size = self.pointer_size();
        let vtable_slot = vtable.offset(ptr_size * idx, self)?;
        let vtable_slot = self
            .get_ptr_alloc(vtable_slot, ptr_size, self.tcx.data_layout.pointer_align.abi)?
            .expect("cannot be a ZST");
        let fn_ptr = self.scalar_to_ptr(vtable_slot.read_ptr_sized(Size::ZERO)?.check_init()?)?;
        self.get_ptr_fn(fn_ptr)
    }

    /// Returns the drop fn instance as well as the actual dynamic type.
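    ///
    /// Why that recovers the dynamic type (a sketch, assuming the drop slot holds the usual
    /// `drop_in_place` glue): the slot resolves to an instance of `ptr::drop_in_place::<T>`
    /// for the concrete type `T`, whose signature is `unsafe fn(*mut T)`, so peeling the
    /// `*mut` off its single argument yields `T` itself.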
    pub fn read_drop_type_from_vtable(
        &self,
        vtable: Pointer<Option<M::PointerTag>>,
    ) -> InterpResult<'tcx, (ty::Instance<'tcx>, Ty<'tcx>)> {
        let pointer_size = self.pointer_size();
        // We don't care about the pointee type; we just want a pointer.
        let vtable = self
            .get_ptr_alloc(
                vtable,
                pointer_size * u64::try_from(common_vtable_entries().len()).unwrap(),
                self.tcx.data_layout.pointer_align.abi,
            )?
            .expect("cannot be a ZST");
        let drop_fn = vtable
            .read_ptr_sized(
                pointer_size * u64::try_from(COMMON_VTABLE_ENTRIES_DROPINPLACE).unwrap(),
            )?
            .check_init()?;
        // We *need* an instance here, no other kind of function value, to be able
        // to determine the type.
        let drop_instance = self.get_ptr_fn(self.scalar_to_ptr(drop_fn)?)?.as_instance()?;
        trace!("Found drop fn: {:?}", drop_instance);
        let fn_sig = drop_instance.ty(*self.tcx, self.param_env).fn_sig(*self.tcx);
        let fn_sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, fn_sig);
        // The drop function takes `*mut T` where `T` is the type being dropped, so get that.
        let args = fn_sig.inputs();
        if args.len() != 1 {
            throw_ub!(InvalidVtableDropFn(fn_sig));
        }
        let ty =
            args[0].builtin_deref(true).ok_or_else(|| err_ub!(InvalidVtableDropFn(fn_sig)))?.ty;
        Ok((drop_instance, ty))
    }

    pub fn read_size_and_align_from_vtable(
        &self,
        vtable: Pointer<Option<M::PointerTag>>,
    ) -> InterpResult<'tcx, (Size, Align)> {
        let pointer_size = self.pointer_size();
        // We check for `size = 3 * ptr_size`, which covers the drop fn (unused here),
        // the size, and the align (which we read below).
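        // Illustrative example (assumed values, not tied to any particular caller): if the
        // concrete type behind the trait object were `[u16; 5]`, the size slot would hold 10
        // and the align slot would hold 2, both stored as pointer-sized machine integers.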
        let vtable = self
            .get_ptr_alloc(
                vtable,
                pointer_size * u64::try_from(common_vtable_entries().len()).unwrap(),
                self.tcx.data_layout.pointer_align.abi,
            )?
            .expect("cannot be a ZST");
        let size = vtable
            .read_ptr_sized(pointer_size * u64::try_from(COMMON_VTABLE_ENTRIES_SIZE).unwrap())?
            .check_init()?;
        let size = size.to_machine_usize(self)?;
        let size = Size::from_bytes(size);
        let align = vtable
            .read_ptr_sized(pointer_size * u64::try_from(COMMON_VTABLE_ENTRIES_ALIGN).unwrap())?
            .check_init()?;
        let align = align.to_machine_usize(self)?;
        let align = Align::from_bytes(align).map_err(|e| err_ub!(InvalidVtableAlignment(e)))?;

        if size > self.max_size_of_val() {
            throw_ub!(InvalidVtableSize);
        }
        Ok((size, align))
    }
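
    /// Reads, out of a (sub)trait vtable, the pointer to the vtable to use after a trait
    /// upcast.
    ///
    /// Sketch of the intent (inferred from how the slot is read below, not a normative
    /// description of the layout): with `trait Sub: Super`, the vtable generated for
    /// `dyn Sub` carries, at slot `idx`, a pointer to a vtable usable for `dyn Super`;
    /// upcasting a wide pointer swaps its metadata for that embedded vtable pointer.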
    pub fn read_new_vtable_after_trait_upcasting_from_vtable(
        &self,
        vtable: Pointer<Option<M::PointerTag>>,
        idx: u64,
    ) -> InterpResult<'tcx, Pointer<Option<M::PointerTag>>> {
        let pointer_size = self.pointer_size();
        let vtable_slot = vtable.offset(pointer_size * idx, self)?;
        let new_vtable = self
            .get_ptr_alloc(vtable_slot, pointer_size, self.tcx.data_layout.pointer_align.abi)?
            .expect("cannot be a ZST");
        let new_vtable =
            self.scalar_to_ptr(new_vtable.read_ptr_sized(Size::ZERO)?.check_init()?)?;
        Ok(new_vtable)
    }
}