Auto merge of #39628 - arielb1:shimmir, r=eddyb

Translate shims using MIR

This removes one large remaining part of old trans.
This commit is contained in:
bors 2017-03-20 15:58:10 +00:00
commit 134c4a0f08
88 changed files with 2826 additions and 3198 deletions

View File

@ -46,8 +46,13 @@
issue = "0")]
#![allow(missing_docs)]
extern "rust-intrinsic" {
#[cfg(not(stage0))]
#[stable(feature = "drop_in_place", since = "1.8.0")]
#[rustc_deprecated(reason = "no longer an intrinsic - use `ptr::drop_in_place` directly",
since = "1.18.0")]
pub use ptr::drop_in_place;
extern "rust-intrinsic" {
// NB: These intrinsics take raw pointers because they mutate aliased
// memory, which is not valid for either `&` or `&mut`.
@ -622,6 +627,7 @@ extern "rust-intrinsic" {
pub fn size_of_val<T: ?Sized>(_: &T) -> usize;
pub fn min_align_of_val<T: ?Sized>(_: &T) -> usize;
#[cfg(stage0)]
/// Executes the destructor (if any) of the pointed-to value.
///
/// This has two use cases:

View File

@ -37,9 +37,38 @@ pub use intrinsics::copy;
#[stable(feature = "rust1", since = "1.0.0")]
pub use intrinsics::write_bytes;
#[cfg(stage0)]
#[stable(feature = "drop_in_place", since = "1.8.0")]
pub use intrinsics::drop_in_place;
#[cfg(not(stage0))]
/// Executes the destructor (if any) of the pointed-to value.
///
/// This has two use cases:
///
/// * It is *required* to use `drop_in_place` to drop unsized types like
/// trait objects, because they can't be read out onto the stack and
/// dropped normally.
///
/// * It is friendlier to the optimizer to do this over `ptr::read` when
/// dropping manually allocated memory (e.g. when writing Box/Rc/Vec),
/// as the compiler doesn't need to prove that it's sound to elide the
/// copy.
///
/// # Undefined Behavior
///
/// This has all the same safety problems as `ptr::read` with respect to
/// invalid pointers, types, and double drops.
#[stable(feature = "drop_in_place", since = "1.8.0")]
// The "drop_in_place" lang item: the compiler recognizes this function and
// substitutes the real, per-type drop glue for its body at trans time.
#[lang="drop_in_place"]
#[inline]
// The placeholder body calls itself, which would trip the
// unconditional-recursion lint; the recursion never actually runs because
// the body is replaced by the compiler (see comment inside).
#[allow(unconditional_recursion)]
pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
// Code here does not matter - this is replaced by the
// real drop glue by the compiler.
drop_in_place(to_drop);
}
/// Creates a null raw pointer.
///
/// # Examples

View File

@ -89,6 +89,7 @@ pub enum DepNode<D: Clone + Debug> {
// things read/modify that MIR.
MirKrate,
Mir(D),
MirShim(Vec<D>),
BorrowCheckKrate,
BorrowCheck(D),
@ -258,6 +259,10 @@ impl<D: Clone + Debug> DepNode<D> {
IntrinsicCheck(ref d) => op(d).map(IntrinsicCheck),
MatchCheck(ref d) => op(d).map(MatchCheck),
Mir(ref d) => op(d).map(Mir),
MirShim(ref def_ids) => {
let def_ids: Option<Vec<E>> = def_ids.iter().map(op).collect();
def_ids.map(MirShim)
}
BorrowCheck(ref d) => op(d).map(BorrowCheck),
RvalueCheck(ref d) => op(d).map(RvalueCheck),
StabilityCheck(ref d) => op(d).map(StabilityCheck),

View File

@ -335,7 +335,7 @@ language_item_table! {
ExchangeMallocFnLangItem, "exchange_malloc", exchange_malloc_fn;
BoxFreeFnLangItem, "box_free", box_free_fn;
StrDupUniqFnLangItem, "strdup_uniq", strdup_uniq_fn;
DropInPlaceFnLangItem, "drop_in_place", drop_in_place_fn;
StartFnLangItem, "start", start_fn;
@ -355,8 +355,6 @@ language_item_table! {
ContravariantLifetimeItem, "contravariant_lifetime", contravariant_lifetime;
InvariantLifetimeItem, "invariant_lifetime", invariant_lifetime;
NoCopyItem, "no_copy_bound", no_copy_bound;
NonZeroItem, "non_zero", non_zero;
DebugTraitLangItem, "debug_trait", debug_trait;

View File

@ -17,7 +17,7 @@ use rustc_data_structures::control_flow_graph::{GraphPredecessors, GraphSuccesso
use rustc_data_structures::control_flow_graph::ControlFlowGraph;
use hir::def::CtorKind;
use hir::def_id::DefId;
use ty::subst::Substs;
use ty::subst::{Subst, Substs};
use ty::{self, AdtDef, ClosureSubsts, Region, Ty};
use ty::fold::{TypeFoldable, TypeFolder, TypeVisitor};
use util::ppaux;
@ -982,6 +982,22 @@ impl<'tcx> Debug for Operand<'tcx> {
}
}
impl<'tcx> Operand<'tcx> {
/// Builds a constant `Operand` referring to the item `def_id`
/// instantiated with `substs`: the operand's type is the item's
/// declared type with `substs` applied, and its literal is
/// `Literal::Item { def_id, substs }`.
pub fn item<'a>(tcx: ty::TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId,
substs: &'tcx Substs<'tcx>,
span: Span)
-> Self
{
Operand::Constant(Constant {
span: span,
// The item's generic type, specialized to these substitutions.
ty: tcx.item_type(def_id).subst(tcx, substs),
literal: Literal::Item { def_id, substs }
})
}
}
///////////////////////////////////////////////////////////////////////////
/// Rvalues

View File

@ -40,7 +40,7 @@ pub use self::select::{EvaluationCache, SelectionContext, SelectionCache};
pub use self::select::{MethodMatchResult, MethodMatched, MethodAmbiguous, MethodDidNotMatch};
pub use self::select::{MethodMatchedData}; // intentionally don't export variants
pub use self::specialize::{OverlapError, specialization_graph, specializes, translate_substs};
pub use self::specialize::{SpecializesCache, find_method};
pub use self::specialize::{SpecializesCache, find_associated_item};
pub use self::util::elaborate_predicates;
pub use self::util::supertraits;
pub use self::util::Supertraits;

View File

@ -29,8 +29,6 @@ use traits::{self, Reveal, ObligationCause};
use ty::{self, TyCtxt, TypeFoldable};
use syntax_pos::DUMMY_SP;
use syntax::ast;
pub mod specialization_graph;
/// Information pertinent to an overlapping impl error.
@ -106,22 +104,23 @@ pub fn translate_substs<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>,
}
/// Given a selected impl described by `impl_data`, returns the
/// definition and substitutions for the method with the name `name`,
/// and trait method substitutions `substs`, in that impl, a less
/// specialized impl, or the trait default, whichever applies.
pub fn find_method<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
name: ast::Name,
substs: &'tcx Substs<'tcx>,
impl_data: &super::VtableImplData<'tcx, ()>)
-> (DefId, &'tcx Substs<'tcx>)
{
/// definition and substitutions for the method with the name `name`,
/// the kind `kind`, and trait method substitutions `substs`, in
/// that impl, a less specialized impl, or the trait default,
/// whichever applies.
pub fn find_associated_item<'a, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
item: &ty::AssociatedItem,
substs: &'tcx Substs<'tcx>,
impl_data: &super::VtableImplData<'tcx, ()>,
) -> (DefId, &'tcx Substs<'tcx>) {
assert!(!substs.needs_infer());
let trait_def_id = tcx.trait_id_of_impl(impl_data.impl_def_id).unwrap();
let trait_def = tcx.lookup_trait_def(trait_def_id);
let ancestors = trait_def.ancestors(impl_data.impl_def_id);
match ancestors.defs(tcx, name, ty::AssociatedKind::Method).next() {
match ancestors.defs(tcx, item.name, item.kind).next() {
Some(node_item) => {
let substs = tcx.infer_ctxt((), Reveal::All).enter(|infcx| {
let substs = substs.rebase_onto(tcx, trait_def_id, impl_data.substs);
@ -137,7 +136,7 @@ pub fn find_method<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
(node_item.item.def_id, substs)
}
None => {
bug!("method {:?} not found in {:?}", name, impl_data.impl_def_id)
bug!("{:?} not found in {:?}", item, impl_data.impl_def_id)
}
}
}

View File

@ -1469,6 +1469,15 @@ impl<T, R> InternIteratorElement<T, R> for T {
}
}
// Interning an iterator of borrowed `&T` elements (where `T: Clone`):
// clone the elements into a small buffer and hand the resulting slice
// to the interning closure.
impl<'a, T, R> InternIteratorElement<T, R> for &'a T
where T: Clone + 'a
{
type Output = R;
fn intern_with<I: Iterator<Item=Self>, F: FnOnce(&[T]) -> R>(iter: I, f: F) -> Self::Output {
// AccumulateVec<[_; 8]> keeps up to 8 elements on the stack,
// avoiding a heap allocation for the common short case.
f(&iter.cloned().collect::<AccumulateVec<[_; 8]>>())
}
}
impl<T, R, E> InternIteratorElement<T, R> for Result<T, E> {
type Output = Result<R, E>;
fn intern_with<I: Iterator<Item=Self>, F: FnOnce(&[T]) -> R>(iter: I, f: F) -> Self::Output {

128
src/librustc/ty/instance.rs Normal file
View File

@ -0,0 +1,128 @@
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use dep_graph::DepNode;
use hir::def_id::DefId;
use ty::{self, Ty, TypeFoldable, Substs};
use util::ppaux;
use std::borrow::Cow;
use std::fmt;
use syntax::ast;
/// A monomorphized callable: an `InstanceDef` (the item or
/// compiler-generated shim to translate) paired with the substitutions
/// to instantiate it with.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub struct Instance<'tcx> {
pub def: InstanceDef<'tcx>,
pub substs: &'tcx Substs<'tcx>,
}
/// The kinds of callable for which MIR can be produced: an ordinary
/// user-written item, or one of the compiler-generated shims.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub enum InstanceDef<'tcx> {
/// An ordinary function/method item.
Item(DefId),
/// A compiler intrinsic.
Intrinsic(DefId),
// <fn() as FnTrait>::call_*
// def-id is FnTrait::call_*
FnPtrShim(DefId, Ty<'tcx>),
// <Trait as Trait>::fn
/// Virtual (vtable) call; the `usize` is the vtable slot index.
Virtual(DefId, usize),
// <[mut closure] as FnOnce>::call_once
ClosureOnceShim { call_once: DefId },
// drop_in_place::<T>; None for empty drop glue.
DropGlue(DefId, Option<Ty<'tcx>>),
}
impl<'tcx> InstanceDef<'tcx> {
/// The def-id of the item this instance is based on (for shims, the
/// def-id stored in the variant).
#[inline]
pub fn def_id(&self) -> DefId {
match *self {
InstanceDef::Item(def_id) |
InstanceDef::FnPtrShim(def_id, _) |
InstanceDef::Virtual(def_id, _) |
InstanceDef::Intrinsic(def_id, ) |
InstanceDef::ClosureOnceShim { call_once: def_id }
=> def_id,
InstanceDef::DropGlue(def_id, _) => def_id
}
}
/// The (unsubstituted) declared type of the underlying item.
#[inline]
pub fn def_ty<'a>(&self, tcx: ty::TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx> {
tcx.item_type(self.def_id())
}
/// The attributes of the underlying item.
#[inline]
pub fn attrs<'a>(&self, tcx: ty::TyCtxt<'a, 'tcx, 'tcx>) -> Cow<'tcx, [ast::Attribute]> {
tcx.get_attrs(self.def_id())
}
/// The dep-graph node that MIR-shim generation for this instance is
/// charged to: the instance's def-id plus, for type-parameterized
/// shims (fn-pointer shims and drop glue), the def-ids of ADTs and
/// trait projections mentioned anywhere in that type.
pub(crate) fn dep_node(&self) -> DepNode<DefId> {
// HACK: def-id binning, project-style; someone replace this with
// real on-demand.
let ty = match self {
&InstanceDef::FnPtrShim(_, ty) => Some(ty),
&InstanceDef::DropGlue(_, ty) => ty,
_ => None
}.into_iter();
DepNode::MirShim(
Some(self.def_id()).into_iter().chain(
ty.flat_map(|t| t.walk()).flat_map(|t| match t.sty {
ty::TyAdt(adt_def, _) => Some(adt_def.did),
ty::TyProjection(ref proj) => Some(proj.trait_ref.def_id),
_ => None,
})
).collect()
)
}
}
impl<'tcx> fmt::Display for Instance<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// Print the instance as a parameterized path first, then append a
// suffix describing which kind of shim (if any) this is.
ppaux::parameterized(f, self.substs, self.def_id(), &[])?;
match self.def {
InstanceDef::Item(_) => Ok(()),
InstanceDef::Intrinsic(_) => write!(f, " - intrinsic"),
InstanceDef::Virtual(_, num) => write!(f, " - shim(#{})", num),
InstanceDef::FnPtrShim(_, ty) => write!(f, " - shim({:?})", ty),
InstanceDef::ClosureOnceShim { .. } => write!(f, " - shim"),
InstanceDef::DropGlue(_, ty) => write!(f, " - shim({:?})", ty),
}
}
}
impl<'a, 'b, 'tcx> Instance<'tcx> {
/// Creates an `InstanceDef::Item` instance, asserting that the
/// substitutions are normalized for trans and region-free.
pub fn new(def_id: DefId, substs: &'tcx Substs<'tcx>)
-> Instance<'tcx> {
assert!(substs.is_normalized_for_trans() && !substs.has_escaping_regions(),
"substs of instance {:?} not normalized for trans: {:?}",
def_id, substs);
Instance { def: InstanceDef::Item(def_id), substs: substs }
}
/// Creates an instance for a monomorphic item (one with no type
/// parameters), using empty erased-region substitutions.
pub fn mono(tcx: ty::TyCtxt<'a, 'tcx, 'b>, def_id: DefId) -> Instance<'tcx> {
Instance::new(def_id, tcx.global_tcx().empty_substs_for_def_id(def_id))
}
/// Shorthand for `self.def.def_id()`.
#[inline]
pub fn def_id(&self) -> DefId {
self.def.def_id()
}
}

View File

@ -9,7 +9,7 @@
// except according to those terms.
use dep_graph::{DepGraph, DepNode, DepTrackingMap, DepTrackingMapConfig};
use hir::def_id::{CrateNum, DefId};
use hir::def_id::{CrateNum, DefId, LOCAL_CRATE};
use middle::const_val::ConstVal;
use mir;
use ty::{self, Ty, TyCtxt};
@ -24,6 +24,16 @@ trait Key {
fn default_span(&self, tcx: TyCtxt) -> Span;
}
// Query key for `InstanceDef`: shim MIR is always generated in the
// local crate, and diagnostics point at the underlying item's span.
impl<'tcx> Key for ty::InstanceDef<'tcx> {
fn map_crate(&self) -> CrateNum {
LOCAL_CRATE
}
fn default_span(&self, tcx: TyCtxt) -> Span {
tcx.def_span(self.def_id())
}
}
impl Key for CrateNum {
fn map_crate(&self) -> CrateNum {
*self
@ -83,9 +93,9 @@ impl<'tcx> Value<'tcx> for Ty<'tcx> {
}
}
pub struct CycleError<'a> {
pub struct CycleError<'a, 'tcx: 'a> {
span: Span,
cycle: RefMut<'a, [(Span, Query)]>,
cycle: RefMut<'a, [(Span, Query<'tcx>)]>,
}
impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
@ -110,8 +120,8 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
err.emit();
}
fn cycle_check<F, R>(self, span: Span, query: Query, compute: F)
-> Result<R, CycleError<'a>>
fn cycle_check<F, R>(self, span: Span, query: Query<'gcx>, compute: F)
-> Result<R, CycleError<'a, 'gcx>>
where F: FnOnce() -> R
{
{
@ -172,13 +182,20 @@ impl<'tcx> QueryDescription for queries::coherent_inherent_impls<'tcx> {
}
}
impl<'tcx> QueryDescription for queries::mir_shims<'tcx> {
// Human-readable description of the `mir_shims` query, used in
// cycle-error diagnostics.
fn describe(tcx: TyCtxt, def: ty::InstanceDef<'tcx>) -> String {
format!("generating MIR shim for `{}`",
tcx.item_path_str(def.def_id()))
}
}
macro_rules! define_maps {
(<$tcx:tt>
$($(#[$attr:meta])*
pub $name:ident: $node:ident($K:ty) -> $V:ty),*) => {
pub struct Maps<$tcx> {
providers: IndexVec<CrateNum, Providers<$tcx>>,
query_stack: RefCell<Vec<(Span, Query)>>,
query_stack: RefCell<Vec<(Span, Query<$tcx>)>>,
$($(#[$attr])* pub $name: RefCell<DepTrackingMap<queries::$name<$tcx>>>),*
}
@ -196,11 +213,11 @@ macro_rules! define_maps {
#[allow(bad_style)]
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum Query {
pub enum Query<$tcx> {
$($(#[$attr])* $name($K)),*
}
impl Query {
impl<$tcx> Query<$tcx> {
pub fn describe(&self, tcx: TyCtxt) -> String {
match *self {
$(Query::$name(key) => queries::$name::describe(tcx, key)),*
@ -233,7 +250,7 @@ macro_rules! define_maps {
mut span: Span,
key: $K,
f: F)
-> Result<R, CycleError<'a>>
-> Result<R, CycleError<'a, $tcx>>
where F: FnOnce(&$V) -> R
{
if let Some(result) = tcx.maps.$name.borrow().get(&key) {
@ -256,7 +273,7 @@ macro_rules! define_maps {
}
pub fn try_get(tcx: TyCtxt<'a, $tcx, 'lcx>, span: Span, key: $K)
-> Result<$V, CycleError<'a>> {
-> Result<$V, CycleError<'a, $tcx>> {
Self::try_get_with(tcx, span, key, Clone::clone)
}
@ -387,7 +404,9 @@ define_maps! { <'tcx>
/// Results of evaluating monomorphic constants embedded in
/// other items, such as enum variant explicit discriminants.
pub monomorphic_const_eval: MonomorphicConstEval(DefId) -> Result<ConstVal<'tcx>, ()>
pub monomorphic_const_eval: MonomorphicConstEval(DefId) -> Result<ConstVal<'tcx>, ()>,
pub mir_shims: mir_shim(ty::InstanceDef<'tcx>) -> &'tcx RefCell<mir::Mir<'tcx>>
}
fn coherent_trait_dep_node((_, def_id): (CrateNum, DefId)) -> DepNode<DefId> {
@ -397,3 +416,7 @@ fn coherent_trait_dep_node((_, def_id): (CrateNum, DefId)) -> DepNode<DefId> {
fn coherent_inherent_impls_dep_node(_: CrateNum) -> DepNode<DefId> {
DepNode::Coherence
}
// Dep-node constructor for the `mir_shims` query; delegates to the
// instance's own `dep_node` (defined on `ty::InstanceDef`).
fn mir_shim(instance: ty::InstanceDef) -> DepNode<DefId> {
instance.dep_node()
}

View File

@ -73,6 +73,8 @@ pub use self::contents::TypeContents;
pub use self::context::{TyCtxt, GlobalArenas, tls};
pub use self::context::{Lift, TypeckTables};
pub use self::instance::{Instance, InstanceDef};
pub use self::trait_def::{TraitDef, TraitFlags};
pub use self::maps::queries;
@ -98,6 +100,7 @@ pub mod util;
mod contents;
mod context;
mod flags;
mod instance;
mod structural_impls;
mod sty;
@ -1264,10 +1267,17 @@ impl<'a, 'tcx> ParameterEnvironment<'tcx> {
def_id,
ROOT_CODE_EXTENT)
}
_ => {
Some(hir_map::NodeStructCtor(..)) |
Some(hir_map::NodeVariant(..)) => {
let def_id = tcx.hir.local_def_id(id);
tcx.construct_parameter_environment(tcx.hir.span(id),
def_id,
ROOT_CODE_EXTENT)
}
it => {
bug!("ParameterEnvironment::from_item(): \
`{}` is not an item",
tcx.hir.node_to_string(id))
`{}` = {:?} is unsupported",
tcx.hir.node_to_string(id), it)
}
}
}
@ -2302,6 +2312,16 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
queries::mir::get(self, DUMMY_SP, did).borrow()
}
/// Return the possibly-auto-generated MIR of a (DefId, Subst) pair.
///
/// Plain items use the ordinary per-item MIR; every shim variant is
/// fetched through the `mir_shims` query instead.
pub fn instance_mir(self, instance: ty::InstanceDef<'gcx>)
-> Ref<'gcx, Mir<'gcx>>
{
match instance {
// NOTE(review): the `if true` guard looks redundant — presumably a
// workaround for a type/borrow quirk between the two arms' `Ref`
// sources; confirm before removing.
ty::InstanceDef::Item(did) if true => self.item_mir(did),
_ => queries::mir_shims::get(self, DUMMY_SP, instance).borrow(),
}
}
/// Given the DefId of an item, returns its MIR, borrowed immutably.
/// Returns None if there is no MIR for the DefId
pub fn maybe_item_mir(self, did: DefId) -> Option<Ref<'gcx, Mir<'gcx>>> {

View File

@ -398,6 +398,16 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
}
def_id
}
/// Given the def-id of some item that has no type parameters, make
/// a suitable "empty substs" for it.
pub fn empty_substs_for_def_id(self, item_def_id: DefId) -> &'tcx ty::Substs<'tcx> {
ty::Substs::for_item(self, item_def_id,
// Every region parameter is erased.
|_, _| self.mk_region(ty::ReErased),
// The item is supposed to be monomorphic, so any request for a
// type parameter is a bug.
|_, _| {
bug!("empty_substs_for_def_id: {:?} has type parameters", item_def_id)
})
}
}
pub struct TypeIdHasher<'a, 'gcx: 'a+'tcx, 'tcx: 'a, W> {

View File

@ -15,6 +15,7 @@ use rustc::mir::{BasicBlock, Mir};
use rustc_data_structures::bitslice::bits_to_string;
use rustc_data_structures::indexed_set::{IdxSet};
use rustc_data_structures::indexed_vec::Idx;
use rustc_mir::util as mir_util;
use dot;
use dot::IntoCow;
@ -219,7 +220,7 @@ impl<'a, 'tcx, MWF, P> dot::Labeller<'a> for Graph<'a, 'tcx, MWF, P>
}
Ok(())
}
::rustc_mir::graphviz::write_node_label(
mir_util::write_graphviz_node_label(
*n, self.mbcx.mir(), &mut v, 4,
|w| {
let flow = self.mbcx.flow_state();

View File

@ -14,10 +14,10 @@ use rustc_data_structures::bitslice::BitSlice; // adds set_bit/get_bit to &[usiz
use rustc_data_structures::bitslice::{BitwiseOperator};
use rustc_data_structures::indexed_set::{IdxSet};
use rustc_data_structures::indexed_vec::Idx;
use rustc_mir::util::elaborate_drops::DropFlagState;
use super::super::gather_moves::{HasMoveData, MoveData, MoveOutIndex, MovePathIndex};
use super::super::MoveDataParamEnv;
use super::super::DropFlagState;
use super::super::drop_flag_effects_for_function_entry;
use super::super::drop_flag_effects_for_location;
use super::super::on_lookup_result_bits;

View File

@ -13,22 +13,20 @@ use super::dataflow::{MaybeInitializedLvals, MaybeUninitializedLvals};
use super::dataflow::{DataflowResults};
use super::{drop_flag_effects_for_location, on_all_children_bits};
use super::on_lookup_result_bits;
use super::{DropFlagState, MoveDataParamEnv};
use super::patch::MirPatch;
use rustc::ty::{self, Ty, TyCtxt};
use rustc::ty::subst::{Kind, Subst, Substs};
use rustc::ty::util::IntTypeExt;
use super::MoveDataParamEnv;
use rustc::ty::{self, TyCtxt};
use rustc::mir::*;
use rustc::mir::transform::{Pass, MirPass, MirSource};
use rustc::middle::const_val::ConstVal;
use rustc::middle::lang_items;
use rustc::util::nodemap::FxHashMap;
use rustc_data_structures::indexed_set::IdxSetBuf;
use rustc_data_structures::indexed_vec::Idx;
use rustc_mir::util::patch::MirPatch;
use rustc_mir::util::elaborate_drops::{DropFlagState, elaborate_drop};
use rustc_mir::util::elaborate_drops::{DropElaborator, DropStyle, DropFlagMode};
use syntax_pos::Span;
use std::fmt;
use std::iter;
use std::u32;
pub struct ElaborateDrops;
@ -109,12 +107,116 @@ impl InitializationData {
}
}
impl fmt::Debug for InitializationData {
// Adapter implementing `DropElaborator` on top of the pass context,
// pairing the per-location initialization data with the shared ctxt.
struct Elaborator<'a, 'b: 'a, 'tcx: 'b> {
init_data: &'a InitializationData,
ctxt: &'a mut ElaborateDropsCtxt<'b, 'tcx>,
}
// Deliberately empty Debug output — presumably a `Debug` bound on
// `DropElaborator` requires this impl, but there is nothing useful to
// print here (TODO confirm against the trait definition).
impl<'a, 'b, 'tcx> fmt::Debug for Elaborator<'a, 'b, 'tcx> {
fn fmt(&self, _f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
Ok(())
}
}
impl<'a, 'b, 'tcx> DropElaborator<'a, 'tcx> for Elaborator<'a, 'b, 'tcx> {
type Path = MovePathIndex;
// -- plumbing accessors: all delegate to the shared pass context --
fn patch(&mut self) -> &mut MirPatch<'tcx> {
&mut self.ctxt.patch
}
fn mir(&self) -> &'a Mir<'tcx> {
self.ctxt.mir
}
fn tcx(&self) -> ty::TyCtxt<'a, 'tcx, 'tcx> {
self.ctxt.tcx
}
fn param_env(&self) -> &'a ty::ParameterEnvironment<'tcx> {
self.ctxt.param_env()
}
// Decides how the drop of `path` should be emitted, based on the
// (maybe-live, maybe-dead) dataflow state at this location:
// dead -> no drop, live-only -> unconditional drop, both ->
// flag-conditional drop, and "open" when multiple drop-needing
// children have mixed state.
fn drop_style(&self, path: Self::Path, mode: DropFlagMode) -> DropStyle {
let ((maybe_live, maybe_dead), multipart) = match mode {
DropFlagMode::Shallow => (self.init_data.state(path), false),
DropFlagMode::Deep => {
// Aggregate the init state over every child path that
// actually needs a drop.
let mut some_live = false;
let mut some_dead = false;
let mut children_count = 0;
on_all_children_bits(
self.tcx(), self.mir(), self.ctxt.move_data(),
path, |child| {
if self.ctxt.path_needs_drop(child) {
let (live, dead) = self.init_data.state(child);
debug!("elaborate_drop: state({:?}) = {:?}",
child, (live, dead));
some_live |= live;
some_dead |= dead;
children_count += 1;
}
});
// More than one drop-relevant child means the drop may
// have to be split into parts.
((some_live, some_dead), children_count != 1)
}
};
match (maybe_live, maybe_dead, multipart) {
(false, _, _) => DropStyle::Dead,
(true, false, _) => DropStyle::Static,
(true, true, false) => DropStyle::Conditional,
(true, true, true) => DropStyle::Open,
}
}
// Clears the drop flag(s) for `path` at `loc`: just the path itself
// in Shallow mode, or the flag of every child path in Deep mode.
fn clear_drop_flag(&mut self, loc: Location, path: Self::Path, mode: DropFlagMode) {
match mode {
DropFlagMode::Shallow => {
self.ctxt.set_drop_flag(loc, path, DropFlagState::Absent);
}
DropFlagMode::Deep => {
on_all_children_bits(
self.tcx(), self.mir(), self.ctxt.move_data(), path,
|child| self.ctxt.set_drop_flag(loc, child, DropFlagState::Absent)
);
}
}
}
// Move-path lookup for the child corresponding to field `field`.
fn field_subpath(&self, path: Self::Path, field: Field) -> Option<Self::Path> {
super::move_path_children_matching(self.ctxt.move_data(), path, |p| {
match p {
&Projection {
elem: ProjectionElem::Field(idx, _), ..
} => idx == field,
_ => false
}
})
}
// Move-path lookup for the deref child (e.g. the contents of a Box).
fn deref_subpath(&self, path: Self::Path) -> Option<Self::Path> {
super::move_path_children_matching(self.ctxt.move_data(), path, |p| {
match p {
&Projection { elem: ProjectionElem::Deref, .. } => true,
_ => false
}
})
}
// Move-path lookup for the downcast child of enum variant `variant`.
fn downcast_subpath(&self, path: Self::Path, variant: usize) -> Option<Self::Path> {
super::move_path_children_matching(self.ctxt.move_data(), path, |p| {
match p {
&Projection {
elem: ProjectionElem::Downcast(_, idx), ..
} => idx == variant,
_ => false
}
})
}
// The lvalue holding the drop flag for `path`, if that path is
// flag-tracked, wrapped as a read operand.
fn get_drop_flag(&mut self, path: Self::Path) -> Option<Operand<'tcx>> {
self.ctxt.drop_flag(path).map(Operand::Consume)
}
}
struct ElaborateDropsCtxt<'a, 'tcx: 'a> {
tcx: TyCtxt<'a, 'tcx, 'tcx>,
mir: &'a Mir<'tcx>,
@ -125,19 +227,6 @@ struct ElaborateDropsCtxt<'a, 'tcx: 'a> {
patch: MirPatch<'tcx>,
}
#[derive(Copy, Clone, Debug)]
struct DropCtxt<'a, 'tcx: 'a> {
source_info: SourceInfo,
is_cleanup: bool,
init_data: &'a InitializationData,
lvalue: &'a Lvalue<'tcx>,
path: MovePathIndex,
succ: BasicBlock,
unwind: Option<BasicBlock>
}
impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
fn move_data(&self) -> &'b MoveData<'tcx> { &self.env.move_data }
fn param_env(&self) -> &'b ty::ParameterEnvironment<'tcx> {
@ -254,19 +343,22 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
let init_data = self.initialization_data_at(loc);
match self.move_data().rev_lookup.find(location) {
LookupResult::Exact(path) => {
self.elaborate_drop(&DropCtxt {
source_info: terminator.source_info,
is_cleanup: data.is_cleanup,
init_data: &init_data,
lvalue: location,
path: path,
succ: target,
unwind: if data.is_cleanup {
elaborate_drop(
&mut Elaborator {
init_data: &init_data,
ctxt: self
},
terminator.source_info,
data.is_cleanup,
location,
path,
target,
if data.is_cleanup {
None
} else {
Some(Option::unwrap_or(unwind, resume_block))
}
}, bb);
},
bb)
}
LookupResult::Parent(..) => {
span_bug!(terminator.source_info.span,
@ -343,15 +435,18 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
debug!("elaborate_drop_and_replace({:?}) - tracked {:?}", terminator, path);
let init_data = self.initialization_data_at(loc);
self.elaborate_drop(&DropCtxt {
source_info: terminator.source_info,
is_cleanup: data.is_cleanup,
init_data: &init_data,
lvalue: location,
path: path,
succ: target,
unwind: Some(unwind)
}, bb);
elaborate_drop(
&mut Elaborator {
init_data: &init_data,
ctxt: self
},
terminator.source_info,
data.is_cleanup,
location,
path,
target,
Some(unwind),
bb);
on_all_children_bits(self.tcx, self.mir, self.move_data(), path, |child| {
self.set_drop_flag(Location { block: target, statement_index: 0 },
child, DropFlagState::Present);
@ -372,547 +467,6 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
}
}
/// This elaborates a single drop instruction, located at `bb`, and
/// patches over it.
///
/// The elaborated drop checks the drop flags to only drop what
/// is initialized.
///
/// In addition, the relevant drop flags also need to be cleared
/// to avoid double-drops. However, in the middle of a complex
/// drop, one must avoid clearing some of the flags before they
/// are read, as that would cause a memory leak.
///
/// In particular, when dropping an ADT, multiple fields may be
/// joined together under the `rest` subpath. They are all controlled
/// by the primary drop flag, but only the last rest-field dropped
/// should clear it (and it must also not clear anything else).
///
/// FIXME: I think we should just control the flags externally
/// and then we do not need this machinery.
fn elaborate_drop<'a>(&mut self, c: &DropCtxt<'a, 'tcx>, bb: BasicBlock) {
debug!("elaborate_drop({:?})", c);
let mut some_live = false;
let mut some_dead = false;
let mut children_count = 0;
on_all_children_bits(
self.tcx, self.mir, self.move_data(),
c.path, |child| {
if self.path_needs_drop(child) {
let (live, dead) = c.init_data.state(child);
debug!("elaborate_drop: state({:?}) = {:?}",
child, (live, dead));
some_live |= live;
some_dead |= dead;
children_count += 1;
}
});
debug!("elaborate_drop({:?}): live - {:?}", c,
(some_live, some_dead));
match (some_live, some_dead) {
(false, false) | (false, true) => {
// dead drop - patch it out
self.patch.patch_terminator(bb, TerminatorKind::Goto {
target: c.succ
});
}
(true, false) => {
// static drop - just set the flag
self.patch.patch_terminator(bb, TerminatorKind::Drop {
location: c.lvalue.clone(),
target: c.succ,
unwind: c.unwind
});
self.drop_flags_for_drop(c, bb);
}
(true, true) => {
// dynamic drop
let drop_bb = if children_count == 1 || self.must_complete_drop(c) {
self.conditional_drop(c)
} else {
self.open_drop(c)
};
self.patch.patch_terminator(bb, TerminatorKind::Goto {
target: drop_bb
});
}
}
}
/// Return the lvalue and move path for each field of `variant`,
/// (the move path is `None` if the field is a rest field).
fn move_paths_for_fields(&self,
base_lv: &Lvalue<'tcx>,
variant_path: MovePathIndex,
variant: &'tcx ty::VariantDef,
substs: &'tcx Substs<'tcx>)
-> Vec<(Lvalue<'tcx>, Option<MovePathIndex>)>
{
variant.fields.iter().enumerate().map(|(i, f)| {
let subpath =
super::move_path_children_matching(self.move_data(), variant_path, |p| {
match p {
&Projection {
elem: ProjectionElem::Field(idx, _), ..
} => idx.index() == i,
_ => false
}
});
let field_ty =
self.tcx.normalize_associated_type_in_env(
&f.ty(self.tcx, substs),
self.param_env()
);
(base_lv.clone().field(Field::new(i), field_ty), subpath)
}).collect()
}
/// Create one-half of the drop ladder for a list of fields, and return
/// the list of steps in it in reverse order.
///
/// `unwind_ladder` is such a list of steps in reverse order,
/// which is called instead of the next step if the drop unwinds
/// (the first field is never reached). If it is `None`, all
/// unwind targets are left blank.
fn drop_halfladder<'a>(&mut self,
c: &DropCtxt<'a, 'tcx>,
unwind_ladder: Option<Vec<BasicBlock>>,
succ: BasicBlock,
fields: &[(Lvalue<'tcx>, Option<MovePathIndex>)],
is_cleanup: bool)
-> Vec<BasicBlock>
{
let mut unwind_succ = if is_cleanup {
None
} else {
c.unwind
};
let mut succ = self.new_block(
c, c.is_cleanup, TerminatorKind::Goto { target: succ }
);
// Always clear the "master" drop flag at the bottom of the
// ladder. This is needed because the "master" drop flag
// protects the ADT's discriminant, which is invalidated
// after the ADT is dropped.
self.set_drop_flag(
Location { block: succ, statement_index: 0 },
c.path,
DropFlagState::Absent
);
fields.iter().rev().enumerate().map(|(i, &(ref lv, path))| {
succ = if let Some(path) = path {
debug!("drop_ladder: for std field {} ({:?})", i, lv);
self.elaborated_drop_block(&DropCtxt {
source_info: c.source_info,
is_cleanup: is_cleanup,
init_data: c.init_data,
lvalue: lv,
path: path,
succ: succ,
unwind: unwind_succ,
})
} else {
debug!("drop_ladder: for rest field {} ({:?})", i, lv);
self.complete_drop(&DropCtxt {
source_info: c.source_info,
is_cleanup: is_cleanup,
init_data: c.init_data,
lvalue: lv,
path: c.path,
succ: succ,
unwind: unwind_succ,
}, false)
};
unwind_succ = unwind_ladder.as_ref().map(|p| p[i]);
succ
}).collect()
}
/// Create a full drop ladder, consisting of 2 connected half-drop-ladders
///
/// For example, with 3 fields, the drop ladder is
///
/// .d0:
/// ELAB(drop location.0 [target=.d1, unwind=.c1])
/// .d1:
/// ELAB(drop location.1 [target=.d2, unwind=.c2])
/// .d2:
/// ELAB(drop location.2 [target=`c.succ`, unwind=`c.unwind`])
/// .c1:
/// ELAB(drop location.1 [target=.c2])
/// .c2:
/// ELAB(drop location.2 [target=`c.unwind])
fn drop_ladder<'a>(&mut self,
c: &DropCtxt<'a, 'tcx>,
fields: Vec<(Lvalue<'tcx>, Option<MovePathIndex>)>)
-> BasicBlock
{
debug!("drop_ladder({:?}, {:?})", c, fields);
let mut fields = fields;
fields.retain(|&(ref lvalue, _)| {
let ty = lvalue.ty(self.mir, self.tcx).to_ty(self.tcx);
self.tcx.type_needs_drop_given_env(ty, self.param_env())
});
debug!("drop_ladder - fields needing drop: {:?}", fields);
let unwind_ladder = if c.is_cleanup {
None
} else {
Some(self.drop_halfladder(c, None, c.unwind.unwrap(), &fields, true))
};
self.drop_halfladder(c, unwind_ladder, c.succ, &fields, c.is_cleanup)
.last().cloned().unwrap_or(c.succ)
}
fn open_drop_for_tuple<'a>(&mut self, c: &DropCtxt<'a, 'tcx>, tys: &[Ty<'tcx>])
-> BasicBlock
{
debug!("open_drop_for_tuple({:?}, {:?})", c, tys);
let fields = tys.iter().enumerate().map(|(i, &ty)| {
(c.lvalue.clone().field(Field::new(i), ty),
super::move_path_children_matching(
self.move_data(), c.path, |proj| match proj {
&Projection {
elem: ProjectionElem::Field(f, _), ..
} => f.index() == i,
_ => false
}
))
}).collect();
self.drop_ladder(c, fields)
}
fn open_drop_for_box<'a>(&mut self, c: &DropCtxt<'a, 'tcx>, ty: Ty<'tcx>)
-> BasicBlock
{
debug!("open_drop_for_box({:?}, {:?})", c, ty);
let interior_path = super::move_path_children_matching(
self.move_data(), c.path, |proj| match proj {
&Projection { elem: ProjectionElem::Deref, .. } => true,
_ => false
}).unwrap();
let interior = c.lvalue.clone().deref();
let inner_c = DropCtxt {
lvalue: &interior,
unwind: c.unwind.map(|u| {
self.box_free_block(c, ty, u, true)
}),
succ: self.box_free_block(c, ty, c.succ, c.is_cleanup),
path: interior_path,
..*c
};
self.elaborated_drop_block(&inner_c)
}
fn open_drop_for_adt<'a>(&mut self, c: &DropCtxt<'a, 'tcx>,
adt: &'tcx ty::AdtDef, substs: &'tcx Substs<'tcx>)
-> BasicBlock {
debug!("open_drop_for_adt({:?}, {:?}, {:?})", c, adt, substs);
match adt.variants.len() {
1 => {
let fields = self.move_paths_for_fields(
c.lvalue,
c.path,
&adt.variants[0],
substs
);
self.drop_ladder(c, fields)
}
_ => {
let mut values = Vec::with_capacity(adt.variants.len());
let mut blocks = Vec::with_capacity(adt.variants.len());
let mut otherwise = None;
for (variant_index, discr) in adt.discriminants(self.tcx).enumerate() {
let subpath = super::move_path_children_matching(
self.move_data(), c.path, |proj| match proj {
&Projection {
elem: ProjectionElem::Downcast(_, idx), ..
} => idx == variant_index,
_ => false
});
if let Some(variant_path) = subpath {
let base_lv = c.lvalue.clone().elem(
ProjectionElem::Downcast(adt, variant_index)
);
let fields = self.move_paths_for_fields(
&base_lv,
variant_path,
&adt.variants[variant_index],
substs);
values.push(discr);
blocks.push(self.drop_ladder(c, fields));
} else {
// variant not found - drop the entire enum
if let None = otherwise {
otherwise = Some(self.complete_drop(c, true));
}
}
}
if let Some(block) = otherwise {
blocks.push(block);
} else {
values.pop();
}
// If there are multiple variants, then if something
// is present within the enum the discriminant, tracked
// by the rest path, must be initialized.
//
// Additionally, we do not want to switch on the
// discriminant after it is free-ed, because that
// way lies only trouble.
let discr_ty = adt.repr.discr_type().to_ty(self.tcx);
let discr = Lvalue::Local(self.patch.new_temp(discr_ty));
let switch_block = self.patch.new_block(BasicBlockData {
statements: vec![
Statement {
source_info: c.source_info,
kind: StatementKind::Assign(discr.clone(),
Rvalue::Discriminant(c.lvalue.clone()))
}
],
terminator: Some(Terminator {
source_info: c.source_info,
kind: TerminatorKind::SwitchInt {
discr: Operand::Consume(discr),
switch_ty: discr_ty,
values: From::from(values),
targets: blocks,
}
}),
is_cleanup: c.is_cleanup,
});
self.drop_flag_test_block(c, switch_block)
}
}
}
/// The slow-path - create an "open", elaborated drop for a type
/// which is moved-out-of only partially, and patch `bb` to a jump
/// to it. This must not be called on ADTs with a destructor,
/// as these can't be moved-out-of, except for `Box<T>`, which is
/// special-cased.
///
/// This creates a "drop ladder" that drops the needed fields of the
/// ADT, both in the success case or if one of the destructors fail.
fn open_drop<'a>(&mut self, c: &DropCtxt<'a, 'tcx>) -> BasicBlock {
let ty = c.lvalue.ty(self.mir, self.tcx).to_ty(self.tcx);
// Dispatch on the type of the dropped lvalue.
match ty.sty {
// A closure drops its captured upvars as if they were a tuple.
ty::TyClosure(def_id, substs) => {
let tys : Vec<_> = substs.upvar_tys(def_id, self.tcx).collect();
self.open_drop_for_tuple(c, &tys)
}
ty::TyTuple(tys, _) => {
self.open_drop_for_tuple(c, tys)
}
// `Box<T>` is the one destructor-having ADT we may open-drop:
// drop the contents, then free the allocation.
ty::TyAdt(def, _) if def.is_box() => {
self.open_drop_for_box(c, ty.boxed_ty())
}
ty::TyAdt(def, substs) => {
self.open_drop_for_adt(c, def, substs)
}
_ => bug!("open drop from non-ADT `{:?}`", ty)
}
}
/// Return a basic block that drops an lvalue using the context
/// and path in `c`. If `update_drop_flag` is true, also
/// clear the drop flag for `c.path` before performing the drop.
///
/// if FLAG(c.path)
/// if(update_drop_flag) FLAG(c.path) = false
/// drop(c.lv)
fn complete_drop<'a>(
&mut self,
c: &DropCtxt<'a, 'tcx>,
update_drop_flag: bool)
-> BasicBlock
{
debug!("complete_drop({:?},{:?})", c, update_drop_flag);
// The block whose terminator performs the actual drop.
let drop_block = self.drop_block(c);
if update_drop_flag {
// Clear the flag at statement 0 of the drop block, i.e. just
// before its terminator executes the drop.
self.set_drop_flag(
Location { block: drop_block, statement_index: 0 },
c.path,
DropFlagState::Absent
);
}
// Guard the drop on the flag (or on static dataflow knowledge).
self.drop_flag_test_block(c, drop_block)
}
/// Create a simple conditional drop.
///
/// if FLAG(c.lv)
/// FLAGS(c.lv) = false
/// drop(c.lv)
fn conditional_drop<'a>(&mut self, c: &DropCtxt<'a, 'tcx>)
-> BasicBlock
{
debug!("conditional_drop({:?})", c);
let drop_bb = self.drop_block(c);
// Clear the drop flags for `c.path` and its children right before
// the drop terminator of `drop_bb` runs.
self.drop_flags_for_drop(c, drop_bb);
// Only enter `drop_bb` if the flag says the value may be live.
self.drop_flag_test_block(c, drop_bb)
}
/// Allocate a fresh basic block in the patch with no statements and
/// terminator kind `k`, inheriting the source info from `c`.
fn new_block<'a>(&mut self,
                 c: &DropCtxt<'a, 'tcx>,
                 is_cleanup: bool,
                 k: TerminatorKind<'tcx>)
                 -> BasicBlock
{
    let terminator = Terminator {
        source_info: c.source_info,
        kind: k,
    };
    let data = BasicBlockData {
        statements: vec![],
        terminator: Some(terminator),
        is_cleanup: is_cleanup,
    };
    self.patch.new_block(data)
}
/// Create a drop block for `c`, then recursively elaborate the drop
/// terminator inside it, returning the new block.
fn elaborated_drop_block<'a>(&mut self, c: &DropCtxt<'a, 'tcx>) -> BasicBlock {
debug!("elaborated_drop_block({:?})", c);
let blk = self.drop_block(c);
self.elaborate_drop(c, blk);
blk
}
/// Branch to `on_set` when the drop flag for `c.path` may be set,
/// falling through to `c.succ` otherwise. Convenience wrapper around
/// `drop_flag_test_block_with_succ` using `c`'s own cleanup state and
/// successor.
fn drop_flag_test_block<'a>(&mut self,
c: &DropCtxt<'a, 'tcx>,
on_set: BasicBlock)
-> BasicBlock {
self.drop_flag_test_block_with_succ(c, c.is_cleanup, on_set, c.succ)
}
/// Return a block that branches on the drop flag for `c.path`:
/// `on_set` if the value may be initialized, `on_unset` otherwise.
/// When the initialization dataflow already knows the answer
/// statically, no test block is created and the known target is
/// returned directly.
fn drop_flag_test_block_with_succ<'a>(&mut self,
c: &DropCtxt<'a, 'tcx>,
is_cleanup: bool,
on_set: BasicBlock,
on_unset: BasicBlock)
-> BasicBlock
{
// (maybe_live, maybe_dead) as computed by the init dataflow.
let (maybe_live, maybe_dead) = c.init_data.state(c.path);
debug!("drop_flag_test_block({:?},{:?},{:?}) - {:?}",
c, is_cleanup, on_set, (maybe_live, maybe_dead));
match (maybe_live, maybe_dead) {
// Statically known dead: never drop.
(false, _) => on_unset,
// Statically known live: drop unconditionally, no test.
(true, false) => on_set,
// Unknown at compile time: emit a runtime test of the flag.
(true, true) => {
let flag = self.drop_flag(c.path).unwrap();
let term = TerminatorKind::if_(self.tcx, Operand::Consume(flag), on_set, on_unset);
self.new_block(c, is_cleanup, term)
}
}
}
/// Create a fresh block whose terminator drops `c.lvalue`, continuing
/// to `c.succ` on success and unwinding to `c.unwind` on panic.
fn drop_block<'a>(&mut self, c: &DropCtxt<'a, 'tcx>) -> BasicBlock {
self.new_block(c, c.is_cleanup, TerminatorKind::Drop {
location: c.lvalue.clone(),
target: c.succ,
unwind: c.unwind
})
}
/// Create a block that frees a `Box` allocation (via the `box_free`
/// lang item), guarded by the drop flag for `c.path`: if the flag may
/// be unset, control skips straight to `target`.
fn box_free_block<'a>(
&mut self,
c: &DropCtxt<'a, 'tcx>,
ty: Ty<'tcx>,
target: BasicBlock,
is_cleanup: bool
) -> BasicBlock {
let block = self.unelaborated_free_block(c, ty, target, is_cleanup);
self.drop_flag_test_block_with_succ(c, is_cleanup, block, target)
}
/// Create a block that unconditionally calls `box_free` on `c.lvalue`
/// (a `Box` whose contents are assumed already dropped), then continues
/// to `target`. If `c.path` has a drop flag, it is cleared first.
fn unelaborated_free_block<'a>(
&mut self,
c: &DropCtxt<'a, 'tcx>,
ty: Ty<'tcx>,
target: BasicBlock,
is_cleanup: bool
) -> BasicBlock {
let mut statements = vec![];
// Clear the drop flag (if any) before freeing, so the value is not
// freed twice.
if let Some(&flag) = self.drop_flags.get(&c.path) {
statements.push(Statement {
source_info: c.source_info,
kind: StatementKind::Assign(
Lvalue::Local(flag),
self.constant_bool(c.source_info.span, false)
)
});
}
let tcx = self.tcx;
// `box_free` returns unit; give the call a throwaway destination.
let unit_temp = Lvalue::Local(self.patch.new_temp(tcx.mk_nil()));
let free_func = tcx.require_lang_item(lang_items::BoxFreeFnLangItem);
// Instantiate `box_free::<ty>`.
let substs = tcx.mk_substs(iter::once(Kind::from(ty)));
let fty = tcx.item_type(free_func).subst(tcx, substs);
self.patch.new_block(BasicBlockData {
statements: statements,
terminator: Some(Terminator {
source_info: c.source_info, kind: TerminatorKind::Call {
func: Operand::Constant(Constant {
span: c.source_info.span,
ty: fty,
literal: Literal::Item {
def_id: free_func,
substs: substs
}
}),
args: vec![Operand::Consume(c.lvalue.clone())],
destination: Some((unit_temp, target)),
// `box_free` is not expected to unwind into anything here.
cleanup: None
}
}),
is_cleanup: is_cleanup
})
}
/// Whether the drop of `c` must be emitted as a single complete drop
/// rather than split field-by-field: true for non-`Box` ADTs with a
/// destructor (which cannot be moved out of). Emits a warning when
/// dataflow nevertheless produced children for such a type.
fn must_complete_drop<'a>(&self, c: &DropCtxt<'a, 'tcx>) -> bool {
// if we have a destructor, we must *not* split the drop.
// dataflow can create unneeded children in some cases
// - be sure to ignore them.
let ty = c.lvalue.ty(self.mir, self.tcx).to_ty(self.tcx);
match ty.sty {
ty::TyAdt(def, _) => {
if def.has_dtor(self.tcx) && !def.is_box() {
self.tcx.sess.span_warn(
c.source_info.span,
&format!("dataflow bug??? moving out of type with dtor {:?}",
c));
true
} else {
false
}
}
_ => false
}
}
fn constant_bool(&self, span: Span, val: bool) -> Rvalue<'tcx> {
Rvalue::Use(Operand::Constant(Constant {
span: span,
@ -1023,15 +577,4 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
}
}
}
/// Clear the drop flags for `c.path` and all of its children at the
/// location of `bb`'s terminator (which is expected to perform the
/// actual drop).
fn drop_flags_for_drop<'a>(&mut self,
c: &DropCtxt<'a, 'tcx>,
bb: BasicBlock)
{
let loc = self.patch.terminator_loc(self.mir, bb);
on_all_children_bits(
self.tcx, self.mir, self.move_data(), c.path,
|child| self.set_drop_flag(loc, child, DropFlagState::Absent)
);
}
}

View File

@ -437,7 +437,7 @@ impl<'a, 'tcx> MoveDataBuilder<'a, 'tcx> {
}
Rvalue::Ref(..) |
Rvalue::Discriminant(..) |
Rvalue::Len(..) => {}
Rvalue::Len(..) |
Rvalue::Box(..) => {
// This returns an rvalue with uninitialized contents. We can't
// move out of it here because it is an rvalue - assignments always

View File

@ -16,12 +16,12 @@ use syntax_pos::DUMMY_SP;
use rustc::mir::{self, BasicBlock, BasicBlockData, Mir, Statement, Terminator, Location};
use rustc::session::Session;
use rustc::ty::{self, TyCtxt};
use rustc_mir::util::elaborate_drops::DropFlagState;
mod abs_domain;
pub mod elaborate_drops;
mod dataflow;
mod gather_moves;
mod patch;
// mod graphviz;
use self::dataflow::{BitDenotation};
@ -183,21 +183,6 @@ impl<'b, 'a: 'b, 'tcx: 'a> MirBorrowckCtxt<'b, 'a, 'tcx> {
}
}
/// State of a value's drop flag, as tracked by borrowck.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
enum DropFlagState {
    /// The value is initialized and still needs dropping.
    Present,
    /// The value was deinitialized or moved out of.
    Absent,
}

impl DropFlagState {
    /// The boolean value the runtime drop flag holds in this state.
    fn value(self) -> bool {
        // `Present` is the only state whose flag reads true.
        self == DropFlagState::Present
    }
}
fn move_path_children_matching<'tcx, F>(move_data: &MoveData<'tcx>,
path: MovePathIndex,
mut cond: F)

View File

@ -26,8 +26,7 @@ use rustc::session::config::Input;
use rustc_borrowck as borrowck;
use rustc_borrowck::graphviz as borrowck_dot;
use rustc_mir::pretty::write_mir_pretty;
use rustc_mir::graphviz::write_mir_graphviz;
use rustc_mir::util::{write_mir_pretty, write_mir_graphviz};
use syntax::ast::{self, BlockCheckMode};
use syntax::fold::{self, Folder};

View File

@ -293,7 +293,7 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
predicates: Some(self.encode_predicates(def_id)),
ast: None,
mir: None,
mir: self.encode_mir(def_id),
}
}
@ -426,7 +426,7 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
predicates: Some(self.encode_predicates(def_id)),
ast: None,
mir: None,
mir: self.encode_mir(def_id),
}
}

View File

@ -22,6 +22,8 @@ Rust MIR: a lowered representation of Rust. Also: an experiment!
#![feature(associated_consts)]
#![feature(box_patterns)]
#![feature(box_syntax)]
#![cfg_attr(stage0, feature(field_init_shorthand))]
#![feature(i128_type)]
#![feature(rustc_diagnostic_macros)]
#![feature(rustc_private)]
@ -47,16 +49,16 @@ pub mod diagnostics;
pub mod build;
pub mod callgraph;
pub mod def_use;
pub mod graphviz;
mod hair;
mod shim;
pub mod mir_map;
pub mod pretty;
pub mod transform;
pub mod util;
use rustc::ty::maps::Providers;
pub fn provide(providers: &mut Providers) {
mir_map::provide(providers);
shim::provide(providers);
transform::qualify_consts::provide(providers);
}

View File

@ -22,14 +22,16 @@ use rustc::dep_graph::DepNode;
use rustc::mir::Mir;
use rustc::mir::transform::MirSource;
use rustc::mir::visit::MutVisitor;
use pretty;
use shim;
use hair::cx::Cx;
use util as mir_util;
use rustc::traits::Reveal;
use rustc::ty::{self, Ty, TyCtxt};
use rustc::ty::maps::Providers;
use rustc::ty::subst::Substs;
use rustc::hir;
use rustc::hir::intravisit::{self, Visitor, NestedVisitorMap};
use syntax::abi::Abi;
use syntax::ast;
use syntax_pos::Span;
@ -44,6 +46,31 @@ pub fn build_mir_for_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
tcx.visit_all_bodies_in_krate(|body_owner_def_id, _body_id| {
tcx.item_mir(body_owner_def_id);
});
// Tuple struct/variant constructors don't have a BodyId, so we need
// to build them separately.
struct GatherCtors<'a, 'tcx: 'a> {
tcx: TyCtxt<'a, 'tcx, 'tcx>
}
impl<'a, 'tcx> Visitor<'tcx> for GatherCtors<'a, 'tcx> {
fn visit_variant_data(&mut self,
v: &'tcx hir::VariantData,
_: ast::Name,
_: &'tcx hir::Generics,
_: ast::NodeId,
_: Span) {
if let hir::VariantData::Tuple(_, node_id) = *v {
self.tcx.item_mir(self.tcx.hir.local_def_id(node_id));
}
intravisit::walk_struct_def(self, v)
}
fn nested_visit_map<'b>(&'b mut self) -> NestedVisitorMap<'b, 'tcx> {
NestedVisitorMap::None
}
}
tcx.visit_all_item_likes_in_krate(DepNode::Mir, &mut GatherCtors {
tcx: tcx
}.as_deep_visitor());
}
}
@ -95,6 +122,10 @@ fn build_mir<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId)
_ => hir::BodyId { node_id: expr.id }
}
}
hir::map::NodeVariant(variant) =>
return create_constructor_shim(tcx, id, &variant.node.data),
hir::map::NodeStructCtor(ctor) =>
return create_constructor_shim(tcx, id, ctor),
_ => unsupported()
};
@ -144,7 +175,7 @@ fn build_mir<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId)
mem::transmute::<Mir, Mir<'tcx>>(mir)
};
pretty::dump_mir(tcx, "mir_map", &0, src, &mir);
mir_util::dump_mir(tcx, "mir_map", &0, src, &mir);
tcx.alloc_mir(mir)
})
@ -180,6 +211,38 @@ impl<'a, 'gcx: 'tcx, 'tcx> MutVisitor<'tcx> for GlobalizeMir<'a, 'gcx> {
}
}
/// Build and intern the MIR for a tuple struct/variant constructor
/// identified by `ctor_id`. Calls `span_bug!` if `v` is not a tuple
/// variant, since only tuple variants have constructor functions.
fn create_constructor_shim<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
ctor_id: ast::NodeId,
v: &'tcx hir::VariantData)
-> &'tcx RefCell<Mir<'tcx>>
{
let span = tcx.hir.span(ctor_id);
if let hir::VariantData::Tuple(ref fields, ctor_id) = *v {
let pe = ty::ParameterEnvironment::for_item(tcx, ctor_id);
tcx.infer_ctxt(pe, Reveal::UserFacing).enter(|infcx| {
let (mut mir, src) =
shim::build_adt_ctor(&infcx, ctor_id, fields, span);
// Convert the Mir to global types.
let tcx = infcx.tcx.global_tcx();
let mut globalizer = GlobalizeMir {
tcx: tcx,
span: mir.span
};
globalizer.visit_mir(&mut mir);
// The visitor rewrote all types to global ones; the transmute
// only changes the lifetime parameter accordingly.
let mir = unsafe {
mem::transmute::<Mir, Mir<'tcx>>(mir)
};
mir_util::dump_mir(tcx, "mir_map", &0, src, &mir);
tcx.alloc_mir(mir)
})
} else {
span_bug!(span, "attempting to create MIR for non-tuple variant {:?}", v);
}
}
///////////////////////////////////////////////////////////////////////////
// BuildMir -- walks a crate, looking for fn items and methods to build MIR from
@ -189,12 +252,9 @@ fn closure_self_ty<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
-> Ty<'tcx> {
let closure_ty = tcx.body_tables(body_id).node_id_to_type(closure_expr_id);
// We're just hard-coding the idea that the signature will be
// &self or &mut self and hence will have a bound region with
// number 0, hokey.
let region = ty::Region::ReFree(ty::FreeRegion {
scope: tcx.region_maps.item_extent(body_id.node_id),
bound_region: ty::BoundRegion::BrAnon(0),
bound_region: ty::BoundRegion::BrEnv,
});
let region = tcx.mk_region(region);

489
src/librustc_mir/shim.rs Normal file
View File

@ -0,0 +1,489 @@
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use rustc::hir;
use rustc::hir::def_id::DefId;
use rustc::infer;
use rustc::middle::region::ROOT_CODE_EXTENT;
use rustc::mir::*;
use rustc::mir::transform::MirSource;
use rustc::ty::{self, Ty};
use rustc::ty::subst::{Kind, Subst};
use rustc::ty::maps::Providers;
use rustc_data_structures::indexed_vec::{IndexVec, Idx};
use syntax::abi::Abi;
use syntax::ast;
use syntax_pos::Span;
use std::cell::RefCell;
use std::fmt;
use std::iter;
use std::mem;
use transform::{add_call_guards, no_landing_pads, simplify};
use util::elaborate_drops::{self, DropElaborator, DropStyle, DropFlagMode};
use util::patch::MirPatch;
/// Register the shim-building query (`mir_shims`) with the provider table.
pub fn provide(providers: &mut Providers) {
providers.mir_shims = make_shim;
}
/// Build, optimize and intern the MIR for the shim described by
/// `instance`. The interned MIR is immediately borrowed and the borrow
/// leaked, so it can never be mutably borrowed afterwards.
fn make_shim<'a, 'tcx>(tcx: ty::TyCtxt<'a, 'tcx, 'tcx>,
instance: ty::InstanceDef<'tcx>)
-> &'tcx RefCell<Mir<'tcx>>
{
debug!("make_shim({:?})", instance);
let did = instance.def_id();
let span = tcx.def_span(did);
let param_env =
tcx.construct_parameter_environment(span, did, ROOT_CODE_EXTENT);
let mut result = match instance {
// Regular items have real MIR; they should never reach here.
ty::InstanceDef::Item(..) =>
bug!("item {:?} passed to make_shim", instance),
// `Fn*::call*` implemented for a bare fn pointer: forward the
// call through the pointer, adjusting the receiver as needed.
ty::InstanceDef::FnPtrShim(def_id, ty) => {
let trait_ = tcx.trait_of_item(def_id).unwrap();
let adjustment = match tcx.lang_items.fn_trait_kind(trait_) {
Some(ty::ClosureKind::FnOnce) => Adjustment::Identity,
Some(ty::ClosureKind::FnMut) |
Some(ty::ClosureKind::Fn) => Adjustment::Deref,
None => bug!("fn pointer {:?} is not an fn", ty)
};
// HACK: we need the "real" argument types for the MIR,
// but because our substs are (Self, Args), where Args
// is a tuple, we must include the *concrete* argument
// types in the MIR. They will be substituted again with
// the param-substs, but because they are concrete, this
// will not do any harm.
let sig = tcx.erase_late_bound_regions(&ty.fn_sig());
let arg_tys = sig.inputs();
build_call_shim(
tcx,
&param_env,
def_id,
adjustment,
CallKind::Indirect,
Some(arg_tys)
)
}
ty::InstanceDef::Virtual(def_id, _) => {
// We are translating a call back to our def-id, which
// trans::mir knows to turn to an actual virtual call.
build_call_shim(
tcx,
&param_env,
def_id,
Adjustment::Identity,
CallKind::Direct(def_id),
None
)
}
// `<closure as FnOnce>::call_once`: re-borrow the closure and
// delegate to its `call_mut` implementation.
ty::InstanceDef::ClosureOnceShim { call_once } => {
let fn_mut = tcx.lang_items.fn_mut_trait().unwrap();
let call_mut = tcx.global_tcx()
.associated_items(fn_mut)
.find(|it| it.kind == ty::AssociatedKind::Method)
.unwrap().def_id;
build_call_shim(
tcx,
&param_env,
call_once,
Adjustment::RefMut,
CallKind::Direct(call_mut),
None
)
}
// `drop_in_place::<ty>` glue.
ty::InstanceDef::DropGlue(def_id, ty) => {
build_drop_shim(tcx, &param_env, def_id, ty)
}
ty::InstanceDef::Intrinsic(_) => {
bug!("creating shims from intrinsics ({:?}) is unsupported", instance)
}
};
debug!("make_shim({:?}) = untransformed {:?}", instance, result);
// Run the minimal pass pipeline shims need before translation.
no_landing_pads::no_landing_pads(tcx, &mut result);
simplify::simplify_cfg(&mut result);
add_call_guards::add_call_guards(&mut result);
debug!("make_shim({:?}) = {:?}", instance, result);
let result = tcx.alloc_mir(result);
// Perma-borrow MIR from shims to prevent mutation.
mem::forget(result.borrow());
result
}
/// How a call shim adjusts its receiver (first argument) before
/// forwarding it to the callee.
#[derive(Copy, Clone, Debug, PartialEq)]
enum Adjustment {
/// Pass the receiver through unchanged.
Identity,
/// Dereference the receiver before passing it on.
Deref,
/// Pass a fresh `&mut` borrow of the receiver, and drop the
/// receiver afterwards (see `build_call_shim`).
RefMut,
}
/// How a call shim invokes its target.
#[derive(Copy, Clone, Debug, PartialEq)]
enum CallKind {
/// Call through the function value held in the receiver.
Indirect,
/// Call the given def-id directly, passing the receiver as the
/// first argument.
Direct(DefId),
}
/// Build an anonymous local declaration with the given mutability and
/// type, carrying no name or source info.
fn temp_decl(mutability: Mutability, ty: Ty) -> LocalDecl {
    LocalDecl {
        mutability: mutability,
        ty: ty,
        name: None,
        source_info: None,
    }
}
/// Create the local declarations for a shim with signature `sig`:
/// local 0 is the (mutable) return place, followed by one immutable
/// local per argument.
fn local_decls_for_sig<'tcx>(sig: &ty::FnSig<'tcx>)
    -> IndexVec<Local, LocalDecl<'tcx>>
{
    let ret = temp_decl(Mutability::Mut, sig.output());
    let args = sig.inputs().iter().map(|ity| temp_decl(Mutability::Not, ity));
    iter::once(ret).chain(args).collect()
}
/// Build the MIR for a `drop_in_place` shim. `ty` is the concrete type
/// being dropped; `None` means the unsubstituted generic glue, whose
/// body is a plain goto-return (nothing is known to drop). For a known
/// type the drop of `*arg0` is elaborated in place.
fn build_drop_shim<'a, 'tcx>(tcx: ty::TyCtxt<'a, 'tcx, 'tcx>,
param_env: &ty::ParameterEnvironment<'tcx>,
def_id: DefId,
ty: Option<Ty<'tcx>>)
-> Mir<'tcx>
{
debug!("build_drop_shim(def_id={:?}, ty={:?})", def_id, ty);
// Substitute the concrete dropped type when we have one.
let substs = if let Some(ty) = ty {
tcx.mk_substs(iter::once(Kind::from(ty)))
} else {
param_env.free_substs
};
let fn_ty = tcx.item_type(def_id).subst(tcx, substs);
let sig = tcx.erase_late_bound_regions(&fn_ty.fn_sig());
let span = tcx.def_span(def_id);
let source_info = SourceInfo { span, scope: ARGUMENT_VISIBILITY_SCOPE };
// Block 0: goto -> block 1; block 1: return. Elaboration below
// patches block 0's terminator into the real drop ladder.
let return_block = BasicBlock::new(1);
let mut blocks = IndexVec::new();
let block = |blocks: &mut IndexVec<_, _>, kind| {
blocks.push(BasicBlockData {
statements: vec![],
terminator: Some(Terminator { source_info, kind }),
is_cleanup: false
})
};
block(&mut blocks, TerminatorKind::Goto { target: return_block });
block(&mut blocks, TerminatorKind::Return);
let mut mir = Mir::new(
blocks,
IndexVec::from_elem_n(
VisibilityScopeData { span: span, parent_scope: None }, 1
),
IndexVec::new(),
sig.output(),
local_decls_for_sig(&sig),
sig.inputs().len(),
vec![],
span
);
if let Some(..) = ty {
// Concrete type: elaborate the drop of `*arg0` (local 1 is the
// raw pointer argument).
let patch = {
let mut elaborator = DropShimElaborator {
mir: &mir,
patch: MirPatch::new(&mir),
tcx, param_env
};
let dropee = Lvalue::Projection(
box Projection {
base: Lvalue::Local(Local::new(1+0)),
elem: ProjectionElem::Deref
}
);
let resume_block = elaborator.patch.resume_block();
elaborate_drops::elaborate_drop(
&mut elaborator,
source_info,
false,
&dropee,
(),
return_block,
Some(resume_block),
START_BLOCK
);
elaborator.patch
};
patch.apply(&mut mir);
}
mir
}
/// Drop elaborator used when building `drop_in_place` shims. Shims
/// have no move-out dataflow, so paths carry no information (`()`) and
/// no drop flags are ever materialized.
pub struct DropShimElaborator<'a, 'tcx: 'a> {
mir: &'a Mir<'tcx>,
patch: MirPatch<'tcx>,
tcx: ty::TyCtxt<'a, 'tcx, 'tcx>,
param_env: &'a ty::ParameterEnvironment<'tcx>,
}
// `DropElaborator` requires `Debug`; the elaborator holds nothing
// interesting to print, so emit nothing.
impl<'a, 'tcx> fmt::Debug for DropShimElaborator<'a, 'tcx> {
fn fmt(&self, _f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
Ok(())
}
}
impl<'a, 'tcx> DropElaborator<'a, 'tcx> for DropShimElaborator<'a, 'tcx> {
// Shims track no per-field move paths; `()` stands for "the whole
// value".
type Path = ();
fn patch(&mut self) -> &mut MirPatch<'tcx> { &mut self.patch }
fn mir(&self) -> &'a Mir<'tcx> { self.mir }
fn tcx(&self) -> ty::TyCtxt<'a, 'tcx, 'tcx> { self.tcx }
fn param_env(&self) -> &'a ty::ParameterEnvironment<'tcx> { self.param_env }
fn drop_style(&self, _path: Self::Path, mode: DropFlagMode) -> DropStyle {
// Shallow drops are emitted as plain static drops; everything
// else is opened up so the fields get dropped individually.
if let DropFlagMode::Shallow = mode {
DropStyle::Static
} else {
DropStyle::Open
}
}
// There are no drop flags in a shim.
fn get_drop_flag(&mut self, _path: Self::Path) -> Option<Operand<'tcx>> {
None
}
// Nothing to clear - no drop flags exist.
fn clear_drop_flag(&mut self, _location: Location, _path: Self::Path, _mode: DropFlagMode) {
}
// Fields and pointees get no paths of their own...
fn field_subpath(&self, _path: Self::Path, _field: Field) -> Option<Self::Path> {
None
}
fn deref_subpath(&self, _path: Self::Path) -> Option<Self::Path> {
None
}
// ...but downcasts keep the (unit) path, so enum contents are still
// elaborated.
fn downcast_subpath(&self, _path: Self::Path, _variant: usize) -> Option<Self::Path> {
Some(())
}
}
/// Build a "call" shim for `def_id`. The shim calls the
/// function specified by `call_kind`, first adjusting its first
/// argument according to `rcvr_adjustment`.
///
/// If `untuple_args` is a vec of types, the second argument of the
/// function will be untupled as these types.
///
/// Block layout: BB0 performs the call; for `Adjustment::RefMut`, BB1
/// drops the receiver and BB2 returns, with BB3/BB4 doing the
/// drop-and-resume on unwind; otherwise BB1 returns directly.
fn build_call_shim<'a, 'tcx>(tcx: ty::TyCtxt<'a, 'tcx, 'tcx>,
param_env: &ty::ParameterEnvironment<'tcx>,
def_id: DefId,
rcvr_adjustment: Adjustment,
call_kind: CallKind,
untuple_args: Option<&[Ty<'tcx>]>)
-> Mir<'tcx>
{
debug!("build_call_shim(def_id={:?}, rcvr_adjustment={:?}, \
call_kind={:?}, untuple_args={:?})",
def_id, rcvr_adjustment, call_kind, untuple_args);
let fn_ty = tcx.item_type(def_id).subst(tcx, param_env.free_substs);
let sig = tcx.erase_late_bound_regions(&fn_ty.fn_sig());
let span = tcx.def_span(def_id);
debug!("build_call_shim: sig={:?}", sig);
let mut local_decls = local_decls_for_sig(&sig);
let source_info = SourceInfo { span, scope: ARGUMENT_VISIBILITY_SCOPE };
// Local 1 is the shim's own first argument (the receiver).
let rcvr_arg = Local::new(1+0);
let rcvr_l = Lvalue::Local(rcvr_arg);
let mut statements = vec![];
// Adjust the receiver per `rcvr_adjustment`.
let rcvr = match rcvr_adjustment {
Adjustment::Identity => Operand::Consume(rcvr_l),
Adjustment::Deref => Operand::Consume(Lvalue::Projection(
box Projection { base: rcvr_l, elem: ProjectionElem::Deref }
)),
Adjustment::RefMut => {
// let rcvr = &mut rcvr;
let re_erased = tcx.mk_region(ty::ReErased);
let ref_rcvr = local_decls.push(temp_decl(
Mutability::Not,
tcx.mk_ref(re_erased, ty::TypeAndMut {
ty: sig.inputs()[0],
mutbl: hir::Mutability::MutMutable
})
));
statements.push(Statement {
source_info: source_info,
kind: StatementKind::Assign(
Lvalue::Local(ref_rcvr),
Rvalue::Ref(re_erased, BorrowKind::Mut, rcvr_l)
)
});
Operand::Consume(Lvalue::Local(ref_rcvr))
}
};
// A direct call passes the receiver as the first argument; an
// indirect call *is* a call through the receiver.
let (callee, mut args) = match call_kind {
CallKind::Indirect => (rcvr, vec![]),
CallKind::Direct(def_id) => (
Operand::Constant(Constant {
span: span,
ty: tcx.item_type(def_id).subst(tcx, param_env.free_substs),
literal: Literal::Item { def_id, substs: param_env.free_substs },
}),
vec![rcvr]
)
};
if let Some(untuple_args) = untuple_args {
// Spread argument 2 (a tuple) into one operand per element.
args.extend(untuple_args.iter().enumerate().map(|(i, ity)| {
let arg_lv = Lvalue::Local(Local::new(1+1));
Operand::Consume(Lvalue::Projection(box Projection {
base: arg_lv,
elem: ProjectionElem::Field(Field::new(i), *ity)
}))
}));
} else {
// Forward the remaining shim arguments unchanged.
args.extend((1..sig.inputs().len()).map(|i| {
Operand::Consume(Lvalue::Local(Local::new(1+i)))
}));
}
let mut blocks = IndexVec::new();
let block = |blocks: &mut IndexVec<_, _>, statements, kind, is_cleanup| {
blocks.push(BasicBlockData {
statements,
terminator: Some(Terminator { source_info, kind }),
is_cleanup
})
};
// BB #0
block(&mut blocks, statements, TerminatorKind::Call {
func: callee,
args: args,
destination: Some((Lvalue::Local(RETURN_POINTER),
BasicBlock::new(1))),
// Only the RefMut shim needs cleanup: the receiver it borrowed
// must still be dropped on unwind.
cleanup: if let Adjustment::RefMut = rcvr_adjustment {
Some(BasicBlock::new(3))
} else {
None
}
}, false);
if let Adjustment::RefMut = rcvr_adjustment {
// BB #1 - drop for Self
block(&mut blocks, vec![], TerminatorKind::Drop {
location: Lvalue::Local(rcvr_arg),
target: BasicBlock::new(2),
unwind: None
}, false);
}
// BB #1/#2 - return
block(&mut blocks, vec![], TerminatorKind::Return, false);
if let Adjustment::RefMut = rcvr_adjustment {
// BB #3 - drop if closure panics
block(&mut blocks, vec![], TerminatorKind::Drop {
location: Lvalue::Local(rcvr_arg),
target: BasicBlock::new(4),
unwind: None
}, true);
// BB #4 - resume
block(&mut blocks, vec![], TerminatorKind::Resume, true);
}
let mut mir = Mir::new(
blocks,
IndexVec::from_elem_n(
VisibilityScopeData { span: span, parent_scope: None }, 1
),
IndexVec::new(),
sig.output(),
local_decls,
sig.inputs().len(),
vec![],
span
);
// Rust-call ABI: the trailing tuple argument is spread by trans.
if let Abi::RustCall = sig.abi {
mir.spread_arg = Some(Local::new(sig.inputs().len()));
}
mir
}
/// Build the MIR for a tuple struct/variant constructor: a single
/// block that assigns `RETURN_POINTER = Adt(arg0, arg1, ...)` and
/// returns.
pub fn build_adt_ctor<'a, 'gcx, 'tcx>(infcx: &infer::InferCtxt<'a, 'gcx, 'tcx>,
ctor_id: ast::NodeId,
fields: &[hir::StructField],
span: Span)
-> (Mir<'tcx>, MirSource)
{
let tcx = infcx.tcx;
let def_id = tcx.hir.local_def_id(ctor_id);
// Constructors are plain fn items; late-bound regions are not
// expected in their signatures.
let sig = match tcx.item_type(def_id).sty {
ty::TyFnDef(_, _, fty) => tcx.no_late_bound_regions(&fty)
.expect("LBR in ADT constructor signature"),
_ => bug!("unexpected type for ctor {:?}", def_id)
};
let sig = tcx.erase_regions(&sig);
// The return type tells us which ADT (and substs) we construct.
let (adt_def, substs) = match sig.output().sty {
ty::TyAdt(adt_def, substs) => (adt_def, substs),
_ => bug!("unexpected type for ADT ctor {:?}", sig.output())
};
debug!("build_ctor: def_id={:?} sig={:?} fields={:?}", def_id, sig, fields);
let local_decls = local_decls_for_sig(&sig);
let source_info = SourceInfo {
span: span,
scope: ARGUMENT_VISIBILITY_SCOPE
};
// For an enum variant, find which variant this ctor builds;
// structs only have variant 0.
let variant_no = if adt_def.is_enum() {
adt_def.variant_index_with_id(def_id)
} else {
0
};
// return = ADT(arg0, arg1, ...); return
let start_block = BasicBlockData {
statements: vec![Statement {
source_info: source_info,
kind: StatementKind::Assign(
Lvalue::Local(RETURN_POINTER),
Rvalue::Aggregate(
AggregateKind::Adt(adt_def, variant_no, substs, None),
// Locals 1..=n are the constructor's arguments.
(1..sig.inputs().len()+1).map(|i| {
Operand::Consume(Lvalue::Local(Local::new(i)))
}).collect()
)
)
}],
terminator: Some(Terminator {
source_info: source_info,
kind: TerminatorKind::Return,
}),
is_cleanup: false
};
let mir = Mir::new(
IndexVec::from_elem_n(start_block, 1),
IndexVec::from_elem_n(
VisibilityScopeData { span: span, parent_scope: None }, 1
),
IndexVec::new(),
sig.output(),
local_decls,
sig.inputs().len(),
vec![],
span
);
(mir, MirSource::Fn(ctor_id))
}

View File

@ -37,46 +37,50 @@ pub struct AddCallGuards;
impl<'tcx> MirPass<'tcx> for AddCallGuards {
fn run_pass<'a>(&mut self, _tcx: TyCtxt<'a, 'tcx, 'tcx>, _src: MirSource, mir: &mut Mir<'tcx>) {
let pred_count: IndexVec<_, _> =
mir.predecessors().iter().map(|ps| ps.len()).collect();
// We need a place to store the new blocks generated
let mut new_blocks = Vec::new();
let cur_len = mir.basic_blocks().len();
for block in mir.basic_blocks_mut() {
match block.terminator {
Some(Terminator {
kind: TerminatorKind::Call {
destination: Some((_, ref mut destination)),
cleanup: Some(_),
..
}, source_info
}) if pred_count[*destination] > 1 => {
// It's a critical edge, break it
let call_guard = BasicBlockData {
statements: vec![],
is_cleanup: block.is_cleanup,
terminator: Some(Terminator {
source_info: source_info,
kind: TerminatorKind::Goto { target: *destination }
})
};
// Get the index it will be when inserted into the MIR
let idx = cur_len + new_blocks.len();
new_blocks.push(call_guard);
*destination = BasicBlock::new(idx);
}
_ => {}
}
}
debug!("Broke {} N edges", new_blocks.len());
mir.basic_blocks_mut().extend(new_blocks);
add_call_guards(mir);
}
}
/// Break critical edges out of `Call` terminators that carry a cleanup
/// edge: when the call's normal destination has more than one
/// predecessor, an empty forwarding "guard" block is interposed so
/// that the destination edge is no longer critical.
pub fn add_call_guards(mir: &mut Mir) {
// Snapshot predecessor counts before mutating the CFG.
let pred_count: IndexVec<_, _> =
mir.predecessors().iter().map(|ps| ps.len()).collect();
// We need a place to store the new blocks generated
let mut new_blocks = Vec::new();
let cur_len = mir.basic_blocks().len();
for block in mir.basic_blocks_mut() {
match block.terminator {
Some(Terminator {
kind: TerminatorKind::Call {
destination: Some((_, ref mut destination)),
cleanup: Some(_),
..
}, source_info
}) if pred_count[*destination] > 1 => {
// It's a critical edge, break it
let call_guard = BasicBlockData {
statements: vec![],
is_cleanup: block.is_cleanup,
terminator: Some(Terminator {
source_info: source_info,
kind: TerminatorKind::Goto { target: *destination }
})
};
// Get the index it will be when inserted into the MIR
let idx = cur_len + new_blocks.len();
new_blocks.push(call_guard);
// Redirect the call to go through the guard block.
*destination = BasicBlock::new(idx);
}
_ => {}
}
}
debug!("Broke {} N edges", new_blocks.len());
// Append the guard blocks after all existing blocks.
mir.basic_blocks_mut().extend(new_blocks);
}
// Marker impl: `AddCallGuards` needs no extra pass state.
impl Pass for AddCallGuards {}

View File

@ -29,11 +29,11 @@
//! (non-mutating) use of `SRC`. These restrictions are conservative and may be relaxed in the
//! future.
use def_use::DefUseAnalysis;
use rustc::mir::{Constant, Local, LocalKind, Location, Lvalue, Mir, Operand, Rvalue, StatementKind};
use rustc::mir::transform::{MirPass, MirSource, Pass};
use rustc::mir::visit::MutVisitor;
use rustc::ty::TyCtxt;
use util::def_use::DefUseAnalysis;
use transform::qualify_consts;
pub struct CopyPropagation;

View File

@ -15,7 +15,7 @@ use std::fmt;
use rustc::ty::TyCtxt;
use rustc::mir::*;
use rustc::mir::transform::{Pass, MirPass, MirPassHook, MirSource};
use pretty;
use util as mir_util;
pub struct Marker<'a>(pub &'a str);
@ -56,7 +56,7 @@ impl<'tcx> MirPassHook<'tcx> for DumpMir {
pass: &Pass,
is_after: bool)
{
pretty::dump_mir(
mir_util::dump_mir(
tcx,
&*pass.name(),
&Disambiguator {

View File

@ -42,12 +42,16 @@ impl<'tcx> MutVisitor<'tcx> for NoLandingPads {
}
}
/// Strip unwind edges from `mir`, but only when the session was built
/// with landing pads disabled (e.g. `-C panic=abort`).
pub fn no_landing_pads<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, mir: &mut Mir<'tcx>) {
    if !tcx.sess.no_landing_pads() {
        return;
    }
    NoLandingPads.visit_mir(mir);
}
impl<'tcx> MirPass<'tcx> for NoLandingPads {
fn run_pass<'a>(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>,
_: MirSource, mir: &mut Mir<'tcx>) {
if tcx.sess.no_landing_pads() {
self.visit_mir(mir);
}
no_landing_pads(tcx, mir)
}
}

View File

@ -53,14 +53,18 @@ impl<'a> SimplifyCfg<'a> {
}
}
/// Run one round of CFG simplification on `mir`, then drop blocks that
/// became unreachable and release their excess capacity.
pub fn simplify_cfg(mir: &mut Mir) {
CfgSimplifier::new(mir).simplify();
remove_dead_blocks(mir);
// FIXME: Should probably be moved into some kind of pass manager
mir.basic_blocks_mut().raw.shrink_to_fit();
}
impl<'l, 'tcx> MirPass<'tcx> for SimplifyCfg<'l> {
fn run_pass<'a>(&mut self, _tcx: TyCtxt<'a, 'tcx, 'tcx>, _src: MirSource, mir: &mut Mir<'tcx>) {
debug!("SimplifyCfg({:?}) - simplifying {:?}", self.label, mir);
CfgSimplifier::new(mir).simplify();
remove_dead_blocks(mir);
// FIXME: Should probably be moved into some kind of pass manager
mir.basic_blocks_mut().raw.shrink_to_fit();
simplify_cfg(mir);
}
}

View File

@ -0,0 +1,696 @@
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::fmt;
use rustc::hir;
use rustc::mir::*;
use rustc::middle::const_val::ConstInt;
use rustc::middle::lang_items;
use rustc::ty::{self, Ty};
use rustc::ty::subst::{Kind, Substs};
use rustc::ty::util::IntTypeExt;
use rustc_data_structures::indexed_vec::Idx;
use util::patch::MirPatch;
use std::iter;
/// State of a value's drop flag during drop elaboration.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum DropFlagState {
    /// The value is initialized and still needs dropping.
    Present,
    /// The value was deinitialized or moved out of.
    Absent,
}

impl DropFlagState {
    /// The boolean the runtime drop flag holds in this state.
    pub fn value(self) -> bool {
        if let DropFlagState::Present = self {
            true
        } else {
            false
        }
    }
}
/// How the drop of a value at a given path should be lowered, as
/// decided by the `DropElaborator`.
#[derive(Debug)]
pub enum DropStyle {
/// The value is known uninitialized; the drop becomes a goto.
Dead,
/// The value is known fully initialized; emit a plain drop.
Static,
/// Initialization is only known at runtime; test the drop flag.
Conditional,
/// Parts may be initialized; open the drop up field-by-field.
Open,
}
/// Which drop flags a completed drop should clear.
#[derive(Debug)]
pub enum DropFlagMode {
/// Only the flag for the dropped path itself.
Shallow,
/// The path's flag and, per the elaborator's `clear_drop_flag`
/// implementation, presumably its descendants as well.
Deep
}
/// Client interface for drop elaboration: supplies the MIR, the patch
/// being built, and the per-path knowledge (drop style, drop flags,
/// sub-paths) that drives how each drop is lowered.
pub trait DropElaborator<'a, 'tcx: 'a> : fmt::Debug {
/// Identifier for a tracked move path within the dropped value.
type Path : Copy + fmt::Debug;
fn patch(&mut self) -> &mut MirPatch<'tcx>;
fn mir(&self) -> &'a Mir<'tcx>;
fn tcx(&self) -> ty::TyCtxt<'a, 'tcx, 'tcx>;
fn param_env(&self) -> &'a ty::ParameterEnvironment<'tcx>;
/// How the drop of `path` should be lowered under `mode`.
fn drop_style(&self, path: Self::Path, mode: DropFlagMode) -> DropStyle;
/// Operand reading the runtime drop flag for `path`, if one exists.
fn get_drop_flag(&mut self, path: Self::Path) -> Option<Operand<'tcx>>;
/// Record that the flag(s) for `path` are cleared at `location`.
fn clear_drop_flag(&mut self, location: Location, path: Self::Path, mode: DropFlagMode);
/// Path of `path`'s given field, if tracked separately.
fn field_subpath(&self, path: Self::Path, field: Field) -> Option<Self::Path>;
/// Path of `*path`, if tracked separately.
fn deref_subpath(&self, path: Self::Path) -> Option<Self::Path>;
/// Path of `path` downcast to `variant`, if tracked separately.
fn downcast_subpath(&self, path: Self::Path, variant: usize) -> Option<Self::Path>;
}
/// Bundled state for elaborating one drop: the elaborator, the lvalue
/// and move path being dropped, and the drop's CFG context (successor
/// on completion, unwind target, cleanup-ness).
#[derive(Debug)]
struct DropCtxt<'l, 'b: 'l, 'tcx: 'b, D>
where D : DropElaborator<'b, 'tcx> + 'l
{
elaborator: &'l mut D,
source_info: SourceInfo,
is_cleanup: bool,
lvalue: &'l Lvalue<'tcx>,
path: D::Path,
succ: BasicBlock,
unwind: Option<BasicBlock>,
}
/// Elaborate the drop terminator at `bb`: rewrite it according to the
/// drop style and flags supplied by `elaborator`. `unwind` must be
/// `None` exactly when we are already on the cleanup path
/// (`is_cleanup`), since cleanup code cannot unwind again.
pub fn elaborate_drop<'b, 'tcx, D>(
elaborator: &mut D,
source_info: SourceInfo,
is_cleanup: bool,
lvalue: &Lvalue<'tcx>,
path: D::Path,
succ: BasicBlock,
unwind: Option<BasicBlock>,
bb: BasicBlock)
where D: DropElaborator<'b, 'tcx>
{
assert_eq!(unwind.is_none(), is_cleanup);
DropCtxt {
elaborator, source_info, is_cleanup, lvalue, path, succ, unwind
}.elaborate_drop(bb)
}
impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D>
where D: DropElaborator<'b, 'tcx>
{
/// The type of `lvalue` in the MIR being elaborated.
fn lvalue_ty(&self, lvalue: &Lvalue<'tcx>) -> Ty<'tcx> {
lvalue.ty(self.elaborator.mir(), self.tcx()).to_ty(self.tcx())
}
/// Shorthand for the elaborator's type context.
fn tcx(&self) -> ty::TyCtxt<'b, 'tcx, 'tcx> {
self.elaborator.tcx()
}
/// This elaborates a single drop instruction, located at `bb`, and
/// patches over it.
///
/// The elaborated drop checks the drop flags to only drop what
/// is initialized.
///
/// In addition, the relevant drop flags also need to be cleared
/// to avoid double-drops. However, in the middle of a complex
/// drop, one must avoid clearing some of the flags before they
/// are read, as that would cause a memory leak.
///
/// In particular, when dropping an ADT, multiple fields may be
/// joined together under the `rest` subpath. They are all controlled
/// by the primary drop flag, but only the last rest-field dropped
/// should clear it (and it must also not clear anything else).
///
/// FIXME: I think we should just control the flags externally
/// and then we do not need this machinery.
pub fn elaborate_drop<'a>(&mut self, bb: BasicBlock) {
debug!("elaborate_drop({:?})", self);
let style = self.elaborator.drop_style(self.path, DropFlagMode::Deep);
debug!("elaborate_drop({:?}): live - {:?}", self, style);
match style {
// Known uninitialized: the drop terminator becomes a goto.
DropStyle::Dead => {
self.elaborator.patch().patch_terminator(bb, TerminatorKind::Goto {
target: self.succ
});
}
// Known fully initialized: drop unconditionally, clearing the
// flags at the terminator location first.
DropStyle::Static => {
let loc = self.terminator_loc(bb);
self.elaborator.clear_drop_flag(loc, self.path, DropFlagMode::Deep);
self.elaborator.patch().patch_terminator(bb, TerminatorKind::Drop {
location: self.lvalue.clone(),
target: self.succ,
unwind: self.unwind
});
}
// Only known at runtime: branch on the drop flag.
DropStyle::Conditional => {
let is_cleanup = self.is_cleanup; // FIXME(#6393)
let succ = self.succ;
let drop_bb = self.complete_drop(
is_cleanup, Some(DropFlagMode::Deep), succ);
self.elaborator.patch().patch_terminator(bb, TerminatorKind::Goto {
target: drop_bb
});
}
// Partially initialized: build the field-by-field drop ladder.
DropStyle::Open => {
let drop_bb = self.open_drop();
self.elaborator.patch().patch_terminator(bb, TerminatorKind::Goto {
target: drop_bb
});
}
}
}
/// Return the lvalue and move path for each field of `variant`,
/// (the move path is `None` if the field is a rest field).
fn move_paths_for_fields(&self,
base_lv: &Lvalue<'tcx>,
variant_path: D::Path,
variant: &'tcx ty::VariantDef,
substs: &'tcx Substs<'tcx>)
-> Vec<(Lvalue<'tcx>, Option<D::Path>)>
{
variant.fields.iter().enumerate().map(|(i, f)| {
let field = Field::new(i);
let subpath = self.elaborator.field_subpath(variant_path, field);
let field_ty =
self.tcx().normalize_associated_type_in_env(
&f.ty(self.tcx(), substs),
self.elaborator.param_env()
);
(base_lv.clone().field(field, field_ty), subpath)
}).collect()
}
fn drop_subpath(&mut self,
is_cleanup: bool,
lvalue: &Lvalue<'tcx>,
path: Option<D::Path>,
succ: BasicBlock,
unwind: Option<BasicBlock>)
-> BasicBlock
{
if let Some(path) = path {
debug!("drop_subpath: for std field {:?}", lvalue);
DropCtxt {
elaborator: self.elaborator,
source_info: self.source_info,
path, lvalue, succ, unwind, is_cleanup
}.elaborated_drop_block()
} else {
debug!("drop_subpath: for rest field {:?}", lvalue);
DropCtxt {
elaborator: self.elaborator,
source_info: self.source_info,
lvalue, succ, unwind, is_cleanup,
// Using `self.path` here to condition the drop on
// our own drop flag.
path: self.path
}.complete_drop(is_cleanup, None, succ)
}
}
/// Create one-half of the drop ladder for a list of fields, and return
/// the list of steps in it in reverse order.
///
/// `unwind_ladder` is such a list of steps in reverse order,
/// which is called instead of the next step if the drop unwinds
/// (the first field is never reached). If it is `None`, all
/// unwind targets are left blank.
fn drop_halfladder<'a>(&mut self,
unwind_ladder: Option<&[BasicBlock]>,
succ: BasicBlock,
fields: &[(Lvalue<'tcx>, Option<D::Path>)],
is_cleanup: bool)
-> Vec<BasicBlock>
{
let mut unwind_succ = if is_cleanup {
None
} else {
self.unwind
};
let goto = TerminatorKind::Goto { target: succ };
let mut succ = self.new_block(is_cleanup, goto);
// Always clear the "master" drop flag at the bottom of the
// ladder. This is needed because the "master" drop flag
// protects the ADT's discriminant, which is invalidated
// after the ADT is dropped.
let succ_loc = Location { block: succ, statement_index: 0 };
self.elaborator.clear_drop_flag(succ_loc, self.path, DropFlagMode::Shallow);
fields.iter().rev().enumerate().map(|(i, &(ref lv, path))| {
succ = self.drop_subpath(is_cleanup, lv, path, succ, unwind_succ);
unwind_succ = unwind_ladder.as_ref().map(|p| p[i]);
succ
}).collect()
}
/// Create a full drop ladder, consisting of 2 connected half-drop-ladders
///
/// For example, with 3 fields, the drop ladder is
///
/// .d0:
/// ELAB(drop location.0 [target=.d1, unwind=.c1])
/// .d1:
/// ELAB(drop location.1 [target=.d2, unwind=.c2])
/// .d2:
/// ELAB(drop location.2 [target=`self.succ`, unwind=`self.unwind`])
/// .c1:
/// ELAB(drop location.1 [target=.c2])
/// .c2:
/// ELAB(drop location.2 [target=`self.unwind`])
fn drop_ladder<'a>(&mut self,
fields: Vec<(Lvalue<'tcx>, Option<D::Path>)>)
-> (BasicBlock, Option<BasicBlock>)
{
debug!("drop_ladder({:?}, {:?})", self, fields);
let mut fields = fields;
fields.retain(|&(ref lvalue, _)| {
self.tcx().type_needs_drop_given_env(
self.lvalue_ty(lvalue), self.elaborator.param_env())
});
debug!("drop_ladder - fields needing drop: {:?}", fields);
let unwind_ladder = if self.is_cleanup {
None
} else {
let unwind = self.unwind.unwrap(); // FIXME(#6393)
Some(self.drop_halfladder(None, unwind, &fields, true))
};
let succ = self.succ; // FIXME(#6393)
let is_cleanup = self.is_cleanup;
let normal_ladder =
self.drop_halfladder(unwind_ladder.as_ref().map(|x| &**x),
succ, &fields, is_cleanup);
(normal_ladder.last().cloned().unwrap_or(succ),
unwind_ladder.and_then(|l| l.last().cloned()).or(self.unwind))
}
fn open_drop_for_tuple<'a>(&mut self, tys: &[Ty<'tcx>])
-> BasicBlock
{
debug!("open_drop_for_tuple({:?}, {:?})", self, tys);
let fields = tys.iter().enumerate().map(|(i, &ty)| {
(self.lvalue.clone().field(Field::new(i), ty),
self.elaborator.field_subpath(self.path, Field::new(i)))
}).collect();
self.drop_ladder(fields).0
}
fn open_drop_for_box<'a>(&mut self, ty: Ty<'tcx>) -> BasicBlock
{
debug!("open_drop_for_box({:?}, {:?})", self, ty);
let interior = self.lvalue.clone().deref();
let interior_path = self.elaborator.deref_subpath(self.path);
let succ = self.succ; // FIXME(#6393)
let is_cleanup = self.is_cleanup;
let succ = self.box_free_block(ty, succ, is_cleanup);
let unwind_succ = self.unwind.map(|u| {
self.box_free_block(ty, u, true)
});
self.drop_subpath(is_cleanup, &interior, interior_path, succ, unwind_succ)
}
fn open_drop_for_adt<'a>(&mut self, adt: &'tcx ty::AdtDef, substs: &'tcx Substs<'tcx>)
-> BasicBlock {
debug!("open_drop_for_adt({:?}, {:?}, {:?})", self, adt, substs);
if adt.variants.len() == 0 {
return self.elaborator.patch().new_block(BasicBlockData {
statements: vec![],
terminator: Some(Terminator {
source_info: self.source_info,
kind: TerminatorKind::Unreachable
}),
is_cleanup: self.is_cleanup
});
}
let contents_drop = if adt.is_union() {
(self.succ, self.unwind)
} else {
self.open_drop_for_adt_contents(adt, substs)
};
if adt.has_dtor(self.tcx()) {
self.destructor_call_block(contents_drop)
} else {
contents_drop.0
}
}
fn open_drop_for_adt_contents<'a>(&mut self, adt: &'tcx ty::AdtDef,
substs: &'tcx Substs<'tcx>)
-> (BasicBlock, Option<BasicBlock>) {
match adt.variants.len() {
1 => {
let fields = self.move_paths_for_fields(
self.lvalue,
self.path,
&adt.variants[0],
substs
);
self.drop_ladder(fields)
}
_ => {
let is_cleanup = self.is_cleanup;
let succ = self.succ;
let unwind = self.unwind; // FIXME(#6393)
let mut values = Vec::with_capacity(adt.variants.len());
let mut normal_blocks = Vec::with_capacity(adt.variants.len());
let mut unwind_blocks = if is_cleanup {
None
} else {
Some(Vec::with_capacity(adt.variants.len()))
};
let mut otherwise = None;
let mut unwind_otherwise = None;
for (variant_index, discr) in adt.discriminants(self.tcx()).enumerate() {
let subpath = self.elaborator.downcast_subpath(
self.path, variant_index);
if let Some(variant_path) = subpath {
let base_lv = self.lvalue.clone().elem(
ProjectionElem::Downcast(adt, variant_index)
);
let fields = self.move_paths_for_fields(
&base_lv,
variant_path,
&adt.variants[variant_index],
substs);
values.push(discr);
if let Some(ref mut unwind_blocks) = unwind_blocks {
// We can't use the half-ladder from the original
// drop ladder, because this breaks the
// "funclet can't have 2 successor funclets"
// requirement from MSVC:
//
// switch unwind-switch
// / \ / \
// v1.0 v2.0 v2.0-unwind v1.0-unwind
// | | / |
// v1.1-unwind v2.1-unwind |
// ^ |
// \-------------------------------/
//
// Create a duplicate half-ladder to avoid that. We
// could technically only do this on MSVC, but I
// I want to minimize the divergence between MSVC
// and non-MSVC.
let unwind = unwind.unwrap();
let halfladder = self.drop_halfladder(
None, unwind, &fields, true);
unwind_blocks.push(
halfladder.last().cloned().unwrap_or(unwind)
);
}
let (normal, _) = self.drop_ladder(fields);
normal_blocks.push(normal);
} else {
// variant not found - drop the entire enum
if let None = otherwise {
otherwise = Some(self.complete_drop(
is_cleanup,
Some(DropFlagMode::Shallow),
succ));
unwind_otherwise = unwind.map(|unwind| self.complete_drop(
true,
Some(DropFlagMode::Shallow),
unwind
));
}
}
}
if let Some(block) = otherwise {
normal_blocks.push(block);
if let Some(ref mut unwind_blocks) = unwind_blocks {
unwind_blocks.push(unwind_otherwise.unwrap());
}
} else {
values.pop();
}
(self.adt_switch_block(is_cleanup, adt, normal_blocks, &values, succ),
unwind_blocks.map(|unwind_blocks| {
self.adt_switch_block(
is_cleanup, adt, unwind_blocks, &values, unwind.unwrap()
)
}))
}
}
}
fn adt_switch_block(&mut self,
is_cleanup: bool,
adt: &'tcx ty::AdtDef,
blocks: Vec<BasicBlock>,
values: &[ConstInt],
succ: BasicBlock)
-> BasicBlock {
// If there are multiple variants, then if something
// is present within the enum the discriminant, tracked
// by the rest path, must be initialized.
//
// Additionally, we do not want to switch on the
// discriminant after it is free-ed, because that
// way lies only trouble.
let discr_ty = adt.repr.discr_type().to_ty(self.tcx());
let discr = Lvalue::Local(self.new_temp(discr_ty));
let discr_rv = Rvalue::Discriminant(self.lvalue.clone());
let switch_block = self.elaborator.patch().new_block(BasicBlockData {
statements: vec![
Statement {
source_info: self.source_info,
kind: StatementKind::Assign(discr.clone(), discr_rv),
}
],
terminator: Some(Terminator {
source_info: self.source_info,
kind: TerminatorKind::SwitchInt {
discr: Operand::Consume(discr),
switch_ty: discr_ty,
values: From::from(values.to_owned()),
targets: blocks,
}
}),
is_cleanup: is_cleanup,
});
self.drop_flag_test_block(is_cleanup, switch_block, succ)
}
fn destructor_call_block<'a>(&mut self, (succ, unwind): (BasicBlock, Option<BasicBlock>))
-> BasicBlock
{
debug!("destructor_call_block({:?}, {:?})", self, succ);
let tcx = self.tcx();
let drop_trait = tcx.lang_items.drop_trait().unwrap();
let drop_fn = tcx.associated_items(drop_trait).next().unwrap();
let ty = self.lvalue_ty(self.lvalue);
let substs = tcx.mk_substs(iter::once(Kind::from(ty)));
let re_erased = tcx.mk_region(ty::ReErased);
let ref_ty = tcx.mk_ref(re_erased, ty::TypeAndMut {
ty: ty,
mutbl: hir::Mutability::MutMutable
});
let ref_lvalue = self.new_temp(ref_ty);
let unit_temp = Lvalue::Local(self.new_temp(tcx.mk_nil()));
self.elaborator.patch().new_block(BasicBlockData {
statements: vec![Statement {
source_info: self.source_info,
kind: StatementKind::Assign(
Lvalue::Local(ref_lvalue),
Rvalue::Ref(re_erased, BorrowKind::Mut, self.lvalue.clone())
)
}],
terminator: Some(Terminator {
kind: TerminatorKind::Call {
func: Operand::item(tcx, drop_fn.def_id, substs,
self.source_info.span),
args: vec![Operand::Consume(Lvalue::Local(ref_lvalue))],
destination: Some((unit_temp, succ)),
cleanup: unwind,
},
source_info: self.source_info
}),
is_cleanup: self.is_cleanup,
})
}
/// The slow-path - create an "open", elaborated drop for a type
/// which is moved-out-of only partially, and patch `bb` to a jump
/// to it. This must not be called on ADTs with a destructor,
/// as these can't be moved-out-of, except for `Box<T>`, which is
/// special-cased.
///
/// This creates a "drop ladder" that drops the needed fields of the
/// ADT, both in the success case or if one of the destructors fail.
fn open_drop<'a>(&mut self) -> BasicBlock {
let ty = self.lvalue_ty(self.lvalue);
let is_cleanup = self.is_cleanup; // FIXME(#6393)
let succ = self.succ;
match ty.sty {
ty::TyClosure(def_id, substs) => {
let tys : Vec<_> = substs.upvar_tys(def_id, self.tcx()).collect();
self.open_drop_for_tuple(&tys)
}
ty::TyTuple(tys, _) => {
self.open_drop_for_tuple(tys)
}
ty::TyAdt(def, _) if def.is_box() => {
self.open_drop_for_box(ty.boxed_ty())
}
ty::TyAdt(def, substs) => {
self.open_drop_for_adt(def, substs)
}
ty::TyDynamic(..) => {
self.complete_drop(is_cleanup, Some(DropFlagMode::Deep), succ)
}
ty::TyArray(..) | ty::TySlice(..) => {
// FIXME(#34708): handle partially-dropped
// array/slice elements.
self.complete_drop(is_cleanup, Some(DropFlagMode::Deep), succ)
}
_ => bug!("open drop from non-ADT `{:?}`", ty)
}
}
/// Return a basic block that drop an lvalue using the context
/// and path in `c`. If `mode` is something, also clear `c`
/// according to it.
///
/// if FLAG(self.path)
/// if let Some(mode) = mode: FLAG(self.path)[mode] = false
/// drop(self.lv)
fn complete_drop<'a>(&mut self,
is_cleanup: bool,
drop_mode: Option<DropFlagMode>,
succ: BasicBlock) -> BasicBlock
{
debug!("complete_drop({:?},{:?})", self, drop_mode);
let drop_block = self.drop_block(is_cleanup, succ);
if let Some(mode) = drop_mode {
let block_start = Location { block: drop_block, statement_index: 0 };
self.elaborator.clear_drop_flag(block_start, self.path, mode);
}
self.drop_flag_test_block(is_cleanup, drop_block, succ)
}
fn elaborated_drop_block<'a>(&mut self) -> BasicBlock {
debug!("elaborated_drop_block({:?})", self);
let is_cleanup = self.is_cleanup; // FIXME(#6393)
let succ = self.succ;
let blk = self.drop_block(is_cleanup, succ);
self.elaborate_drop(blk);
blk
}
fn box_free_block<'a>(
&mut self,
ty: Ty<'tcx>,
target: BasicBlock,
is_cleanup: bool
) -> BasicBlock {
let block = self.unelaborated_free_block(ty, target, is_cleanup);
self.drop_flag_test_block(is_cleanup, block, target)
}
fn unelaborated_free_block<'a>(
&mut self,
ty: Ty<'tcx>,
target: BasicBlock,
is_cleanup: bool
) -> BasicBlock {
let tcx = self.tcx();
let unit_temp = Lvalue::Local(self.new_temp(tcx.mk_nil()));
let free_func = tcx.require_lang_item(lang_items::BoxFreeFnLangItem);
let substs = tcx.mk_substs(iter::once(Kind::from(ty)));
let call = TerminatorKind::Call {
func: Operand::item(tcx, free_func, substs, self.source_info.span),
args: vec![Operand::Consume(self.lvalue.clone())],
destination: Some((unit_temp, target)),
cleanup: None
}; // FIXME(#6393)
let free_block = self.new_block(is_cleanup, call);
let block_start = Location { block: free_block, statement_index: 0 };
self.elaborator.clear_drop_flag(block_start, self.path, DropFlagMode::Shallow);
free_block
}
fn drop_block<'a>(&mut self, is_cleanup: bool, succ: BasicBlock) -> BasicBlock {
let block = TerminatorKind::Drop {
location: self.lvalue.clone(),
target: succ,
unwind: if is_cleanup { None } else { self.unwind }
};
self.new_block(is_cleanup, block)
}
fn drop_flag_test_block(&mut self,
is_cleanup: bool,
on_set: BasicBlock,
on_unset: BasicBlock)
-> BasicBlock
{
let style = self.elaborator.drop_style(self.path, DropFlagMode::Shallow);
debug!("drop_flag_test_block({:?},{:?},{:?}) - {:?}",
self, is_cleanup, on_set, style);
match style {
DropStyle::Dead => on_unset,
DropStyle::Static => on_set,
DropStyle::Conditional | DropStyle::Open => {
let flag = self.elaborator.get_drop_flag(self.path).unwrap();
let term = TerminatorKind::if_(self.tcx(), flag, on_set, on_unset);
self.new_block(is_cleanup, term)
}
}
}
fn new_block<'a>(&mut self,
is_cleanup: bool,
k: TerminatorKind<'tcx>)
-> BasicBlock
{
self.elaborator.patch().new_block(BasicBlockData {
statements: vec![],
terminator: Some(Terminator {
source_info: self.source_info, kind: k
}),
is_cleanup: is_cleanup
})
}
fn new_temp(&mut self, ty: Ty<'tcx>) -> Local {
self.elaborator.patch().new_temp(ty)
}
fn terminator_loc(&mut self, bb: BasicBlock) -> Location {
let mir = self.elaborator.mir();
self.elaborator.patch().terminator_loc(mir, bb)
}
}

View File

@ -0,0 +1,20 @@
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub mod elaborate_drops;
pub mod def_use;
pub mod patch;
mod graphviz;
mod pretty;
pub use self::pretty::{dump_mir, write_mir_pretty};
pub use self::graphviz::{write_mir_graphviz};
pub use self::graphviz::write_node_label as write_graphviz_node_label;

View File

@ -59,6 +59,7 @@ enum ArgKind {
pub use self::attr_impl::ArgAttribute;
#[allow(non_upper_case_globals)]
#[allow(unused)]
mod attr_impl {
// The subset of llvm::Attribute needed for arguments, packed into a bitfield.
bitflags! {
@ -223,16 +224,6 @@ impl ArgType {
self.kind == ArgKind::Ignore
}
/// Get the LLVM type for an lvalue of the original Rust type of
/// this argument/return, i.e. the result of `type_of::type_of`.
pub fn memory_ty(&self, ccx: &CrateContext) -> Type {
if self.original_ty == Type::i1(ccx) {
Type::i8(ccx)
} else {
self.original_ty
}
}
/// Store a direct/indirect value described by this ArgType into a
/// lvalue for the original Rust type of this argument/return.
/// Can be used for both storing formal arguments into Rust variables
@ -334,9 +325,19 @@ impl FnType {
fn_ty
}
pub fn unadjusted<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
pub fn new_vtable<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
sig: ty::FnSig<'tcx>,
extra_args: &[Ty<'tcx>]) -> FnType {
let mut fn_ty = FnType::unadjusted(ccx, sig, extra_args);
// Don't pass the vtable, it's not an argument of the virtual fn.
fn_ty.args[1].ignore();
fn_ty.adjust_for_abi(ccx, sig);
fn_ty
}
fn unadjusted<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
sig: ty::FnSig<'tcx>,
extra_args: &[Ty<'tcx>]) -> FnType {
use self::Abi::*;
let cconv = match ccx.sess().target.target.adjust_abi(sig.abi) {
RustIntrinsic | PlatformIntrinsic |
@ -532,9 +533,9 @@ impl FnType {
}
}
pub fn adjust_for_abi<'a, 'tcx>(&mut self,
ccx: &CrateContext<'a, 'tcx>,
sig: ty::FnSig<'tcx>) {
fn adjust_for_abi<'a, 'tcx>(&mut self,
ccx: &CrateContext<'a, 'tcx>,
sig: ty::FnSig<'tcx>) {
let abi = sig.abi;
if abi == Abi::Unadjusted { return }

View File

@ -363,28 +363,6 @@ fn load_discr(bcx: &Builder, ity: layout::Integer, ptr: ValueRef,
}
}
/// Yield information about how to dispatch a case of the
/// discriminant-like value returned by `trans_switch`.
///
/// This should ideally be less tightly tied to `_match`.
pub fn trans_case<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, value: Disr) -> ValueRef {
let l = bcx.ccx.layout_of(t);
match *l {
layout::CEnum { discr, .. }
| layout::General { discr, .. }=> {
C_integral(Type::from_integer(bcx.ccx, discr), value.0, true)
}
layout::RawNullablePointer { .. } |
layout::StructWrappedNullablePointer { .. } => {
assert!(value == Disr(0) || value == Disr(1));
C_bool(bcx.ccx, value != Disr(0))
}
_ => {
bug!("{} does not have a discriminant. Represented as {:#?}", t, l);
}
}
}
/// Set the discriminant for a new value of the given case of the given
/// representation.
pub fn trans_set_discr<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, val: ValueRef, to: Disr) {

View File

@ -17,6 +17,7 @@ pub use syntax::attr::InlineAttr;
use syntax::ast;
use context::CrateContext;
/// Mark LLVM function to use provided inline heuristic.
#[inline]
pub fn inline(val: ValueRef, inline: InlineAttr) {

View File

@ -11,6 +11,7 @@
use context::SharedCrateContext;
use monomorphize::Instance;
use symbol_map::SymbolMap;
use back::symbol_names::symbol_name;
use util::nodemap::FxHashMap;
use rustc::hir::def_id::{DefId, CrateNum, LOCAL_CRATE};
use rustc::session::config;
@ -106,7 +107,7 @@ impl ExportedSymbols {
.exported_symbols(cnum)
.iter()
.map(|&def_id| {
let name = Instance::mono(scx, def_id).symbol_name(scx);
let name = symbol_name(Instance::mono(scx.tcx(), def_id), scx);
let export_level = if special_runtime_crate {
// We can probably do better here by just ensuring that
// it has hidden visibility rather than public
@ -218,9 +219,9 @@ fn symbol_for_def_id<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>,
}
}
let instance = Instance::mono(scx, def_id);
let instance = Instance::mono(scx.tcx(), def_id);
symbol_map.get(TransItem::Fn(instance))
.map(str::to_owned)
.unwrap_or_else(|| instance.symbol_name(scx))
.unwrap_or_else(|| symbol_name(instance, scx))
}

View File

@ -168,105 +168,105 @@ fn get_symbol_hash<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>,
format!("h{:016x}", hasher.finish())
}
impl<'a, 'tcx> Instance<'tcx> {
pub fn symbol_name(self, scx: &SharedCrateContext<'a, 'tcx>) -> String {
let Instance { def: def_id, substs } = self;
pub fn symbol_name<'a, 'tcx>(instance: Instance<'tcx>,
scx: &SharedCrateContext<'a, 'tcx>) -> String {
let def_id = instance.def_id();
let substs = instance.substs;
debug!("symbol_name(def_id={:?}, substs={:?})",
def_id, substs);
debug!("symbol_name(def_id={:?}, substs={:?})",
def_id, substs);
let node_id = scx.tcx().hir.as_local_node_id(def_id);
let node_id = scx.tcx().hir.as_local_node_id(def_id);
if let Some(id) = node_id {
if scx.sess().plugin_registrar_fn.get() == Some(id) {
let svh = &scx.link_meta().crate_hash;
let idx = def_id.index;
return scx.sess().generate_plugin_registrar_symbol(svh, idx);
}
if scx.sess().derive_registrar_fn.get() == Some(id) {
let svh = &scx.link_meta().crate_hash;
let idx = def_id.index;
return scx.sess().generate_derive_registrar_symbol(svh, idx);
}
if let Some(id) = node_id {
if scx.sess().plugin_registrar_fn.get() == Some(id) {
let svh = &scx.link_meta().crate_hash;
let idx = def_id.index;
return scx.sess().generate_plugin_registrar_symbol(svh, idx);
}
// FIXME(eddyb) Precompute a custom symbol name based on attributes.
let attrs = scx.tcx().get_attrs(def_id);
let is_foreign = if let Some(id) = node_id {
match scx.tcx().hir.get(id) {
hir_map::NodeForeignItem(_) => true,
_ => false
}
} else {
scx.sess().cstore.is_foreign_item(def_id)
};
if let Some(name) = weak_lang_items::link_name(&attrs) {
return name.to_string();
if scx.sess().derive_registrar_fn.get() == Some(id) {
let svh = &scx.link_meta().crate_hash;
let idx = def_id.index;
return scx.sess().generate_derive_registrar_symbol(svh, idx);
}
if is_foreign {
if let Some(name) = attr::first_attr_value_str_by_name(&attrs, "link_name") {
return name.to_string();
}
// Don't mangle foreign items.
return scx.tcx().item_name(def_id).as_str().to_string();
}
if let Some(name) = attr::find_export_name_attr(scx.sess().diagnostic(), &attrs) {
// Use provided name
return name.to_string();
}
if attr::contains_name(&attrs, "no_mangle") {
// Don't mangle
return scx.tcx().item_name(def_id).as_str().to_string();
}
let def_path = scx.tcx().def_path(def_id);
// We want to compute the "type" of this item. Unfortunately, some
// kinds of items (e.g., closures) don't have an entry in the
// item-type array. So walk back up the find the closest parent
// that DOES have an entry.
let mut ty_def_id = def_id;
let instance_ty;
loop {
let key = scx.tcx().def_key(ty_def_id);
match key.disambiguated_data.data {
DefPathData::TypeNs(_) |
DefPathData::ValueNs(_) => {
instance_ty = scx.tcx().item_type(ty_def_id);
break;
}
_ => {
// if we're making a symbol for something, there ought
// to be a value or type-def or something in there
// *somewhere*
ty_def_id.index = key.parent.unwrap_or_else(|| {
bug!("finding type for {:?}, encountered def-id {:?} with no \
parent", def_id, ty_def_id);
});
}
}
}
// Erase regions because they may not be deterministic when hashed
// and should not matter anyhow.
let instance_ty = scx.tcx().erase_regions(&instance_ty);
let hash = get_symbol_hash(scx, &def_path, instance_ty, Some(substs));
let mut buffer = SymbolPathBuffer {
names: Vec::with_capacity(def_path.data.len())
};
item_path::with_forced_absolute_paths(|| {
scx.tcx().push_item_path(&mut buffer, def_id);
});
mangle(buffer.names.into_iter(), &hash)
}
// FIXME(eddyb) Precompute a custom symbol name based on attributes.
let attrs = scx.tcx().get_attrs(def_id);
let is_foreign = if let Some(id) = node_id {
match scx.tcx().hir.get(id) {
hir_map::NodeForeignItem(_) => true,
_ => false
}
} else {
scx.sess().cstore.is_foreign_item(def_id)
};
if let Some(name) = weak_lang_items::link_name(&attrs) {
return name.to_string();
}
if is_foreign {
if let Some(name) = attr::first_attr_value_str_by_name(&attrs, "link_name") {
return name.to_string();
}
// Don't mangle foreign items.
return scx.tcx().item_name(def_id).as_str().to_string();
}
if let Some(name) = attr::find_export_name_attr(scx.sess().diagnostic(), &attrs) {
// Use provided name
return name.to_string();
}
if attr::contains_name(&attrs, "no_mangle") {
// Don't mangle
return scx.tcx().item_name(def_id).as_str().to_string();
}
let def_path = scx.tcx().def_path(def_id);
// We want to compute the "type" of this item. Unfortunately, some
// kinds of items (e.g., closures) don't have an entry in the
// item-type array. So walk back up the find the closest parent
// that DOES have an entry.
let mut ty_def_id = def_id;
let instance_ty;
loop {
let key = scx.tcx().def_key(ty_def_id);
match key.disambiguated_data.data {
DefPathData::TypeNs(_) |
DefPathData::ValueNs(_) => {
instance_ty = scx.tcx().item_type(ty_def_id);
break;
}
_ => {
// if we're making a symbol for something, there ought
// to be a value or type-def or something in there
// *somewhere*
ty_def_id.index = key.parent.unwrap_or_else(|| {
bug!("finding type for {:?}, encountered def-id {:?} with no \
parent", def_id, ty_def_id);
});
}
}
}
// Erase regions because they may not be deterministic when hashed
// and should not matter anyhow.
let instance_ty = scx.tcx().erase_regions(&instance_ty);
let hash = get_symbol_hash(scx, &def_path, instance_ty, Some(substs));
let mut buffer = SymbolPathBuffer {
names: Vec::with_capacity(def_path.data.len())
};
item_path::with_forced_absolute_paths(|| {
scx.tcx().push_item_path(&mut buffer, def_id);
});
mangle(buffer.names.into_iter(), &hash)
}
struct SymbolPathBuffer {

View File

@ -34,30 +34,24 @@ use back::linker::LinkerInfo;
use back::symbol_export::{self, ExportedSymbols};
use llvm::{Linkage, ValueRef, Vector, get_param};
use llvm;
use rustc::hir::def_id::{DefId, LOCAL_CRATE};
use rustc::hir::def_id::LOCAL_CRATE;
use middle::lang_items::StartFnLangItem;
use rustc::ty::subst::Substs;
use rustc::mir::tcx::LvalueTy;
use rustc::traits;
use rustc::ty::{self, Ty, TyCtxt};
use rustc::ty::adjustment::CustomCoerceUnsized;
use rustc::dep_graph::{AssertDepGraphSafe, DepNode, WorkProduct};
use rustc::hir::map as hir_map;
use rustc::util::common::time;
use session::config::{self, NoDebugInfo};
use rustc_incremental::IncrementalHashesMap;
use session::{self, DataTypeKind, Session};
use abi::{self, FnType};
use abi;
use mir::lvalue::LvalueRef;
use adt;
use attributes;
use builder::Builder;
use callee::{Callee};
use callee;
use common::{C_bool, C_bytes_in_context, C_i32, C_uint};
use collector::{self, TransItemCollectionMode};
use common::{C_struct_in_context, C_u64, C_undef};
use common::CrateContext;
use common::{fulfill_obligation};
use common::{type_is_zero_size, val_ty};
use common;
use consts;
@ -65,7 +59,7 @@ use context::{SharedCrateContext, CrateContextList};
use debuginfo;
use declare;
use machine;
use machine::{llalign_of_min, llsize_of};
use machine::llsize_of;
use meth;
use mir;
use monomorphize::{self, Instance};
@ -76,7 +70,6 @@ use trans_item::{TransItem, DefPathBasedNames};
use type_::Type;
use type_of;
use value::Value;
use Disr;
use util::nodemap::{NodeSet, FxHashMap, FxHashSet};
use libc::c_uint;
@ -84,7 +77,7 @@ use std::ffi::{CStr, CString};
use std::rc::Rc;
use std::str;
use std::i32;
use syntax_pos::{Span, DUMMY_SP};
use syntax_pos::Span;
use syntax::attr;
use rustc::hir;
use rustc::ty::layout::{self, Layout};
@ -317,25 +310,6 @@ pub fn coerce_unsized_into<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
}
}
pub fn custom_coerce_unsize_info<'scx, 'tcx>(scx: &SharedCrateContext<'scx, 'tcx>,
source_ty: Ty<'tcx>,
target_ty: Ty<'tcx>)
-> CustomCoerceUnsized {
let trait_ref = ty::Binder(ty::TraitRef {
def_id: scx.tcx().lang_items.coerce_unsized_trait().unwrap(),
substs: scx.tcx().mk_substs_trait(source_ty, &[target_ty])
});
match fulfill_obligation(scx, DUMMY_SP, trait_ref) {
traits::VtableImpl(traits::VtableImplData { impl_def_id, .. }) => {
scx.tcx().custom_coerce_unsized_kind(impl_def_id)
}
vtable => {
bug!("invalid CoerceUnsized vtable: {:?}", vtable);
}
}
}
pub fn cast_shift_expr_rhs(
cx: &Builder, op: hir::BinOp_, lhs: ValueRef, rhs: ValueRef
) -> ValueRef {
@ -429,7 +403,9 @@ pub fn load_ty<'a, 'tcx>(b: &Builder<'a, 'tcx>, ptr: ValueRef,
// a char is a Unicode codepoint, and so takes values from 0
// to 0x10FFFF inclusive only.
b.load_range_assert(ptr, 0, 0x10FFFF + 1, llvm::False, alignment.to_align())
} else if (t.is_region_ptr() || t.is_box()) && !common::type_is_fat_ptr(ccx, t) {
} else if (t.is_region_ptr() || t.is_box() || t.is_fn())
&& !common::type_is_fat_ptr(ccx, t)
{
b.load_nonnull(ptr, alignment.to_align())
} else {
b.load(ptr, alignment.to_align())
@ -569,11 +545,11 @@ pub fn memcpy_ty<'a, 'tcx>(
}
pub fn call_memset<'a, 'tcx>(b: &Builder<'a, 'tcx>,
ptr: ValueRef,
fill_byte: ValueRef,
size: ValueRef,
align: ValueRef,
volatile: bool) -> ValueRef {
ptr: ValueRef,
fill_byte: ValueRef,
size: ValueRef,
align: ValueRef,
volatile: bool) -> ValueRef {
let ptr_width = &b.ccx.sess().target.target.target_pointer_width[..];
let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width);
let llintrinsicfn = b.ccx.get_intrinsic(&intrinsic_key);
@ -585,7 +561,7 @@ pub fn trans_instance<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, instance: Instance
let _s = if ccx.sess().trans_stats() {
let mut instance_name = String::new();
DefPathBasedNames::new(ccx.tcx(), true, true)
.push_def_path(instance.def, &mut instance_name);
.push_def_path(instance.def_id(), &mut instance_name);
Some(StatRecorder::new(ccx, instance_name))
} else {
None
@ -596,7 +572,7 @@ pub fn trans_instance<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, instance: Instance
// release builds.
info!("trans_instance({})", instance);
let fn_ty = common::def_ty(ccx.shared(), instance.def, instance.substs);
let fn_ty = common::instance_ty(ccx.shared(), &instance);
let sig = common::ty_fn_sig(ccx, fn_ty);
let sig = ccx.tcx().erase_late_bound_regions_and_normalize(&sig);
@ -611,76 +587,10 @@ pub fn trans_instance<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, instance: Instance
attributes::emit_uwtable(lldecl, true);
}
let mir = ccx.tcx().item_mir(instance.def);
let mir = ccx.tcx().instance_mir(instance.def);
mir::trans_mir(ccx, lldecl, &mir, instance, sig);
}
pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
def_id: DefId,
substs: &'tcx Substs<'tcx>,
disr: Disr,
llfn: ValueRef) {
attributes::inline(llfn, attributes::InlineAttr::Hint);
attributes::set_frame_pointer_elimination(ccx, llfn);
let ctor_ty = common::def_ty(ccx.shared(), def_id, substs);
let sig = ccx.tcx().erase_late_bound_regions_and_normalize(&ctor_ty.fn_sig());
let fn_ty = FnType::new(ccx, sig, &[]);
let bcx = Builder::new_block(ccx, llfn, "entry-block");
if !fn_ty.ret.is_ignore() {
// But if there are no nested returns, we skip the indirection
// and have a single retslot
let dest = if fn_ty.ret.is_indirect() {
get_param(llfn, 0)
} else {
// We create an alloca to hold a pointer of type `ret.original_ty`
// which will hold the pointer to the right alloca which has the
// final ret value
bcx.alloca(fn_ty.ret.memory_ty(ccx), "sret_slot")
};
// Can return unsized value
let mut dest_val = LvalueRef::new_sized_ty(dest, sig.output(), Alignment::AbiAligned);
dest_val.ty = LvalueTy::Downcast {
adt_def: sig.output().ty_adt_def().unwrap(),
substs: substs,
variant_index: disr.0 as usize,
};
let mut llarg_idx = fn_ty.ret.is_indirect() as usize;
let mut arg_idx = 0;
for (i, arg_ty) in sig.inputs().iter().enumerate() {
let (lldestptr, _) = dest_val.trans_field_ptr(&bcx, i);
let arg = &fn_ty.args[arg_idx];
arg_idx += 1;
if common::type_is_fat_ptr(bcx.ccx, arg_ty) {
let meta = &fn_ty.args[arg_idx];
arg_idx += 1;
arg.store_fn_arg(&bcx, &mut llarg_idx, get_dataptr(&bcx, lldestptr));
meta.store_fn_arg(&bcx, &mut llarg_idx, get_meta(&bcx, lldestptr));
} else {
arg.store_fn_arg(&bcx, &mut llarg_idx, lldestptr);
}
}
adt::trans_set_discr(&bcx, sig.output(), dest, disr);
if fn_ty.ret.is_indirect() {
bcx.ret_void();
return;
}
if let Some(cast_ty) = fn_ty.ret.cast {
bcx.ret(bcx.load(
bcx.pointercast(dest, cast_ty.ptr_to()),
Some(llalign_of_min(ccx, fn_ty.ret.ty))
));
} else {
bcx.ret(bcx.load(dest, None))
}
} else {
bcx.ret_void();
}
}
pub fn llvm_linkage_by_name(name: &str) -> Option<Linkage> {
// Use the names from src/llvm/docs/LangRef.rst here. Most types are only
// applicable to variable declarations and may not really make sense for
@ -721,7 +631,7 @@ pub fn set_link_section(ccx: &CrateContext,
}
/// Create the `main` function which will initialise the rust runtime and call
/// users main function.
/// users main function.
pub fn maybe_create_entry_wrapper(ccx: &CrateContext) {
let (main_def_id, span) = match *ccx.sess().entry_fn.borrow() {
Some((id, span)) => {
@ -738,7 +648,7 @@ pub fn maybe_create_entry_wrapper(ccx: &CrateContext) {
ccx.tcx().sess.span_fatal(span, "compilation successful");
}
let instance = Instance::mono(ccx.shared(), main_def_id);
let instance = Instance::mono(ccx.tcx(), main_def_id);
if !ccx.codegen_unit().contains_item(&TransItem::Fn(instance)) {
// We want to create the wrapper in the same codegen unit as Rust's main
@ -746,7 +656,7 @@ pub fn maybe_create_entry_wrapper(ccx: &CrateContext) {
return;
}
let main_llfn = Callee::def(ccx, main_def_id, instance.substs).reify(ccx);
let main_llfn = callee::get_fn(ccx, instance);
let et = ccx.sess().entry_type.get().unwrap();
match et {
@ -780,8 +690,8 @@ pub fn maybe_create_entry_wrapper(ccx: &CrateContext) {
let (start_fn, args) = if use_start_lang_item {
let start_def_id = ccx.tcx().require_lang_item(StartFnLangItem);
let empty_substs = ccx.tcx().intern_substs(&[]);
let start_fn = Callee::def(ccx, start_def_id, empty_substs).reify(ccx);
let start_instance = Instance::mono(ccx.tcx(), start_def_id);
let start_fn = callee::get_fn(ccx, start_instance);
(start_fn, vec![bld.pointercast(rust_main, Type::i8p(ccx).ptr_to()), get_param(llfn, 0),
get_param(llfn, 1)])
} else {

View File

@ -14,504 +14,18 @@
//! and methods are represented as just a fn ptr and not a full
//! closure.
pub use self::CalleeData::*;
use llvm::{self, ValueRef, get_params};
use llvm::{self, ValueRef};
use rustc::hir::def_id::DefId;
use rustc::ty::subst::{Substs, Subst};
use rustc::traits;
use abi::{Abi, FnType};
use rustc::ty::subst::Substs;
use attributes;
use base;
use builder::Builder;
use common::{self, CrateContext};
use cleanup::CleanupScope;
use mir::lvalue::LvalueRef;
use monomorphize;
use consts;
use common::def_ty;
use declare;
use value::Value;
use meth;
use monomorphize::Instance;
use trans_item::TransItem;
use type_of;
use Disr;
use rustc::ty::{self, Ty, TypeFoldable};
use rustc::hir;
use std::iter;
use syntax_pos::DUMMY_SP;
use mir::lvalue::Alignment;
/// How a resolved callee is to be invoked.
#[derive(Debug)]
pub enum CalleeData {
    /// Constructor for enum variant/tuple-like-struct.
    NamedTupleConstructor(Disr),

    /// Function pointer.
    Fn(ValueRef),

    /// An intrinsic; has no function pointer and cannot be reified
    /// (see `Callee::reify`, which bugs out on this variant).
    Intrinsic,

    /// Trait object found in the vtable at that index.
    Virtual(usize)
}
/// A resolved callee: the invocation strategy (`data`) paired with the
/// callee's monomorphized function type (`ty`).
#[derive(Debug)]
pub struct Callee<'tcx> {
    pub data: CalleeData,
    pub ty: Ty<'tcx>
}
impl<'tcx> Callee<'tcx> {
    /// Function pointer.
    pub fn ptr(llfn: ValueRef, ty: Ty<'tcx>) -> Callee<'tcx> {
        Callee {
            data: Fn(llfn),
            ty: ty
        }
    }

    /// Function or method definition.
    ///
    /// Classifies `def_id` as a trait method, an intrinsic, an ADT
    /// constructor, or a plain function, and builds the matching `Callee`.
    pub fn def<'a>(ccx: &CrateContext<'a, 'tcx>, def_id: DefId, substs: &'tcx Substs<'tcx>)
                   -> Callee<'tcx> {
        let tcx = ccx.tcx();

        // Trait items must first be resolved to the implementing method.
        if let Some(trait_id) = tcx.trait_of_item(def_id) {
            return Callee::trait_method(ccx, trait_id, def_id, substs);
        }

        let fn_ty = def_ty(ccx.shared(), def_id, substs);
        if let ty::TyFnDef(.., f) = fn_ty.sty {
            if f.abi() == Abi::RustIntrinsic || f.abi() == Abi::PlatformIntrinsic {
                // Intrinsics have no LLVM function value to point at.
                return Callee {
                    data: Intrinsic,
                    ty: fn_ty
                };
            }
        }

        // FIXME(eddyb) Detect ADT constructors more efficiently.
        // A fn item whose return type is an ADT and whose def-id matches one
        // of that ADT's variants is the variant's constructor.
        if let Some(adt_def) = fn_ty.fn_ret().skip_binder().ty_adt_def() {
            if let Some(i) = adt_def.variants.iter().position(|v| def_id == v.did) {
                return Callee {
                    data: NamedTupleConstructor(Disr::for_variant(tcx, adt_def, i)),
                    ty: fn_ty
                };
            }
        }

        // Ordinary function: obtain (or translate) its LLVM value.
        let (llfn, ty) = get_fn(ccx, def_id, substs);
        Callee::ptr(llfn, ty)
    }

    /// Trait method, which has to be resolved to an impl method.
    pub fn trait_method<'a>(ccx: &CrateContext<'a, 'tcx>,
                            trait_id: DefId,
                            def_id: DefId,
                            substs: &'tcx Substs<'tcx>)
                            -> Callee<'tcx> {
        let tcx = ccx.tcx();

        let trait_ref = ty::TraitRef::from_method(tcx, trait_id, substs);
        let trait_ref = tcx.normalize_associated_type(&ty::Binder(trait_ref));
        // Select whatever fulfills the trait obligation (impl, closure,
        // fn pointer, or object) and dispatch on the vtable kind.
        match common::fulfill_obligation(ccx.shared(), DUMMY_SP, trait_ref) {
            traits::VtableImpl(vtable_impl) => {
                let name = tcx.item_name(def_id);
                let (def_id, substs) = traits::find_method(tcx, name, substs, &vtable_impl);

                // Translate the function, bypassing Callee::def.
                // That is because default methods have the same ID as the
                // trait method used to look up the impl method that ended
                // up here, so calling Callee::def would infinitely recurse.
                let (llfn, ty) = get_fn(ccx, def_id, substs);
                Callee::ptr(llfn, ty)
            }
            traits::VtableClosure(vtable_closure) => {
                // The substitutions should have no type parameters remaining
                // after passing through fulfill_obligation
                let trait_closure_kind = tcx.lang_items.fn_trait_kind(trait_id).unwrap();
                let instance = Instance::new(def_id, substs);
                let llfn = trans_closure_method(
                    ccx,
                    vtable_closure.closure_def_id,
                    vtable_closure.substs,
                    instance,
                    trait_closure_kind);

                let method_ty = def_ty(ccx.shared(), def_id, substs);
                Callee::ptr(llfn, method_ty)
            }
            traits::VtableFnPointer(vtable_fn_pointer) => {
                // Calling Fn/FnMut/FnOnce on a bare fn pointer goes through
                // a shim that unpacks the argument tuple.
                let trait_closure_kind = tcx.lang_items.fn_trait_kind(trait_id).unwrap();
                let instance = Instance::new(def_id, substs);
                let llfn = trans_fn_pointer_shim(ccx, instance,
                                                 trait_closure_kind,
                                                 vtable_fn_pointer.fn_ty);

                let method_ty = def_ty(ccx.shared(), def_id, substs);
                Callee::ptr(llfn, method_ty)
            }
            traits::VtableObject(ref data) => {
                // Virtual dispatch: record the method's slot in the vtable.
                Callee {
                    data: Virtual(tcx.get_vtable_index_of_object_method(data, def_id)),
                    ty: def_ty(ccx.shared(), def_id, substs)
                }
            }
            vtable => {
                bug!("resolved vtable bad vtable {:?} in trans", vtable);
            }
        }
    }

    /// Get the abi::FnType for a direct call. Mainly deals with the fact
    /// that a Virtual call doesn't take the vtable, like its shim does.
    /// The extra argument types are for variadic (extern "C") functions.
    pub fn direct_fn_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>,
                              extra_args: &[Ty<'tcx>]) -> FnType {
        let sig = ccx.tcx().erase_late_bound_regions_and_normalize(&self.ty.fn_sig());
        let mut fn_ty = FnType::unadjusted(ccx, sig, extra_args);
        if let Virtual(_) = self.data {
            // Don't pass the vtable, it's not an argument of the virtual fn.
            fn_ty.args[1].ignore();
        }
        fn_ty.adjust_for_abi(ccx, sig);
        fn_ty
    }

    /// Turn the callee into a function pointer.
    pub fn reify<'a>(self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef {
        match self.data {
            Fn(llfn) => llfn,
            // A virtual method is reified through an object shim.
            Virtual(_) => meth::trans_object_shim(ccx, self),
            NamedTupleConstructor(disr) => match self.ty.sty {
                ty::TyFnDef(def_id, substs, _) => {
                    let instance = Instance::new(def_id, substs);
                    // Reuse an already-translated constructor shim.
                    if let Some(&llfn) = ccx.instances().borrow().get(&instance) {
                        return llfn;
                    }

                    // Declare, translate, and cache the constructor shim.
                    let sym = ccx.symbol_map().get_or_compute(ccx.shared(),
                                                              TransItem::Fn(instance));
                    assert!(!ccx.codegen_unit().contains_item(&TransItem::Fn(instance)));
                    let lldecl = declare::define_internal_fn(ccx, &sym, self.ty);
                    base::trans_ctor_shim(ccx, def_id, substs, disr, lldecl);
                    ccx.instances().borrow_mut().insert(instance, lldecl);

                    lldecl
                }
                _ => bug!("expected fn item type, found {}", self.ty)
            },
            Intrinsic => bug!("intrinsic {} getting reified", self.ty)
        }
    }
}
/// Returns the LLVM function to use for calling closure `def_id` through the
/// closure trait of kind `trait_closure_kind`, inserting a `FnOnce` adapter
/// shim when needed (e.g. an `Fn` closure called as `FnOnce`).
fn trans_closure_method<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>,
                                  def_id: DefId,
                                  substs: ty::ClosureSubsts<'tcx>,
                                  method_instance: Instance<'tcx>,
                                  trait_closure_kind: ty::ClosureKind)
                                  -> ValueRef
{
    // If this is a closure, redirect to it.
    let (llfn, _) = get_fn(ccx, def_id, substs.substs);

    // If the closure is a Fn closure, but a FnOnce is needed (etc),
    // then adapt the self type
    let llfn_closure_kind = ccx.tcx().closure_kind(def_id);

    debug!("trans_closure_adapter_shim(llfn_closure_kind={:?}, \
           trait_closure_kind={:?}, llfn={:?})",
           llfn_closure_kind, trait_closure_kind, Value(llfn));

    match needs_fn_once_adapter_shim(llfn_closure_kind, trait_closure_kind) {
        // By-ref closure called by value: wrap it in the adapter shim.
        Ok(true) => trans_fn_once_adapter_shim(ccx,
                                               def_id,
                                               substs,
                                               method_instance,
                                               llfn),
        // Kinds are compatible as-is; use the closure's own function.
        Ok(false) => llfn,
        Err(()) => {
            // Incompatible kinds (e.g. FnMut closure where Fn is required).
            bug!("trans_closure_adapter_shim: cannot convert {:?} to {:?}",
                 llfn_closure_kind,
                 trait_closure_kind);
        }
    }
}
/// Decides whether calling a closure of kind `actual_closure_kind` through a
/// trait of kind `trait_closure_kind` requires a `FnOnce` adapter shim.
///
/// Returns `Ok(false)` when the closure's own function can be used directly,
/// `Ok(true)` when a by-value (`FnOnce`) adapter must be generated, and
/// `Err(())` when the two kinds are fundamentally incompatible.
pub fn needs_fn_once_adapter_shim(actual_closure_kind: ty::ClosureKind,
                                  trait_closure_kind: ty::ClosureKind)
                                  -> Result<bool, ()>
{
    match (actual_closure_kind, trait_closure_kind) {
        // Identical kinds need no adapter. Likewise Fn-as-FnMut: the closure
        // fn is a `fn(&self, ...)` and we want a `fn(&mut self, ...)`; at
        // trans time these are basically the same thing, so the original
        // function can be used unchanged.
        (ty::ClosureKind::Fn, ty::ClosureKind::Fn) |
        (ty::ClosureKind::FnMut, ty::ClosureKind::FnMut) |
        (ty::ClosureKind::FnOnce, ty::ClosureKind::FnOnce) |
        (ty::ClosureKind::Fn, ty::ClosureKind::FnMut) => Ok(false),

        // The closure fn is `fn(&self, ...)` or `fn(&mut self, ...)` but a
        // `fn(self, ...)` is wanted. That can be produced by something like:
        //
        //     fn call_once(self, ...) { call_mut(&self, ...) }
        //     fn call_once(mut self, ...) { call_mut(&mut self, ...) }
        //
        // These are both the same at trans time, so request the shim.
        (ty::ClosureKind::Fn, ty::ClosureKind::FnOnce) |
        (ty::ClosureKind::FnMut, ty::ClosureKind::FnOnce) => Ok(true),

        // Any remaining combination (e.g. FnMut where Fn is required)
        // cannot be adapted.
        _ => Err(()),
    }
}
/// Builds the `FnOnce` adapter shim for a by-ref closure `llreffn`: a
/// by-value `call_once(self, ...)` that forwards to the by-ref function,
/// dropping the closure environment when the call returns or unwinds.
/// The result is cached in `ccx.instances()` under `method_instance`.
fn trans_fn_once_adapter_shim<'a, 'tcx>(
    ccx: &'a CrateContext<'a, 'tcx>,
    def_id: DefId,
    substs: ty::ClosureSubsts<'tcx>,
    method_instance: Instance<'tcx>,
    llreffn: ValueRef)
    -> ValueRef
{
    // Return the cached shim if it was already translated.
    if let Some(&llfn) = ccx.instances().borrow().get(&method_instance) {
        return llfn;
    }

    debug!("trans_fn_once_adapter_shim(def_id={:?}, substs={:?}, llreffn={:?})",
           def_id, substs, Value(llreffn));

    let tcx = ccx.tcx();

    // Find a version of the closure type. Substitute static for the
    // region since it doesn't really matter.
    let closure_ty = tcx.mk_closure_from_closure_substs(def_id, substs);
    let ref_closure_ty = tcx.mk_imm_ref(tcx.mk_region(ty::ReErased), closure_ty);

    // Make a version with the type of by-ref closure.
    let sig = tcx.closure_type(def_id).subst(tcx, substs.substs);
    let sig = tcx.erase_late_bound_regions_and_normalize(&sig);
    assert_eq!(sig.abi, Abi::RustCall);
    let llref_fn_ty = tcx.mk_fn_ptr(ty::Binder(tcx.mk_fn_sig(
        iter::once(ref_closure_ty).chain(sig.inputs().iter().cloned()),
        sig.output(),
        sig.variadic,
        sig.unsafety,
        Abi::RustCall
    )));
    debug!("trans_fn_once_adapter_shim: llref_fn_ty={:?}",
           llref_fn_ty);

    // Make a version of the closure type with the same arguments, but
    // with argument #0 being by value.
    let sig = tcx.mk_fn_sig(
        iter::once(closure_ty).chain(sig.inputs().iter().cloned()),
        sig.output(),
        sig.variadic,
        sig.unsafety,
        Abi::RustCall
    );

    let fn_ty = FnType::new(ccx, sig, &[]);
    let llonce_fn_ty = tcx.mk_fn_ptr(ty::Binder(sig));

    // Create the by-value helper.
    let function_name = method_instance.symbol_name(ccx.shared());
    let lloncefn = declare::define_internal_fn(ccx, &function_name, llonce_fn_ty);
    attributes::set_frame_pointer_elimination(ccx, lloncefn);

    let orig_fn_ty = fn_ty;
    let mut bcx = Builder::new_block(ccx, lloncefn, "entry-block");

    let callee = Callee {
        data: Fn(llreffn),
        ty: llref_fn_ty
    };

    // the first argument (`self`) will be the (by value) closure env.

    let mut llargs = get_params(lloncefn);
    let fn_ret = callee.ty.fn_ret();
    let fn_ty = callee.direct_fn_type(bcx.ccx, &[]);
    // Index of `self` among the LLVM params (shifted by 1 if sret is used).
    let self_idx = fn_ty.ret.is_indirect() as usize;
    let env_arg = &orig_fn_ty.args[0];
    let env = if env_arg.is_indirect() {
        // Environment already passed by pointer: use it in place.
        LvalueRef::new_sized_ty(llargs[self_idx], closure_ty, Alignment::AbiAligned)
    } else {
        // Environment passed by value: spill it to a stack slot so the
        // by-ref callee can take its address.
        let scratch = LvalueRef::alloca(&bcx, closure_ty, "self");
        let mut llarg_idx = self_idx;
        env_arg.store_fn_arg(&bcx, &mut llarg_idx, scratch.llval);
        scratch
    };

    debug!("trans_fn_once_adapter_shim: env={:?}", env);
    // Adjust llargs such that llargs[self_idx..] has the call arguments.
    // For zero-sized closures that means sneaking in a new argument.
    if env_arg.is_ignore() {
        llargs.insert(self_idx, env.llval);
    } else {
        llargs[self_idx] = env.llval;
    }

    // Call the by-ref closure body with `self` in a cleanup scope,
    // to drop `self` when the body returns, or in case it unwinds.
    let self_scope = CleanupScope::schedule_drop_mem(&bcx, env);

    let llfn = callee.reify(bcx.ccx);
    let llret;
    if let Some(landing_pad) = self_scope.landing_pad {
        // Unwinding possible: use invoke so the landing pad runs the drop.
        let normal_bcx = bcx.build_sibling_block("normal-return");
        llret = bcx.invoke(llfn, &llargs[..], normal_bcx.llbb(), landing_pad, None);
        bcx = normal_bcx;
    } else {
        llret = bcx.call(llfn, &llargs[..], None);
    }
    fn_ty.apply_attrs_callsite(llret);

    if fn_ret.0.is_never() {
        // Diverging callee: no normal return path to emit.
        bcx.unreachable();
    } else {
        // Run the scheduled drop of `self` on the normal return path.
        self_scope.trans(&bcx);

        if fn_ty.ret.is_indirect() || fn_ty.ret.is_ignore() {
            bcx.ret_void();
        } else {
            bcx.ret(llret);
        }
    }

    // Cache the shim for subsequent lookups.
    ccx.instances().borrow_mut().insert(method_instance, lloncefn);

    lloncefn
}
/// Translates an adapter that implements the `Fn` trait for a fn
/// pointer. This is basically the equivalent of something like:
///
/// ```
/// impl<'a> Fn(&'a int) -> &'a int for fn(&int) -> &int {
///     extern "rust-abi" fn call(&self, args: (&'a int,)) -> &'a int {
///         (*self)(args.0)
///     }
/// }
/// ```
///
/// but for the bare function type given.
fn trans_fn_pointer_shim<'a, 'tcx>(
    ccx: &'a CrateContext<'a, 'tcx>,
    method_instance: Instance<'tcx>,
    closure_kind: ty::ClosureKind,
    bare_fn_ty: Ty<'tcx>)
    -> ValueRef
{
    let tcx = ccx.tcx();

    // Normalize the type for better caching.
    let bare_fn_ty = tcx.normalize_associated_type(&bare_fn_ty);

    // If this is an impl of `Fn` or `FnMut` trait, the receiver is `&self`.
    let is_by_ref = match closure_kind {
        ty::ClosureKind::Fn | ty::ClosureKind::FnMut => true,
        ty::ClosureKind::FnOnce => false,
    };

    let llfnpointer = match bare_fn_ty.sty {
        ty::TyFnDef(def_id, substs, _) => {
            // Function definitions have to be turned into a pointer.
            let llfn = Callee::def(ccx, def_id, substs).reify(ccx);
            if !is_by_ref {
                // A by-value fn item is ignored, so the shim has
                // the same signature as the original function.
                return llfn;
            }
            Some(llfn)
        }
        _ => None
    };

    // `self` type of the shim: `&fn(...)` for Fn/FnMut, `fn(...)` for FnOnce.
    let bare_fn_ty_maybe_ref = if is_by_ref {
        tcx.mk_imm_ref(tcx.mk_region(ty::ReErased), bare_fn_ty)
    } else {
        bare_fn_ty
    };

    // Check if we already trans'd this shim.
    if let Some(&llval) = ccx.fn_pointer_shims().borrow().get(&bare_fn_ty_maybe_ref) {
        return llval;
    }

    debug!("trans_fn_pointer_shim(bare_fn_ty={:?})",
           bare_fn_ty);

    // Construct the "tuply" version of `bare_fn_ty`. It takes two arguments: `self`,
    // which is the fn pointer, and `args`, which is the arguments tuple.
    let sig = bare_fn_ty.fn_sig();
    let sig = tcx.erase_late_bound_regions_and_normalize(&sig);
    assert_eq!(sig.unsafety, hir::Unsafety::Normal);
    assert_eq!(sig.abi, Abi::Rust);
    let tuple_input_ty = tcx.intern_tup(sig.inputs(), false);
    let sig = tcx.mk_fn_sig(
        [bare_fn_ty_maybe_ref, tuple_input_ty].iter().cloned(),
        sig.output(),
        false,
        hir::Unsafety::Normal,
        Abi::RustCall
    );
    let fn_ty = FnType::new(ccx, sig, &[]);
    let tuple_fn_ty = tcx.mk_fn_ptr(ty::Binder(sig));
    debug!("tuple_fn_ty: {:?}", tuple_fn_ty);

    // Declare the shim function itself.
    let function_name = method_instance.symbol_name(ccx.shared());
    let llfn = declare::define_internal_fn(ccx, &function_name, tuple_fn_ty);
    attributes::set_frame_pointer_elimination(ccx, llfn);

    // Translate its (single-block) body.
    let bcx = Builder::new_block(ccx, llfn, "entry-block");

    let mut llargs = get_params(llfn);

    // Remove `self` from the argument list; the remaining args (already
    // untupled by the RustCall ABI) are forwarded as-is.
    let self_arg = llargs.remove(fn_ty.ret.is_indirect() as usize);
    let llfnpointer = llfnpointer.unwrap_or_else(|| {
        // the first argument (`self`) will be ptr to the fn pointer
        if is_by_ref {
            bcx.load(self_arg, None)
        } else {
            self_arg
        }
    });

    let callee = Callee {
        data: Fn(llfnpointer),
        ty: bare_fn_ty
    };
    let fn_ret = callee.ty.fn_ret();
    let fn_ty = callee.direct_fn_type(ccx, &[]);
    let llret = bcx.call(llfnpointer, &llargs, None);
    fn_ty.apply_attrs_callsite(llret);
    if fn_ret.0.is_never() {
        bcx.unreachable();
    } else {
        if fn_ty.ret.is_indirect() || fn_ty.ret.is_ignore() {
            bcx.ret_void();
        } else {
            bcx.ret(llret);
        }
    }

    // Cache the shim by its (maybe-ref) self type.
    ccx.fn_pointer_shims().borrow_mut().insert(bare_fn_ty_maybe_ref, llfn);

    llfn
}
use rustc::ty::TypeFoldable;
/// Translates a reference to a fn/method item, monomorphizing and
/// inlining as it goes.
@ -519,26 +33,22 @@ fn trans_fn_pointer_shim<'a, 'tcx>(
/// # Parameters
///
/// - `ccx`: the crate context
/// - `def_id`: def id of the fn or method item being referenced
/// - `substs`: values for each of the fn/method's parameters
fn get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
def_id: DefId,
substs: &'tcx Substs<'tcx>)
-> (ValueRef, Ty<'tcx>) {
/// - `instance`: the instance to be instantiated
pub fn get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
instance: Instance<'tcx>)
-> ValueRef
{
let tcx = ccx.tcx();
debug!("get_fn(def_id={:?}, substs={:?})", def_id, substs);
debug!("get_fn(instance={:?})", instance);
assert!(!substs.needs_infer());
assert!(!substs.has_escaping_regions());
assert!(!substs.has_param_types());
let substs = tcx.normalize_associated_type(&substs);
let instance = Instance::new(def_id, substs);
let fn_ty = common::def_ty(ccx.shared(), def_id, substs);
assert!(!instance.substs.needs_infer());
assert!(!instance.substs.has_escaping_regions());
assert!(!instance.substs.has_param_types());
let fn_ty = common::instance_ty(ccx.shared(), &instance);
if let Some(&llfn) = ccx.instances().borrow().get(&instance) {
return (llfn, fn_ty);
return llfn;
}
let sym = ccx.symbol_map().get_or_compute(ccx.shared(),
@ -586,7 +96,10 @@ fn get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
assert_eq!(common::val_ty(llfn), llptrty);
debug!("get_fn: not casting pointer!");
let attrs = ccx.tcx().get_attrs(def_id);
if common::is_inline_instance(tcx, &instance) {
attributes::inline(llfn, attributes::InlineAttr::Hint);
}
let attrs = instance.def.attrs(ccx.tcx());
attributes::from_fn_attrs(ccx, &attrs, llfn);
let is_local_def = ccx.shared().translation_items().borrow()
@ -598,7 +111,9 @@ fn get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::ExternalLinkage);
}
}
if ccx.use_dll_storage_attrs() && ccx.sess().cstore.is_dllimport_foreign_item(def_id) {
if ccx.use_dll_storage_attrs() &&
ccx.sess().cstore.is_dllimport_foreign_item(instance.def_id())
{
unsafe {
llvm::LLVMSetDLLStorageClass(llfn, llvm::DLLStorageClass::DllImport);
}
@ -608,5 +123,13 @@ fn get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
ccx.instances().borrow_mut().insert(instance, llfn);
(llfn, fn_ty)
llfn
}
/// Resolves `(def_id, substs)` to a concrete instance via
/// `monomorphize::resolve` and returns that instance's LLVM function value.
pub fn resolve_and_get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                                    def_id: DefId,
                                    substs: &'tcx Substs<'tcx>)
                                    -> ValueRef
{
    get_fn(ccx, monomorphize::resolve(ccx.shared(), def_id, substs))
}

View File

@ -1,162 +0,0 @@
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! ## The Cleanup module
//!
//! The cleanup module tracks what values need to be cleaned up as scopes
//! are exited, either via panic or just normal control flow.
//!
//! Cleanup items can be scheduled into any of the scopes on the stack.
//! Typically, when a scope is finished, we generate the cleanup code. This
//! corresponds to a normal exit from a block (for example, an expression
//! completing evaluation successfully without panic).
use llvm::BasicBlockRef;
use base;
use mir::lvalue::LvalueRef;
use rustc::mir::tcx::LvalueTy;
use builder::Builder;
use common::Funclet;
use glue;
use type_::Type;
/// A scope holding at most one scheduled cleanup (a drop), together with the
/// landing pad that runs it during unwinding.
pub struct CleanupScope<'tcx> {
    // Cleanup to run upon scope exit.
    cleanup: Option<DropValue<'tcx>>,

    // Computed on creation if compiling with landing pads (!sess.no_landing_pads)
    pub landing_pad: Option<BasicBlockRef>,
}
/// A pending drop of `val`, translated via `glue::call_drop_glue`.
#[derive(Copy, Clone)]
pub struct DropValue<'tcx> {
    val: LvalueRef<'tcx>,
    // When true, drop the value's contents without running the type's own
    // destructor (see `CleanupScope::schedule_drop_adt_contents`).
    skip_dtor: bool,
}
impl<'tcx> DropValue<'tcx> {
    // Emit the drop-glue call for this value, optionally inside a funclet
    // (MSVC SEH) context.
    fn trans<'a>(&self, funclet: Option<&'a Funclet>, bcx: &Builder<'a, 'tcx>) {
        glue::call_drop_glue(bcx, self.val, self.skip_dtor, funclet)
    }

    /// Creates a landing pad for the top scope. The landing pad will perform all cleanups necessary
    /// for an unwind and then `resume` to continue error propagation:
    ///
    ///     landing_pad -> ... cleanups ... -> [resume]
    ///
    /// This should only be called once per function, as it creates an alloca for the landingpad.
    fn get_landing_pad<'a>(&self, bcx: &Builder<'a, 'tcx>) -> BasicBlockRef {
        debug!("get_landing_pad");

        // Build the unwind block in a sibling of the current block.
        let bcx = bcx.build_sibling_block("cleanup_unwind");
        let llpersonality = bcx.ccx.eh_personality();
        bcx.set_personality_fn(llpersonality);

        if base::wants_msvc_seh(bcx.sess()) {
            // MSVC-style exception handling uses cleanuppad/cleanupret
            // funclets instead of landingpad/resume.
            let pad = bcx.cleanup_pad(None, &[]);
            let funclet = Some(Funclet::new(pad));
            self.trans(funclet.as_ref(), &bcx);

            bcx.cleanup_ret(pad, None);
        } else {
            // The landing pad return type (the type being propagated). Not sure
            // what this represents but it's determined by the personality
            // function and this is what the EH proposal example uses.
            let llretty = Type::struct_(bcx.ccx, &[Type::i8p(bcx.ccx), Type::i32(bcx.ccx)], false);

            // The only landing pad clause will be 'cleanup'
            let llretval = bcx.landing_pad(llretty, llpersonality, 1, bcx.llfn());

            // The landing pad block is a cleanup
            bcx.set_cleanup(llretval);

            // Insert cleanup instructions into the cleanup block
            self.trans(None, &bcx);

            if !bcx.sess().target.target.options.custom_unwind_resume {
                bcx.resume(llretval);
            } else {
                // Targets with a custom unwind resume call the EH runtime's
                // resume function with the exception pointer instead.
                let exc_ptr = bcx.extract_value(llretval, 0);
                bcx.call(bcx.ccx.eh_unwind_resume(), &[exc_ptr], None);
                bcx.unreachable();
            }
        }

        bcx.llbb()
    }
}
impl<'a, 'tcx> CleanupScope<'tcx> {
    /// Schedules a (deep) drop of `val`, which is a pointer to an instance of `ty`
    pub fn schedule_drop_mem(
        bcx: &Builder<'a, 'tcx>, val: LvalueRef<'tcx>
    ) -> CleanupScope<'tcx> {
        if let LvalueTy::Downcast { .. } = val.ty {
            bug!("Cannot drop downcast ty yet");
        }
        // Types without drop glue need no cleanup at all.
        if !bcx.ccx.shared().type_needs_drop(val.ty.to_ty(bcx.tcx())) {
            return CleanupScope::noop();
        }
        let drop = DropValue {
            val: val,
            skip_dtor: false,
        };

        CleanupScope::new(bcx, drop)
    }

    /// Issue #23611: Schedules a (deep) drop of the contents of
    /// `val`, which is a pointer to an instance of struct/enum type
    /// `ty`. The scheduled code handles extracting the discriminant
    /// and dropping the contents associated with that variant
    /// *without* executing any associated drop implementation.
    pub fn schedule_drop_adt_contents(
        bcx: &Builder<'a, 'tcx>, val: LvalueRef<'tcx>
    ) -> CleanupScope<'tcx> {
        if let LvalueTy::Downcast { .. } = val.ty {
            bug!("Cannot drop downcast ty yet");
        }
        // `if` below could be "!contents_needs_drop"; skipping drop
        // is just an optimization, so sound to be conservative.
        if !bcx.ccx.shared().type_needs_drop(val.ty.to_ty(bcx.tcx())) {
            return CleanupScope::noop();
        }

        let drop = DropValue {
            val: val,
            skip_dtor: true,
        };

        CleanupScope::new(bcx, drop)
    }

    // Wrap a scheduled drop in a scope, materializing its landing pad
    // unless landing pads are disabled for this session.
    fn new(bcx: &Builder<'a, 'tcx>, drop_val: DropValue<'tcx>) -> CleanupScope<'tcx> {
        CleanupScope {
            cleanup: Some(drop_val),
            landing_pad: if !bcx.sess().no_landing_pads() {
                Some(drop_val.get_landing_pad(bcx))
            } else {
                None
            },
        }
    }

    // A scope with nothing to clean up.
    pub fn noop() -> CleanupScope<'tcx> {
        CleanupScope {
            cleanup: None,
            landing_pad: None,
        }
    }

    // Emit the scheduled cleanup (if any) on the normal (non-unwind) path.
    pub fn trans(self, bcx: &'a Builder<'a, 'tcx>) {
        if let Some(cleanup) = self.cleanup {
            cleanup.trans(None, &bcx);
        }
    }
}

View File

@ -193,29 +193,21 @@ use rustc::hir::itemlikevisit::ItemLikeVisitor;
use rustc::hir::map as hir_map;
use rustc::hir::def_id::DefId;
use rustc::middle::lang_items::{BoxFreeFnLangItem, ExchangeMallocFnLangItem};
use rustc::middle::lang_items::{ExchangeMallocFnLangItem};
use rustc::traits;
use rustc::ty::subst::{Kind, Substs, Subst};
use rustc::ty::subst::{Substs, Subst};
use rustc::ty::{self, TypeFoldable, TyCtxt};
use rustc::ty::adjustment::CustomCoerceUnsized;
use rustc::mir::{self, Location};
use rustc::mir::visit as mir_visit;
use rustc::mir::visit::Visitor as MirVisitor;
use syntax::abi::Abi;
use syntax_pos::DUMMY_SP;
use base::custom_coerce_unsize_info;
use callee::needs_fn_once_adapter_shim;
use context::SharedCrateContext;
use common::{def_ty, fulfill_obligation};
use glue::{self, DropGlueKind};
use common::{def_ty, instance_ty};
use monomorphize::{self, Instance};
use util::nodemap::{FxHashSet, FxHashMap, DefIdMap};
use trans_item::{TransItem, DefPathBasedNames, InstantiationMode};
use std::iter;
#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
pub enum TransItemCollectionMode {
Eager,
@ -331,27 +323,23 @@ fn collect_items_rec<'a, 'tcx: 'a>(scx: &SharedCrateContext<'a, 'tcx>,
let recursion_depth_reset;
match starting_point {
TransItem::DropGlue(t) => {
find_drop_glue_neighbors(scx, t, &mut neighbors);
recursion_depth_reset = None;
}
TransItem::Static(node_id) => {
let def_id = scx.tcx().hir.local_def_id(node_id);
let instance = Instance::mono(scx.tcx(), def_id);
// Sanity check whether this ended up being collected accidentally
debug_assert!(should_trans_locally(scx.tcx(), def_id));
debug_assert!(should_trans_locally(scx.tcx(), &instance));
let ty = def_ty(scx, def_id, Substs::empty());
let ty = glue::get_drop_glue_type(scx, ty);
neighbors.push(TransItem::DropGlue(DropGlueKind::Ty(ty)));
let ty = instance_ty(scx, &instance);
visit_drop_use(scx, ty, true, &mut neighbors);
recursion_depth_reset = None;
collect_neighbours(scx, Instance::mono(scx, def_id), &mut neighbors);
collect_neighbours(scx, instance, &mut neighbors);
}
TransItem::Fn(instance) => {
// Sanity check whether this ended up being collected accidentally
debug_assert!(should_trans_locally(scx.tcx(), instance.def));
debug_assert!(should_trans_locally(scx.tcx(), &instance));
// Keep track of the monomorphization recursion depth
recursion_depth_reset = Some(check_recursion_limit(scx.tcx(),
@ -395,27 +383,34 @@ fn check_recursion_limit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
instance: Instance<'tcx>,
recursion_depths: &mut DefIdMap<usize>)
-> (DefId, usize) {
let recursion_depth = recursion_depths.get(&instance.def)
.map(|x| *x)
.unwrap_or(0);
let def_id = instance.def_id();
let recursion_depth = recursion_depths.get(&def_id).cloned().unwrap_or(0);
debug!(" => recursion depth={}", recursion_depth);
let recursion_depth = if Some(def_id) == tcx.lang_items.drop_in_place_fn() {
// HACK: drop_in_place creates tight monomorphization loops. Give
// it more margin.
recursion_depth / 4
} else {
recursion_depth
};
// Code that needs to instantiate the same function recursively
// more than the recursion limit is assumed to be causing an
// infinite expansion.
if recursion_depth > tcx.sess.recursion_limit.get() {
let error = format!("reached the recursion limit while instantiating `{}`",
instance);
if let Some(node_id) = tcx.hir.as_local_node_id(instance.def) {
if let Some(node_id) = tcx.hir.as_local_node_id(def_id) {
tcx.sess.span_fatal(tcx.hir.span(node_id), &error);
} else {
tcx.sess.fatal(&error);
}
}
recursion_depths.insert(instance.def, recursion_depth + 1);
recursion_depths.insert(def_id, recursion_depth + 1);
(instance.def, recursion_depth)
(def_id, recursion_depth)
}
fn check_type_length_limit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
@ -438,7 +433,7 @@ fn check_type_length_limit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
let instance_name = instance.to_string();
let msg = format!("reached the type-length limit while instantiating `{:.64}...`",
instance_name);
let mut diag = if let Some(node_id) = tcx.hir.as_local_node_id(instance.def) {
let mut diag = if let Some(node_id) = tcx.hir.as_local_node_id(instance.def_id()) {
tcx.sess.struct_span_fatal(tcx.hir.span(node_id), &msg)
} else {
tcx.sess.struct_fatal(&msg)
@ -489,37 +484,34 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> {
self.output);
}
}
mir::Rvalue::Cast(mir::CastKind::ReifyFnPointer, ref operand, _) => {
let fn_ty = operand.ty(self.mir, self.scx.tcx());
let fn_ty = monomorphize::apply_param_substs(
self.scx,
self.param_substs,
&fn_ty);
visit_fn_use(self.scx, fn_ty, false, &mut self.output);
}
mir::Rvalue::Cast(mir::CastKind::ClosureFnPointer, ref operand, _) => {
let source_ty = operand.ty(self.mir, self.scx.tcx());
match source_ty.sty {
ty::TyClosure(def_id, substs) => {
let closure_trans_item =
create_fn_trans_item(self.scx,
def_id,
substs.substs,
self.param_substs);
self.output.push(closure_trans_item);
let instance = monomorphize::resolve_closure(
self.scx, def_id, substs, ty::ClosureKind::FnOnce);
self.output.push(create_fn_trans_item(instance));
}
_ => bug!(),
}
}
mir::Rvalue::Box(..) => {
let exchange_malloc_fn_def_id =
self.scx
.tcx()
.lang_items
.require(ExchangeMallocFnLangItem)
.unwrap_or_else(|e| self.scx.sess().fatal(&e));
if should_trans_locally(self.scx.tcx(), exchange_malloc_fn_def_id) {
let empty_substs = self.scx.empty_substs_for_def_id(exchange_malloc_fn_def_id);
let exchange_malloc_fn_trans_item =
create_fn_trans_item(self.scx,
exchange_malloc_fn_def_id,
empty_substs,
self.param_substs);
self.output.push(exchange_malloc_fn_trans_item);
let tcx = self.scx.tcx();
let exchange_malloc_fn_def_id = tcx
.lang_items
.require(ExchangeMallocFnLangItem)
.unwrap_or_else(|e| self.scx.sess().fatal(&e));
let instance = Instance::mono(tcx, exchange_malloc_fn_def_id);
if should_trans_locally(tcx, &instance) {
self.output.push(create_fn_trans_item(instance));
}
}
_ => { /* not interesting */ }
@ -528,165 +520,120 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> {
self.super_rvalue(rvalue, location);
}
fn visit_lvalue(&mut self,
lvalue: &mir::Lvalue<'tcx>,
context: mir_visit::LvalueContext<'tcx>,
location: Location) {
debug!("visiting lvalue {:?}", *lvalue);
fn visit_constant(&mut self, constant: &mir::Constant<'tcx>, location: Location) {
debug!("visiting constant {:?} @ {:?}", *constant, location);
if let mir_visit::LvalueContext::Drop = context {
let ty = lvalue.ty(self.mir, self.scx.tcx())
.to_ty(self.scx.tcx());
let ty = monomorphize::apply_param_substs(self.scx,
self.param_substs,
&ty);
assert!(ty.is_normalized_for_trans());
let ty = glue::get_drop_glue_type(self.scx, ty);
self.output.push(TransItem::DropGlue(DropGlueKind::Ty(ty)));
if let ty::TyFnDef(..) = constant.ty.sty {
// function definitions are zero-sized, and only generate
// IR when they are called/reified.
self.super_constant(constant, location);
return
}
self.super_lvalue(lvalue, context, location);
if let mir::Literal::Item { def_id, substs } = constant.literal {
let substs = monomorphize::apply_param_substs(self.scx,
self.param_substs,
&substs);
let instance = monomorphize::resolve(self.scx, def_id, substs);
collect_neighbours(self.scx, instance, self.output);
}
self.super_constant(constant, location);
}
fn visit_operand(&mut self, operand: &mir::Operand<'tcx>, location: Location) {
debug!("visiting operand {:?}", *operand);
let callee = match *operand {
mir::Operand::Constant(ref constant) => {
if let ty::TyFnDef(def_id, substs, _) = constant.ty.sty {
// This is something that can act as a callee, proceed
Some((def_id, substs))
} else {
// This is not a callee, but we still have to look for
// references to `const` items
if let mir::Literal::Item { def_id, substs } = constant.literal {
let substs = monomorphize::apply_param_substs(self.scx,
self.param_substs,
&substs);
let instance = Instance::new(def_id, substs).resolve_const(self.scx);
collect_neighbours(self.scx, instance, self.output);
}
None
}
}
_ => None
};
if let Some((callee_def_id, callee_substs)) = callee {
debug!(" => operand is callable");
// `callee_def_id` might refer to a trait method instead of a
// concrete implementation, so we have to find the actual
// implementation. For example, the call might look like
//
// std::cmp::partial_cmp(0i32, 1i32)
//
// Calling do_static_dispatch() here will map the def_id of
// `std::cmp::partial_cmp` to the def_id of `i32::partial_cmp<i32>`
let dispatched = do_static_dispatch(self.scx,
callee_def_id,
callee_substs,
self.param_substs);
if let StaticDispatchResult::Dispatched {
def_id: callee_def_id,
substs: callee_substs,
fn_once_adjustment,
} = dispatched {
// if we have a concrete impl (which we might not have
// in the case of something compiler generated like an
// object shim or a closure that is handled differently),
// we check if the callee is something that will actually
// result in a translation item ...
if can_result_in_trans_item(self.scx.tcx(), callee_def_id) {
// ... and create one if it does.
let trans_item = create_fn_trans_item(self.scx,
callee_def_id,
callee_substs,
self.param_substs);
self.output.push(trans_item);
// This call will instantiate an FnOnce adapter, which drops
// the closure environment. Therefore we need to make sure
// that we collect the drop-glue for the environment type.
if let Some(env_ty) = fn_once_adjustment {
let env_ty = glue::get_drop_glue_type(self.scx, env_ty);
if self.scx.type_needs_drop(env_ty) {
let dg = DropGlueKind::Ty(env_ty);
self.output.push(TransItem::DropGlue(dg));
}
}
}
}
}
self.super_operand(operand, location);
fn can_result_in_trans_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId)
-> bool {
match tcx.item_type(def_id).sty {
ty::TyFnDef(def_id, _, _) => {
// Some constructors also have type TyFnDef but they are
// always instantiated inline and don't result in a
// translation item. Same for FFI functions.
if let Some(hir_map::NodeForeignItem(_)) = tcx.hir.get_if_local(def_id) {
return false;
}
}
ty::TyClosure(..) => {}
_ => return false
}
should_trans_locally(tcx, def_id)
}
}
// This takes care of the "drop_in_place" intrinsic for which we otherwise
// we would not register drop-glues.
fn visit_terminator_kind(&mut self,
block: mir::BasicBlock,
kind: &mir::TerminatorKind<'tcx>,
location: Location) {
let tcx = self.scx.tcx();
match *kind {
mir::TerminatorKind::Call {
func: mir::Operand::Constant(ref constant),
ref args,
..
} => {
match constant.ty.sty {
ty::TyFnDef(def_id, _, bare_fn_ty)
if is_drop_in_place_intrinsic(tcx, def_id, bare_fn_ty) => {
let operand_ty = args[0].ty(self.mir, tcx);
if let ty::TyRawPtr(mt) = operand_ty.sty {
let operand_ty = monomorphize::apply_param_substs(self.scx,
self.param_substs,
&mt.ty);
let ty = glue::get_drop_glue_type(self.scx, operand_ty);
self.output.push(TransItem::DropGlue(DropGlueKind::Ty(ty)));
} else {
bug!("Has the drop_in_place() intrinsic's signature changed?")
}
}
_ => { /* Nothing to do. */ }
}
mir::TerminatorKind::Call { ref func, .. } => {
let callee_ty = func.ty(self.mir, tcx);
let callee_ty = monomorphize::apply_param_substs(
self.scx, self.param_substs, &callee_ty);
visit_fn_use(self.scx, callee_ty, true, &mut self.output);
}
_ => { /* Nothing to do. */ }
mir::TerminatorKind::Drop { ref location, .. } |
mir::TerminatorKind::DropAndReplace { ref location, .. } => {
let ty = location.ty(self.mir, self.scx.tcx())
.to_ty(self.scx.tcx());
let ty = monomorphize::apply_param_substs(self.scx,
self.param_substs,
&ty);
visit_drop_use(self.scx, ty, true, self.output);
}
mir::TerminatorKind::Goto { .. } |
mir::TerminatorKind::SwitchInt { .. } |
mir::TerminatorKind::Resume |
mir::TerminatorKind::Return |
mir::TerminatorKind::Unreachable |
mir::TerminatorKind::Assert { .. } => {}
}
self.super_terminator_kind(block, kind, location);
}
}
fn is_drop_in_place_intrinsic<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId,
bare_fn_ty: ty::PolyFnSig<'tcx>)
-> bool {
(bare_fn_ty.abi() == Abi::RustIntrinsic ||
bare_fn_ty.abi() == Abi::PlatformIntrinsic) &&
tcx.item_name(def_id) == "drop_in_place"
fn visit_drop_use<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>,
ty: ty::Ty<'tcx>,
is_direct_call: bool,
output: &mut Vec<TransItem<'tcx>>)
{
let instance = monomorphize::resolve_drop_in_place(scx, ty);
visit_instance_use(scx, instance, is_direct_call, output);
}
fn visit_fn_use<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>,
ty: ty::Ty<'tcx>,
is_direct_call: bool,
output: &mut Vec<TransItem<'tcx>>)
{
if let ty::TyFnDef(def_id, substs, _) = ty.sty {
let instance = monomorphize::resolve(scx, def_id, substs);
visit_instance_use(scx, instance, is_direct_call, output);
}
}
fn visit_instance_use<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>,
instance: ty::Instance<'tcx>,
is_direct_call: bool,
output: &mut Vec<TransItem<'tcx>>)
{
debug!("visit_item_use({:?}, is_direct_call={:?})", instance, is_direct_call);
if !should_trans_locally(scx.tcx(), &instance) {
return
}
match instance.def {
ty::InstanceDef::Intrinsic(def_id) => {
if !is_direct_call {
bug!("intrinsic {:?} being reified", def_id);
}
}
ty::InstanceDef::Virtual(..) |
ty::InstanceDef::DropGlue(_, None) => {
// don't need to emit shim if we are calling directly.
if !is_direct_call {
output.push(create_fn_trans_item(instance));
}
}
ty::InstanceDef::DropGlue(_, Some(ty)) => {
match ty.sty {
ty::TyArray(ety, _) |
ty::TySlice(ety)
if is_direct_call =>
{
// drop of arrays/slices is translated in-line.
visit_drop_use(scx, ety, false, output);
}
_ => {}
};
output.push(create_fn_trans_item(instance));
}
ty::InstanceDef::ClosureOnceShim { .. } |
ty::InstanceDef::Item(..) |
ty::InstanceDef::FnPtrShim(..) => {
output.push(create_fn_trans_item(instance));
}
}
}
@ -694,295 +641,35 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> {
// Returns true if we should translate an instance in the local crate.
// Returns false if we can just link to the upstream crate and therefore don't
// need a translation item.
fn should_trans_locally<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId)
fn should_trans_locally<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, instance: &Instance<'tcx>)
-> bool {
if let ty::TyFnDef(_, _, sig) = tcx.item_type(def_id).sty {
if let Some(adt_def) = sig.output().skip_binder().ty_adt_def() {
if adt_def.variants.iter().any(|v| def_id == v.did) {
// HACK: ADT constructors are translated in-place and
// do not have a trans-item.
return false;
}
}
}
if def_id.is_local() {
true
} else {
if tcx.sess.cstore.is_exported_symbol(def_id) ||
tcx.sess.cstore.is_foreign_item(def_id) {
// We can link to the item in question, no instance needed in this
// crate
false
} else {
if !tcx.sess.cstore.is_item_mir_available(def_id) {
bug!("Cannot create local trans-item for {:?}", def_id)
}
true
}
}
}
fn find_drop_glue_neighbors<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>,
dg: DropGlueKind<'tcx>,
output: &mut Vec<TransItem<'tcx>>) {
let ty = match dg {
DropGlueKind::Ty(ty) => ty,
DropGlueKind::TyContents(_) => {
// We already collected the neighbors of this item via the
// DropGlueKind::Ty variant.
return
}
let def_id = match instance.def {
ty::InstanceDef::Item(def_id) => def_id,
ty::InstanceDef::ClosureOnceShim { .. } |
ty::InstanceDef::Virtual(..) |
ty::InstanceDef::FnPtrShim(..) |
ty::InstanceDef::DropGlue(..) |
ty::InstanceDef::Intrinsic(_) => return true
};
debug!("find_drop_glue_neighbors: {}", type_to_string(scx.tcx(), ty));
// Make sure the BoxFreeFn lang-item gets translated if there is a boxed value.
if ty.is_box() {
let def_id = scx.tcx().require_lang_item(BoxFreeFnLangItem);
if should_trans_locally(scx.tcx(), def_id) {
let box_free_fn_trans_item =
create_fn_trans_item(scx,
def_id,
scx.tcx().mk_substs(iter::once(Kind::from(ty.boxed_ty()))),
scx.tcx().intern_substs(&[]));
output.push(box_free_fn_trans_item);
match tcx.hir.get_if_local(def_id) {
Some(hir_map::NodeForeignItem(..)) => {
false // foreign items are linked against, not translated.
}
}
// If the type implements Drop, also add a translation item for the
// monomorphized Drop::drop() implementation.
let destructor = match ty.sty {
ty::TyAdt(def, _) => def.destructor(scx.tcx()),
_ => None
};
if let (Some(destructor), false) = (destructor, ty.is_box()) {
use rustc::ty::ToPolyTraitRef;
let drop_trait_def_id = scx.tcx()
.lang_items
.drop_trait()
.unwrap();
let self_type_substs = scx.tcx().mk_substs_trait(ty, &[]);
let trait_ref = ty::TraitRef {
def_id: drop_trait_def_id,
substs: self_type_substs,
}.to_poly_trait_ref();
let substs = match fulfill_obligation(scx, DUMMY_SP, trait_ref) {
traits::VtableImpl(data) => data.substs,
_ => bug!()
};
if should_trans_locally(scx.tcx(), destructor.did) {
let trans_item = create_fn_trans_item(scx,
destructor.did,
substs,
scx.tcx().intern_substs(&[]));
output.push(trans_item);
}
// This type has a Drop implementation, we'll need the contents-only
// version of the glue too.
output.push(TransItem::DropGlue(DropGlueKind::TyContents(ty)));
}
// Finally add the types of nested values
match ty.sty {
ty::TyBool |
ty::TyChar |
ty::TyInt(_) |
ty::TyUint(_) |
ty::TyStr |
ty::TyFloat(_) |
ty::TyRawPtr(_) |
ty::TyRef(..) |
ty::TyFnDef(..) |
ty::TyFnPtr(_) |
ty::TyNever |
ty::TyDynamic(..) => {
/* nothing to do */
}
ty::TyAdt(def, _) if def.is_box() => {
let inner_type = glue::get_drop_glue_type(scx, ty.boxed_ty());
if scx.type_needs_drop(inner_type) {
output.push(TransItem::DropGlue(DropGlueKind::Ty(inner_type)));
}
}
ty::TyAdt(def, substs) => {
for field in def.all_fields() {
let field_type = def_ty(scx, field.did, substs);
let field_type = glue::get_drop_glue_type(scx, field_type);
if scx.type_needs_drop(field_type) {
output.push(TransItem::DropGlue(DropGlueKind::Ty(field_type)));
}
}
}
ty::TyClosure(def_id, substs) => {
for upvar_ty in substs.upvar_tys(def_id, scx.tcx()) {
let upvar_ty = glue::get_drop_glue_type(scx, upvar_ty);
if scx.type_needs_drop(upvar_ty) {
output.push(TransItem::DropGlue(DropGlueKind::Ty(upvar_ty)));
}
}
}
ty::TySlice(inner_type) |
ty::TyArray(inner_type, _) => {
let inner_type = glue::get_drop_glue_type(scx, inner_type);
if scx.type_needs_drop(inner_type) {
output.push(TransItem::DropGlue(DropGlueKind::Ty(inner_type)));
}
}
ty::TyTuple(args, _) => {
for arg in args {
let arg = glue::get_drop_glue_type(scx, arg);
if scx.type_needs_drop(arg) {
output.push(TransItem::DropGlue(DropGlueKind::Ty(arg)));
}
}
}
ty::TyProjection(_) |
ty::TyParam(_) |
ty::TyInfer(_) |
ty::TyAnon(..) |
ty::TyError => {
bug!("encountered unexpected type");
}
}
}
fn do_static_dispatch<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>,
fn_def_id: DefId,
fn_substs: &'tcx Substs<'tcx>,
param_substs: &'tcx Substs<'tcx>)
-> StaticDispatchResult<'tcx> {
debug!("do_static_dispatch(fn_def_id={}, fn_substs={:?}, param_substs={:?})",
def_id_to_string(scx.tcx(), fn_def_id),
fn_substs,
param_substs);
if let Some(trait_def_id) = scx.tcx().trait_of_item(fn_def_id) {
debug!(" => trait method, attempting to find impl");
do_static_trait_method_dispatch(scx,
&scx.tcx().associated_item(fn_def_id),
trait_def_id,
fn_substs,
param_substs)
} else {
debug!(" => regular function");
// The function is not part of an impl or trait, no dispatching
// to be done
StaticDispatchResult::Dispatched {
def_id: fn_def_id,
substs: fn_substs,
fn_once_adjustment: None,
}
}
}
enum StaticDispatchResult<'tcx> {
// The call could be resolved statically as going to the method with
// `def_id` and `substs`.
Dispatched {
def_id: DefId,
substs: &'tcx Substs<'tcx>,
// If this is a call to a closure that needs an FnOnce adjustment,
// this contains the new self type of the call (= type of the closure
// environment)
fn_once_adjustment: Option<ty::Ty<'tcx>>,
},
// This goes to somewhere that we don't know at compile-time
Unknown
}
// Given a trait-method and substitution information, find out the actual
// implementation of the trait method.
fn do_static_trait_method_dispatch<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>,
trait_method: &ty::AssociatedItem,
trait_id: DefId,
callee_substs: &'tcx Substs<'tcx>,
param_substs: &'tcx Substs<'tcx>)
-> StaticDispatchResult<'tcx> {
let tcx = scx.tcx();
debug!("do_static_trait_method_dispatch(trait_method={}, \
trait_id={}, \
callee_substs={:?}, \
param_substs={:?}",
def_id_to_string(scx.tcx(), trait_method.def_id),
def_id_to_string(scx.tcx(), trait_id),
callee_substs,
param_substs);
let rcvr_substs = monomorphize::apply_param_substs(scx,
param_substs,
&callee_substs);
let trait_ref = ty::TraitRef::from_method(tcx, trait_id, rcvr_substs);
let vtbl = fulfill_obligation(scx, DUMMY_SP, ty::Binder(trait_ref));
// Now that we know which impl is being used, we can dispatch to
// the actual function:
match vtbl {
traits::VtableImpl(impl_data) => {
let (def_id, substs) = traits::find_method(tcx,
trait_method.name,
rcvr_substs,
&impl_data);
StaticDispatchResult::Dispatched {
def_id: def_id,
substs: substs,
fn_once_adjustment: None,
}
}
traits::VtableClosure(closure_data) => {
let closure_def_id = closure_data.closure_def_id;
let trait_closure_kind = tcx.lang_items.fn_trait_kind(trait_id).unwrap();
let actual_closure_kind = tcx.closure_kind(closure_def_id);
let needs_fn_once_adapter_shim =
match needs_fn_once_adapter_shim(actual_closure_kind,
trait_closure_kind) {
Ok(true) => true,
_ => false,
};
let fn_once_adjustment = if needs_fn_once_adapter_shim {
Some(tcx.mk_closure_from_closure_substs(closure_def_id,
closure_data.substs))
Some(_) => true,
None => {
if tcx.sess.cstore.is_exported_symbol(def_id) ||
tcx.sess.cstore.is_foreign_item(def_id)
{
// We can link to the item in question, no instance needed
// in this crate
false
} else {
None
};
StaticDispatchResult::Dispatched {
def_id: closure_def_id,
substs: closure_data.substs.substs,
fn_once_adjustment: fn_once_adjustment,
if !tcx.sess.cstore.is_item_mir_available(def_id) {
bug!("Cannot create local trans-item for {:?}", def_id)
}
true
}
}
traits::VtableFnPointer(ref data) => {
// If we know the destination of this fn-pointer, we'll have to make
// sure that this destination actually gets instantiated.
if let ty::TyFnDef(def_id, substs, _) = data.fn_ty.sty {
// The destination of the pointer might be something that needs
// further dispatching, such as a trait method, so we do that.
do_static_dispatch(scx, def_id, substs, param_substs)
} else {
StaticDispatchResult::Unknown
}
}
// Trait object shims are always instantiated in-place, and as they are
// just an ABI-adjusting indirect call they do not have any dependencies.
traits::VtableObject(..) => {
StaticDispatchResult::Unknown
}
_ => {
bug!("static call to invalid vtable: {:?}", vtbl)
}
}
}
@ -1051,7 +738,8 @@ fn find_vtable_types_for_unsizing<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>,
&ty::TyAdt(target_adt_def, target_substs)) => {
assert_eq!(source_adt_def, target_adt_def);
let kind = custom_coerce_unsize_info(scx, source_ty, target_ty);
let kind =
monomorphize::custom_coerce_unsize_info(scx, source_ty, target_ty);
let coerce_index = match kind {
CustomCoerceUnsized::Struct(i) => i
@ -1075,28 +763,9 @@ fn find_vtable_types_for_unsizing<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>,
}
}
fn create_fn_trans_item<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>,
def_id: DefId,
fn_substs: &'tcx Substs<'tcx>,
param_substs: &'tcx Substs<'tcx>)
-> TransItem<'tcx> {
let tcx = scx.tcx();
debug!("create_fn_trans_item(def_id={}, fn_substs={:?}, param_substs={:?})",
def_id_to_string(tcx, def_id),
fn_substs,
param_substs);
// We only get here, if fn_def_id either designates a local item or
// an inlineable external item. Non-inlineable external items are
// ignored because we don't want to generate any code for them.
let concrete_substs = monomorphize::apply_param_substs(scx,
param_substs,
&fn_substs);
assert!(concrete_substs.is_normalized_for_trans(),
"concrete_substs not normalized for trans: {:?}",
concrete_substs);
TransItem::Fn(Instance::new(def_id, concrete_substs))
fn create_fn_trans_item<'a, 'tcx>(instance: Instance<'tcx>) -> TransItem<'tcx> {
debug!("create_fn_trans_item(instance={})", instance);
TransItem::Fn(instance)
}
/// Creates a `TransItem` for each method that is referenced by the vtable for
@ -1111,33 +780,18 @@ fn create_trans_items_for_vtable_methods<'a, 'tcx>(scx: &SharedCrateContext<'a,
if let ty::TyDynamic(ref trait_ty, ..) = trait_ty.sty {
if let Some(principal) = trait_ty.principal() {
let poly_trait_ref = principal.with_self_ty(scx.tcx(), impl_ty);
let param_substs = scx.tcx().intern_substs(&[]);
assert!(!poly_trait_ref.has_escaping_regions());
// Walk all methods of the trait, including those of its supertraits
let methods = traits::get_vtable_methods(scx.tcx(), poly_trait_ref);
let methods = methods.filter_map(|method| method)
.filter_map(|(def_id, substs)| {
if let StaticDispatchResult::Dispatched {
def_id,
substs,
// We already add the drop-glue for the closure env
// unconditionally below.
fn_once_adjustment: _ ,
} = do_static_dispatch(scx, def_id, substs, param_substs) {
Some((def_id, substs))
} else {
None
}
})
.filter(|&(def_id, _)| should_trans_locally(scx.tcx(), def_id))
.map(|(def_id, substs)| create_fn_trans_item(scx, def_id, substs, param_substs));
.map(|(def_id, substs)| monomorphize::resolve(scx, def_id, substs))
.filter(|&instance| should_trans_locally(scx.tcx(), &instance))
.map(|instance| create_fn_trans_item(instance));
output.extend(methods);
}
// Also add the destructor
let dg_type = glue::get_drop_glue_type(scx, impl_ty);
output.push(TransItem::DropGlue(DropGlueKind::Ty(dg_type)));
visit_drop_use(scx, impl_ty, false, output);
}
}
@ -1182,8 +836,7 @@ impl<'b, 'a, 'v> ItemLikeVisitor<'v> for RootCollector<'b, 'a, 'v> {
def_id_to_string(self.scx.tcx(), def_id));
let ty = def_ty(self.scx, def_id, Substs::empty());
let ty = glue::get_drop_glue_type(self.scx, ty);
self.output.push(TransItem::DropGlue(DropGlueKind::Ty(ty)));
visit_drop_use(self.scx, ty, true, self.output);
}
}
}
@ -1204,7 +857,7 @@ impl<'b, 'a, 'v> ItemLikeVisitor<'v> for RootCollector<'b, 'a, 'v> {
debug!("RootCollector: ItemFn({})",
def_id_to_string(self.scx.tcx(), def_id));
let instance = Instance::mono(self.scx, def_id);
let instance = Instance::mono(self.scx.tcx(), def_id);
self.output.push(TransItem::Fn(instance));
}
}
@ -1242,7 +895,7 @@ impl<'b, 'a, 'v> ItemLikeVisitor<'v> for RootCollector<'b, 'a, 'v> {
debug!("RootCollector: MethodImplItem({})",
def_id_to_string(self.scx.tcx(), def_id));
let instance = Instance::mono(self.scx, def_id);
let instance = Instance::mono(self.scx.tcx(), def_id);
self.output.push(TransItem::Fn(instance));
}
}
@ -1285,33 +938,17 @@ fn create_trans_items_for_default_impls<'a, 'tcx>(scx: &SharedCrateContext<'a, '
continue;
}
// The substitutions we have are on the impl, so we grab
// the method type from the impl to substitute into.
let impl_substs = Substs::for_item(tcx, impl_def_id,
|_, _| tcx.mk_region(ty::ReErased),
|_, _| tcx.types.err);
let impl_data = traits::VtableImplData {
impl_def_id: impl_def_id,
substs: impl_substs,
nested: vec![]
};
let (def_id, substs) = traits::find_method(tcx,
method.name,
callee_substs,
&impl_data);
let instance =
monomorphize::resolve(scx, method.def_id, callee_substs);
let predicates = tcx.item_predicates(def_id).predicates
.subst(tcx, substs);
let predicates = tcx.item_predicates(instance.def_id()).predicates
.subst(tcx, instance.substs);
if !traits::normalize_and_test_predicates(tcx, predicates) {
continue;
}
if should_trans_locally(tcx, method.def_id) {
let item = create_fn_trans_item(scx,
method.def_id,
callee_substs,
tcx.erase_regions(&substs));
output.push(item);
if should_trans_locally(tcx, &instance) {
output.push(create_fn_trans_item(instance));
}
}
}
@ -1327,7 +964,7 @@ fn collect_neighbours<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>,
instance: Instance<'tcx>,
output: &mut Vec<TransItem<'tcx>>)
{
let mir = scx.tcx().item_mir(instance.def);
let mir = scx.tcx().instance_mir(instance.def);
let mut visitor = MirNeighborCollector {
scx: scx,
@ -1351,12 +988,3 @@ fn def_id_to_string<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
printer.push_def_path(def_id, &mut output);
output
}
fn type_to_string<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
ty: ty::Ty<'tcx>)
-> String {
let mut output = String::new();
let printer = DefPathBasedNames::new(tcx, false, false);
printer.push_type_name(ty, &mut output);
output
}

View File

@ -17,7 +17,6 @@ use llvm::{ValueRef, ContextRef, TypeKind};
use llvm::{True, False, Bool, OperandBundleDef};
use rustc::hir::def_id::DefId;
use rustc::hir::map::DefPathData;
use rustc::util::common::MemoizationMap;
use middle::lang_items::LangItem;
use base;
use builder::Builder;
@ -30,13 +29,12 @@ use value::Value;
use rustc::ty::{self, Ty, TyCtxt};
use rustc::ty::layout::Layout;
use rustc::ty::subst::{Subst, Substs};
use rustc::traits::{self, SelectionContext, Reveal};
use rustc::hir;
use libc::{c_uint, c_char};
use std::iter;
use syntax::ast;
use syntax::attr;
use syntax::symbol::InternedString;
use syntax_pos::Span;
@ -426,73 +424,6 @@ pub fn is_null(val: ValueRef) -> bool {
}
}
/// Attempts to resolve an obligation. The result is a shallow vtable resolution -- meaning that we
/// do not (necessarily) resolve all nested obligations on the impl. Note that type check should
/// guarantee to us that all nested obligations *could be* resolved if we wanted to.
pub fn fulfill_obligation<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>,
span: Span,
trait_ref: ty::PolyTraitRef<'tcx>)
-> traits::Vtable<'tcx, ()>
{
let tcx = scx.tcx();
// Remove any references to regions; this helps improve caching.
let trait_ref = tcx.erase_regions(&trait_ref);
scx.trait_cache().memoize(trait_ref, || {
debug!("trans::fulfill_obligation(trait_ref={:?}, def_id={:?})",
trait_ref, trait_ref.def_id());
// Do the initial selection for the obligation. This yields the
// shallow result we are looking for -- that is, what specific impl.
tcx.infer_ctxt((), Reveal::All).enter(|infcx| {
let mut selcx = SelectionContext::new(&infcx);
let obligation_cause = traits::ObligationCause::misc(span,
ast::DUMMY_NODE_ID);
let obligation = traits::Obligation::new(obligation_cause,
trait_ref.to_poly_trait_predicate());
let selection = match selcx.select(&obligation) {
Ok(Some(selection)) => selection,
Ok(None) => {
// Ambiguity can happen when monomorphizing during trans
// expands to some humongo type that never occurred
// statically -- this humongo type can then overflow,
// leading to an ambiguous result. So report this as an
// overflow bug, since I believe this is the only case
// where ambiguity can result.
debug!("Encountered ambiguity selecting `{:?}` during trans, \
presuming due to overflow",
trait_ref);
tcx.sess.span_fatal(span,
"reached the recursion limit during monomorphization \
(selection ambiguity)");
}
Err(e) => {
span_bug!(span, "Encountered error `{:?}` selecting `{:?}` during trans",
e, trait_ref)
}
};
debug!("fulfill_obligation: selection={:?}", selection);
// Currently, we use a fulfillment context to completely resolve
// all nested obligations. This is because they can inform the
// inference of the impl's type parameters.
let mut fulfill_cx = traits::FulfillmentContext::new();
let vtable = selection.map(|predicate| {
debug!("fulfill_obligation: register_predicate_obligation {:?}", predicate);
fulfill_cx.register_predicate_obligation(&infcx, predicate);
});
let vtable = infcx.drain_fulfillment_cx_or_panic(span, &mut fulfill_cx, &vtable);
info!("Cache miss: {:?} => {:?}", trait_ref, vtable);
vtable
})
})
}
pub fn langcall(tcx: TyCtxt,
span: Option<Span>,
msg: &str,
@ -601,8 +532,31 @@ pub fn ty_fn_sig<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
}
}
pub fn is_closure(tcx: TyCtxt, def_id: DefId) -> bool {
tcx.def_key(def_id).disambiguated_data.data == DefPathData::ClosureExpr
pub fn requests_inline<'a, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
instance: &ty::Instance<'tcx>
) -> bool {
if is_inline_instance(tcx, instance) {
return true
}
attr::requests_inline(&instance.def.attrs(tcx)[..])
}
pub fn is_inline_instance<'a, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
instance: &ty::Instance<'tcx>
) -> bool {
let def_id = match instance.def {
ty::InstanceDef::Item(def_id) => def_id,
ty::InstanceDef::DropGlue(_, Some(_)) => return false,
_ => return true
};
match tcx.def_key(def_id).disambiguated_data.data {
DefPathData::StructCtor |
DefPathData::EnumVariant(..) |
DefPathData::ClosureExpr => true,
_ => false
}
}
/// Given a DefId and some Substs, produces the monomorphic item type.
@ -614,3 +568,12 @@ pub fn def_ty<'a, 'tcx>(shared: &SharedCrateContext<'a, 'tcx>,
let ty = shared.tcx().item_type(def_id);
monomorphize::apply_param_substs(shared, substs, &ty)
}
/// Return the substituted type of an instance.
pub fn instance_ty<'a, 'tcx>(shared: &SharedCrateContext<'a, 'tcx>,
instance: &ty::Instance<'tcx>)
-> Ty<'tcx>
{
let ty = instance.def.def_ty(shared.tcx());
monomorphize::apply_param_substs(shared, instance.substs, &ty)
}

View File

@ -9,6 +9,7 @@
// except according to those terms.
use back::symbol_names;
use llvm;
use llvm::{SetUnnamedAddr};
use llvm::{ValueRef, True};
@ -24,7 +25,6 @@ use monomorphize::Instance;
use type_::Type;
use type_of;
use rustc::ty;
use rustc::ty::subst::Substs;
use rustc::hir;
@ -80,12 +80,12 @@ pub fn addr_of(ccx: &CrateContext,
}
pub fn get_static(ccx: &CrateContext, def_id: DefId) -> ValueRef {
let instance = Instance::mono(ccx.shared(), def_id);
let instance = Instance::mono(ccx.tcx(), def_id);
if let Some(&g) = ccx.instances().borrow().get(&instance) {
return g;
}
let ty = common::def_ty(ccx.shared(), def_id, Substs::empty());
let ty = common::instance_ty(ccx.shared(), &instance);
let g = if let Some(id) = ccx.tcx().hir.as_local_node_id(def_id) {
let llty = type_of::type_of(ccx, ty);
@ -114,7 +114,7 @@ pub fn get_static(ccx: &CrateContext, def_id: DefId) -> ValueRef {
hir_map::NodeForeignItem(&hir::ForeignItem {
ref attrs, span, node: hir::ForeignItemStatic(..), ..
}) => {
let sym = instance.symbol_name(ccx.shared());
let sym = symbol_names::symbol_name(instance, ccx.shared());
let g = if let Some(name) =
attr::first_attr_value_str_by_name(&attrs, "linkage") {
// If this is a static with a linkage specified, then we need to handle
@ -174,7 +174,7 @@ pub fn get_static(ccx: &CrateContext, def_id: DefId) -> ValueRef {
g
} else {
let sym = instance.symbol_name(ccx.shared());
let sym = symbol_names::symbol_name(instance, ccx.shared());
// FIXME(nagisa): perhaps the map of externs could be offloaded to llvm somehow?
// FIXME(nagisa): investigate whether it can be changed into define_global
@ -235,7 +235,8 @@ pub fn trans_static<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
v
};
let ty = common::def_ty(ccx.shared(), def_id, Substs::empty());
let instance = Instance::mono(ccx.tcx(), def_id);
let ty = common::instance_ty(ccx.shared(), &instance);
let llty = type_of::type_of(ccx, ty);
let g = if val_llty == llty {
g

View File

@ -18,10 +18,9 @@ use rustc::hir::def::ExportMap;
use rustc::hir::def_id::DefId;
use rustc::traits;
use debuginfo;
use callee::Callee;
use callee;
use base;
use declare;
use glue::DropGlueKind;
use monomorphize::Instance;
use partitioning::CodegenUnit;
@ -46,7 +45,7 @@ use std::str;
use syntax::ast;
use syntax::symbol::InternedString;
use syntax_pos::DUMMY_SP;
use abi::{Abi, FnType};
use abi::Abi;
pub struct Stats {
pub n_glues_created: Cell<usize>,
@ -94,8 +93,6 @@ pub struct LocalCrateContext<'tcx> {
previous_work_product: Option<WorkProduct>,
codegen_unit: CodegenUnit<'tcx>,
needs_unwind_cleanup_cache: RefCell<FxHashMap<Ty<'tcx>, bool>>,
fn_pointer_shims: RefCell<FxHashMap<Ty<'tcx>, ValueRef>>,
drop_glues: RefCell<FxHashMap<DropGlueKind<'tcx>, (ValueRef, FnType)>>,
/// Cache instances of monomorphic and polymorphic items
instances: RefCell<FxHashMap<Instance<'tcx>, ValueRef>>,
/// Cache generated vtables
@ -546,16 +543,6 @@ impl<'b, 'tcx> SharedCrateContext<'b, 'tcx> {
&self.translation_items
}
/// Given the def-id of some item that has no type parameters, make
/// a suitable "empty substs" for it.
pub fn empty_substs_for_def_id(&self, item_def_id: DefId) -> &'tcx Substs<'tcx> {
Substs::for_item(self.tcx(), item_def_id,
|_, _| self.tcx().mk_region(ty::ReErased),
|_, _| {
bug!("empty_substs_for_def_id: {:?} has type parameters", item_def_id)
})
}
pub fn metadata_symbol_name(&self) -> String {
format!("rust_metadata_{}_{}",
self.link_meta().crate_name,
@ -597,8 +584,6 @@ impl<'tcx> LocalCrateContext<'tcx> {
previous_work_product: previous_work_product,
codegen_unit: codegen_unit,
needs_unwind_cleanup_cache: RefCell::new(FxHashMap()),
fn_pointer_shims: RefCell::new(FxHashMap()),
drop_glues: RefCell::new(FxHashMap()),
instances: RefCell::new(FxHashMap()),
vtables: RefCell::new(FxHashMap()),
const_cstr_cache: RefCell::new(FxHashMap()),
@ -733,15 +718,6 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> {
&self.local().needs_unwind_cleanup_cache
}
pub fn fn_pointer_shims(&self) -> &RefCell<FxHashMap<Ty<'tcx>, ValueRef>> {
&self.local().fn_pointer_shims
}
pub fn drop_glues<'a>(&'a self)
-> &'a RefCell<FxHashMap<DropGlueKind<'tcx>, (ValueRef, FnType)>> {
&self.local().drop_glues
}
pub fn instances<'a>(&'a self) -> &'a RefCell<FxHashMap<Instance<'tcx>, ValueRef>> {
&self.local().instances
}
@ -886,7 +862,7 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> {
/// Given the def-id of some item that has no type parameters, make
/// a suitable "empty substs" for it.
pub fn empty_substs_for_def_id(&self, item_def_id: DefId) -> &'tcx Substs<'tcx> {
self.shared().empty_substs_for_def_id(item_def_id)
self.tcx().empty_substs_for_def_id(item_def_id)
}
/// Generate a new symbol name with the given prefix. This symbol name must
@ -930,7 +906,7 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> {
let tcx = self.tcx();
let llfn = match tcx.lang_items.eh_personality() {
Some(def_id) if !base::wants_msvc_seh(self.sess()) => {
Callee::def(self, def_id, tcx.intern_substs(&[])).reify(self)
callee::resolve_and_get_fn(self, def_id, tcx.intern_substs(&[]))
}
_ => {
let name = if base::wants_msvc_seh(self.sess()) {
@ -958,7 +934,7 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> {
let tcx = self.tcx();
assert!(self.sess().target.target.options.custom_unwind_resume);
if let Some(def_id) = tcx.lang_items.eh_unwind_resume() {
let llfn = Callee::def(self, def_id, tcx.intern_substs(&[])).reify(self);
let llfn = callee::resolve_and_get_fn(self, def_id, tcx.intern_substs(&[]));
unwresume.set(Some(llfn));
return llfn;
}

View File

@ -205,7 +205,7 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
return FunctionDebugContext::DebugInfoDisabled;
}
for attr in cx.tcx().get_attrs(instance.def).iter() {
for attr in instance.def.attrs(cx.tcx()).iter() {
if attr.check_name("no_debug") {
return FunctionDebugContext::FunctionWithoutDebugInfo;
}
@ -229,11 +229,11 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
};
// Find the enclosing function, in case this is a closure.
let def_key = cx.tcx().def_key(instance.def);
let def_key = cx.tcx().def_key(instance.def_id());
let mut name = def_key.disambiguated_data.data.to_string();
let name_len = name.len();
let fn_def_id = cx.tcx().closure_base_def_id(instance.def);
let fn_def_id = cx.tcx().closure_base_def_id(instance.def_id());
// Get_template_parameters() will append a `<...>` clause to the function
// name if necessary.
@ -246,11 +246,11 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
&mut name);
// Build the linkage_name out of the item path and "template" parameters.
let linkage_name = mangled_name_of_item(cx, instance.def, &name[name_len..]);
let linkage_name = mangled_name_of_item(cx, instance.def_id(), &name[name_len..]);
let scope_line = span_start(cx, span).line;
let local_id = cx.tcx().hir.as_local_node_id(instance.def);
let local_id = cx.tcx().hir.as_local_node_id(instance.def_id());
let is_local_to_unit = local_id.map_or(false, |id| is_node_local_to_unit(cx, id));
let function_name = CString::new(name).unwrap();
@ -394,7 +394,7 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
// First, let's see if this is a method within an inherent impl. Because
// if yes, we want to make the result subroutine DIE a child of the
// subroutine's self-type.
let self_type = cx.tcx().impl_of_method(instance.def).and_then(|impl_def_id| {
let self_type = cx.tcx().impl_of_method(instance.def_id()).and_then(|impl_def_id| {
// If the method does *not* belong to a trait, proceed
if cx.tcx().trait_id_of_impl(impl_def_id).is_none() {
let impl_self_ty =
@ -417,9 +417,9 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
self_type.unwrap_or_else(|| {
namespace::item_namespace(cx, DefId {
krate: instance.def.krate,
krate: instance.def_id().krate,
index: cx.tcx()
.def_key(instance.def)
.def_key(instance.def_id())
.parent
.expect("get_containing_scope: missing parent?")
})

View File

@ -13,70 +13,35 @@
// Code relating to drop glue.
use std;
use std::iter;
use llvm;
use llvm::{ValueRef, get_param};
use middle::lang_items::BoxFreeFnLangItem;
use rustc::ty::subst::{Substs};
use llvm::{ValueRef};
use rustc::traits;
use rustc::ty::{self, layout, AdtDef, AdtKind, Ty, TypeFoldable};
use rustc::ty::subst::Kind;
use rustc::mir::tcx::LvalueTy;
use mir::lvalue::LvalueRef;
use adt;
use base::*;
use callee::Callee;
use cleanup::CleanupScope;
use rustc::ty::{self, Ty, TypeFoldable};
use common::*;
use machine::*;
use meth;
use monomorphize;
use trans_item::TransItem;
use tvec;
use type_of::{type_of, sizing_type_of, align_of};
use type_::Type;
use type_of::{sizing_type_of, align_of};
use value::Value;
use Disr;
use builder::Builder;
use syntax_pos::DUMMY_SP;
use mir::lvalue::Alignment;
pub fn trans_exchange_free_ty<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, ptr: LvalueRef<'tcx>) {
let content_ty = ptr.ty.to_ty(bcx.tcx());
let def_id = langcall(bcx.tcx(), None, "", BoxFreeFnLangItem);
let substs = bcx.tcx().mk_substs(iter::once(Kind::from(content_ty)));
let callee = Callee::def(bcx.ccx, def_id, substs);
let fn_ty = callee.direct_fn_type(bcx.ccx, &[]);
let llret = bcx.call(callee.reify(bcx.ccx),
&[ptr.llval, ptr.llextra][..1 + ptr.has_extra() as usize], None);
fn_ty.apply_attrs_callsite(llret);
}
pub fn get_drop_glue_type<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Ty<'tcx> {
pub fn needs_drop_glue<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, t: Ty<'tcx>) -> bool {
assert!(t.is_normalized_for_trans());
let t = scx.tcx().erase_regions(&t);
// Even if there is no dtor for t, there might be one deeper down and we
// might need to pass in the vtable ptr.
if !scx.type_is_sized(t) {
return t;
}
// FIXME (#22815): note that type_needs_drop conservatively
// approximates in some cases and may say a type expression
// requires drop glue when it actually does not.
//
// (In this case it is not clear whether any harm is done, i.e.
// erroneously returning `t` in some cases where we could have
// returned `tcx.types.i8` does not appear unsound. The impact on
// erroneously returning `true` in some cases where we could have
// returned `false` does not appear unsound. The impact on
// code quality is unknown at this time.)
if !scx.type_needs_drop(t) {
return scx.tcx().types.i8;
return false;
}
match t.sty {
ty::TyAdt(def, _) if def.is_box() => {
@ -86,215 +51,19 @@ pub fn get_drop_glue_type<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, t: Ty<'t
let layout = t.layout(&infcx).unwrap();
if layout.size(&scx.tcx().data_layout).bytes() == 0 {
// `Box<ZeroSizeType>` does not allocate.
scx.tcx().types.i8
false
} else {
t
true
}
})
} else {
t
true
}
}
_ => t
_ => true
}
}
fn drop_ty<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, args: LvalueRef<'tcx>) {
call_drop_glue(bcx, args, false, None)
}
pub fn call_drop_glue<'a, 'tcx>(
bcx: &Builder<'a, 'tcx>,
mut args: LvalueRef<'tcx>,
skip_dtor: bool,
funclet: Option<&'a Funclet>,
) {
let t = args.ty.to_ty(bcx.tcx());
// NB: v is an *alias* of type t here, not a direct value.
debug!("call_drop_glue(t={:?}, skip_dtor={})", t, skip_dtor);
if bcx.ccx.shared().type_needs_drop(t) {
let ccx = bcx.ccx;
let g = if skip_dtor {
DropGlueKind::TyContents(t)
} else {
DropGlueKind::Ty(t)
};
let glue = get_drop_glue_core(ccx, g);
let glue_type = get_drop_glue_type(ccx.shared(), t);
if glue_type != t {
args.llval = bcx.pointercast(args.llval, type_of(ccx, glue_type).ptr_to());
}
// No drop-hint ==> call standard drop glue
bcx.call(glue, &[args.llval, args.llextra][..1 + args.has_extra() as usize],
funclet.map(|b| b.bundle()));
}
}
pub fn get_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> ValueRef {
get_drop_glue_core(ccx, DropGlueKind::Ty(t))
}
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub enum DropGlueKind<'tcx> {
/// The normal path; runs the dtor, and then recurs on the contents
Ty(Ty<'tcx>),
/// Skips the dtor, if any, for ty; drops the contents directly.
/// Note that the dtor is only skipped at the most *shallow*
/// level, namely, an `impl Drop for Ty` itself. So, for example,
/// if Ty is Newtype(S) then only the Drop impl for Newtype itself
/// will be skipped, while the Drop impl for S, if any, will be
/// invoked.
TyContents(Ty<'tcx>),
}
impl<'tcx> DropGlueKind<'tcx> {
pub fn ty(&self) -> Ty<'tcx> {
match *self { DropGlueKind::Ty(t) | DropGlueKind::TyContents(t) => t }
}
pub fn map_ty<F>(&self, mut f: F) -> DropGlueKind<'tcx> where F: FnMut(Ty<'tcx>) -> Ty<'tcx>
{
match *self {
DropGlueKind::Ty(t) => DropGlueKind::Ty(f(t)),
DropGlueKind::TyContents(t) => DropGlueKind::TyContents(f(t)),
}
}
}
fn get_drop_glue_core<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKind<'tcx>) -> ValueRef {
let g = g.map_ty(|t| get_drop_glue_type(ccx.shared(), t));
match ccx.drop_glues().borrow().get(&g) {
Some(&(glue, _)) => glue,
None => {
bug!("Could not find drop glue for {:?} -- {} -- {}.",
g,
TransItem::DropGlue(g).to_raw_string(),
ccx.codegen_unit().name());
}
}
}
pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKind<'tcx>) {
assert_eq!(g.ty(), get_drop_glue_type(ccx.shared(), g.ty()));
let (llfn, _) = ccx.drop_glues().borrow().get(&g).unwrap().clone();
let mut bcx = Builder::new_block(ccx, llfn, "entry-block");
ccx.stats().n_glues_created.set(ccx.stats().n_glues_created.get() + 1);
// All glue functions take values passed *by alias*; this is a
// requirement since in many contexts glue is invoked indirectly and
// the caller has no idea if it's dealing with something that can be
// passed by value.
//
// llfn is expected be declared to take a parameter of the appropriate
// type, so we don't need to explicitly cast the function parameter.
// NB: v0 is an *alias* of type t here, not a direct value.
// Only drop the value when it ... well, we used to check for
// non-null, (and maybe we need to continue doing so), but we now
// must definitely check for special bit-patterns corresponding to
// the special dtor markings.
let t = g.ty();
let value = get_param(llfn, 0);
let ptr = if ccx.shared().type_is_sized(t) {
LvalueRef::new_sized_ty(value, t, Alignment::AbiAligned)
} else {
LvalueRef::new_unsized_ty(value, get_param(llfn, 1), t, Alignment::AbiAligned)
};
let skip_dtor = match g {
DropGlueKind::Ty(_) => false,
DropGlueKind::TyContents(_) => true
};
let bcx = match t.sty {
ty::TyAdt(def, _) if def.is_box() => {
// Support for Box is built-in as yet and its drop glue is special
// despite having a dummy Drop impl in the library.
assert!(!skip_dtor);
let content_ty = t.boxed_ty();
let ptr = if !bcx.ccx.shared().type_is_sized(content_ty) {
let llbox = bcx.load(get_dataptr(&bcx, ptr.llval), None);
let info = bcx.load(get_meta(&bcx, ptr.llval), None);
LvalueRef::new_unsized_ty(llbox, info, content_ty, Alignment::AbiAligned)
} else {
LvalueRef::new_sized_ty(
bcx.load(ptr.llval, None),
content_ty, Alignment::AbiAligned)
};
drop_ty(&bcx, ptr);
trans_exchange_free_ty(&bcx, ptr);
bcx
}
ty::TyDynamic(..) => {
// No support in vtable for distinguishing destroying with
// versus without calling Drop::drop. Assert caller is
// okay with always calling the Drop impl, if any.
assert!(!skip_dtor);
let dtor = bcx.load(ptr.llextra, None);
bcx.call(dtor, &[ptr.llval], None);
bcx
}
ty::TyAdt(def, ..) if def.has_dtor(bcx.tcx()) && !skip_dtor => {
let shallow_drop = def.is_union();
let tcx = bcx.tcx();
let def = t.ty_adt_def().unwrap();
// Be sure to put the contents into a scope so we can use an invoke
// instruction to call the user destructor but still call the field
// destructors if the user destructor panics.
//
// FIXME (#14875) panic-in-drop semantics might be unsupported; we
// might well consider changing below to more direct code.
// Issue #23611: schedule cleanup of contents, re-inspecting the
// discriminant (if any) in case of variant swap in drop code.
let contents_scope = if !shallow_drop {
CleanupScope::schedule_drop_adt_contents(&bcx, ptr)
} else {
CleanupScope::noop()
};
let trait_ref = ty::Binder(ty::TraitRef {
def_id: tcx.lang_items.drop_trait().unwrap(),
substs: tcx.mk_substs_trait(t, &[])
});
let vtbl = match fulfill_obligation(bcx.ccx.shared(), DUMMY_SP, trait_ref) {
traits::VtableImpl(data) => data,
_ => bug!("dtor for {:?} is not an impl???", t)
};
let dtor_did = def.destructor(tcx).unwrap().did;
let callee = Callee::def(bcx.ccx, dtor_did, vtbl.substs);
let fn_ty = callee.direct_fn_type(bcx.ccx, &[]);
let llret;
let args = &[ptr.llval, ptr.llextra][..1 + ptr.has_extra() as usize];
if let Some(landing_pad) = contents_scope.landing_pad {
let normal_bcx = bcx.build_sibling_block("normal-return");
llret = bcx.invoke(callee.reify(ccx), args, normal_bcx.llbb(), landing_pad, None);
bcx = normal_bcx;
} else {
llret = bcx.call(callee.reify(bcx.ccx), args, None);
}
fn_ty.apply_attrs_callsite(llret);
contents_scope.trans(&bcx);
bcx
}
ty::TyAdt(def, ..) if def.is_union() => {
bcx
}
_ => {
if bcx.ccx.shared().type_needs_drop(t) {
drop_structural_ty(bcx, ptr)
} else {
bcx
}
}
};
bcx.ret_void();
}
pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, info: ValueRef)
-> (ValueRef, ValueRef) {
debug!("calculate size of DST: {}; with lost info: {:?}",
@ -381,20 +150,8 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf
(size, align)
}
ty::TyDynamic(..) => {
// info points to the vtable and the second entry in the vtable is the
// dynamic size of the object.
let info = bcx.pointercast(info, Type::int(bcx.ccx).ptr_to());
let size_ptr = bcx.gepi(info, &[1]);
let align_ptr = bcx.gepi(info, &[2]);
let size = bcx.load(size_ptr, None);
let align = bcx.load(align_ptr, None);
// Vtable loads are invariant
bcx.set_invariant_load(size);
bcx.set_invariant_load(align);
(size, align)
// load size/align from vtable
(meth::SIZE.get_usize(bcx, info), meth::ALIGN.get_usize(bcx, info))
}
ty::TySlice(_) | ty::TyStr => {
let unit_ty = t.sequence_element_type(bcx.tcx());
@ -409,141 +166,3 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf
_ => bug!("Unexpected unsized type, found {}", t)
}
}
// Iterates through the elements of a structural type, dropping them.
fn drop_structural_ty<'a, 'tcx>(
cx: Builder<'a, 'tcx>,
mut ptr: LvalueRef<'tcx>
) -> Builder<'a, 'tcx> {
fn iter_variant_fields<'a, 'tcx>(
cx: &'a Builder<'a, 'tcx>,
av: LvalueRef<'tcx>,
adt_def: &'tcx AdtDef,
variant_index: usize,
substs: &'tcx Substs<'tcx>
) {
let variant = &adt_def.variants[variant_index];
let tcx = cx.tcx();
for (i, field) in variant.fields.iter().enumerate() {
let arg = monomorphize::field_ty(tcx, substs, field);
let (field_ptr, align) = av.trans_field_ptr(&cx, i);
drop_ty(&cx, LvalueRef::new_sized_ty(field_ptr, arg, align));
}
}
let mut cx = cx;
let t = ptr.ty.to_ty(cx.tcx());
match t.sty {
ty::TyClosure(def_id, substs) => {
for (i, upvar_ty) in substs.upvar_tys(def_id, cx.tcx()).enumerate() {
let (llupvar, align) = ptr.trans_field_ptr(&cx, i);
drop_ty(&cx, LvalueRef::new_sized_ty(llupvar, upvar_ty, align));
}
}
ty::TyArray(_, n) => {
let base = get_dataptr(&cx, ptr.llval);
let len = C_uint(cx.ccx, n);
let unit_ty = t.sequence_element_type(cx.tcx());
cx = tvec::slice_for_each(&cx, base, unit_ty, len,
|bb, vv| drop_ty(bb, LvalueRef::new_sized_ty(vv, unit_ty, ptr.alignment)));
}
ty::TySlice(_) | ty::TyStr => {
let unit_ty = t.sequence_element_type(cx.tcx());
cx = tvec::slice_for_each(&cx, ptr.llval, unit_ty, ptr.llextra,
|bb, vv| drop_ty(bb, LvalueRef::new_sized_ty(vv, unit_ty, ptr.alignment)));
}
ty::TyTuple(ref args, _) => {
for (i, arg) in args.iter().enumerate() {
let (llfld_a, align) = ptr.trans_field_ptr(&cx, i);
drop_ty(&cx, LvalueRef::new_sized_ty(llfld_a, *arg, align));
}
}
ty::TyAdt(adt, substs) => match adt.adt_kind() {
AdtKind::Struct => {
for (i, field) in adt.variants[0].fields.iter().enumerate() {
let field_ty = monomorphize::field_ty(cx.tcx(), substs, field);
let (llval, align) = ptr.trans_field_ptr(&cx, i);
let field_ptr = if cx.ccx.shared().type_is_sized(field_ty) {
LvalueRef::new_sized_ty(llval, field_ty, align)
} else {
LvalueRef::new_unsized_ty(llval, ptr.llextra, field_ty, align)
};
drop_ty(&cx, field_ptr);
}
}
AdtKind::Union => {
bug!("Union in `glue::drop_structural_ty`");
}
AdtKind::Enum => {
let n_variants = adt.variants.len();
// NB: we must hit the discriminant first so that structural
// comparison know not to proceed when the discriminants differ.
// Obtain a representation of the discriminant sufficient to translate
// destructuring; this may or may not involve the actual discriminant.
let l = cx.ccx.layout_of(t);
match *l {
layout::Univariant { .. } |
layout::UntaggedUnion { .. } => {
if n_variants != 0 {
assert!(n_variants == 1);
ptr.ty = LvalueTy::Downcast {
adt_def: adt,
substs: substs,
variant_index: 0,
};
iter_variant_fields(&cx, ptr, &adt, 0, substs);
}
}
layout::CEnum { .. } |
layout::General { .. } |
layout::RawNullablePointer { .. } |
layout::StructWrappedNullablePointer { .. } => {
let lldiscrim_a = adt::trans_get_discr(
&cx, t, ptr.llval, ptr.alignment, None, false);
// Create a fall-through basic block for the "else" case of
// the switch instruction we're about to generate. Note that
// we do **not** use an Unreachable instruction here, even
// though most of the time this basic block will never be hit.
//
// When an enum is dropped it's contents are currently
// overwritten to DTOR_DONE, which means the discriminant
// could have changed value to something not within the actual
// range of the discriminant. Currently this function is only
// used for drop glue so in this case we just return quickly
// from the outer function, and any other use case will only
// call this for an already-valid enum in which case the `ret
// void` will never be hit.
let ret_void_cx = cx.build_sibling_block("enum-iter-ret-void");
ret_void_cx.ret_void();
let llswitch = cx.switch(lldiscrim_a, ret_void_cx.llbb(), n_variants);
let next_cx = cx.build_sibling_block("enum-iter-next");
for (i, discr) in adt.discriminants(cx.tcx()).enumerate() {
let variant_cx_name = format!("enum-iter-variant-{}", i);
let variant_cx = cx.build_sibling_block(&variant_cx_name);
let case_val = adt::trans_case(&cx, t, Disr::from(discr));
variant_cx.add_case(llswitch, case_val, variant_cx.llbb());
ptr.ty = LvalueTy::Downcast {
adt_def: adt,
substs: substs,
variant_index: i,
};
iter_variant_fields(&variant_cx, ptr, &adt, i, substs);
variant_cx.br(next_cx.llbb());
}
cx = next_cx;
}
_ => bug!("{} is not an enum.", t),
}
}
},
_ => {
cx.sess().unimpl(&format!("type in drop_structural_ty: {}", t))
}
}
return cx;
}

View File

@ -28,6 +28,7 @@
#![feature(box_syntax)]
#![feature(const_fn)]
#![feature(custom_attribute)]
#![cfg_attr(stage0, feature(field_init_shorthand))]
#![allow(unused_attributes)]
#![feature(i128_type)]
#![feature(libc)]
@ -112,7 +113,6 @@ mod cabi_x86;
mod cabi_x86_64;
mod cabi_x86_win64;
mod callee;
mod cleanup;
mod collector;
mod common;
mod consts;

View File

@ -8,105 +8,51 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use attributes;
use llvm::{ValueRef, get_params};
use llvm::ValueRef;
use rustc::traits;
use callee::{Callee, CalleeData};
use callee;
use common::*;
use builder::Builder;
use consts;
use declare;
use glue;
use machine;
use monomorphize::Instance;
use monomorphize;
use type_::Type;
use type_of::*;
use value::Value;
use rustc::ty;
// drop_glue pointer, size, align.
const VTABLE_OFFSET: usize = 3;
#[derive(Copy, Clone, Debug)]
pub struct VirtualIndex(usize);
/// Extracts a method from a trait object's vtable, at the specified index.
pub fn get_virtual_method<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
llvtable: ValueRef,
vtable_index: usize) -> ValueRef {
// Load the data pointer from the object.
debug!("get_virtual_method(vtable_index={}, llvtable={:?})",
vtable_index, Value(llvtable));
pub const DESTRUCTOR: VirtualIndex = VirtualIndex(0);
pub const SIZE: VirtualIndex = VirtualIndex(1);
pub const ALIGN: VirtualIndex = VirtualIndex(2);
let ptr = bcx.load_nonnull(bcx.gepi(llvtable, &[vtable_index + VTABLE_OFFSET]), None);
// Vtable loads are invariant
bcx.set_invariant_load(ptr);
ptr
}
/// Generate a shim function that allows an object type like `SomeTrait` to
/// implement the type `SomeTrait`. Imagine a trait definition:
///
/// trait SomeTrait { fn get(&self) -> i32; ... }
///
/// And a generic bit of code:
///
/// fn foo<T:SomeTrait>(t: &T) {
/// let x = SomeTrait::get;
/// x(t)
/// }
///
/// What is the value of `x` when `foo` is invoked with `T=SomeTrait`?
/// The answer is that it is a shim function generated by this routine:
///
/// fn shim(t: &SomeTrait) -> i32 {
/// // ... call t.get() virtually ...
/// }
///
/// In fact, all virtual calls can be thought of as normal trait calls
/// that go through this shim function.
pub fn trans_object_shim<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>,
callee: Callee<'tcx>)
-> ValueRef {
debug!("trans_object_shim({:?})", callee);
let function_name = match callee.ty.sty {
ty::TyFnDef(def_id, substs, _) => {
let instance = Instance::new(def_id, substs);
instance.symbol_name(ccx.shared())
}
_ => bug!()
};
let llfn = declare::define_internal_fn(ccx, &function_name, callee.ty);
attributes::set_frame_pointer_elimination(ccx, llfn);
let bcx = Builder::new_block(ccx, llfn, "entry-block");
let mut llargs = get_params(llfn);
let fn_ret = callee.ty.fn_ret();
let fn_ty = callee.direct_fn_type(ccx, &[]);
let fn_ptr = match callee.data {
CalleeData::Virtual(idx) => {
let fn_ptr = get_virtual_method(&bcx,
llargs.remove(fn_ty.ret.is_indirect() as usize + 1), idx);
let llty = fn_ty.llvm_type(bcx.ccx).ptr_to();
bcx.pointercast(fn_ptr, llty)
},
_ => bug!("trans_object_shim called with non-virtual callee"),
};
let llret = bcx.call(fn_ptr, &llargs, None);
fn_ty.apply_attrs_callsite(llret);
if fn_ret.0.is_never() {
bcx.unreachable();
} else {
if fn_ty.ret.is_indirect() || fn_ty.ret.is_ignore() {
bcx.ret_void();
} else {
bcx.ret(llret);
}
impl<'a, 'tcx> VirtualIndex {
pub fn from_index(index: usize) -> Self {
VirtualIndex(index + 3)
}
llfn
pub fn get_fn(self, bcx: &Builder<'a, 'tcx>, llvtable: ValueRef) -> ValueRef {
// Load the data pointer from the object.
debug!("get_fn({:?}, {:?})", Value(llvtable), self);
let ptr = bcx.load_nonnull(bcx.gepi(llvtable, &[self.0]), None);
// Vtable loads are invariant
bcx.set_invariant_load(ptr);
ptr
}
pub fn get_usize(self, bcx: &Builder<'a, 'tcx>, llvtable: ValueRef) -> ValueRef {
// Load the data pointer from the object.
debug!("get_int({:?}, {:?})", Value(llvtable), self);
let llvtable = bcx.pointercast(llvtable, Type::int(bcx.ccx).ptr_to());
let ptr = bcx.load(bcx.gepi(llvtable, &[self.0]), None);
// Vtable loads are invariant
bcx.set_invariant_load(ptr);
ptr
}
}
/// Creates a dynamic vtable for the given type and vtable origin.
@ -139,8 +85,7 @@ pub fn get_vtable<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
let align = align_of(ccx, ty);
let mut components: Vec<_> = [
// Generate a destructor for the vtable.
glue::get_drop_glue(ccx, ty),
callee::get_fn(ccx, monomorphize::resolve_drop_in_place(ccx.shared(), ty)),
C_uint(ccx, size),
C_uint(ccx, align)
].iter().cloned().collect();
@ -149,7 +94,7 @@ pub fn get_vtable<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
let trait_ref = trait_ref.with_self_ty(tcx, ty);
let methods = traits::get_vtable_methods(tcx, trait_ref).map(|opt_mth| {
opt_mth.map_or(nullptr, |(def_id, substs)| {
Callee::def(ccx, def_id, substs).reify(ccx)
callee::resolve_and_get_fn(ccx, def_id, substs)
})
});
components.extend(methods);

View File

@ -16,15 +16,16 @@ use rustc::ty::{self, layout, TypeFoldable};
use rustc::mir;
use abi::{Abi, FnType, ArgType};
use base::{self, Lifetime};
use callee::{Callee, CalleeData, Fn, Intrinsic, NamedTupleConstructor, Virtual};
use callee;
use builder::Builder;
use common::{self, Funclet};
use common::{C_bool, C_str_slice, C_struct, C_u32, C_undef};
use common::{C_bool, C_str_slice, C_struct, C_u32, C_uint, C_undef};
use consts;
use machine::llalign_of_min;
use meth;
use monomorphize;
use tvec;
use type_of::{self, align_of};
use glue;
use type_::Type;
use rustc_data_structures::indexed_vec::IndexVec;
@ -208,21 +209,49 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
mir::TerminatorKind::Drop { ref location, target, unwind } => {
let ty = location.ty(&self.mir, bcx.tcx()).to_ty(bcx.tcx());
let ty = self.monomorphize(&ty);
let drop_fn = monomorphize::resolve_drop_in_place(bcx.ccx.shared(), ty);
// Double check for necessity to drop
if !bcx.ccx.shared().type_needs_drop(ty) {
if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def {
// we don't actually need to drop anything.
funclet_br(self, bcx, target);
return;
return
}
let mut lvalue = self.trans_lvalue(&bcx, location);
let drop_fn = glue::get_drop_glue(bcx.ccx, ty);
let drop_ty = glue::get_drop_glue_type(bcx.ccx.shared(), ty);
if bcx.ccx.shared().type_is_sized(ty) && drop_ty != ty {
lvalue.llval = bcx.pointercast(
lvalue.llval, type_of::type_of(bcx.ccx, drop_ty).ptr_to());
}
let args = &[lvalue.llval, lvalue.llextra][..1 + lvalue.has_extra() as usize];
let lvalue = self.trans_lvalue(&bcx, location);
let (drop_fn, need_extra) = match ty.sty {
ty::TyDynamic(..) => (meth::DESTRUCTOR.get_fn(&bcx, lvalue.llextra),
false),
ty::TyArray(ety, _) | ty::TySlice(ety) => {
// FIXME: handle panics
let drop_fn = monomorphize::resolve_drop_in_place(
bcx.ccx.shared(), ety);
let drop_fn = callee::get_fn(bcx.ccx, drop_fn);
let bcx = tvec::slice_for_each(
&bcx,
lvalue.project_index(&bcx, C_uint(bcx.ccx, 0u64)),
ety,
lvalue.len(bcx.ccx),
|bcx, llval, loop_bb| {
self.set_debug_loc(&bcx, terminator.source_info);
if let Some(unwind) = unwind {
bcx.invoke(
drop_fn,
&[llval],
loop_bb,
llblock(self, unwind),
cleanup_bundle
);
} else {
bcx.call(drop_fn, &[llval], cleanup_bundle);
bcx.br(loop_bb);
}
});
funclet_br(self, bcx, target);
return
}
_ => (callee::get_fn(bcx.ccx, drop_fn), lvalue.has_extra())
};
let args = &[lvalue.llval, lvalue.llextra][..1 + need_extra as usize];
if let Some(unwind) = unwind {
bcx.invoke(
drop_fn,
@ -340,9 +369,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
// Obtain the panic entry point.
let def_id = common::langcall(bcx.tcx(), Some(span), "", lang_item);
let callee = Callee::def(bcx.ccx, def_id,
bcx.ccx.empty_substs_for_def_id(def_id));
let llfn = callee.reify(bcx.ccx);
let instance = ty::Instance::mono(bcx.tcx(), def_id);
let llfn = callee::get_fn(bcx.ccx, instance);
// Translate the actual panic invoke/call.
if let Some(unwind) = cleanup {
@ -365,30 +393,30 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
// Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar.
let callee = self.trans_operand(&bcx, func);
let (mut callee, sig) = match callee.ty.sty {
let (instance, mut llfn, sig) = match callee.ty.sty {
ty::TyFnDef(def_id, substs, sig) => {
(Callee::def(bcx.ccx, def_id, substs), sig)
(Some(monomorphize::resolve(bcx.ccx.shared(), def_id, substs)),
None,
sig)
}
ty::TyFnPtr(sig) => {
(Callee {
data: Fn(callee.immediate()),
ty: callee.ty
}, sig)
(None,
Some(callee.immediate()),
sig)
}
_ => bug!("{} is not callable", callee.ty)
};
let def = instance.map(|i| i.def);
let sig = bcx.tcx().erase_late_bound_regions_and_normalize(&sig);
let abi = sig.abi;
// Handle intrinsics old trans wants Expr's for, ourselves.
let intrinsic = match (&callee.ty.sty, &callee.data) {
(&ty::TyFnDef(def_id, ..), &Intrinsic) => {
Some(bcx.tcx().item_name(def_id).as_str())
}
let intrinsic = match def {
Some(ty::InstanceDef::Intrinsic(def_id))
=> Some(bcx.tcx().item_name(def_id).as_str()),
_ => None
};
let mut intrinsic = intrinsic.as_ref().map(|s| &s[..]);
let intrinsic = intrinsic.as_ref().map(|s| &s[..]);
if intrinsic == Some("move_val_init") {
let &(_, target) = destination.as_ref().unwrap();
@ -412,27 +440,19 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let op_ty = op_arg.ty(&self.mir, bcx.tcx());
self.monomorphize(&op_ty)
}).collect::<Vec<_>>();
let fn_ty = callee.direct_fn_type(bcx.ccx, &extra_args);
if intrinsic == Some("drop_in_place") {
let &(_, target) = destination.as_ref().unwrap();
let ty = if let ty::TyFnDef(_, substs, _) = callee.ty.sty {
substs.type_at(0)
} else {
bug!("Unexpected ty: {}", callee.ty);
};
// Double check for necessity to drop
if !bcx.ccx.shared().type_needs_drop(ty) {
let fn_ty = match def {
Some(ty::InstanceDef::Virtual(..)) => {
FnType::new_vtable(bcx.ccx, sig, &extra_args)
}
Some(ty::InstanceDef::DropGlue(_, None)) => {
// empty drop glue - a nop.
let &(_, target) = destination.as_ref().unwrap();
funclet_br(self, bcx, target);
return;
}
let drop_fn = glue::get_drop_glue(bcx.ccx, ty);
let llty = fn_ty.llvm_type(bcx.ccx).ptr_to();
callee.data = Fn(bcx.pointercast(drop_fn, llty));
intrinsic = None;
}
_ => FnType::new(bcx.ccx, sig, &extra_args)
};
// The arguments we'll be passing. Plus one to account for outptr, if used.
let arg_count = fn_ty.args.len() + fn_ty.ret.is_indirect() as usize;
@ -440,12 +460,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
// Prepare the return value destination
let ret_dest = if let Some((ref dest, _)) = *destination {
let is_intrinsic = if let Intrinsic = callee.data {
true
} else {
false
};
self.make_return_dest(&bcx, dest, &fn_ty.ret, &mut llargs, is_intrinsic)
let is_intrinsic = intrinsic.is_some();
self.make_return_dest(&bcx, dest, &fn_ty.ret, &mut llargs,
is_intrinsic)
} else {
ReturnDest::Nothing
};
@ -483,56 +500,56 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let op = self.trans_operand(&bcx, arg);
self.trans_argument(&bcx, op, &mut llargs, &fn_ty,
&mut idx, &mut callee.data);
&mut idx, &mut llfn, &def);
}
if let Some(tup) = untuple {
self.trans_arguments_untupled(&bcx, tup, &mut llargs, &fn_ty,
&mut idx, &mut callee.data)
&mut idx, &mut llfn, &def)
}
let fn_ptr = match callee.data {
NamedTupleConstructor(_) => {
// FIXME translate this like mir::Rvalue::Aggregate.
callee.reify(bcx.ccx)
}
Intrinsic => {
use intrinsic::trans_intrinsic_call;
if intrinsic.is_some() && intrinsic != Some("drop_in_place") {
use intrinsic::trans_intrinsic_call;
let (dest, llargs) = match ret_dest {
_ if fn_ty.ret.is_indirect() => {
(llargs[0], &llargs[1..])
}
ReturnDest::Nothing => {
(C_undef(fn_ty.ret.original_ty.ptr_to()), &llargs[..])
}
ReturnDest::IndirectOperand(dst, _) |
ReturnDest::Store(dst) => (dst, &llargs[..]),
ReturnDest::DirectOperand(_) =>
bug!("Cannot use direct operand with an intrinsic call")
let (dest, llargs) = match ret_dest {
_ if fn_ty.ret.is_indirect() => {
(llargs[0], &llargs[1..])
}
ReturnDest::Nothing => {
(C_undef(fn_ty.ret.original_ty.ptr_to()), &llargs[..])
}
ReturnDest::IndirectOperand(dst, _) |
ReturnDest::Store(dst) => (dst, &llargs[..]),
ReturnDest::DirectOperand(_) =>
bug!("Cannot use direct operand with an intrinsic call")
};
let callee_ty = common::instance_ty(
bcx.ccx.shared(), instance.as_ref().unwrap());
trans_intrinsic_call(&bcx, callee_ty, &fn_ty, &llargs, dest,
terminator.source_info.span);
if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
// Make a fake operand for store_return
let op = OperandRef {
val: Ref(dst, Alignment::AbiAligned),
ty: sig.output(),
};
trans_intrinsic_call(&bcx, callee.ty, &fn_ty, &llargs, dest,
terminator.source_info.span);
if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
// Make a fake operand for store_return
let op = OperandRef {
val: Ref(dst, Alignment::AbiAligned),
ty: sig.output(),
};
self.store_return(&bcx, ret_dest, fn_ty.ret, op);
}
if let Some((_, target)) = *destination {
funclet_br(self, bcx, target);
} else {
bcx.unreachable();
}
return;
self.store_return(&bcx, ret_dest, fn_ty.ret, op);
}
Fn(f) => f,
Virtual(_) => bug!("Virtual fn ptr not extracted")
if let Some((_, target)) = *destination {
funclet_br(self, bcx, target);
} else {
bcx.unreachable();
}
return;
}
let fn_ptr = match (llfn, instance) {
(Some(llfn), _) => llfn,
(None, Some(instance)) => callee::get_fn(bcx.ccx, instance),
_ => span_bug!(span, "no llfn for call"),
};
// Many different ways to call a function handled here
@ -582,16 +599,17 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
llargs: &mut Vec<ValueRef>,
fn_ty: &FnType,
next_idx: &mut usize,
callee: &mut CalleeData) {
llfn: &mut Option<ValueRef>,
def: &Option<ty::InstanceDef<'tcx>>) {
if let Pair(a, b) = op.val {
// Treat the values in a fat pointer separately.
if common::type_is_fat_ptr(bcx.ccx, op.ty) {
let (ptr, meta) = (a, b);
if *next_idx == 0 {
if let Virtual(idx) = *callee {
let llfn = meth::get_virtual_method(bcx, meta, idx);
if let Some(ty::InstanceDef::Virtual(_, idx)) = *def {
let llmeth = meth::VirtualIndex::from_index(idx).get_fn(bcx, meta);
let llty = fn_ty.llvm_type(bcx.ccx).ptr_to();
*callee = Fn(bcx.pointercast(llfn, llty));
*llfn = Some(bcx.pointercast(llmeth, llty));
}
}
@ -600,8 +618,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
// We won't be checking the type again.
ty: bcx.tcx().types.err
};
self.trans_argument(bcx, imm_op(ptr), llargs, fn_ty, next_idx, callee);
self.trans_argument(bcx, imm_op(meta), llargs, fn_ty, next_idx, callee);
self.trans_argument(bcx, imm_op(ptr), llargs, fn_ty, next_idx, llfn, def);
self.trans_argument(bcx, imm_op(meta), llargs, fn_ty, next_idx, llfn, def);
return;
}
}
@ -664,7 +682,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
llargs: &mut Vec<ValueRef>,
fn_ty: &FnType,
next_idx: &mut usize,
callee: &mut CalleeData) {
llfn: &mut Option<ValueRef>,
def: &Option<ty::InstanceDef<'tcx>>) {
let tuple = self.trans_operand(bcx, operand);
let arg_types = match tuple.ty.sty {
@ -690,7 +709,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
val: val,
ty: ty
};
self.trans_argument(bcx, op, llargs, fn_ty, next_idx, callee);
self.trans_argument(bcx, op, llargs, fn_ty, next_idx, llfn, def);
}
}
@ -712,7 +731,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
val: Immediate(elem),
ty: ty
};
self.trans_argument(bcx, op, llargs, fn_ty, next_idx, callee);
self.trans_argument(bcx, op, llargs, fn_ty, next_idx, llfn, def);
}
}
Pair(a, b) => {
@ -728,7 +747,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
val: Immediate(elem),
ty: ty
};
self.trans_argument(bcx, op, llargs, fn_ty, next_idx, callee);
self.trans_argument(bcx, op, llargs, fn_ty, next_idx, llfn, def);
}
}
}
@ -756,14 +775,18 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
return block;
}
let block = self.blocks[target_bb];
let landing_pad = self.landing_pad_uncached(block);
self.landing_pads[target_bb] = Some(landing_pad);
landing_pad
}
fn landing_pad_uncached(&mut self, target_bb: BasicBlockRef) -> BasicBlockRef {
if base::wants_msvc_seh(self.ccx.sess()) {
return self.blocks[target_bb];
return target_bb;
}
let target = self.get_builder(target_bb);
let bcx = self.new_block("cleanup");
self.landing_pads[target_bb] = Some(bcx.llbb());
let ccx = bcx.ccx;
let llpersonality = self.ccx.eh_personality();
@ -772,7 +795,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
bcx.set_cleanup(llretval);
let slot = self.get_personality_slot(&bcx);
bcx.store(llretval, slot, None);
bcx.br(target.llbb());
bcx.br(target_bb);
bcx.llbb()
}

View File

@ -23,14 +23,14 @@ use rustc::ty::cast::{CastTy, IntTy};
use rustc::ty::subst::{Kind, Substs, Subst};
use rustc_data_structures::indexed_vec::{Idx, IndexVec};
use {abi, adt, base, Disr, machine};
use callee::Callee;
use callee;
use builder::Builder;
use common::{self, CrateContext, const_get_elt, val_ty};
use common::{C_array, C_bool, C_bytes, C_floating_f64, C_integral, C_big_integral};
use common::{C_null, C_struct, C_str_slice, C_undef, C_uint, C_vector, is_undef};
use common::const_to_opt_u128;
use consts;
use monomorphize::{self, Instance};
use monomorphize;
use type_of;
use type_::Type;
use value::Value;
@ -245,11 +245,12 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
}
fn trans_def(ccx: &'a CrateContext<'a, 'tcx>,
instance: Instance<'tcx>,
def_id: DefId,
substs: &'tcx Substs<'tcx>,
args: IndexVec<mir::Local, Const<'tcx>>)
-> Result<Const<'tcx>, ConstEvalErr<'tcx>> {
let instance = instance.resolve_const(ccx.shared());
let mir = ccx.tcx().item_mir(instance.def);
let instance = monomorphize::resolve(ccx.shared(), def_id, substs);
let mir = ccx.tcx().instance_mir(instance.def);
MirConstContext::new(ccx, &mir, instance.substs, args).trans()
}
@ -332,10 +333,8 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
mir::TerminatorKind::Call { ref func, ref args, ref destination, .. } => {
let fn_ty = func.ty(self.mir, tcx);
let fn_ty = self.monomorphize(&fn_ty);
let instance = match fn_ty.sty {
ty::TyFnDef(def_id, substs, _) => {
Instance::new(def_id, substs)
}
let (def_id, substs) = match fn_ty.sty {
ty::TyFnDef(def_id, substs, _) => (def_id, substs),
_ => span_bug!(span, "calling {:?} (of type {}) in constant",
func, fn_ty)
};
@ -348,7 +347,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
}
}
if let Some((ref dest, target)) = *destination {
match MirConstContext::trans_def(self.ccx, instance, const_args) {
match MirConstContext::trans_def(self.ccx, def_id, substs, const_args) {
Ok(value) => self.store(dest, value, span),
Err(err) => if failure.is_ok() { failure = Err(err); }
}
@ -485,8 +484,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
}
let substs = self.monomorphize(&substs);
let instance = Instance::new(def_id, substs);
MirConstContext::trans_def(self.ccx, instance, IndexVec::new())
MirConstContext::trans_def(self.ccx, def_id, substs, IndexVec::new())
}
mir::Literal::Promoted { index } => {
let mir = &self.mir.promoted[index];
@ -567,8 +565,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
mir::CastKind::ReifyFnPointer => {
match operand.ty.sty {
ty::TyFnDef(def_id, substs, _) => {
Callee::def(self.ccx, def_id, substs)
.reify(self.ccx)
callee::resolve_and_get_fn(self.ccx, def_id, substs)
}
_ => {
span_bug!(span, "{} cannot be reified to a fn ptr",
@ -588,10 +585,10 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
// Now create its substs [Closure, Tuple]
let input = tcx.closure_type(def_id)
.subst(tcx, substs.substs).input(0);
let substs = tcx.mk_substs([operand.ty, input.skip_binder()]
let input = tcx.erase_late_bound_regions_and_normalize(&input);
let substs = tcx.mk_substs([operand.ty, input]
.iter().cloned().map(Kind::from));
Callee::def(self.ccx, call_once, substs)
.reify(self.ccx)
callee::resolve_and_get_fn(self.ccx, call_once, substs)
}
_ => {
bug!("{} cannot be cast to a fn ptr", operand.ty)
@ -935,8 +932,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
}
let substs = self.monomorphize(&substs);
let instance = Instance::new(def_id, substs);
MirConstContext::trans_def(bcx.ccx, instance, IndexVec::new())
MirConstContext::trans_def(bcx.ccx, def_id, substs, IndexVec::new())
}
mir::Literal::Promoted { index } => {
let mir = &self.mir.promoted[index];
@ -964,8 +960,8 @@ pub fn trans_static_initializer<'a, 'tcx>(
def_id: DefId)
-> Result<ValueRef, ConstEvalErr<'tcx>>
{
let instance = Instance::mono(ccx.shared(), def_id);
MirConstContext::trans_def(ccx, instance, IndexVec::new()).map(|c| c.llval)
MirConstContext::trans_def(ccx, def_id, Substs::empty(), IndexVec::new())
.map(|c| c.llval)
}
/// Construct a constant value, suitable for initializing a

View File

@ -27,7 +27,6 @@ use std::ptr;
use std::ops;
use super::{MirContext, LocalRef};
use super::operand::OperandValue;
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum Alignment {
@ -95,16 +94,6 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
LvalueRef::new_sized(llval, LvalueTy::from_ty(ty), alignment)
}
pub fn new_unsized_ty(llval: ValueRef, llextra: ValueRef, ty: Ty<'tcx>, alignment: Alignment)
-> LvalueRef<'tcx> {
LvalueRef {
llval: llval,
llextra: llextra,
ty: LvalueTy::from_ty(ty),
alignment: alignment,
}
}
pub fn alloca(bcx: &Builder<'a, 'tcx>, ty: Ty<'tcx>, name: &str) -> LvalueRef<'tcx> {
debug!("alloca({:?}: {:?})", name, ty);
let tmp = bcx.alloca(type_of::type_of(bcx.ccx, ty), name);
@ -279,6 +268,16 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
_ => bug!("element access in type without elements: {} represented as {:#?}", t, l)
}
}
/// Computes the address of the element at `llindex` within this
/// array/slice lvalue, using an inbounds GEP.
pub fn project_index(&self, bcx: &Builder<'a, 'tcx>, llindex: ValueRef) -> ValueRef {
    // A slice lvalue already points directly at the element type, so a
    // single index suffices; an array needs a leading zero index to
    // first step "through" the array type itself.
    match self.ty.to_ty(bcx.tcx()).sty {
        ty::TySlice(_) => bcx.inbounds_gep(self.llval, &[llindex]),
        _ => {
            let zero = common::C_uint(bcx.ccx, 0u64);
            bcx.inbounds_gep(self.llval, &[zero, llindex])
        }
    }
}
}
impl<'a, 'tcx> MirContext<'a, 'tcx> {
@ -314,21 +313,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
elem: mir::ProjectionElem::Deref
}) => {
// Load the pointer from its location.
let ptr = self.trans_consume(bcx, base);
let projected_ty = LvalueTy::from_ty(ptr.ty)
.projection_ty(tcx, &mir::ProjectionElem::Deref);
let projected_ty = self.monomorphize(&projected_ty);
let (llptr, llextra) = match ptr.val {
OperandValue::Immediate(llptr) => (llptr, ptr::null_mut()),
OperandValue::Pair(llptr, llextra) => (llptr, llextra),
OperandValue::Ref(..) => bug!("Deref of by-Ref type {:?}", ptr.ty)
};
LvalueRef {
llval: llptr,
llextra: llextra,
ty: projected_ty,
alignment: Alignment::AbiAligned,
}
self.trans_consume(bcx, base).deref()
}
mir::Lvalue::Projection(ref projection) => {
let tr_base = self.trans_lvalue(bcx, &projection.base);
@ -336,17 +321,6 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let projected_ty = self.monomorphize(&projected_ty);
let align = tr_base.alignment;
let project_index = |llindex| {
let element = if let ty::TySlice(_) = tr_base.ty.to_ty(tcx).sty {
// Slices already point to the array element type.
bcx.inbounds_gep(tr_base.llval, &[llindex])
} else {
let zero = common::C_uint(bcx.ccx, 0u64);
bcx.inbounds_gep(tr_base.llval, &[zero, llindex])
};
(element, align)
};
let ((llprojected, align), llextra) = match projection.elem {
mir::ProjectionElem::Deref => bug!(),
mir::ProjectionElem::Field(ref field, _) => {
@ -359,13 +333,14 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
}
mir::ProjectionElem::Index(ref index) => {
let index = self.trans_operand(bcx, index);
(project_index(self.prepare_index(bcx, index.immediate())), ptr::null_mut())
let llindex = self.prepare_index(bcx, index.immediate());
((tr_base.project_index(bcx, llindex), align), ptr::null_mut())
}
mir::ProjectionElem::ConstantIndex { offset,
from_end: false,
min_length: _ } => {
let lloffset = C_uint(bcx.ccx, offset);
(project_index(lloffset), ptr::null_mut())
((tr_base.project_index(bcx, lloffset), align), ptr::null_mut())
}
mir::ProjectionElem::ConstantIndex { offset,
from_end: true,
@ -373,11 +348,10 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let lloffset = C_uint(bcx.ccx, offset);
let lllen = tr_base.len(bcx.ccx);
let llindex = bcx.sub(lllen, lloffset);
(project_index(llindex), ptr::null_mut())
((tr_base.project_index(bcx, llindex), align), ptr::null_mut())
}
mir::ProjectionElem::Subslice { from, to } => {
let llindex = C_uint(bcx.ccx, from);
let (llbase, align) = project_index(llindex);
let llbase = tr_base.project_index(bcx, C_uint(bcx.ccx, from));
let base_ty = tr_base.ty.to_ty(bcx.tcx());
match base_ty.sty {

View File

@ -9,9 +9,10 @@
// except according to those terms.
use llvm::ValueRef;
use rustc::ty::Ty;
use rustc::ty::{self, Ty};
use rustc::ty::layout::Layout;
use rustc::mir;
use rustc::mir::tcx::LvalueTy;
use rustc_data_structures::indexed_vec::Idx;
use base;
@ -22,9 +23,10 @@ use type_of;
use type_::Type;
use std::fmt;
use std::ptr;
use super::{MirContext, LocalRef};
use super::lvalue::Alignment;
use super::lvalue::{Alignment, LvalueRef};
/// The representation of a Rust value. The enum variant is in fact
/// uniquely determined by the value's type, but is kept as a
@ -86,6 +88,22 @@ impl<'a, 'tcx> OperandRef<'tcx> {
}
}
/// Turns a pointer-typed operand into the lvalue it points at.
///
/// Fat pointers (`Pair`) carry their extra word (length/vtable) into
/// `llextra`; thin pointers get a null `llextra`.
pub fn deref(self) -> LvalueRef<'tcx> {
    // `builtin_deref` must succeed here: the operand is being
    // dereferenced, so its type is some pointer/reference type.
    let pointee_ty = self.ty.builtin_deref(true, ty::NoPreference)
        .unwrap().ty;
    let (data_ptr, extra) = match self.val {
        OperandValue::Pair(data, extra) => (data, extra),
        OperandValue::Immediate(data) => (data, ptr::null_mut()),
        OperandValue::Ref(..) => bug!("Deref of by-Ref operand {:?}", self),
    };
    LvalueRef {
        llval: data_ptr,
        llextra: extra,
        ty: LvalueTy::from_ty(pointee_ty),
        alignment: Alignment::AbiAligned,
    }
}
/// If this operand is a Pair, we return an
/// Immediate aggregate with the two values.
pub fn pack_if_pair(mut self, bcx: &Builder<'a, 'tcx>) -> OperandRef<'tcx> {
@ -236,7 +254,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
}
mir::Operand::Constant(ref constant) => {
let val = self.trans_constant(bcx, constant);
let val = self.trans_constant(&bcx, constant);
let operand = val.to_operand(bcx.ccx);
if let OperandValue::Ref(ptr, align) = operand.val {
// If this is a OperandValue::Ref to an immediate constant, load it.

View File

@ -12,18 +12,18 @@ use llvm::{self, ValueRef};
use rustc::ty::{self, Ty};
use rustc::ty::cast::{CastTy, IntTy};
use rustc::ty::layout::Layout;
use rustc::ty::subst::{Kind, Subst};
use rustc::mir::tcx::LvalueTy;
use rustc::mir;
use middle::lang_items::ExchangeMallocFnLangItem;
use base;
use builder::Builder;
use callee::Callee;
use callee;
use common::{self, val_ty, C_bool, C_null, C_uint};
use common::{C_integral};
use adt;
use machine;
use monomorphize;
use type_::Type;
use type_of;
use tvec;
@ -98,8 +98,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let size = count.as_u64(bcx.tcx().sess.target.uint_type);
let size = C_uint(bcx.ccx, size);
let base = base::get_dataptr(&bcx, dest.llval);
tvec::slice_for_each(&bcx, base, tr_elem.ty, size, |bcx, llslot| {
tvec::slice_for_each(&bcx, base, tr_elem.ty, size, |bcx, llslot, loop_bb| {
self.store_operand(bcx, llslot, dest.alignment.to_align(), tr_elem);
bcx.br(loop_bb);
})
}
@ -183,8 +184,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
match operand.ty.sty {
ty::TyFnDef(def_id, substs, _) => {
OperandValue::Immediate(
Callee::def(bcx.ccx, def_id, substs)
.reify(bcx.ccx))
callee::resolve_and_get_fn(bcx.ccx, def_id, substs))
}
_ => {
bug!("{} cannot be reified to a fn ptr", operand.ty)
@ -194,20 +194,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
mir::CastKind::ClosureFnPointer => {
match operand.ty.sty {
ty::TyClosure(def_id, substs) => {
// Get the def_id for FnOnce::call_once
let fn_once = bcx.tcx().lang_items.fn_once_trait().unwrap();
let call_once = bcx.tcx()
.global_tcx().associated_items(fn_once)
.find(|it| it.kind == ty::AssociatedKind::Method)
.unwrap().def_id;
// Now create its substs [Closure, Tuple]
let input = bcx.tcx().closure_type(def_id)
.subst(bcx.tcx(), substs.substs).input(0);
let substs = bcx.tcx().mk_substs([operand.ty, input.skip_binder()]
.iter().cloned().map(Kind::from));
OperandValue::Immediate(
Callee::def(bcx.ccx, call_once, substs)
.reify(bcx.ccx))
let instance = monomorphize::resolve_closure(
bcx.ccx.shared(), def_id, substs, ty::ClosureKind::FnOnce);
OperandValue::Immediate(callee::get_fn(bcx.ccx, instance))
}
_ => {
bug!("{} cannot be cast to a fn ptr", operand.ty)
@ -461,8 +450,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
bcx.sess().fatal(&format!("allocation of `{}` {}", box_ty, s));
}
};
let r = Callee::def(bcx.ccx, def_id, bcx.tcx().intern_substs(&[]))
.reify(bcx.ccx);
let instance = ty::Instance::mono(bcx.tcx(), def_id);
let r = callee::get_fn(bcx.ccx, instance);
let val = bcx.pointercast(bcx.call(r, &[llsize, llalign], None), llty_ptr);
let operand = OperandRef {
@ -471,7 +460,6 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
};
(bcx, operand)
}
mir::Rvalue::Use(ref operand) => {
let operand = self.trans_operand(&bcx, operand);
(bcx, operand)
@ -674,7 +662,7 @@ pub fn rvalue_creates_operand(rvalue: &mir::Rvalue) -> bool {
mir::Rvalue::UnaryOp(..) |
mir::Rvalue::Discriminant(..) |
mir::Rvalue::Box(..) |
mir::Rvalue::Use(..) =>
mir::Rvalue::Use(..) => // (*)
true,
mir::Rvalue::Repeat(..) |
mir::Rvalue::Aggregate(..) =>

View File

@ -8,60 +8,290 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use abi::Abi;
use common::*;
use glue;
use rustc::hir::def_id::DefId;
use rustc::infer::TransNormalize;
use rustc::traits;
use rustc::middle::lang_items::DropInPlaceFnLangItem;
use rustc::traits::{self, SelectionContext, Reveal};
use rustc::ty::adjustment::CustomCoerceUnsized;
use rustc::ty::fold::{TypeFolder, TypeFoldable};
use rustc::ty::subst::{Subst, Substs};
use rustc::ty::subst::{Kind, Subst, Substs};
use rustc::ty::{self, Ty, TyCtxt};
use rustc::util::ppaux;
use rustc::util::common::MemoizationMap;
use syntax::codemap::DUMMY_SP;
use syntax::ast;
use syntax::codemap::{Span, DUMMY_SP};
use std::fmt;
pub use rustc::ty::Instance;
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub struct Instance<'tcx> {
pub def: DefId,
pub substs: &'tcx Substs<'tcx>,
/// Builds the `Instance` for the `FnOnce::call_once` adapter shim of a
/// by-ref closure (`Fn`/`FnMut`) that is being called through `FnOnce`.
///
/// The returned instance uses `InstanceDef::ClosureOnceShim`, carrying the
/// `DefId` of `FnOnce::call_once`, with substs `[closure_ty, args_ty]`.
fn fn_once_adapter_instance<'a, 'tcx>(
    tcx: TyCtxt<'a, 'tcx, 'tcx>,
    closure_did: DefId,
    substs: ty::ClosureSubsts<'tcx>,
    ) -> Instance<'tcx> {
    debug!("fn_once_adapter_shim({:?}, {:?})",
           closure_did,
           substs);
    // `call_once` is the only method of the `FnOnce` lang-item trait, so
    // taking the first associated method finds it.
    let fn_once = tcx.lang_items.fn_once_trait().unwrap();
    let call_once = tcx.associated_items(fn_once)
        .find(|it| it.kind == ty::AssociatedKind::Method)
        .unwrap().def_id;
    let def = ty::InstanceDef::ClosureOnceShim { call_once };

    // The shim's `Self` type is the closure type itself.
    let self_ty = tcx.mk_closure_from_closure_substs(
        closure_did, substs);

    // Instantiate the closure's signature; after erasing late-bound
    // regions it must have exactly one input: the tupled argument type.
    let sig = tcx.closure_type(closure_did).subst(tcx, substs.substs);
    let sig = tcx.erase_late_bound_regions_and_normalize(&sig);
    assert_eq!(sig.inputs().len(), 1);
    // Shim substs are [Self = closure type, Args = tupled argument type].
    let substs = tcx.mk_substs([
        Kind::from(self_ty),
        Kind::from(sig.inputs()[0]),
    ].iter().cloned());

    debug!("fn_once_adapter_shim: self_ty={:?} sig={:?}", self_ty, sig);
    Instance { def, substs }
}
impl<'tcx> fmt::Display for Instance<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
ppaux::parameterized(f, &self.substs, self.def, &[])
/// Decides whether calling a closure of kind `actual_closure_kind`
/// through the trait of kind `trait_closure_kind` requires an adapter
/// shim (`Ok(true)`), can use the closure's own function directly
/// (`Ok(false)`), or is an invalid combination (`Err(())`).
fn needs_fn_once_adapter_shim(actual_closure_kind: ty::ClosureKind,
                              trait_closure_kind: ty::ClosureKind)
                              -> Result<bool, ()>
{
    match (actual_closure_kind, trait_closure_kind) {
        // Matching kinds need no adapter. Calling a `Fn` closure as
        // `FnMut` also works unchanged: at trans time `fn(&self, ...)`
        // and `fn(&mut self, ...)` are basically the same thing, so the
        // same llfn can be used.
        (ty::ClosureKind::Fn, ty::ClosureKind::Fn) |
        (ty::ClosureKind::Fn, ty::ClosureKind::FnMut) |
        (ty::ClosureKind::FnMut, ty::ClosureKind::FnMut) |
        (ty::ClosureKind::FnOnce, ty::ClosureKind::FnOnce) => Ok(false),

        // A by-ref closure (`fn(&self, ...)` or `fn(&mut self, ...)`)
        // called through `FnOnce` wants `fn(self, ...)`; that needs a
        // shim of the shape:
        //
        //     fn call_once(self, ...) { call_mut(&self, ...) }
        //     fn call_once(mut self, ...) { call_mut(&mut self, ...) }
        //
        // (both of which are the same at trans time).
        (ty::ClosureKind::Fn, ty::ClosureKind::FnOnce) |
        (ty::ClosureKind::FnMut, ty::ClosureKind::FnOnce) => Ok(true),

        // Any remaining combination is a downgrade and is invalid.
        _ => Err(()),
    }
}
impl<'a, 'tcx> Instance<'tcx> {
pub fn new(def_id: DefId, substs: &'tcx Substs<'tcx>)
-> Instance<'tcx> {
assert!(substs.regions().all(|&r| r == ty::ReErased));
Instance { def: def_id, substs: substs }
}
pub fn resolve_closure<'a, 'tcx> (
scx: &SharedCrateContext<'a, 'tcx>,
def_id: DefId,
substs: ty::ClosureSubsts<'tcx>,
requested_kind: ty::ClosureKind)
-> Instance<'tcx>
{
let actual_kind = scx.tcx().closure_kind(def_id);
pub fn mono(scx: &SharedCrateContext<'a, 'tcx>, def_id: DefId) -> Instance<'tcx> {
Instance::new(def_id, scx.empty_substs_for_def_id(def_id))
match needs_fn_once_adapter_shim(actual_kind, requested_kind) {
Ok(true) => fn_once_adapter_instance(scx.tcx(), def_id, substs),
_ => Instance::new(def_id, substs.substs)
}
}
/// For associated constants from traits, return the impl definition.
pub fn resolve_const(&self, scx: &SharedCrateContext<'a, 'tcx>) -> Self {
if let Some(trait_id) = scx.tcx().trait_of_item(self.def) {
let trait_ref = ty::TraitRef::new(trait_id, self.substs);
let trait_ref = ty::Binder(trait_ref);
let vtable = fulfill_obligation(scx, DUMMY_SP, trait_ref);
if let traits::VtableImpl(vtable_impl) = vtable {
let name = scx.tcx().item_name(self.def);
let ac = scx.tcx().associated_items(vtable_impl.impl_def_id)
.find(|item| item.kind == ty::AssociatedKind::Const && item.name == name);
if let Some(ac) = ac {
return Instance::new(ac.def_id, vtable_impl.substs);
/// Attempts to resolve an obligation. The result is a shallow vtable resolution -- meaning that we
/// do not (necessarily) resolve all nested obligations on the impl. Note that type check should
/// guarantee to us that all nested obligations *could be* resolved if we wanted to.
// NOTE(review): results are memoized per erased trait-ref in the shared
// crate context's trait cache, so `span` only affects diagnostics on the
// first (cache-miss) selection of a given trait-ref.
fn fulfill_obligation<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>,
                                span: Span,
                                trait_ref: ty::PolyTraitRef<'tcx>)
                                -> traits::Vtable<'tcx, ()>
{
    let tcx = scx.tcx();

    // Remove any references to regions; this helps improve caching.
    let trait_ref = tcx.erase_regions(&trait_ref);

    scx.trait_cache().memoize(trait_ref, || {
        debug!("trans::fulfill_obligation(trait_ref={:?}, def_id={:?})",
               trait_ref, trait_ref.def_id());

        // Do the initial selection for the obligation. This yields the
        // shallow result we are looking for -- that is, what specific impl.
        // Selection runs in a fresh inference context with `Reveal::All`,
        // so specializable defaults and `impl Trait` are fully revealed.
        tcx.infer_ctxt((), Reveal::All).enter(|infcx| {
            let mut selcx = SelectionContext::new(&infcx);

            let obligation_cause = traits::ObligationCause::misc(span,
                                                             ast::DUMMY_NODE_ID);
            let obligation = traits::Obligation::new(obligation_cause,
                                                     trait_ref.to_poly_trait_predicate());
            let selection = match selcx.select(&obligation) {
                Ok(Some(selection)) => selection,
                Ok(None) => {
                    // Ambiguity can happen when monomorphizing during trans
                    // expands to some humongo type that never occurred
                    // statically -- this humongo type can then overflow,
                    // leading to an ambiguous result. So report this as an
                    // overflow bug, since I believe this is the only case
                    // where ambiguity can result.
                    debug!("Encountered ambiguity selecting `{:?}` during trans, \
                            presuming due to overflow",
                           trait_ref);
                    tcx.sess.span_fatal(span,
                        "reached the recursion limit during monomorphization \
                         (selection ambiguity)");
                }
                Err(e) => {
                    span_bug!(span, "Encountered error `{:?}` selecting `{:?}` during trans",
                              e, trait_ref)
                }
            };

            debug!("fulfill_obligation: selection={:?}", selection);

            // Currently, we use a fulfillment context to completely resolve
            // all nested obligations. This is because they can inform the
            // inference of the impl's type parameters.
            let mut fulfill_cx = traits::FulfillmentContext::new();
            let vtable = selection.map(|predicate| {
                debug!("fulfill_obligation: register_predicate_obligation {:?}", predicate);
                fulfill_cx.register_predicate_obligation(&infcx, predicate);
            });
            // Panics (via span_bug) if any nested obligation fails to
            // resolve -- type check should have guaranteed they can.
            let vtable = infcx.drain_fulfillment_cx_or_panic(span, &mut fulfill_cx, &vtable);

            info!("Cache miss: {:?} => {:?}", trait_ref, vtable);
            vtable
        })
    })
}
/// Resolves a trait method/item reference (`trait_item` of `trait_id`,
/// invoked with `rcvr_substs`) to the concrete `Instance` that will be
/// called, by selecting the applicable vtable.
fn resolve_associated_item<'a, 'tcx>(
    scx: &SharedCrateContext<'a, 'tcx>,
    trait_item: &ty::AssociatedItem,
    trait_id: DefId,
    rcvr_substs: &'tcx Substs<'tcx>
) -> Instance<'tcx> {
    let tcx = scx.tcx();
    let def_id = trait_item.def_id;
    debug!("resolve_associated_item(trait_item={:?}, \
            trait_id={:?}, \
           rcvr_substs={:?})",
           def_id, trait_id, rcvr_substs);

    let trait_ref = ty::TraitRef::from_method(tcx, trait_id, rcvr_substs);
    let vtbl = fulfill_obligation(scx, DUMMY_SP, ty::Binder(trait_ref));

    // Now that we know which impl is being used, we can dispatch to
    // the actual function:
    match vtbl {
        // A concrete impl: find the corresponding item in that impl
        // (accounting for specialization) and its substs.
        traits::VtableImpl(impl_data) => {
            let (def_id, substs) = traits::find_associated_item(
                tcx, trait_item, rcvr_substs, &impl_data);
            let substs = tcx.erase_regions(&substs);
            ty::Instance::new(def_id, substs)
        }
        // A closure implementing one of the `Fn*` traits: may need a
        // `call_once` adapter shim depending on the closure's own kind.
        traits::VtableClosure(closure_data) => {
            let trait_closure_kind = tcx.lang_items.fn_trait_kind(trait_id).unwrap();
            resolve_closure(scx, closure_data.closure_def_id, closure_data.substs,
                            trait_closure_kind)
        }
        // A fn pointer implementing `Fn*`: calls go through a shim that
        // forwards to the pointer.
        traits::VtableFnPointer(ref data) => {
            Instance {
                def: ty::InstanceDef::FnPtrShim(trait_item.def_id, data.fn_ty),
                substs: rcvr_substs
            }
        }
        // A trait-object receiver: virtual dispatch through the vtable
        // slot computed for this method.
        traits::VtableObject(ref data) => {
            let index = tcx.get_vtable_index_of_object_method(data, def_id);
            Instance {
                def: ty::InstanceDef::Virtual(def_id, index),
                substs: rcvr_substs
            }
        }
        _ => {
            bug!("static call to invalid vtable: {:?}", vtbl)
        }
    }
}
*self
/// The point where linking happens. Resolve a (def_id, substs)
/// pair to an instance.
///
/// Trait items are dispatched through trait selection; free items are
/// classified into `Intrinsic`, `DropGlue`, or plain `Item` instances.
pub fn resolve<'a, 'tcx>(
    scx: &SharedCrateContext<'a, 'tcx>,
    def_id: DefId,
    substs: &'tcx Substs<'tcx>
) -> Instance<'tcx> {
    debug!("resolve(def_id={:?}, substs={:?})",
           def_id, substs);
    let result = if let Some(trait_def_id) = scx.tcx().trait_of_item(def_id) {
        debug!(" => associated item, attempting to find impl");
        // Trait item: select the impl/closure/fn-pointer/object vtable.
        let item = scx.tcx().associated_item(def_id);
        resolve_associated_item(scx, &item, trait_def_id, substs)
    } else {
        let item_type = def_ty(scx, def_id, substs);
        let def = match item_type.sty {
            // Intrinsics get their own InstanceDef so they are never
            // translated as ordinary functions.
            ty::TyFnDef(_, _, f) if
                f.abi() == Abi::RustIntrinsic ||
                f.abi() == Abi::PlatformIntrinsic =>
            {
                debug!(" => intrinsic");
                ty::InstanceDef::Intrinsic(def_id)
            }
            _ => {
                // `drop_in_place::<T>` becomes drop glue; the payload
                // type is recorded only when T actually needs dropping.
                if Some(def_id) == scx.tcx().lang_items.drop_in_place_fn() {
                    let ty = substs.type_at(0);
                    if glue::needs_drop_glue(scx, ty) {
                        debug!(" => nontrivial drop glue");
                        ty::InstanceDef::DropGlue(def_id, Some(ty))
                    } else {
                        debug!(" => trivial drop glue");
                        ty::InstanceDef::DropGlue(def_id, None)
                    }
                } else {
                    debug!(" => free item");
                    ty::InstanceDef::Item(def_id)
                }
            }
        };
        Instance { def, substs }
    };
    debug!("resolve(def_id={:?}, substs={:?}) = {}",
           def_id, substs, result);
    result
}
/// Resolves the `drop_in_place::<ty>` instance for the given type.
pub fn resolve_drop_in_place<'a, 'tcx>(
    scx: &SharedCrateContext<'a, 'tcx>,
    ty: Ty<'tcx>)
    -> ty::Instance<'tcx>
{
    // `drop_in_place` is a lang item; monomorphize it at `ty` and let
    // `resolve` classify it as trivial or nontrivial drop glue.
    let tcx = scx.tcx();
    let drop_fn = tcx.require_lang_item(DropInPlaceFnLangItem);
    resolve(scx, drop_fn, tcx.intern_substs(&[Kind::from(ty)]))
}
/// Determines which kind of custom unsizing coercion applies when
/// coercing `source_ty` to `target_ty`, by selecting the
/// `CoerceUnsized` impl that connects the two types.
pub fn custom_coerce_unsize_info<'scx, 'tcx>(scx: &SharedCrateContext<'scx, 'tcx>,
                                             source_ty: Ty<'tcx>,
                                             target_ty: Ty<'tcx>)
                                             -> CustomCoerceUnsized {
    // Build the `source_ty: CoerceUnsized<target_ty>` predicate.
    let coerce_unsized_ref = ty::Binder(ty::TraitRef {
        def_id: scx.tcx().lang_items.coerce_unsized_trait().unwrap(),
        substs: scx.tcx().mk_substs_trait(source_ty, &[target_ty])
    });

    // Only a concrete impl makes sense here; anything else is a bug.
    let vtable = fulfill_obligation(scx, DUMMY_SP, coerce_unsized_ref);
    if let traits::VtableImpl(data) = vtable {
        scx.tcx().custom_coerce_unsized_kind(data.impl_def_id)
    } else {
        bug!("invalid CoerceUnsized vtable: {:?}", vtable);
    }
}
@ -80,7 +310,6 @@ pub fn apply_param_substs<'a, 'tcx, T>(scx: &SharedCrateContext<'a, 'tcx>,
AssociatedTypeNormalizer::new(scx).fold(&substituted)
}
/// Returns the normalized type of a struct field
pub fn field_ty<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
param_substs: &Substs<'tcx>,

View File

@ -110,7 +110,7 @@ use rustc::dep_graph::{DepNode, WorkProductId};
use rustc::hir::def_id::DefId;
use rustc::hir::map::DefPathData;
use rustc::session::config::NUMBERED_CODEGEN_UNIT_MARKER;
use rustc::ty::TyCtxt;
use rustc::ty::{self, TyCtxt};
use rustc::ty::item_path::characteristic_def_id_of_type;
use rustc_incremental::IchHasher;
use std::cmp::Ordering;
@ -186,14 +186,14 @@ impl<'tcx> CodegenUnit<'tcx> {
symbol_name.hash(&mut state);
let exported = match item {
TransItem::Fn(ref instance) => {
let node_id = scx.tcx().hir.as_local_node_id(instance.def);
let node_id =
scx.tcx().hir.as_local_node_id(instance.def_id());
node_id.map(|node_id| exported_symbols.contains(&node_id))
.unwrap_or(false)
}
TransItem::Static(node_id) => {
exported_symbols.contains(&node_id)
}
TransItem::DropGlue(..) => false,
};
exported.hash(&mut state);
}
@ -241,10 +241,9 @@ impl<'tcx> CodegenUnit<'tcx> {
fn local_node_id(tcx: TyCtxt, trans_item: TransItem) -> Option<NodeId> {
match trans_item {
TransItem::Fn(instance) => {
tcx.hir.as_local_node_id(instance.def)
tcx.hir.as_local_node_id(instance.def_id())
}
TransItem::Static(node_id) => Some(node_id),
TransItem::DropGlue(_) => None,
}
}
}
@ -340,7 +339,6 @@ fn place_root_translation_items<'a, 'tcx, I>(scx: &SharedCrateContext<'a, 'tcx>,
match trans_item {
TransItem::Fn(..) |
TransItem::Static(..) => llvm::ExternalLinkage,
TransItem::DropGlue(..) => unreachable!(),
}
}
};
@ -455,17 +453,26 @@ fn characteristic_def_id_of_trans_item<'a, 'tcx>(scx: &SharedCrateContext<'a, 't
let tcx = scx.tcx();
match trans_item {
TransItem::Fn(instance) => {
let def_id = match instance.def {
ty::InstanceDef::Item(def_id) => def_id,
ty::InstanceDef::FnPtrShim(..) |
ty::InstanceDef::ClosureOnceShim { .. } |
ty::InstanceDef::Intrinsic(..) |
ty::InstanceDef::DropGlue(..) |
ty::InstanceDef::Virtual(..) => return None
};
// If this is a method, we want to put it into the same module as
// its self-type. If the self-type does not provide a characteristic
// DefId, we use the location of the impl after all.
if tcx.trait_of_item(instance.def).is_some() {
if tcx.trait_of_item(def_id).is_some() {
let self_ty = instance.substs.type_at(0);
// This is an implementation of a trait method.
return characteristic_def_id_of_type(self_ty).or(Some(instance.def));
return characteristic_def_id_of_type(self_ty).or(Some(def_id));
}
if let Some(impl_def_id) = tcx.impl_of_method(instance.def) {
if let Some(impl_def_id) = tcx.impl_of_method(def_id) {
// This is a method within an inherent impl, find out what the
// self-type is:
let impl_self_ty = common::def_ty(scx, impl_def_id, instance.substs);
@ -474,9 +481,8 @@ fn characteristic_def_id_of_trans_item<'a, 'tcx>(scx: &SharedCrateContext<'a, 't
}
}
Some(instance.def)
Some(def_id)
}
TransItem::DropGlue(dg) => characteristic_def_id_of_type(dg.ty()),
TransItem::Static(node_id) => Some(tcx.hir.local_def_id(node_id)),
}
}

View File

@ -97,10 +97,9 @@ impl<'tcx> SymbolMap<'tcx> {
trans_item: TransItem<'tcx>) -> Option<Span> {
match trans_item {
TransItem::Fn(Instance { def, .. }) => {
tcx.hir.as_local_node_id(def)
tcx.hir.as_local_node_id(def.def_id())
}
TransItem::Static(node_id) => Some(node_id),
TransItem::DropGlue(_) => None,
}.map(|node_id| {
tcx.hir.span(node_id)
})

View File

@ -14,6 +14,7 @@
//! item-path. This is used for unit testing the code that generates
//! paths etc in all kinds of annoying scenarios.
use back::symbol_names;
use rustc::hir;
use rustc::hir::intravisit::{self, Visitor, NestedVisitorMap};
use syntax::ast;
@ -51,8 +52,8 @@ impl<'a, 'tcx> SymbolNamesTest<'a, 'tcx> {
for attr in tcx.get_attrs(def_id).iter() {
if attr.check_name(SYMBOL_NAME) {
// for now, can only use on monomorphic names
let instance = Instance::mono(self.scx, def_id);
let name = instance.symbol_name(self.scx);
let instance = Instance::mono(tcx, def_id);
let name = symbol_names::symbol_name(instance, self.scx);
tcx.sess.span_err(attr.span, &format!("symbol-name({})", name));
} else if attr.check_name(ITEM_PATH) {
let path = tcx.item_path_str(def_id);
@ -86,4 +87,3 @@ impl<'a, 'tcx> Visitor<'tcx> for SymbolNamesTest<'a, 'tcx> {
intravisit::walk_impl_item(self, ii)
}
}

View File

@ -20,7 +20,6 @@ use consts;
use context::{CrateContext, SharedCrateContext};
use common;
use declare;
use glue::DropGlueKind;
use llvm;
use monomorphize::Instance;
use rustc::dep_graph::DepNode;
@ -32,15 +31,12 @@ use rustc_const_eval::fatal_const_eval_err;
use syntax::ast::{self, NodeId};
use syntax::attr;
use type_of;
use glue;
use abi::{Abi, FnType};
use back::symbol_names;
use std::fmt::Write;
use std::iter;
#[derive(PartialEq, Eq, Clone, Copy, Debug, Hash)]
pub enum TransItem<'tcx> {
DropGlue(DropGlueKind<'tcx>),
Fn(Instance<'tcx>),
Static(NodeId)
}
@ -95,13 +91,10 @@ impl<'a, 'tcx> TransItem<'tcx> {
}
TransItem::Fn(instance) => {
let _task = ccx.tcx().dep_graph.in_task(
DepNode::TransCrateItem(instance.def)); // (*)
DepNode::TransCrateItem(instance.def_id())); // (*)
base::trans_instance(&ccx, instance);
}
TransItem::DropGlue(dg) => {
glue::implement_drop_glue(&ccx, dg);
}
}
debug!("END IMPLEMENTING '{} ({})' in cgu {}",
@ -130,9 +123,6 @@ impl<'a, 'tcx> TransItem<'tcx> {
TransItem::Fn(instance) => {
TransItem::predefine_fn(ccx, instance, linkage, &symbol_name);
}
TransItem::DropGlue(dg) => {
TransItem::predefine_drop_glue(ccx, dg, linkage, &symbol_name);
}
}
debug!("END PREDEFINING '{} ({})' in cgu {}",
@ -146,7 +136,8 @@ impl<'a, 'tcx> TransItem<'tcx> {
linkage: llvm::Linkage,
symbol_name: &str) {
let def_id = ccx.tcx().hir.local_def_id(node_id);
let ty = common::def_ty(ccx.shared(), def_id, Substs::empty());
let instance = Instance::mono(ccx.tcx(), def_id);
let ty = common::instance_ty(ccx.shared(), &instance);
let llty = type_of::type_of(ccx, ty);
let g = declare::define_global(ccx, symbol_name, llty).unwrap_or_else(|| {
@ -156,7 +147,6 @@ impl<'a, 'tcx> TransItem<'tcx> {
unsafe { llvm::LLVMRustSetLinkage(g, linkage) };
let instance = Instance::mono(ccx.shared(), def_id);
ccx.instances().borrow_mut().insert(instance, g);
ccx.statics().borrow_mut().insert(g, def_id);
}
@ -168,8 +158,8 @@ impl<'a, 'tcx> TransItem<'tcx> {
assert!(!instance.substs.needs_infer() &&
!instance.substs.has_param_types());
let mono_ty = common::def_ty(ccx.shared(), instance.def, instance.substs);
let attrs = ccx.tcx().get_attrs(instance.def);
let mono_ty = common::instance_ty(ccx.shared(), &instance);
let attrs = instance.def.attrs(ccx.tcx());
let lldecl = declare::declare_fn(ccx, symbol_name, mono_ty);
unsafe { llvm::LLVMRustSetLinkage(lldecl, linkage) };
base::set_link_section(ccx, lldecl, &attrs);
@ -178,71 +168,23 @@ impl<'a, 'tcx> TransItem<'tcx> {
llvm::SetUniqueComdat(ccx.llmod(), lldecl);
}
if let ty::TyClosure(..) = mono_ty.sty {
// set an inline hint for all closures
debug!("predefine_fn: mono_ty = {:?} instance = {:?}", mono_ty, instance);
if common::is_inline_instance(ccx.tcx(), &instance) {
attributes::inline(lldecl, attributes::InlineAttr::Hint);
}
attributes::from_fn_attrs(ccx, &attrs, lldecl);
ccx.instances().borrow_mut().insert(instance, lldecl);
}
fn predefine_drop_glue(ccx: &CrateContext<'a, 'tcx>,
dg: glue::DropGlueKind<'tcx>,
linkage: llvm::Linkage,
symbol_name: &str) {
let tcx = ccx.tcx();
assert_eq!(dg.ty(), glue::get_drop_glue_type(ccx.shared(), dg.ty()));
let t = dg.ty();
let sig = tcx.mk_fn_sig(
iter::once(tcx.mk_mut_ptr(t)),
tcx.mk_nil(),
false,
hir::Unsafety::Normal,
Abi::Rust
);
debug!("predefine_drop_glue: sig={}", sig);
let fn_ty = FnType::new(ccx, sig, &[]);
let llfnty = fn_ty.llvm_type(ccx);
assert!(declare::get_defined_value(ccx, symbol_name).is_none());
let llfn = declare::declare_cfn(ccx, symbol_name, llfnty);
unsafe { llvm::LLVMRustSetLinkage(llfn, linkage) };
if linkage == llvm::Linkage::LinkOnceODRLinkage ||
linkage == llvm::Linkage::WeakODRLinkage {
llvm::SetUniqueComdat(ccx.llmod(), llfn);
}
attributes::set_frame_pointer_elimination(ccx, llfn);
ccx.drop_glues().borrow_mut().insert(dg, (llfn, fn_ty));
}
pub fn compute_symbol_name(&self,
scx: &SharedCrateContext<'a, 'tcx>) -> String {
match *self {
TransItem::Fn(instance) => instance.symbol_name(scx),
TransItem::Fn(instance) => symbol_names::symbol_name(instance, scx),
TransItem::Static(node_id) => {
let def_id = scx.tcx().hir.local_def_id(node_id);
Instance::mono(scx, def_id).symbol_name(scx)
symbol_names::symbol_name(Instance::mono(scx.tcx(), def_id), scx)
}
TransItem::DropGlue(dg) => {
let prefix = match dg {
DropGlueKind::Ty(_) => "drop",
DropGlueKind::TyContents(_) => "drop_contents",
};
symbol_names::exported_name_from_type_and_prefix(scx, dg.ty(), prefix)
}
}
}
pub fn is_from_extern_crate(&self) -> bool {
match *self {
TransItem::Fn(ref instance) => !instance.def.is_local(),
TransItem::DropGlue(..) |
TransItem::Static(..) => false,
}
}
@ -252,14 +194,13 @@ impl<'a, 'tcx> TransItem<'tcx> {
match *self {
TransItem::Fn(ref instance) => {
if self.explicit_linkage(tcx).is_none() &&
(common::is_closure(tcx, instance.def) ||
attr::requests_inline(&tcx.get_attrs(instance.def)[..])) {
common::requests_inline(tcx, instance)
{
InstantiationMode::LocalCopy
} else {
InstantiationMode::GloballyShared
}
}
TransItem::DropGlue(..) => InstantiationMode::LocalCopy,
TransItem::Static(..) => InstantiationMode::GloballyShared,
}
}
@ -269,16 +210,14 @@ impl<'a, 'tcx> TransItem<'tcx> {
TransItem::Fn(ref instance) => {
instance.substs.types().next().is_some()
}
TransItem::DropGlue(..) |
TransItem::Static(..) => false,
}
}
pub fn explicit_linkage(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Option<llvm::Linkage> {
let def_id = match *self {
TransItem::Fn(ref instance) => instance.def,
TransItem::Fn(ref instance) => instance.def_id(),
TransItem::Static(node_id) => tcx.hir.local_def_id(node_id),
TransItem::DropGlue(..) => return None,
};
let attributes = tcx.get_attrs(def_id);
@ -302,16 +241,6 @@ impl<'a, 'tcx> TransItem<'tcx> {
let hir_map = &tcx.hir;
return match *self {
TransItem::DropGlue(dg) => {
let mut s = String::with_capacity(32);
match dg {
DropGlueKind::Ty(_) => s.push_str("drop-glue "),
DropGlueKind::TyContents(_) => s.push_str("drop-glue-contents "),
};
let printer = DefPathBasedNames::new(tcx, false, false);
printer.push_type_name(dg.ty(), &mut s);
s
}
TransItem::Fn(instance) => {
to_string_internal(tcx, "fn ", instance)
},
@ -336,13 +265,6 @@ impl<'a, 'tcx> TransItem<'tcx> {
pub fn to_raw_string(&self) -> String {
match *self {
TransItem::DropGlue(dg) => {
let prefix = match dg {
DropGlueKind::Ty(_) => "Ty",
DropGlueKind::TyContents(_) => "TyContents",
};
format!("DropGlue({}: {})", prefix, dg.ty() as *const _ as usize)
}
TransItem::Fn(instance) => {
format!("Fn({:?}, {})",
instance.def,
@ -581,7 +503,7 @@ impl<'a, 'tcx> DefPathBasedNames<'a, 'tcx> {
pub fn push_instance_as_string(&self,
instance: Instance<'tcx>,
output: &mut String) {
self.push_def_path(instance.def, output);
self.push_def_path(instance.def_id(), output);
self.push_type_params(instance.substs, iter::empty(), output);
}
}

View File

@ -10,7 +10,7 @@
use llvm;
use builder::Builder;
use llvm::ValueRef;
use llvm::{BasicBlockRef, ValueRef};
use common::*;
use rustc::ty::Ty;
@ -20,7 +20,7 @@ pub fn slice_for_each<'a, 'tcx, F>(
unit_ty: Ty<'tcx>,
len: ValueRef,
f: F
) -> Builder<'a, 'tcx> where F: FnOnce(&Builder<'a, 'tcx>, ValueRef) {
) -> Builder<'a, 'tcx> where F: FnOnce(&Builder<'a, 'tcx>, ValueRef, BasicBlockRef) {
// Special-case vectors with elements of size 0 so they don't go out of bounds (#9890)
let zst = type_is_zero_size(bcx.ccx, unit_ty);
let add = |bcx: &Builder, a, b| if zst {
@ -46,9 +46,8 @@ pub fn slice_for_each<'a, 'tcx, F>(
let keep_going = header_bcx.icmp(llvm::IntNE, current, end);
header_bcx.cond_br(keep_going, body_bcx.llbb(), next_bcx.llbb());
f(&body_bcx, if zst { data_ptr } else { current });
let next = add(&body_bcx, current, C_uint(bcx.ccx, 1usize));
f(&body_bcx, if zst { data_ptr } else { current }, header_bcx.llbb());
header_bcx.add_incoming_to_phi(current, next, body_bcx.llbb());
body_bcx.br(header_bcx.llbb());
next_bcx
}

@ -1 +1 @@
Subproject commit 859fb269364623b17e092efaba3f94e70ce97c5e
Subproject commit d5ef27a79661d4f0d57d7b7d2cdbe9204f790a4a

View File

@ -1,4 +1,4 @@
# If this file is modified, then llvm will be (optionally) cleaned and then rebuilt.
# The actual contents of this file do not matter, but to trigger a change on the
# build bots then the contents should be changed so git updates the mtime.
2017-03-04
2017-03-19

View File

@ -30,5 +30,3 @@ fn main()
// This should not introduce a codegen item
let _ = cgu_generic_function::exported_but_not_generic(3);
}
//~ TRANS_ITEM drop-glue i8

View File

@ -11,8 +11,7 @@
// ignore-tidy-linelength
// compile-flags:-Zprint-trans-items=eager
//~ TRANS_ITEM drop-glue drop_in_place_intrinsic::StructWithDtor[0]
//~ TRANS_ITEM drop-glue-contents drop_in_place_intrinsic::StructWithDtor[0]
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<drop_in_place_intrinsic::StructWithDtor[0]> @@ drop_in_place_intrinsic.cgu-0[Internal]
struct StructWithDtor(u32);
impl Drop for StructWithDtor {
@ -23,7 +22,7 @@ impl Drop for StructWithDtor {
//~ TRANS_ITEM fn drop_in_place_intrinsic::main[0]
fn main() {
//~ TRANS_ITEM drop-glue [drop_in_place_intrinsic::StructWithDtor[0]; 2]
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<[drop_in_place_intrinsic::StructWithDtor[0]; 2]> @@ drop_in_place_intrinsic.cgu-0[Internal]
let x = [StructWithDtor(0), StructWithDtor(1)];
drop_slice_in_place(&x);
@ -35,7 +34,7 @@ fn drop_slice_in_place(x: &[StructWithDtor]) {
// This is the interesting thing in this test case: Normally we would
// not have drop-glue for the unsized [StructWithDtor]. This has to be
// generated though when the drop_in_place() intrinsic is used.
//~ TRANS_ITEM drop-glue [drop_in_place_intrinsic::StructWithDtor[0]]
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<[drop_in_place_intrinsic::StructWithDtor[0]]> @@ drop_in_place_intrinsic.cgu-0[Internal]
::std::ptr::drop_in_place(x as *const _ as *mut [StructWithDtor]);
}
}

View File

@ -28,10 +28,12 @@ fn main() {
//~ TRANS_ITEM fn function_as_argument::take_fn_once[0]<u32, &str, fn(u32, &str)>
//~ TRANS_ITEM fn function_as_argument::function[0]<u32, &str>
//~ TRANS_ITEM fn core::ops[0]::FnOnce[0]::call_once[0]<fn(u32, &str), (u32, &str)>
take_fn_once(function, 0u32, "abc");
//~ TRANS_ITEM fn function_as_argument::take_fn_once[0]<char, f64, fn(char, f64)>
//~ TRANS_ITEM fn function_as_argument::function[0]<char, f64>
//~ TRANS_ITEM fn core::ops[0]::FnOnce[0]::call_once[0]<fn(char, f64), (char, f64)>
take_fn_once(function, 'c', 0f64);
//~ TRANS_ITEM fn function_as_argument::take_fn_pointer[0]<i32, ()>
@ -42,5 +44,3 @@ fn main() {
//~ TRANS_ITEM fn function_as_argument::function[0]<f32, i64>
take_fn_pointer(function, 0f32, 0i64);
}
//~ TRANS_ITEM drop-glue i8

View File

@ -45,8 +45,7 @@ enum EnumNoDrop<T1, T2> {
struct NonGenericNoDrop(i32);
struct NonGenericWithDrop(i32);
//~ TRANS_ITEM drop-glue generic_drop_glue::NonGenericWithDrop[0]
//~ TRANS_ITEM drop-glue-contents generic_drop_glue::NonGenericWithDrop[0]
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<generic_drop_glue::NonGenericWithDrop[0]> @@ generic_drop_glue.cgu-0[Internal]
impl Drop for NonGenericWithDrop {
//~ TRANS_ITEM fn generic_drop_glue::{{impl}}[2]::drop[0]
@ -55,13 +54,11 @@ impl Drop for NonGenericWithDrop {
//~ TRANS_ITEM fn generic_drop_glue::main[0]
fn main() {
//~ TRANS_ITEM drop-glue generic_drop_glue::StructWithDrop[0]<i8, char>
//~ TRANS_ITEM drop-glue-contents generic_drop_glue::StructWithDrop[0]<i8, char>
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<generic_drop_glue::StructWithDrop[0]<i8, char>> @@ generic_drop_glue.cgu-0[Internal]
//~ TRANS_ITEM fn generic_drop_glue::{{impl}}[0]::drop[0]<i8, char>
let _ = StructWithDrop { x: 0i8, y: 'a' }.x;
//~ TRANS_ITEM drop-glue generic_drop_glue::StructWithDrop[0]<&str, generic_drop_glue::NonGenericNoDrop[0]>
//~ TRANS_ITEM drop-glue-contents generic_drop_glue::StructWithDrop[0]<&str, generic_drop_glue::NonGenericNoDrop[0]>
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<generic_drop_glue::StructWithDrop[0]<&str, generic_drop_glue::NonGenericNoDrop[0]>> @@ generic_drop_glue.cgu-0[Internal]
//~ TRANS_ITEM fn generic_drop_glue::{{impl}}[0]::drop[0]<&str, generic_drop_glue::NonGenericNoDrop[0]>
let _ = StructWithDrop { x: "&str", y: NonGenericNoDrop(0) }.y;
@ -70,19 +67,17 @@ fn main() {
// This is supposed to generate drop-glue because it contains a field that
// needs to be dropped.
//~ TRANS_ITEM drop-glue generic_drop_glue::StructNoDrop[0]<generic_drop_glue::NonGenericWithDrop[0], f64>
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<generic_drop_glue::StructNoDrop[0]<generic_drop_glue::NonGenericWithDrop[0], f64>> @@ generic_drop_glue.cgu-0[Internal]
let _ = StructNoDrop { x: NonGenericWithDrop(0), y: 0f64 }.y;
//~ TRANS_ITEM drop-glue generic_drop_glue::EnumWithDrop[0]<i32, i64>
//~ TRANS_ITEM drop-glue-contents generic_drop_glue::EnumWithDrop[0]<i32, i64>
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<generic_drop_glue::EnumWithDrop[0]<i32, i64>> @@ generic_drop_glue.cgu-0[Internal]
//~ TRANS_ITEM fn generic_drop_glue::{{impl}}[1]::drop[0]<i32, i64>
let _ = match EnumWithDrop::A::<i32, i64>(0) {
EnumWithDrop::A(x) => x,
EnumWithDrop::B(x) => x as i32
};
//~ TRANS_ITEM drop-glue generic_drop_glue::EnumWithDrop[0]<f64, f32>
//~ TRANS_ITEM drop-glue-contents generic_drop_glue::EnumWithDrop[0]<f64, f32>
//~TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<generic_drop_glue::EnumWithDrop[0]<f64, f32>> @@ generic_drop_glue.cgu-0[Internal]
//~ TRANS_ITEM fn generic_drop_glue::{{impl}}[1]::drop[0]<f64, f32>
let _ = match EnumWithDrop::B::<f64, f32>(1.0) {
EnumWithDrop::A(x) => x,
@ -99,5 +94,3 @@ fn main() {
EnumNoDrop::B(x) => x as f64
};
}
//~ TRANS_ITEM drop-glue i8

View File

@ -31,12 +31,13 @@ impl<T> Trait for Struct<T> {
fn main() {
let s1 = Struct { _a: 0u32 };
//~ TRANS_ITEM drop-glue i8
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<instantiation_through_vtable::Struct[0]<u32>> @@ instantiation_through_vtable.cgu-0[Internal]
//~ TRANS_ITEM fn instantiation_through_vtable::{{impl}}[0]::foo[0]<u32>
//~ TRANS_ITEM fn instantiation_through_vtable::{{impl}}[0]::bar[0]<u32>
let _ = &s1 as &Trait;
let s1 = Struct { _a: 0u64 };
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<instantiation_through_vtable::Struct[0]<u64>> @@ instantiation_through_vtable.cgu-0[Internal]
//~ TRANS_ITEM fn instantiation_through_vtable::{{impl}}[0]::foo[0]<u64>
//~ TRANS_ITEM fn instantiation_through_vtable::{{impl}}[0]::bar[0]<u64>
let _ = &s1 as &Trait;

View File

@ -40,5 +40,3 @@ fn main() {
//~ TRANS_ITEM fn items_within_generic_items::generic_fn[0]<i8>
let _ = generic_fn(0i8);
}
//~ TRANS_ITEM drop-glue i8

View File

@ -13,8 +13,7 @@
#![deny(dead_code)]
//~ TRANS_ITEM drop-glue non_generic_drop_glue::StructWithDrop[0]
//~ TRANS_ITEM drop-glue-contents non_generic_drop_glue::StructWithDrop[0]
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<non_generic_drop_glue::StructWithDrop[0]> @@ non_generic_drop_glue.cgu-0[Internal]
struct StructWithDrop {
x: i32
}
@ -28,8 +27,7 @@ struct StructNoDrop {
x: i32
}
//~ TRANS_ITEM drop-glue non_generic_drop_glue::EnumWithDrop[0]
//~ TRANS_ITEM drop-glue-contents non_generic_drop_glue::EnumWithDrop[0]
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<non_generic_drop_glue::EnumWithDrop[0]> @@ non_generic_drop_glue.cgu-0[Internal]
enum EnumWithDrop {
A(i32)
}
@ -54,5 +52,3 @@ fn main() {
EnumNoDrop::A(x) => x
};
}
//~ TRANS_ITEM drop-glue i8

View File

@ -77,5 +77,3 @@ fn main() {
let x = Struct { _x: 0 };
x.bar();
}
//~ TRANS_ITEM drop-glue i8

View File

@ -68,5 +68,3 @@ impl Deref for Equatable {
&self.0
}
}
//~ TRANS_ITEM drop-glue i8

View File

@ -20,4 +20,3 @@ pub fn foo<T>() { }
fn main() { }
//~ TRANS_ITEM fn static_init::main[0]
//~ TRANS_ITEM drop-glue i8

View File

@ -60,5 +60,3 @@ fn main() {
//~ TRANS_ITEM static statics_and_consts::foo[0]::STATIC2[2]
//~ TRANS_ITEM fn statics_and_consts::main[0]
//~ TRANS_ITEM drop-glue i8

View File

@ -78,5 +78,3 @@ fn main() {
//~ TRANS_ITEM fn trait_implementations::{{impl}}[3]::bar[0]<&str, &str>
0f32.bar("&str", "&str");
}
//~ TRANS_ITEM drop-glue i8

View File

@ -40,23 +40,27 @@ fn take_foo_mut<T, F: FnMut(T) -> T>(mut f: F, arg: T) -> T {
fn main() {
//~ TRANS_ITEM fn trait_method_as_argument::take_foo_once[0]<u32, fn(u32) -> u32>
//~ TRANS_ITEM fn trait_method_as_argument::{{impl}}[0]::foo[0]
//~ TRANS_ITEM fn core::ops[0]::FnOnce[0]::call_once[0]<fn(u32) -> u32, (u32)>
take_foo_once(Trait::foo, 0u32);
//~ TRANS_ITEM fn trait_method_as_argument::take_foo_once[0]<char, fn(char) -> char>
//~ TRANS_ITEM fn trait_method_as_argument::Trait[0]::foo[0]<char>
//~ TRANS_ITEM fn core::ops[0]::FnOnce[0]::call_once[0]<fn(char) -> char, (char)>
take_foo_once(Trait::foo, 'c');
//~ TRANS_ITEM fn trait_method_as_argument::take_foo[0]<u32, fn(u32) -> u32>
//~ TRANS_ITEM fn core::ops[0]::Fn[0]::call[0]<fn(u32) -> u32, (u32)>
take_foo(Trait::foo, 0u32);
//~ TRANS_ITEM fn trait_method_as_argument::take_foo[0]<char, fn(char) -> char>
//~ TRANS_ITEM fn core::ops[0]::Fn[0]::call[0]<fn(char) -> char, (char)>
take_foo(Trait::foo, 'c');
//~ TRANS_ITEM fn trait_method_as_argument::take_foo_mut[0]<u32, fn(u32) -> u32>
//~ TRANS_ITEM fn core::ops[0]::FnMut[0]::call_mut[0]<fn(char) -> char, (char)>
take_foo_mut(Trait::foo, 0u32);
//~ TRANS_ITEM fn trait_method_as_argument::take_foo_mut[0]<char, fn(char) -> char>
//~ TRANS_ITEM fn core::ops[0]::FnMut[0]::call_mut[0]<fn(u32) -> u32, (u32)>
take_foo_mut(Trait::foo, 'c');
}
//~ TRANS_ITEM drop-glue i8

View File

@ -66,5 +66,3 @@ fn main() {
//~ TRANS_ITEM fn trait_method_default_impl::SomeGenericTrait[0]::bar[0]<u32, i16, ()>
0u32.bar(0i16, ());
}
//~ TRANS_ITEM drop-glue i8

View File

@ -13,12 +13,11 @@
#![deny(dead_code)]
//~ TRANS_ITEM drop-glue transitive_drop_glue::Root[0]
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<transitive_drop_glue::Root[0]> @@ transitive_drop_glue.cgu-0[Internal]
struct Root(Intermediate);
//~ TRANS_ITEM drop-glue transitive_drop_glue::Intermediate[0]
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<transitive_drop_glue::Intermediate[0]> @@ transitive_drop_glue.cgu-0[Internal]
struct Intermediate(Leaf);
//~ TRANS_ITEM drop-glue transitive_drop_glue::Leaf[0]
//~ TRANS_ITEM drop-glue-contents transitive_drop_glue::Leaf[0]
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<transitive_drop_glue::Leaf[0]> @@ transitive_drop_glue.cgu-0[Internal]
struct Leaf;
impl Drop for Leaf {
@ -39,17 +38,15 @@ fn main() {
let _ = Root(Intermediate(Leaf));
//~ TRANS_ITEM drop-glue transitive_drop_glue::RootGen[0]<u32>
//~ TRANS_ITEM drop-glue transitive_drop_glue::IntermediateGen[0]<u32>
//~ TRANS_ITEM drop-glue transitive_drop_glue::LeafGen[0]<u32>
//~ TRANS_ITEM drop-glue-contents transitive_drop_glue::LeafGen[0]<u32>
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<transitive_drop_glue::RootGen[0]<u32>> @@ transitive_drop_glue.cgu-0[Internal]
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<transitive_drop_glue::IntermediateGen[0]<u32>> @@ transitive_drop_glue.cgu-0[Internal]
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<transitive_drop_glue::LeafGen[0]<u32>> @@ transitive_drop_glue.cgu-0[Internal]
//~ TRANS_ITEM fn transitive_drop_glue::{{impl}}[1]::drop[0]<u32>
let _ = RootGen(IntermediateGen(LeafGen(0u32)));
//~ TRANS_ITEM drop-glue transitive_drop_glue::RootGen[0]<i16>
//~ TRANS_ITEM drop-glue transitive_drop_glue::IntermediateGen[0]<i16>
//~ TRANS_ITEM drop-glue transitive_drop_glue::LeafGen[0]<i16>
//~ TRANS_ITEM drop-glue-contents transitive_drop_glue::LeafGen[0]<i16>
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<transitive_drop_glue::RootGen[0]<i16>> @@ transitive_drop_glue.cgu-0[Internal]
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<transitive_drop_glue::IntermediateGen[0]<i16>> @@ transitive_drop_glue.cgu-0[Internal]
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<transitive_drop_glue::LeafGen[0]<i16>> @@ transitive_drop_glue.cgu-0[Internal]
//~ TRANS_ITEM fn transitive_drop_glue::{{impl}}[1]::drop[0]<i16>
let _ = RootGen(IntermediateGen(LeafGen(0i16)));
}

View File

@ -13,8 +13,7 @@
#![deny(dead_code)]
//~ TRANS_ITEM drop-glue tuple_drop_glue::Dropped[0]
//~ TRANS_ITEM drop-glue-contents tuple_drop_glue::Dropped[0]
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<tuple_drop_glue::Dropped[0]> @@ tuple_drop_glue.cgu-0[Internal]
struct Dropped;
impl Drop for Dropped {
@ -24,10 +23,10 @@ impl Drop for Dropped {
//~ TRANS_ITEM fn tuple_drop_glue::main[0]
fn main() {
//~ TRANS_ITEM drop-glue (u32, tuple_drop_glue::Dropped[0])
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<(u32, tuple_drop_glue::Dropped[0])> @@ tuple_drop_glue.cgu-0[Internal]
let x = (0u32, Dropped);
//~ TRANS_ITEM drop-glue (i16, (tuple_drop_glue::Dropped[0], bool))
//~ TRANS_ITEM drop-glue (tuple_drop_glue::Dropped[0], bool)
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<(i16, (tuple_drop_glue::Dropped[0], bool))> @@ tuple_drop_glue.cgu-0[Internal]
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<(tuple_drop_glue::Dropped[0], bool)> @@ tuple_drop_glue.cgu-0[Internal]
let x = (0i16, (Dropped, true));
}

View File

@ -57,11 +57,13 @@ fn main()
{
// simple case
let bool_sized = &true;
//~ TRANS_ITEM drop-glue i8
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<bool> @@ unsizing.cgu-0[Internal]
//~ TRANS_ITEM fn unsizing::{{impl}}[0]::foo[0]
let _bool_unsized = bool_sized as &Trait;
let char_sized = &true;
let char_sized = &'a';
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<char> @@ unsizing.cgu-0[Internal]
//~ TRANS_ITEM fn unsizing::{{impl}}[1]::foo[0]
let _char_unsized = char_sized as &Trait;
@ -71,11 +73,13 @@ fn main()
_b: 2,
_c: 3.0f64
};
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<f64> @@ unsizing.cgu-0[Internal]
//~ TRANS_ITEM fn unsizing::{{impl}}[2]::foo[0]
let _struct_unsized = struct_sized as &Struct<Trait>;
// custom coercion
let wrapper_sized = Wrapper(&0u32);
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<u32> @@ unsizing.cgu-0[Internal]
//~ TRANS_ITEM fn unsizing::{{impl}}[3]::foo[0]
let _wrapper_sized = wrapper_sized as Wrapper<Trait>;
}

View File

@ -86,4 +86,3 @@ impl NonGeneric {
// Only the non-generic methods should be instantiated:
//~ TRANS_ITEM fn unused_traits_and_generics::{{impl}}[3]::foo[0]
//~ TRANS_ITEM drop-glue i8

View File

@ -20,15 +20,14 @@
// aux-build:cgu_extern_drop_glue.rs
extern crate cgu_extern_drop_glue;
//~ TRANS_ITEM drop-glue cgu_extern_drop_glue::Struct[0] @@ extern_drop_glue[Internal] extern_drop_glue-mod1[Internal]
//~ TRANS_ITEM drop-glue-contents cgu_extern_drop_glue::Struct[0] @@ extern_drop_glue[Internal] extern_drop_glue-mod1[Internal]
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<cgu_extern_drop_glue::Struct[0]> @@ extern_drop_glue[Internal] extern_drop_glue-mod1[Internal]
struct LocalStruct(cgu_extern_drop_glue::Struct);
//~ TRANS_ITEM fn extern_drop_glue::user[0] @@ extern_drop_glue[External]
fn user()
{
//~ TRANS_ITEM drop-glue extern_drop_glue::LocalStruct[0] @@ extern_drop_glue[Internal]
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<extern_drop_glue::LocalStruct[0]> @@ extern_drop_glue[Internal]
let _ = LocalStruct(cgu_extern_drop_glue::Struct(0));
}
@ -40,7 +39,7 @@ mod mod1 {
//~ TRANS_ITEM fn extern_drop_glue::mod1[0]::user[0] @@ extern_drop_glue-mod1[External]
fn user()
{
//~ TRANS_ITEM drop-glue extern_drop_glue::mod1[0]::LocalStruct[0] @@ extern_drop_glue-mod1[Internal]
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<extern_drop_glue::mod1[0]::LocalStruct[0]> @@ extern_drop_glue-mod1[Internal]
let _ = LocalStruct(cgu_extern_drop_glue::Struct(0));
}
}

View File

@ -60,5 +60,3 @@ mod mod3 {
// once for the current crate
//~ TRANS_ITEM fn cgu_generic_function::foo[0]<&str> @@ cgu_generic_function.volatile[External]
//~ TRANS_ITEM fn cgu_generic_function::bar[0]<&str> @@ cgu_generic_function.volatile[External]
//~ TRANS_ITEM drop-glue i8

View File

@ -16,8 +16,7 @@
#![allow(dead_code)]
#![crate_type="lib"]
//~ TRANS_ITEM drop-glue local_drop_glue::Struct[0] @@ local_drop_glue[Internal] local_drop_glue-mod1[Internal]
//~ TRANS_ITEM drop-glue-contents local_drop_glue::Struct[0] @@ local_drop_glue[Internal] local_drop_glue-mod1[Internal]
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<local_drop_glue::Struct[0]> @@ local_drop_glue[Internal] local_drop_glue-mod1[Internal]
struct Struct {
_a: u32
}
@ -27,7 +26,7 @@ impl Drop for Struct {
fn drop(&mut self) {}
}
//~ TRANS_ITEM drop-glue local_drop_glue::Outer[0] @@ local_drop_glue[Internal]
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<local_drop_glue::Outer[0]> @@ local_drop_glue[Internal]
struct Outer {
_a: Struct
}
@ -46,10 +45,10 @@ mod mod1
{
use super::Struct;
//~ TRANS_ITEM drop-glue local_drop_glue::mod1[0]::Struct2[0] @@ local_drop_glue-mod1[Internal]
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<local_drop_glue::mod1[0]::Struct2[0]> @@ local_drop_glue-mod1[Internal]
struct Struct2 {
_a: Struct,
//~ TRANS_ITEM drop-glue (u32, local_drop_glue::Struct[0]) @@ local_drop_glue-mod1[Internal]
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<(u32, local_drop_glue::Struct[0])> @@ local_drop_glue-mod1[Internal]
_b: (u32, Struct),
}

View File

@ -80,5 +80,3 @@ mod mod2 {
static BAZ: u64 = 0;
}
}
//~ TRANS_ITEM drop-glue i8

View File

@ -46,5 +46,3 @@ mod mod1 {
static BAR: u32 = 0;
}
}
//~ TRANS_ITEM drop-glue i8

View File

@ -69,7 +69,7 @@ mod mod1 {
//~ TRANS_ITEM fn vtable_through_const::main[0] @@ vtable_through_const[External]
fn main() {
//~ TRANS_ITEM drop-glue i8 @@ vtable_through_const[Internal]
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<u32> @@ vtable_through_const[Internal]
// Since Trait1::do_something() is instantiated via its default implementation,
// it is considered a generic and is instantiated here only because it is

View File

@ -8,6 +8,11 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::panic;
impl<'a> panic::UnwindSafe for Foo<'a> {}
impl<'a> panic::RefUnwindSafe for Foo<'a> {}
struct Foo<'a>(&'a mut bool);
impl<'a> Drop for Foo<'a> {
@ -28,5 +33,15 @@ fn main() {
f(x);
}
assert!(ran_drop);
}
let mut ran_drop = false;
{
let x = Foo(&mut ran_drop);
let result = panic::catch_unwind(move || {
let x = move || { let _ = x; panic!() };
f(x);
});
assert!(result.is_err());
}
assert!(ran_drop);
}

View File

@ -0,0 +1,56 @@
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(fn_traits)]
#![feature(never_type)]
use std::panic;
/// Divides `x` by `y`; used as a plain `fn` item / fn pointer in the shim
/// tests below. Panics on `y == 0` (standard integer-division semantics).
fn foo(x: u32, y: u32) -> u32 {
    x / y
}
/// Diverging fixture (`-> !`): always panics. Exercises shim generation for
/// never-returning fn items and fn pointers.
fn foo_diverges() -> ! { panic!() }
/// Invokes a callable through the explicit UFCS paths `Fn::call`,
/// `FnMut::call_mut` and `FnOnce::call_once`, forcing the compiler to
/// generate the corresponding call shims (requires `#![feature(fn_traits)]`).
fn test_fn_ptr<T>(mut t: T)
    where T: Fn(u32, u32) -> u32,
{
    // Shared-borrow path: the `Fn` shim.
    let via_fn = <T as Fn<(u32, u32)>>::call;
    assert_eq!(via_fn(&t, (9, 3)), 3);

    // Unique-borrow path: the `FnMut` shim.
    let via_fn_mut = <T as FnMut<(u32, u32)>>::call_mut;
    assert_eq!(via_fn_mut(&mut t, (18, 3)), 6);

    // By-value path: the `FnOnce` shim; consumes `t`.
    let via_fn_once = <T as FnOnce<(u32, u32)>>::call_once;
    assert_eq!(via_fn_once(t, (24, 3)), 8);
}
/// Runs `f` inside `catch_unwind` and fails the test unless it panicked.
/// The closure is wrapped in `AssertUnwindSafe` because `F` carries no
/// `UnwindSafe` bound.
fn assert_panics<F>(f: F) where F: FnOnce() {
    let guarded = panic::AssertUnwindSafe(f);
    let outcome = panic::catch_unwind(move || guarded.0());
    if outcome.is_ok() {
        panic!("diverging function returned");
    }
}
/// Like `test_fn_ptr`, but for callables that never return (`-> !`): each
/// explicit shim invocation must unwind rather than come back.
fn test_fn_ptr_panic<T>(mut t: T)
    where T: Fn() -> !
{
    // `Fn` shim must unwind.
    let via_fn = <T as Fn<()>>::call;
    assert_panics(|| via_fn(&t, ()));

    // `FnMut` shim must unwind.
    let via_fn_mut = <T as FnMut<()>>::call_mut;
    assert_panics(|| via_fn_mut(&mut t, ()));

    // `FnOnce` shim must unwind; consumes `t`.
    let via_fn_once = <T as FnOnce<()>>::call_once;
    assert_panics(|| via_fn_once(t, ()));
}
fn main() {
    // Exercise both the generic instantiation over the zero-sized fn item
    // type and the reified fn-pointer form, for a returning function...
    let foo_ptr: fn(u32, u32) -> u32 = foo;
    test_fn_ptr(foo);
    test_fn_ptr(foo_ptr);

    // ...and for a diverging one.
    let diverging_ptr: fn() -> ! = foo_diverges;
    test_fn_ptr_panic(foo_diverges);
    test_fn_ptr_panic(diverging_ptr);
}