// rust/src/librustc_trans/glue.rs
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//!
//
// Code relating to drop glue.
use std;
use llvm;
use llvm::{ValueRef, get_param};
use middle::lang_items::ExchangeFreeFnLangItem;
use rustc::ty::subst::{Substs};
use rustc::traits;
use rustc::ty::{self, AdtKind, Ty, TyCtxt, TypeFoldable};
use adt;
use base::*;
use callee::{Callee, CalleeData};
use cleanup::CleanupScope;
use meth;
use common::*;
use machine::*;
use monomorphize;
use trans_item::TransItem;
use tvec;
use type_of::{type_of, sizing_type_of, align_of};
use type_::Type;
use value::Value;
use Disr;
use syntax_pos::DUMMY_SP;
pub fn trans_exchange_free_dyn<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
v: ValueRef,
size: ValueRef,
align: ValueRef)
2016-12-11 03:32:44 +00:00
-> BlockAndBuilder<'blk, 'tcx> {
let _icx = push_ctxt("trans_exchange_free");
let def_id = langcall(bcx.tcx(), None, "", ExchangeFreeFnLangItem);
let args = [bcx.pointercast(v, Type::i8p(bcx.ccx())), size, align];
Callee::def(bcx.ccx(), def_id, bcx.tcx().intern_substs(&[]))
2016-12-11 23:28:10 +00:00
.call(bcx, &args, None, None).0
}
pub fn trans_exchange_free<'blk, 'tcx>(cx: BlockAndBuilder<'blk, 'tcx>,
v: ValueRef,
size: u64,
align: u32)
2016-12-11 03:32:44 +00:00
-> BlockAndBuilder<'blk, 'tcx> {
let ccx = cx.ccx();
trans_exchange_free_dyn(cx, v, C_uint(ccx, size), C_uint(ccx, align))
2014-08-06 09:59:40 +00:00
}
pub fn trans_exchange_free_ty<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
ptr: ValueRef,
content_ty: Ty<'tcx>)
2016-12-11 03:32:44 +00:00
-> BlockAndBuilder<'blk, 'tcx> {
assert!(type_is_sized(bcx.ccx().tcx(), content_ty));
let sizing_type = sizing_type_of(bcx.ccx(), content_ty);
let content_size = llsize_of_alloc(bcx.ccx(), sizing_type);
// `Box<ZeroSizeType>` does not allocate.
if content_size != 0 {
2014-08-06 09:59:40 +00:00
let content_align = align_of(bcx.ccx(), content_ty);
trans_exchange_free(bcx, ptr, content_size, content_align)
} else {
bcx
}
}
/// Reports whether dropping a value of type `ty` runs any code, judged
/// in an empty parameter environment (no caller-supplied bounds).
pub fn type_needs_drop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                                 ty: Ty<'tcx>) -> bool {
    let empty_param_env = tcx.empty_parameter_environment();
    tcx.type_needs_drop_given_env(ty, &empty_param_env)
}
pub fn get_drop_glue_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
t: Ty<'tcx>) -> Ty<'tcx> {
assert!(t.is_normalized_for_trans());
let t = tcx.erase_regions(&t);
2014-08-06 09:59:40 +00:00
// Even if there is no dtor for t, there might be one deeper down and we
// might need to pass in the vtable ptr.
if !type_is_sized(tcx, t) {
return t;
2014-08-06 09:59:40 +00:00
}
// FIXME (#22815): note that type_needs_drop conservatively
// approximates in some cases and may say a type expression
// requires drop glue when it actually does not.
//
// (In this case it is not clear whether any harm is done, i.e.
// erroneously returning `t` in some cases where we could have
// returned `tcx.types.i8` does not appear unsound. The impact on
// code quality is unknown at this time.)
if !type_needs_drop(tcx, t) {
return tcx.types.i8;
}
match t.sty {
ty::TyBox(typ) if !type_needs_drop(tcx, typ)
&& type_is_sized(tcx, typ) => {
tcx.infer_ctxt(None, None, traits::Reveal::All).enter(|infcx| {
let layout = t.layout(&infcx).unwrap();
if layout.size(&tcx.data_layout).bytes() == 0 {
// `Box<ZeroSizeType>` does not allocate.
tcx.types.i8
} else {
t
}
})
}
_ => t
}
}
fn drop_ty<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, v: ValueRef, t: Ty<'tcx>) {
2016-12-12 05:19:39 +00:00
call_drop_glue(bcx, v, t, false, None)
}
pub fn call_drop_glue<'blk, 'tcx>(
bcx: &BlockAndBuilder<'blk, 'tcx>,
2016-12-12 05:19:39 +00:00
v: ValueRef,
t: Ty<'tcx>,
skip_dtor: bool,
funclet: Option<&'blk Funclet>,
) {
// NB: v is an *alias* of type t here, not a direct value.
2016-12-12 05:19:39 +00:00
debug!("call_drop_glue(t={:?}, skip_dtor={})", t, skip_dtor);
let _icx = push_ctxt("drop_ty");
2016-12-11 03:32:44 +00:00
if bcx.fcx().type_needs_drop(t) {
2014-04-25 03:14:52 +00:00
let ccx = bcx.ccx();
let g = if skip_dtor {
DropGlueKind::TyContents(t)
} else {
DropGlueKind::Ty(t)
};
let glue = get_drop_glue_core(ccx, g);
let glue_type = get_drop_glue_type(ccx.tcx(), t);
let ptr = if glue_type != t {
bcx.pointercast(v, type_of(ccx, glue_type).ptr_to())
} else {
v
};
// No drop-hint ==> call standard drop glue
bcx.call(glue, &[ptr], funclet.map(|b| b.bundle()));
}
}
/// Returns the drop-glue function for `t` that runs the full destructor
/// (shorthand for `get_drop_glue_core` with `DropGlueKind::Ty`).
pub fn get_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> ValueRef {
    get_drop_glue_core(ccx, DropGlueKind::Ty(t))
}
/// Identifies which flavor of drop glue to generate for a type; also
/// serves as the key into the crate context's drop-glue cache (hence the
/// `PartialEq`/`Eq`/`Hash` derives).
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub enum DropGlueKind<'tcx> {
    /// The normal path; runs the dtor, and then recurs on the contents
    Ty(Ty<'tcx>),
    /// Skips the dtor, if any, for ty; drops the contents directly.
    /// Note that the dtor is only skipped at the most *shallow*
    /// level, namely, an `impl Drop for Ty` itself. So, for example,
    /// if Ty is Newtype(S) then only the Drop impl for Newtype itself
    /// will be skipped, while the Drop impl for S, if any, will be
    /// invoked.
    TyContents(Ty<'tcx>),
}
impl<'tcx> DropGlueKind<'tcx> {
    /// Returns the underlying type, regardless of glue flavor.
    pub fn ty(&self) -> Ty<'tcx> {
        match *self {
            DropGlueKind::Ty(t) => t,
            DropGlueKind::TyContents(t) => t,
        }
    }

    /// Applies `f` to the underlying type while preserving the flavor.
    pub fn map_ty<F>(&self, mut f: F) -> DropGlueKind<'tcx> where F: FnMut(Ty<'tcx>) -> Ty<'tcx>
    {
        match *self {
            DropGlueKind::Ty(t) => DropGlueKind::Ty(f(t)),
            DropGlueKind::TyContents(t) => DropGlueKind::TyContents(f(t)),
        }
    }
}
/// Looks up the pre-declared drop glue for `g` in the crate context's
/// cache, after canonicalizing the type via `get_drop_glue_type`.
/// ICEs if the glue was never declared for this codegen unit.
fn get_drop_glue_core<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                                g: DropGlueKind<'tcx>) -> ValueRef {
    let g = g.map_ty(|t| get_drop_glue_type(ccx.tcx(), t));
    if let Some(&(glue, _)) = ccx.drop_glues().borrow().get(&g) {
        glue
    } else {
        bug!("Could not find drop glue for {:?} -- {} -- {}.",
             g,
             TransItem::DropGlue(g).to_raw_string(),
             ccx.codegen_unit().name());
    }
}
pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
g: DropGlueKind<'tcx>) {
let tcx = ccx.tcx();
assert_eq!(g.ty(), get_drop_glue_type(tcx, g.ty()));
let (llfn, fn_ty) = ccx.drop_glues().borrow().get(&g).unwrap().clone();
2016-12-12 05:19:39 +00:00
let fcx = FunctionContext::new(ccx, llfn, fn_ty, None);
let bcx = fcx.init(false);
ccx.stats().n_glues_created.set(ccx.stats().n_glues_created.get() + 1);
// All glue functions take values passed *by alias*; this is a
// requirement since in many contexts glue is invoked indirectly and
// the caller has no idea if it's dealing with something that can be
// passed by value.
//
// llfn is expected be declared to take a parameter of the appropriate
// type, so we don't need to explicitly cast the function parameter.
let bcx = make_drop_glue(bcx, get_param(llfn, 0), g);
fcx.finish(&bcx);
}
fn trans_custom_dtor<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
t: Ty<'tcx>,
v0: ValueRef,
shallow_drop: bool)
2016-12-11 03:32:44 +00:00
-> BlockAndBuilder<'blk, 'tcx>
{
debug!("trans_custom_dtor t: {}", t);
let tcx = bcx.tcx();
let def = t.ty_adt_def().unwrap();
// Be sure to put the contents into a scope so we can use an invoke
// instruction to call the user destructor but still call the field
// destructors if the user destructor panics.
//
// FIXME (#14875) panic-in-drop semantics might be unsupported; we
// might well consider changing below to more direct code.
// Issue #23611: schedule cleanup of contents, re-inspecting the
// discriminant (if any) in case of variant swap in drop code.
let mut contents_scope = if !shallow_drop {
2016-12-13 01:00:42 +00:00
bcx.fcx().schedule_drop_adt_contents(v0, t)
} else {
None
};
let (sized_args, unsized_args);
let args: &[ValueRef] = if type_is_sized(tcx, t) {
sized_args = [v0];
&sized_args
} else {
// FIXME(#36457) -- we should pass unsized values to drop glue as two arguments
unsized_args = [
bcx.load(get_dataptr(&bcx, v0)),
bcx.load(get_meta(&bcx, v0))
];
&unsized_args
};
let trait_ref = ty::Binder(ty::TraitRef {
def_id: tcx.lang_items.drop_trait().unwrap(),
substs: tcx.mk_substs_trait(t, &[])
});
let vtbl = match fulfill_obligation(bcx.ccx().shared(), DUMMY_SP, trait_ref) {
traits::VtableImpl(data) => data,
_ => bug!("dtor for {:?} is not an impl???", t)
};
let dtor_did = def.destructor().unwrap();
let callee = Callee::def(bcx.ccx(), dtor_did, vtbl.substs);
let bcx = trans_call_custom_dtor(bcx, callee, args, &mut contents_scope);
bcx.fcx().trans_scope(&bcx, contents_scope);
bcx
}
// Inlined and simplified version of callee::trans_call_inner
//
// Emits the (possibly virtual) dtor call itself. If a contents-cleanup
// scope is pending and landing pads are enabled, the call is emitted as
// an `invoke` whose unwind edge runs the field destructors.
//
// (The unused `'a` lifetime parameter in the original signature has been
// dropped; callers never name it.)
fn trans_call_custom_dtor<'blk, 'tcx>(
    bcx: BlockAndBuilder<'blk, 'tcx>,
    callee: Callee<'tcx>,
    args: &[ValueRef],
    cleanup_scope: &mut Option<CleanupScope<'tcx>>,
) -> BlockAndBuilder<'blk, 'tcx> {
    let fn_ret = callee.ty.fn_ret();
    let fn_ty = callee.direct_fn_type(bcx.ccx(), &[]);

    // Return must be direct, with no cast.
    assert!(!fn_ty.ret.is_indirect() && fn_ty.ret.cast.is_none());

    let mut llargs = Vec::new();
    let llfn = match callee.data {
        CalleeData::Virtual(idx) => {
            // args = [data ptr, vtable ptr, ...]: fetch the method out of
            // the vtable and cast it to the concrete fn type.
            llargs.push(args[0]);
            let fn_ptr = meth::get_virtual_method(&bcx, args[1], idx);
            let llty = fn_ty.llvm_type(&bcx.ccx()).ptr_to();
            let llfn = bcx.pointercast(fn_ptr, llty);
            llargs.extend_from_slice(&args[2..]);
            llfn
        }
        CalleeData::Fn(f) => {
            llargs.extend_from_slice(args);
            f
        }
        _ => bug!("Expected virtual or fn pointer callee, found {:?}", callee)
    };

    let _icx = push_ctxt("invoke_");
    let (llret, bcx) = if cleanup_scope.is_some() && !bcx.sess().no_landing_pads() {
        // Unwind-aware path: invoke with a landing pad that drops the
        // contents before resuming unwinding.
        let normal_bcx = bcx.fcx().build_new_block("normal-return");
        let landing_pad = bcx.fcx().get_landing_pad(cleanup_scope.as_mut().unwrap());
        let llresult = bcx.invoke(llfn, &llargs[..], normal_bcx.llbb(), landing_pad, None);
        (llresult, normal_bcx)
    } else {
        let llresult = bcx.call(llfn, &llargs[..], None);
        (llresult, bcx)
    };
    fn_ty.apply_attrs_callsite(llret);

    // A diverging dtor can never return; terminate the block.
    if fn_ret.0.is_never() {
        bcx.unreachable();
    }
    bcx
}
pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
t: Ty<'tcx>, info: ValueRef)
-> (ValueRef, ValueRef) {
debug!("calculate size of DST: {}; with lost info: {:?}",
t, Value(info));
if type_is_sized(bcx.tcx(), t) {
2014-08-06 09:59:40 +00:00
let sizing_type = sizing_type_of(bcx.ccx(), t);
let size = llsize_of_alloc(bcx.ccx(), sizing_type);
let align = align_of(bcx.ccx(), t);
debug!("size_and_align_of_dst t={} info={:?} size: {} align: {}",
t, Value(info), size, align);
let size = C_uint(bcx.ccx(), size);
let align = C_uint(bcx.ccx(), align);
2014-08-06 09:59:40 +00:00
return (size, align);
}
match t.sty {
ty::TyAdt(def, substs) => {
2014-08-06 09:59:40 +00:00
let ccx = bcx.ccx();
// First get the size of all statically known fields.
// Don't use type_of::sizing_type_of because that expects t to be sized,
// and it also rounds up to alignment, which we want to avoid,
// as the unsized field's alignment could be smaller.
2015-08-06 15:25:15 +00:00
assert!(!t.is_simd());
let layout = ccx.layout_of(t);
debug!("DST {} layout: {:?}", t, layout);
let (sized_size, sized_align) = match *layout {
ty::layout::Layout::Univariant { ref variant, .. } => {
(variant.offsets.last().map_or(0, |o| o.bytes()), variant.align.abi())
}
_ => {
bug!("size_and_align_of_dst: expcted Univariant for `{}`, found {:#?}",
t, layout);
}
};
debug!("DST {} statically sized prefix size: {} align: {}",
t, sized_size, sized_align);
let sized_size = C_uint(ccx, sized_size);
let sized_align = C_uint(ccx, sized_align);
2014-08-06 09:59:40 +00:00
// Recurse to get the size of the dynamically sized field (must be
// the last field).
let last_field = def.struct_variant().fields.last().unwrap();
let field_ty = monomorphize::field_ty(bcx.tcx(), substs, last_field);
2014-08-06 09:59:40 +00:00
let (unsized_size, unsized_align) = size_and_align_of_dst(bcx, field_ty, info);
// FIXME (#26403, #27023): We should be adding padding
// to `sized_size` (to accommodate the `unsized_align`
// required of the unsized field that follows) before
// summing it with `sized_size`. (Note that since #26403
// is unfixed, we do not yet add the necessary padding
// here. But this is where the add would go.)
2014-08-06 09:59:40 +00:00
// Return the sum of sizes and max of aligns.
let size = bcx.add(sized_size, unsized_size);
// Choose max of two known alignments (combined value must
// be aligned according to more restrictive of the two).
let align = match (const_to_opt_uint(sized_align), const_to_opt_uint(unsized_align)) {
(Some(sized_align), Some(unsized_align)) => {
// If both alignments are constant, (the sized_align should always be), then
// pick the correct alignment statically.
C_uint(ccx, std::cmp::max(sized_align, unsized_align))
}
_ => bcx.select(bcx.icmp(llvm::IntUGT, sized_align, unsized_align),
sized_align,
unsized_align)
};
// Issue #27023: must add any necessary padding to `size`
// (to make it a multiple of `align`) before returning it.
//
// Namely, the returned size should be, in C notation:
//
// `size + ((size & (align-1)) ? align : 0)`
//
// emulated via the semi-standard fast bit trick:
//
// `(size + (align-1)) & -align`
let addend = bcx.sub(align, C_uint(bcx.ccx(), 1_u64));
let size = bcx.and(bcx.add(size, addend), bcx.neg(align));
2014-08-06 09:59:40 +00:00
(size, align)
}
ty::TyDynamic(..) => {
2014-08-06 09:59:40 +00:00
// info points to the vtable and the second entry in the vtable is the
// dynamic size of the object.
let info = bcx.pointercast(info, Type::int(bcx.ccx()).ptr_to());
let size_ptr = bcx.gepi(info, &[1]);
let align_ptr = bcx.gepi(info, &[2]);
(bcx.load(size_ptr), bcx.load(align_ptr))
2014-08-06 09:59:40 +00:00
}
ty::TySlice(_) | ty::TyStr => {
let unit_ty = t.sequence_element_type(bcx.tcx());
// The info in this case is the length of the str, so the size is that
2014-08-06 09:59:40 +00:00
// times the unit size.
let llunit_ty = sizing_type_of(bcx.ccx(), unit_ty);
let unit_align = llalign_of_min(bcx.ccx(), llunit_ty);
2014-08-06 09:59:40 +00:00
let unit_size = llsize_of_alloc(bcx.ccx(), llunit_ty);
(bcx.mul(info, C_uint(bcx.ccx(), unit_size)),
C_uint(bcx.ccx(), unit_align))
2014-08-06 09:59:40 +00:00
}
_ => bug!("Unexpected unsized type, found {}", t)
2014-08-06 09:59:40 +00:00
}
}
fn make_drop_glue<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
v0: ValueRef,
g: DropGlueKind<'tcx>)
2016-12-11 03:32:44 +00:00
-> BlockAndBuilder<'blk, 'tcx> {
let t = g.ty();
let skip_dtor = match g { DropGlueKind::Ty(_) => false, DropGlueKind::TyContents(_) => true };
// NB: v0 is an *alias* of type t here, not a direct value.
let _icx = push_ctxt("make_drop_glue");
// Only drop the value when it ... well, we used to check for
// non-null, (and maybe we need to continue doing so), but we now
// must definitely check for special bit-patterns corresponding to
// the special dtor markings.
match t.sty {
ty::TyBox(content_ty) => {
// Support for TyBox is built-in and its drop glue is
// special. It may move to library and have Drop impl. As
// a safe-guard, assert TyBox not used with TyContents.
assert!(!skip_dtor);
if !type_is_sized(bcx.tcx(), content_ty) {
2016-12-11 03:32:44 +00:00
let llval = get_dataptr(&bcx, v0);
let llbox = bcx.load(llval);
drop_ty(&bcx, v0, content_ty);
// FIXME(#36457) -- we should pass unsized values to drop glue as two arguments
2016-12-11 03:32:44 +00:00
let info = get_meta(&bcx, v0);
let info = bcx.load(info);
2016-12-11 03:32:44 +00:00
let (llsize, llalign) = size_and_align_of_dst(&bcx, content_ty, info);
// `Box<ZeroSizeType>` does not allocate.
let needs_free = bcx.icmp(
2016-12-11 03:32:44 +00:00
llvm::IntNE,
llsize,
C_uint(bcx.ccx(), 0u64),
);
with_cond(bcx, needs_free, |bcx| {
trans_exchange_free_dyn(bcx, llbox, llsize, llalign)
})
} else {
let llval = v0;
let llbox = bcx.load(llval);
drop_ty(&bcx, llbox, content_ty);
trans_exchange_free_ty(bcx, llbox, content_ty)
}
}
ty::TyDynamic(..) => {
// No support in vtable for distinguishing destroying with
// versus without calling Drop::drop. Assert caller is
// okay with always calling the Drop impl, if any.
// FIXME(#36457) -- we should pass unsized values to drop glue as two arguments
assert!(!skip_dtor);
2016-12-11 03:32:44 +00:00
let data_ptr = get_dataptr(&bcx, v0);
let vtable_ptr = bcx.load(get_meta(&bcx, v0));
let dtor = bcx.load(vtable_ptr);
2016-12-11 23:28:10 +00:00
bcx.call(dtor, &[bcx.pointercast(bcx.load(data_ptr), Type::i8p(bcx.ccx()))], None);
2014-08-06 09:59:40 +00:00
bcx
}
ty::TyAdt(def, ..) if def.dtor_kind().is_present() && !skip_dtor => {
trans_custom_dtor(bcx, t, v0, def.is_union())
}
ty::TyAdt(def, ..) if def.is_union() => {
bcx
}
_ => {
2016-12-11 03:32:44 +00:00
if bcx.fcx().type_needs_drop(t) {
drop_structural_ty(bcx, v0, t)
} else {
bcx
}
}
}
}
// Iterates through the elements of a structural type, dropping them.
2016-12-11 03:32:44 +00:00
fn drop_structural_ty<'blk, 'tcx>(cx: BlockAndBuilder<'blk, 'tcx>,
av: ValueRef,
t: Ty<'tcx>)
2016-12-11 03:32:44 +00:00
-> BlockAndBuilder<'blk, 'tcx> {
let _icx = push_ctxt("drop_structural_ty");
fn iter_variant<'blk, 'tcx>(cx: &BlockAndBuilder<'blk, 'tcx>,
t: Ty<'tcx>,
av: adt::MaybeSizedValue,
variant: &'tcx ty::VariantDef,
substs: &Substs<'tcx>) {
let _icx = push_ctxt("iter_variant");
let tcx = cx.tcx();
for (i, field) in variant.fields.iter().enumerate() {
let arg = monomorphize::field_ty(tcx, substs, field);
2016-12-11 03:32:44 +00:00
let field_ptr = adt::trans_field_ptr(&cx, t, av, Disr::from(variant.disr_val), i);
drop_ty(&cx, field_ptr, arg);
}
}
let value = if type_is_sized(cx.tcx(), t) {
adt::MaybeSizedValue::sized(av)
} else {
// FIXME(#36457) -- we should pass unsized values as two arguments
let data = cx.load(get_dataptr(&cx, av));
let info = cx.load(get_meta(&cx, av));
adt::MaybeSizedValue::unsized_(data, info)
};
let mut cx = cx;
match t.sty {
ty::TyClosure(def_id, substs) => {
for (i, upvar_ty) in substs.upvar_tys(def_id, cx.tcx()).enumerate() {
2016-12-11 03:32:44 +00:00
let llupvar = adt::trans_field_ptr(&cx, t, value, Disr(0), i);
drop_ty(&cx, llupvar, upvar_ty);
}
}
ty::TyArray(_, n) => {
2016-12-11 03:32:44 +00:00
let base = get_dataptr(&cx, value.value);
let len = C_uint(cx.ccx(), n);
let unit_ty = t.sequence_element_type(cx.tcx());
cx = tvec::slice_for_each(&cx, base, unit_ty, len, |bb, vv| drop_ty(bb, vv, unit_ty));
}
ty::TySlice(_) | ty::TyStr => {
let unit_ty = t.sequence_element_type(cx.tcx());
cx = tvec::slice_for_each(&cx, value.value, unit_ty, value.meta,
|bb, vv| drop_ty(bb, vv, unit_ty));
}
ty::TyTuple(ref args) => {
for (i, arg) in args.iter().enumerate() {
2016-12-11 03:32:44 +00:00
let llfld_a = adt::trans_field_ptr(&cx, t, value, Disr(0), i);
drop_ty(&cx, llfld_a, *arg);
}
}
ty::TyAdt(adt, substs) => match adt.adt_kind() {
AdtKind::Struct => {
let VariantInfo { fields, discr } = VariantInfo::from_ty(cx.tcx(), t, None);
for (i, &Field(_, field_ty)) in fields.iter().enumerate() {
2016-12-11 03:32:44 +00:00
let llfld_a = adt::trans_field_ptr(&cx, t, value, Disr::from(discr), i);
let val = if type_is_sized(cx.tcx(), field_ty) {
llfld_a
} else {
// FIXME(#36457) -- we should pass unsized values as two arguments
2016-12-11 03:32:44 +00:00
let scratch = alloc_ty(&cx, field_ty, "__fat_ptr_iter");
cx.store(llfld_a, get_dataptr(&cx, scratch));
cx.store(value.meta, get_meta(&cx, scratch));
scratch
};
drop_ty(&cx, val, field_ty);
}
}
AdtKind::Union => {
bug!("Union in `glue::drop_structural_ty`");
}
AdtKind::Enum => {
2016-12-11 03:32:44 +00:00
let fcx = cx.fcx();
let ccx = fcx.ccx;
let n_variants = adt.variants.len();
// NB: we must hit the discriminant first so that structural
// comparison know not to proceed when the discriminants differ.
2016-12-11 03:32:44 +00:00
match adt::trans_switch(&cx, t, av, false) {
(adt::BranchKind::Single, None) => {
if n_variants != 0 {
assert!(n_variants == 1);
iter_variant(&cx, t, adt::MaybeSizedValue::sized(av),
&adt.variants[0], substs);
}
}
(adt::BranchKind::Switch, Some(lldiscrim_a)) => {
2016-12-11 03:32:44 +00:00
let tcx = cx.tcx();
drop_ty(&cx, lldiscrim_a, tcx.types.isize);
// Create a fall-through basic block for the "else" case of
// the switch instruction we're about to generate. Note that
// we do **not** use an Unreachable instruction here, even
// though most of the time this basic block will never be hit.
//
// When an enum is dropped it's contents are currently
// overwritten to DTOR_DONE, which means the discriminant
// could have changed value to something not within the actual
// range of the discriminant. Currently this function is only
// used for drop glue so in this case we just return quickly
// from the outer function, and any other use case will only
// call this for an already-valid enum in which case the `ret
// void` will never be hit.
2016-12-12 05:19:39 +00:00
let ret_void_cx = fcx.build_new_block("enum-iter-ret-void");
ret_void_cx.ret_void();
let llswitch = cx.switch(lldiscrim_a, ret_void_cx.llbb(), n_variants);
2016-12-12 05:19:39 +00:00
let next_cx = fcx.build_new_block("enum-iter-next");
for variant in &adt.variants {
2016-12-11 03:32:44 +00:00
let variant_cx_name = format!("enum-iter-variant-{}",
&variant.disr_val.to_string());
2016-12-12 05:19:39 +00:00
let variant_cx = fcx.build_new_block(&variant_cx_name);
2016-12-11 03:32:44 +00:00
let case_val = adt::trans_case(&cx, t, Disr::from(variant.disr_val));
2016-12-11 22:03:52 +00:00
variant_cx.add_case(llswitch, case_val, variant_cx.llbb());
iter_variant(&variant_cx, t, value, variant, substs);
variant_cx.br(next_cx.llbb());
}
cx = next_cx;
}
_ => ccx.sess().unimpl("value from adt::trans_switch in drop_structural_ty"),
}
}
},
_ => {
cx.sess().unimpl(&format!("type in drop_structural_ty: {}", t))
}
}
return cx;
}