Auto merge of #31307 - nagisa:mir-drop-terminator, r=nikomatsakis

The scope of these refactorings is a little bit bigger than the title implies. See each commit for details.

I’m submitting this for nitpicking now (the first 4 commits), because I feel the basic idea/implementation is sound and should work. I will eventually expand this PR to cover the translator changes necessary for all this to work (+ tests), ~~and perhaps implement a dynamic dropping scheme while I’m at it as well.~~

r? @nikomatsakis
bors 2016-02-06 01:24:22 +00:00
commit 5147c1f2c0
25 changed files with 554 additions and 339 deletions

View File

@ -39,11 +39,17 @@ unsafe fn allocate(size: usize, _align: usize) -> *mut u8 {
p
}
#[lang = "exchange_free"]
unsafe fn deallocate(ptr: *mut u8, _size: usize, _align: usize) {
libc::free(ptr as *mut libc::c_void)
}
#[lang = "box_free"]
unsafe fn box_free<T>(ptr: *mut T) {
deallocate(ptr as *mut u8, ::core::mem::size_of::<T>(), ::core::mem::align_of::<T>());
}
#[start]
fn main(argc: isize, argv: *const *const u8) -> isize {
let x = box 1;

View File

@ -16,6 +16,8 @@
issue = "27700")]
use core::{isize, usize};
#[cfg(not(test))]
use core::intrinsics::{size_of, min_align_of};
#[allow(improper_ctypes)]
extern "C" {
@ -147,6 +149,17 @@ unsafe fn exchange_free(ptr: *mut u8, old_size: usize, align: usize) {
deallocate(ptr, old_size, align);
}
#[cfg(not(test))]
#[lang = "box_free"]
#[inline]
unsafe fn box_free<T>(ptr: *mut T) {
let size = size_of::<T>();
// We do not allocate for Box<T> when T is zero-sized, so deallocation is not necessary either.
if size != 0 {
deallocate(ptr as *mut u8, size, min_align_of::<T>());
}
}
#[cfg(test)]
mod tests {
extern crate test;
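
The guard above is the whole trick: a zero-sized `T` never gets a real allocation, so its `box_free` must be a no-op. A minimal self-contained sketch of the same logic (the `dealloc` parameter is a stand-in for the allocator entry point and is an assumption of this sketch, not part of the diff):

```rust
use std::mem;

// Hedged sketch of the box_free guard: only deallocate when the pointee
// actually occupies memory. `dealloc` stands in for the allocator hook.
unsafe fn sketch_box_free<T>(ptr: *mut T, dealloc: unsafe fn(*mut u8, usize, usize)) {
    let size = mem::size_of::<T>();
    if size != 0 {
        dealloc(ptr as *mut u8, size, mem::align_of::<T>());
    }
}
```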

View File

@ -435,7 +435,7 @@ impl<'a, 'tcx> DeadVisitor<'a, 'tcx> {
let is_named = node.name().is_some();
let field_type = self.tcx.node_id_to_type(node.id);
let is_marker_field = match field_type.ty_to_def_id() {
Some(def_id) => self.tcx.lang_items.items().any(|(_, item)| *item == Some(def_id)),
Some(def_id) => self.tcx.lang_items.items().iter().any(|item| *item == Some(def_id)),
_ => false
};
is_named

View File

@ -37,9 +37,6 @@ use syntax::parse::token::InternedString;
use rustc_front::intravisit::Visitor;
use rustc_front::hir;
use std::iter::Enumerate;
use std::slice;
// The actual lang items defined come at the end of this file in one handy table.
// So you probably just want to nip down to the end.
macro_rules! lets_do_this {
@ -70,8 +67,8 @@ impl LanguageItems {
}
}
pub fn items<'a>(&'a self) -> Enumerate<slice::Iter<'a, Option<DefId>>> {
self.items.iter().enumerate()
pub fn items(&self) -> &[Option<DefId>] {
&*self.items
}
pub fn item_name(index: usize) -> &'static str {
@ -336,6 +333,7 @@ lets_do_this! {
ExchangeMallocFnLangItem, "exchange_malloc", exchange_malloc_fn;
ExchangeFreeFnLangItem, "exchange_free", exchange_free_fn;
BoxFreeFnLangItem, "box_free", box_free_fn;
StrDupUniqFnLangItem, "strdup_uniq", strdup_uniq_fn;
StartFnLangItem, "start", start_fn;
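
With `items()` now returning a plain slice rather than a pre-enumerated iterator, call sites that still need indices simply tack on `iter().enumerate()` themselves, as the metadata-encoding hunk further down does. A small sketch of that pattern, using `u32` in place of `DefId` to stay self-contained:

```rust
fn count_defined(items: &[Option<u32>]) -> usize {
    // Same information as the old Enumerate<slice::Iter<_>> return type,
    // but the caller now opts into enumeration only when it needs indices.
    items.iter().enumerate().filter(|&(_, item)| item.is_some()).count()
}
```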

View File

@ -362,7 +362,7 @@ pub fn find_reachable(tcx: &ty::ctxt,
for (id, _) in &access_levels.map {
reachable_context.worklist.push(*id);
}
for (_, item) in tcx.lang_items.items() {
for item in tcx.lang_items.items().iter() {
if let Some(did) = *item {
if let Some(node_id) = tcx.map.as_local_node_id(did) {
reachable_context.worklist.push(node_id);

View File

@ -8,21 +8,20 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use graphviz::IntoCow;
use middle::const_eval::ConstVal;
use middle::def_id::DefId;
use middle::subst::Substs;
use middle::ty::{self, AdtDef, ClosureSubsts, FnOutput, Region, Ty};
use rustc_back::slice;
use rustc_data_structures::tuple_slice::TupleSlice;
use rustc_front::hir::InlineAsm;
use syntax::ast::{self, Name};
use syntax::codemap::Span;
use graphviz::IntoCow;
use std::ascii;
use std::borrow::Cow;
use std::borrow::{Cow};
use std::fmt::{self, Debug, Formatter, Write};
use std::{iter, u32};
use std::ops::{Index, IndexMut};
use syntax::ast::{self, Name};
use syntax::codemap::Span;
/// Lowered representation of a single function.
#[derive(Clone, RustcEncodable, RustcDecodable)]
@ -263,101 +262,63 @@ pub enum Terminator<'tcx> {
/// `END_BLOCK`.
Return,
/// Drop the Lvalue
Drop {
value: Lvalue<'tcx>,
target: BasicBlock,
unwind: Option<BasicBlock>
},
/// Block ends with a call of a converging function
Call {
/// The function that's being called
func: Operand<'tcx>,
/// Arguments the function is called with
args: Vec<Operand<'tcx>>,
/// The kind of call with associated information
kind: CallKind<'tcx>,
/// Destination for the return value. If `Some`, the call is converging.
destination: Option<(Lvalue<'tcx>, BasicBlock)>,
/// Cleanups to be done if the call unwinds.
cleanup: Option<BasicBlock>
},
}
#[derive(Clone, RustcEncodable, RustcDecodable)]
pub enum CallKind<'tcx> {
/// Diverging function without associated cleanup
Diverging,
/// Diverging function with associated cleanup
DivergingCleanup(BasicBlock),
/// Converging function without associated cleanup
Converging {
/// Destination where the call result is written
destination: Lvalue<'tcx>,
/// Block to branch into on successful return
target: BasicBlock,
},
ConvergingCleanup {
/// Destination where the call result is written
destination: Lvalue<'tcx>,
/// First target is branched to on successful return.
/// Second block contains the cleanups to do on unwind.
targets: (BasicBlock, BasicBlock)
}
}
impl<'tcx> CallKind<'tcx> {
pub fn successors(&self) -> &[BasicBlock] {
match *self {
CallKind::Diverging => &[],
CallKind::DivergingCleanup(ref b) |
CallKind::Converging { target: ref b, .. } => slice::ref_slice(b),
CallKind::ConvergingCleanup { ref targets, .. } => targets.as_slice(),
}
}
pub fn successors_mut(&mut self) -> &mut [BasicBlock] {
match *self {
CallKind::Diverging => &mut [],
CallKind::DivergingCleanup(ref mut b) |
CallKind::Converging { target: ref mut b, .. } => slice::mut_ref_slice(b),
CallKind::ConvergingCleanup { ref mut targets, .. } => targets.as_mut_slice(),
}
}
pub fn destination(&self) -> Option<&Lvalue<'tcx>> {
match *self {
CallKind::Converging { ref destination, .. } |
CallKind::ConvergingCleanup { ref destination, .. } => Some(destination),
CallKind::Diverging |
CallKind::DivergingCleanup(_) => None
}
}
pub fn destination_mut(&mut self) -> Option<&mut Lvalue<'tcx>> {
match *self {
CallKind::Converging { ref mut destination, .. } |
CallKind::ConvergingCleanup { ref mut destination, .. } => Some(destination),
CallKind::Diverging |
CallKind::DivergingCleanup(_) => None
}
}
}
impl<'tcx> Terminator<'tcx> {
pub fn successors(&self) -> &[BasicBlock] {
pub fn successors(&self) -> Cow<[BasicBlock]> {
use self::Terminator::*;
match *self {
Goto { target: ref b } => slice::ref_slice(b),
If { targets: ref b, .. } => b.as_slice(),
Switch { targets: ref b, .. } => b,
SwitchInt { targets: ref b, .. } => b,
Resume => &[],
Return => &[],
Call { ref kind, .. } => kind.successors(),
Goto { target: ref b } => slice::ref_slice(b).into_cow(),
If { targets: (b1, b2), .. } => vec![b1, b2].into_cow(),
Switch { targets: ref b, .. } => b[..].into_cow(),
SwitchInt { targets: ref b, .. } => b[..].into_cow(),
Resume => (&[]).into_cow(),
Return => (&[]).into_cow(),
Call { destination: Some((_, t)), cleanup: Some(c), .. } => vec![t, c].into_cow(),
Call { destination: Some((_, ref t)), cleanup: None, .. } =>
slice::ref_slice(t).into_cow(),
Call { destination: None, cleanup: Some(ref c), .. } => slice::ref_slice(c).into_cow(),
Call { destination: None, cleanup: None, .. } => (&[]).into_cow(),
Drop { target, unwind: Some(unwind), .. } => vec![target, unwind].into_cow(),
Drop { ref target, .. } => slice::ref_slice(target).into_cow(),
}
}
pub fn successors_mut(&mut self) -> &mut [BasicBlock] {
// FIXME: no mootable cow. I'm honestly not sure what a “cow” between `&mut [BasicBlock]` and
// `Vec<&mut BasicBlock>` would look like in the first place.
pub fn successors_mut(&mut self) -> Vec<&mut BasicBlock> {
use self::Terminator::*;
match *self {
Goto { target: ref mut b } => slice::mut_ref_slice(b),
If { targets: ref mut b, .. } => b.as_mut_slice(),
Switch { targets: ref mut b, .. } => b,
SwitchInt { targets: ref mut b, .. } => b,
Resume => &mut [],
Return => &mut [],
Call { ref mut kind, .. } => kind.successors_mut(),
Goto { target: ref mut b } => vec![b],
If { targets: (ref mut b1, ref mut b2), .. } => vec![b1, b2],
Switch { targets: ref mut b, .. } => b.iter_mut().collect(),
SwitchInt { targets: ref mut b, .. } => b.iter_mut().collect(),
Resume => Vec::new(),
Return => Vec::new(),
Call { destination: Some((_, ref mut t)), cleanup: Some(ref mut c), .. } => vec![t, c],
Call { destination: Some((_, ref mut t)), cleanup: None, .. } => vec![t],
Call { destination: None, cleanup: Some(ref mut c), .. } => vec![c],
Call { destination: None, cleanup: None, .. } => vec![],
Drop { ref mut target, unwind: Some(ref mut unwind), .. } => vec![target, unwind],
Drop { ref mut target, .. } => vec![target]
}
}
}
@ -424,8 +385,9 @@ impl<'tcx> Terminator<'tcx> {
SwitchInt { discr: ref lv, .. } => write!(fmt, "switchInt({:?})", lv),
Return => write!(fmt, "return"),
Resume => write!(fmt, "resume"),
Call { ref kind, ref func, ref args } => {
if let Some(destination) = kind.destination() {
Drop { ref value, .. } => write!(fmt, "drop({:?})", value),
Call { ref func, ref args, ref destination, .. } => {
if let Some((ref destination, _)) = *destination {
try!(write!(fmt, "{:?} = ", destination));
}
try!(write!(fmt, "{:?}(", func));
@ -445,12 +407,12 @@ impl<'tcx> Terminator<'tcx> {
use self::Terminator::*;
match *self {
Return | Resume => vec![],
Goto { .. } => vec!["".into_cow()],
If { .. } => vec!["true".into_cow(), "false".into_cow()],
Goto { .. } => vec!["".into()],
If { .. } => vec!["true".into(), "false".into()],
Switch { ref adt_def, .. } => {
adt_def.variants
.iter()
.map(|variant| variant.name.to_string().into_cow())
.map(|variant| variant.name.to_string().into())
.collect()
}
SwitchInt { ref values, .. } => {
@ -458,21 +420,18 @@ impl<'tcx> Terminator<'tcx> {
.map(|const_val| {
let mut buf = String::new();
fmt_const_val(&mut buf, const_val).unwrap();
buf.into_cow()
buf.into()
})
.chain(iter::once(String::from("otherwise").into_cow()))
.chain(iter::once(String::from("otherwise").into()))
.collect()
}
Call { ref kind, .. } => match *kind {
CallKind::Diverging =>
vec![],
CallKind::DivergingCleanup(..) =>
vec!["unwind".into_cow()],
CallKind::Converging { .. } =>
vec!["return".into_cow()],
CallKind::ConvergingCleanup { .. } =>
vec!["return".into_cow(), "unwind".into_cow()],
},
Call { destination: Some(_), cleanup: Some(_), .. } =>
vec!["return".into_cow(), "unwind".into_cow()],
Call { destination: Some(_), cleanup: None, .. } => vec!["return".into_cow()],
Call { destination: None, cleanup: Some(_), .. } => vec!["unwind".into_cow()],
Call { destination: None, cleanup: None, .. } => vec![],
Drop { unwind: None, .. } => vec!["return".into_cow()],
Drop { .. } => vec!["return".into_cow(), "unwind".into_cow()],
}
}
}
@ -490,23 +449,13 @@ pub struct Statement<'tcx> {
#[derive(Clone, Debug, RustcEncodable, RustcDecodable)]
pub enum StatementKind<'tcx> {
Assign(Lvalue<'tcx>, Rvalue<'tcx>),
Drop(DropKind, Lvalue<'tcx>),
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, RustcEncodable, RustcDecodable)]
pub enum DropKind {
/// free a partially constructed box, should go away eventually
Free,
Deep
}
impl<'tcx> Debug for Statement<'tcx> {
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
use self::StatementKind::*;
match self.kind {
Assign(ref lv, ref rv) => write!(fmt, "{:?} = {:?}", lv, rv),
Drop(DropKind::Free, ref lv) => write!(fmt, "free {:?}", lv),
Drop(DropKind::Deep, ref lv) => write!(fmt, "drop {:?}", lv),
Assign(ref lv, ref rv) => write!(fmt, "{:?} = {:?}", lv, rv)
}
}
}
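
To make the reworked terminator shapes concrete: `Drop` flows to `target` plus an optional `unwind` block, while `Call` flows to an optional return block plus an optional `cleanup` block — exactly the four `Call` arms that `successors` enumerates above. A simplified sketch, with `BasicBlock` reduced to a plain index (an assumption of this sketch, not the real type):

```rust
type BasicBlock = usize;

enum Term {
    Drop { target: BasicBlock, unwind: Option<BasicBlock> },
    Call { destination: Option<BasicBlock>, cleanup: Option<BasicBlock> },
}

fn successors(t: &Term) -> Vec<BasicBlock> {
    match *t {
        // A drop always continues at `target`; `unwind` is taken only if
        // the destructor panics.
        Term::Drop { target, unwind } => std::iter::once(target).chain(unwind).collect(),
        // A call without a destination diverges; without a cleanup block
        // it has no unwind successor.
        Term::Call { destination, cleanup } => destination.into_iter().chain(cleanup).collect(),
    }
}
```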

View File

@ -124,9 +124,6 @@ macro_rules! make_mir_visitor {
ref $($mutability)* rvalue) => {
self.visit_assign(block, lvalue, rvalue);
}
StatementKind::Drop(_, ref $($mutability)* lvalue) => {
self.visit_lvalue(lvalue, LvalueContext::Drop);
}
}
}
@ -177,30 +174,25 @@ macro_rules! make_mir_visitor {
Terminator::Return => {
}
Terminator::Drop { ref $($mutability)* value, target, unwind } => {
self.visit_lvalue(value, LvalueContext::Drop);
self.visit_branch(block, target);
unwind.map(|t| self.visit_branch(block, t));
}
Terminator::Call { ref $($mutability)* func,
ref $($mutability)* args,
ref $($mutability)* kind } => {
ref $($mutability)* destination,
cleanup } => {
self.visit_operand(func);
for arg in args {
self.visit_operand(arg);
}
match *kind {
CallKind::Converging {
ref $($mutability)* destination,
..
} |
CallKind::ConvergingCleanup {
ref $($mutability)* destination,
..
} => {
self.visit_lvalue(destination, LvalueContext::Store);
}
CallKind::Diverging |
CallKind::DivergingCleanup(_) => {}
}
for &target in kind.successors() {
if let Some((ref $($mutability)* destination, target)) = *destination {
self.visit_lvalue(destination, LvalueContext::Store);
self.visit_branch(block, target);
}
cleanup.map(|t| self.visit_branch(block, t));
}
}
}

View File

@ -1649,7 +1649,7 @@ fn encode_crate_deps(rbml_w: &mut Encoder, cstore: &cstore::CStore) {
fn encode_lang_items(ecx: &EncodeContext, rbml_w: &mut Encoder) {
rbml_w.start_tag(tag_lang_items);
for (i, &opt_def_id) in ecx.tcx.lang_items.items() {
for (i, &opt_def_id) in ecx.tcx.lang_items.items().iter().enumerate() {
if let Some(def_id) = opt_def_id {
if def_id.is_local() {
rbml_w.start_tag(tag_lang_items_item);

View File

@ -32,19 +32,17 @@ impl<'tcx> CFG<'tcx> {
BasicBlock::new(node_index)
}
pub fn start_new_cleanup_block(&mut self) -> BasicBlock {
let bb = self.start_new_block();
self.block_data_mut(bb).is_cleanup = true;
bb
}
pub fn push(&mut self, block: BasicBlock, statement: Statement<'tcx>) {
debug!("push({:?}, {:?})", block, statement);
self.block_data_mut(block).statements.push(statement);
}
pub fn push_drop(&mut self, block: BasicBlock, span: Span,
kind: DropKind, lvalue: &Lvalue<'tcx>) {
self.push(block, Statement {
span: span,
kind: StatementKind::Drop(kind, lvalue.clone())
});
}
pub fn push_assign(&mut self,
block: BasicBlock,
span: Span,
@ -81,4 +79,3 @@ impl<'tcx> CFG<'tcx> {
self.block_data_mut(block).terminator = Some(terminator);
}
}

View File

@ -59,25 +59,18 @@ impl<'a,'tcx> Builder<'a,'tcx> {
let arg = unpack!(block = this.as_operand(block, arg));
block.and(Rvalue::UnaryOp(op, arg))
}
ExprKind::Box { value } => {
ExprKind::Box { value, value_extents } => {
let value = this.hir.mirror(value);
let result = this.temp(expr.ty);
// to start, malloc some memory of suitable type (thus far, uninitialized):
let rvalue = Rvalue::Box(value.ty);
this.cfg.push_assign(block, expr_span, &result, rvalue);
// schedule a shallow free of that memory, lest we unwind:
let extent = this.extent_of_innermost_scope();
this.schedule_drop(expr_span, extent, DropKind::Free, &result, value.ty);
// initialize the box contents:
let contents = result.clone().deref();
unpack!(block = this.into(&contents, block, value));
// now that the result is fully initialized, cancel the drop
// by "using" the result (which is linear):
block.and(Rvalue::Use(Operand::Consume(result)))
this.cfg.push_assign(block, expr_span, &result, Rvalue::Box(value.ty));
this.in_scope(value_extents, block, |this| {
// schedule a shallow free of that memory, lest we unwind:
this.schedule_box_free(expr_span, value_extents, &result, value.ty);
// initialize the box contents:
unpack!(block = this.into(&result.clone().deref(), block, value));
block.and(Rvalue::Use(Operand::Consume(result)))
})
}
ExprKind::Cast { source } => {
let source = unpack!(block = this.as_operand(block, source));
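
The ordering above is what makes unwinding sound: with `box EXPR` the memory is allocated before `EXPR` is evaluated into it, so a panic during evaluation must run the scheduled shallow free rather than a full drop of uninitialized contents. A hedged illustration (requires the then-unstable `box_syntax` feature on a nightly toolchain of this era):

```rust
#![feature(box_syntax)]

fn main() {
    let result = std::panic::catch_unwind(|| {
        // Allocation happens first; the panic then unwinds through the
        // not-yet-initialized box, exercising the scheduled box_free.
        let _x: Box<String> = box panic!("EXPR panicked");
    });
    assert!(result.is_err());
}
```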

View File

@ -41,7 +41,7 @@ impl<'a,'tcx> Builder<'a,'tcx> {
this.hir.span_bug(expr.span, "no temp_lifetime for expr");
}
};
this.schedule_drop(expr.span, temp_lifetime, DropKind::Deep, &temp, expr_ty);
this.schedule_drop(expr.span, temp_lifetime, &temp, expr_ty);
// Careful here not to cause an infinite cycle. If we always
// called `into`, then for lvalues like `x.f`, it would

View File

@ -188,7 +188,7 @@ impl<'a,'tcx> Builder<'a,'tcx> {
// operators like x[j] = x[i].
let rhs = unpack!(block = this.as_operand(block, rhs));
let lhs = unpack!(block = this.as_lvalue(block, lhs));
this.cfg.push_drop(block, expr_span, DropKind::Deep, &lhs);
unpack!(block = this.build_drop(block, lhs.clone()));
this.cfg.push_assign(block, expr_span, &lhs, Rvalue::Use(rhs));
block.unit()
}
@ -253,17 +253,11 @@ impl<'a,'tcx> Builder<'a,'tcx> {
this.cfg.terminate(block, Terminator::Call {
func: fun,
args: args,
kind: match (cleanup, diverges) {
(None, true) => CallKind::Diverging,
(Some(c), true) => CallKind::DivergingCleanup(c),
(None, false) => CallKind::Converging {
destination: destination.clone(),
target: success
},
(Some(c), false) => CallKind::ConvergingCleanup {
destination: destination.clone(),
targets: (success, c)
}
cleanup: cleanup,
destination: if diverges {
None
} else {
Some ((destination.clone(), success))
}
});
success.unit()

View File

@ -601,7 +601,7 @@ impl<'a,'tcx> Builder<'a,'tcx> {
ty: var_ty.clone(),
});
let index = index as u32;
self.schedule_drop(span, var_extent, DropKind::Deep, &Lvalue::Var(index), var_ty);
self.schedule_drop(span, var_extent, &Lvalue::Var(index), var_ty);
self.var_indices.insert(var_id, index);
debug!("declare_binding: index={:?}", index);

View File

@ -86,10 +86,10 @@ should go to.
*/
use build::{BlockAnd, BlockAndExtension, Builder};
use build::{BlockAnd, BlockAndExtension, Builder, CFG};
use rustc::middle::region::CodeExtent;
use rustc::middle::lang_items;
use rustc::middle::subst::Substs;
use rustc::middle::subst::{Substs, Subst, VecPerParamSpace};
use rustc::middle::ty::{self, Ty};
use rustc::mir::repr::*;
use syntax::codemap::{Span, DUMMY_SP};
@ -97,8 +97,37 @@ use syntax::parse::token::intern_and_get_ident;
pub struct Scope<'tcx> {
extent: CodeExtent,
drops: Vec<(DropKind, Span, Lvalue<'tcx>)>,
cached_block: Option<BasicBlock>,
drops: Vec<DropData<'tcx>>,
// A scope may only have one associated free, because:
// 1. We require a `free` to only be scheduled in the scope of `EXPR` in `box EXPR`;
// 2. It only makes sense to have it translated into the diverge-path.
//
// This kind of drop will be run *after* all the regular drops scheduled onto this scope,
// because drops may have dependencies on the allocated memory.
//
// This is expected to go away once `box EXPR` becomes a sugar for placement protocol and gets
// desugared in some earlier stage.
free: Option<FreeData<'tcx>>,
}
struct DropData<'tcx> {
value: Lvalue<'tcx>,
// NB: per-drop “cache” is necessary for the build_scope_drops function below.
/// The cached block for the cleanups-on-diverge path. This block contains code to run the
/// current drop and all the preceding drops (i.e. those with a lower index in the
/// scope's `drops` array)
cached_block: Option<BasicBlock>
}
struct FreeData<'tcx> {
span: Span,
/// Lvalue containing the allocated box.
value: Lvalue<'tcx>,
/// Type of the item for which the box was allocated (i.e. the `T` in `Box<T>`).
item_ty: Ty<'tcx>,
/// The cached block containing code to run the free. The block will also execute all the drops
/// in the scope.
cached_block: Option<BasicBlock>
}
#[derive(Clone, Debug)]
@ -114,7 +143,38 @@ pub struct LoopScope {
pub might_break: bool
}
impl<'tcx> Scope<'tcx> {
/// Invalidate all the cached blocks in the scope.
///
/// Should always be run for all inner scopes when a drop is pushed into some scope enclosing a
/// larger extent of code.
fn invalidate_cache(&mut self) {
for dropdata in &mut self.drops {
dropdata.cached_block = None;
}
if let Some(ref mut freedata) = self.free {
freedata.cached_block = None;
}
}
/// Returns the cached block for this scope.
///
/// Precondition: the caches must be fully filled (i.e. `diverge_cleanup` has been called) in order for
/// this method to work correctly.
fn cached_block(&self) -> Option<BasicBlock> {
if let Some(data) = self.drops.last() {
Some(data.cached_block.expect("drop cache is not filled"))
} else if let Some(ref data) = self.free {
Some(data.cached_block.expect("free cache is not filled"))
} else {
None
}
}
}
impl<'a,'tcx> Builder<'a,'tcx> {
// Adding and removing scopes
// ==========================
/// Start a loop scope, which tracks where `continue` and `break`
/// should branch to. See module comment for more details.
///
@ -146,9 +206,9 @@ impl<'a,'tcx> Builder<'a,'tcx> {
where F: FnOnce(&mut Builder<'a, 'tcx>) -> BlockAnd<R>
{
debug!("in_scope(extent={:?}, block={:?})", extent, block);
self.push_scope(extent, block);
self.push_scope(extent);
let rv = unpack!(block = f(self));
self.pop_scope(extent, block);
unpack!(block = self.pop_scope(extent, block));
debug!("in_scope: exiting extent={:?} block={:?}", extent, block);
block.and(rv)
}
@ -157,35 +217,51 @@ impl<'a,'tcx> Builder<'a,'tcx> {
/// scope and call `pop_scope` afterwards. Note that these two
/// calls must be paired; using `in_scope` as a convenience
/// wrapper may be preferable.
pub fn push_scope(&mut self, extent: CodeExtent, block: BasicBlock) {
debug!("push_scope({:?}, {:?})", extent, block);
// push scope, execute `f`, then pop scope again
pub fn push_scope(&mut self, extent: CodeExtent) {
debug!("push_scope({:?})", extent);
self.scopes.push(Scope {
extent: extent.clone(),
drops: vec![],
cached_block: None,
free: None
});
}
/// Pops a scope, which should have extent `extent`, adding any
/// drops onto the end of `block` that are needed. This must
/// match 1-to-1 with `push_scope`.
pub fn pop_scope(&mut self, extent: CodeExtent, block: BasicBlock) {
pub fn pop_scope(&mut self, extent: CodeExtent, block: BasicBlock) -> BlockAnd<()> {
debug!("pop_scope({:?}, {:?})", extent, block);
// We need to have `cached_block`s available for all the drops, so we call diverge_cleanup
// to make sure all the `cached_block`s are filled in.
self.diverge_cleanup();
let scope = self.scopes.pop().unwrap();
assert_eq!(scope.extent, extent);
// add in any drops needed on the fallthrough path (any other
// exiting paths, such as those that arise from `break`, will
// have drops already)
for (kind, span, lvalue) in scope.drops {
self.cfg.push_drop(block, span, kind, &lvalue);
}
build_scope_drops(block, &scope, &self.scopes[..], &mut self.cfg)
}
/// Branch out of `block` to `target`, exiting all scopes up to
/// and including `extent`. This will insert whatever drops are
/// needed, as well as tracking this exit for the SEME region. See
/// module comment for details.
pub fn exit_scope(&mut self,
span: Span,
extent: CodeExtent,
mut block: BasicBlock,
target: BasicBlock) {
let scope_count = 1 + self.scopes.iter().rev().position(|scope| scope.extent == extent)
.unwrap_or_else(||{
self.hir.span_bug(span, &format!("extent {:?} does not enclose", extent))
});
for (idx, ref scope) in self.scopes.iter().enumerate().rev().take(scope_count) {
unpack!(block = build_scope_drops(block, scope, &self.scopes[..idx], &mut self.cfg));
}
self.cfg.terminate(block, Terminator::Goto { target: target });
}
// Finding scopes
// ==============
/// Finds the loop scope for a given label. This is used for
/// resolving `break` and `continue`.
pub fn find_loop_scope(&mut self,
@ -208,30 +284,81 @@ impl<'a,'tcx> Builder<'a,'tcx> {
}.unwrap_or_else(|| hir.span_bug(span, "no enclosing loop scope found?"))
}
/// Branch out of `block` to `target`, exiting all scopes up to
/// and including `extent`. This will insert whatever drops are
/// needed, as well as tracking this exit for the SEME region. See
/// module comment for details.
pub fn exit_scope(&mut self,
span: Span,
extent: CodeExtent,
block: BasicBlock,
target: BasicBlock) {
let Builder { ref mut scopes, ref mut cfg, ref mut hir, .. } = *self;
let scope_count = 1 + scopes.iter().rev().position(|scope| scope.extent == extent)
.unwrap_or_else(||{
hir.span_bug(span, &format!("extent {:?} does not enclose", extent))
});
for scope in scopes.iter_mut().rev().take(scope_count) {
for &(kind, drop_span, ref lvalue) in &scope.drops {
cfg.push_drop(block, drop_span, kind, lvalue);
}
}
cfg.terminate(block, Terminator::Goto { target: target });
pub fn extent_of_innermost_scope(&self) -> CodeExtent {
self.scopes.last().map(|scope| scope.extent).unwrap()
}
pub fn extent_of_outermost_scope(&self) -> CodeExtent {
self.scopes.first().map(|scope| scope.extent).unwrap()
}
// Scheduling drops
// ================
/// Indicates that `lvalue` should be dropped on exit from
/// `extent`.
pub fn schedule_drop(&mut self,
span: Span,
extent: CodeExtent,
lvalue: &Lvalue<'tcx>,
lvalue_ty: Ty<'tcx>) {
if !self.hir.needs_drop(lvalue_ty) {
return
}
for scope in self.scopes.iter_mut().rev() {
if scope.extent == extent {
// No need to invalidate any caches here. The just-scheduled drop will branch into
// the drop that comes before it in the vector.
scope.drops.push(DropData {
value: lvalue.clone(),
cached_block: None
});
return;
} else {
// We must invalidate all the cached_blocks leading up to the scope we're
// looking for, because all of the blocks in the chain will become incorrect.
scope.invalidate_cache()
}
}
self.hir.span_bug(span,
&format!("extent {:?} not in scope to drop {:?}", extent, lvalue));
}
/// Schedule dropping of a not-yet-fully-initialised box.
///
/// This cleanup will only be translated into the unwind branch.
/// The extent should be for the `EXPR` inside `box EXPR`.
/// There may only be one “free” scheduled in any given scope.
pub fn schedule_box_free(&mut self,
span: Span,
extent: CodeExtent,
value: &Lvalue<'tcx>,
item_ty: Ty<'tcx>) {
for scope in self.scopes.iter_mut().rev() {
if scope.extent == extent {
assert!(scope.free.is_none(), "scope already has a scheduled free!");
// We also must invalidate the caches in the scope for which the free is scheduled
// because the drops must branch into the free we schedule here.
scope.invalidate_cache();
scope.free = Some(FreeData {
span: span,
value: value.clone(),
item_ty: item_ty,
cached_block: None
});
return;
} else {
// We must invalidate all the cached_blocks leading up to the scope we're looking
// for, because otherwise some/most of the blocks in the chain will become
// incorrect.
scope.invalidate_cache();
}
}
self.hir.span_bug(span,
&format!("extent {:?} not in scope to free {:?}", extent, value));
}
// Other
// =====
/// Creates a path that performs all required cleanup for unwinding.
///
/// This path terminates in Resume. Returns the start of the path.
@ -241,64 +368,40 @@ impl<'a,'tcx> Builder<'a,'tcx> {
if self.scopes.is_empty() {
return None;
}
let unit_temp = self.get_unit_temp();
let Builder { ref mut hir, ref mut cfg, ref mut scopes, .. } = *self;
let mut next_block = None;
let mut terminator = Terminator::Resume;
// Given an array of scopes, we generate these from the outermost scope to the innermost
// one. Thus for array [S0, S1, S2] with corresponding cleanup blocks [B0, B1, B2], we will
// generate B0 <- B1 <- B2 in left-to-right order. The outermost scope (B0) will always
// terminate with a Resume terminator.
for scope in self.scopes.iter_mut().filter(|s| !s.drops.is_empty()) {
if let Some(b) = scope.cached_block {
terminator = Terminator::Goto { target: b };
continue;
} else {
let new_block = self.cfg.start_new_block();
self.cfg.block_data_mut(new_block).is_cleanup = true;
self.cfg.terminate(new_block, terminator);
terminator = Terminator::Goto { target: new_block };
for &(kind, span, ref lvalue) in scope.drops.iter().rev() {
self.cfg.push_drop(new_block, span, kind, lvalue);
}
scope.cached_block = Some(new_block);
}
// generate B0 <- B1 <- B2 in left-to-right order. Control flow of the generated blocks
// always ends up at a block with the Resume terminator.
for scope in scopes.iter_mut().filter(|s| !s.drops.is_empty() || s.free.is_some()) {
next_block = Some(build_diverge_scope(hir.tcx(),
cfg,
unit_temp.clone(),
scope,
next_block));
}
// Return the innermost cached block, most likely the one we just generated.
// Note that if there are no cleanups in scope we return None.
self.scopes.iter().rev().flat_map(|b| b.cached_block).next()
scopes.iter().rev().flat_map(|x| x.cached_block()).next()
}
/// Indicates that `lvalue` should be dropped on exit from
/// `extent`.
pub fn schedule_drop(&mut self,
span: Span,
extent: CodeExtent,
kind: DropKind,
lvalue: &Lvalue<'tcx>,
lvalue_ty: Ty<'tcx>) {
if self.hir.needs_drop(lvalue_ty) {
for scope in self.scopes.iter_mut().rev() {
// We must invalidate all the cached_blocks leading up to the scope were looking
// for, because otherwise some/most of the blocks in the chain might become
// incorrect (i.e. they still are pointing at old cached_block).
scope.cached_block = None;
if scope.extent == extent {
scope.drops.push((kind, span, lvalue.clone()));
return;
}
}
self.hir.span_bug(span,
&format!("extent {:?} not in scope to drop {:?}", extent, lvalue));
}
/// Utility function for *non*-scope code to build their own drops
pub fn build_drop(&mut self, block: BasicBlock, value: Lvalue<'tcx>) -> BlockAnd<()> {
let next_target = self.cfg.start_new_block();
let diverge_target = self.diverge_cleanup();
self.cfg.terminate(block, Terminator::Drop {
value: value,
target: next_target,
unwind: diverge_target,
});
next_target.unit()
}
pub fn extent_of_innermost_scope(&self) -> CodeExtent {
self.scopes.last().map(|scope| scope.extent).unwrap()
}
pub fn extent_of_outermost_scope(&self) -> CodeExtent {
self.scopes.first().map(|scope| scope.extent).unwrap()
}
// Panicking
// =========
// FIXME: should be moved into their own module
pub fn panic_bounds_check(&mut self,
block: BasicBlock,
index: Operand<'tcx>,
@ -328,10 +431,8 @@ impl<'a,'tcx> Builder<'a,'tcx> {
self.cfg.terminate(block, Terminator::Call {
func: Operand::Constant(func),
args: vec![Operand::Consume(tuple_ref), index, len],
kind: match cleanup {
None => CallKind::Diverging,
Some(c) => CallKind::DivergingCleanup(c)
}
destination: None,
cleanup: cleanup,
});
}
@ -368,10 +469,8 @@ impl<'a,'tcx> Builder<'a,'tcx> {
self.cfg.terminate(block, Terminator::Call {
func: Operand::Constant(func),
args: vec![Operand::Consume(tuple_ref)],
kind: match cleanup {
None => CallKind::Diverging,
Some(c) => CallKind::DivergingCleanup(c)
}
cleanup: cleanup,
destination: None,
});
}
@ -405,4 +504,135 @@ impl<'a,'tcx> Builder<'a,'tcx> {
literal: self.hir.usize_literal(span_lines.line)
})
}
}
/// Builds drops for pop_scope and exit_scope.
fn build_scope_drops<'tcx>(mut block: BasicBlock,
scope: &Scope<'tcx>,
earlier_scopes: &[Scope<'tcx>],
cfg: &mut CFG<'tcx>)
-> BlockAnd<()> {
let mut iter = scope.drops.iter().rev().peekable();
while let Some(drop_data) = iter.next() {
// Try to find the next block with its cached block for us to diverge into in case the
// drop panics.
let on_diverge = iter.peek().iter().flat_map(|dd| dd.cached_block.into_iter()).next();
// If there are no `cached_block`s within the current scope, we must look for one in the
// enclosing scope.
let on_diverge = on_diverge.or_else(||{
earlier_scopes.iter().rev().flat_map(|s| s.cached_block()).next()
});
let next = cfg.start_new_block();
cfg.terminate(block, Terminator::Drop {
value: drop_data.value.clone(),
target: next,
unwind: on_diverge
});
block = next;
}
block.unit()
}
fn build_diverge_scope<'tcx>(tcx: &ty::ctxt<'tcx>,
cfg: &mut CFG<'tcx>,
unit_temp: Lvalue<'tcx>,
scope: &mut Scope<'tcx>,
target: Option<BasicBlock>)
-> BasicBlock {
debug_assert!(!scope.drops.is_empty() || scope.free.is_some());
// First, we build the drops, iterating the drops array in reverse. We do that so that as soon
// as we find a `cached_block`, we know that we're finished and don't need to do anything else.
let mut previous = None;
let mut last_drop_block = None;
for drop_data in scope.drops.iter_mut().rev() {
if let Some(cached_block) = drop_data.cached_block {
if let Some((previous_block, previous_value)) = previous {
cfg.terminate(previous_block, Terminator::Drop {
value: previous_value,
target: cached_block,
unwind: None
});
return last_drop_block.unwrap();
} else {
return cached_block;
}
} else {
let block = cfg.start_new_cleanup_block();
drop_data.cached_block = Some(block);
if let Some((previous_block, previous_value)) = previous {
cfg.terminate(previous_block, Terminator::Drop {
value: previous_value,
target: block,
unwind: None
});
} else {
last_drop_block = Some(block);
}
previous = Some((block, drop_data.value.clone()));
}
}
// Prepare the end target for this chain.
let mut target = target.unwrap_or_else(||{
let b = cfg.start_new_cleanup_block();
cfg.terminate(b, Terminator::Resume);
b
});
// Then, build the free branching into the prepared target.
if let Some(ref mut free_data) = scope.free {
target = if let Some(cached_block) = free_data.cached_block {
cached_block
} else {
let t = build_free(tcx, cfg, unit_temp, free_data, target);
free_data.cached_block = Some(t);
t
}
};
if let Some((previous_block, previous_value)) = previous {
// Finally, branch into that just-built `target` from the `previous_block`.
cfg.terminate(previous_block, Terminator::Drop {
value: previous_value,
target: target,
unwind: None
});
last_drop_block.unwrap()
} else {
// If `previous.is_none()`, there were no drops in this scope, so we just return
// the target.
target
}
}
fn build_free<'tcx>(tcx: &ty::ctxt<'tcx>,
cfg: &mut CFG<'tcx>,
unit_temp: Lvalue<'tcx>,
data: &FreeData<'tcx>,
target: BasicBlock)
-> BasicBlock {
let free_func = tcx.lang_items.box_free_fn()
.expect("box_free language item is missing");
let substs = tcx.mk_substs(Substs::new(
VecPerParamSpace::new(vec![], vec![], vec![data.item_ty]),
VecPerParamSpace::new(vec![], vec![], vec![])
));
let block = cfg.start_new_cleanup_block();
cfg.terminate(block, Terminator::Call {
func: Operand::Constant(Constant {
span: data.span,
ty: tcx.lookup_item_type(free_func).ty.subst(tcx, substs),
literal: Literal::Item {
def_id: free_func,
kind: ItemKind::Function,
substs: substs
}
}),
args: vec![Operand::Consume(data.value.clone())],
destination: Some((unit_temp, target)),
cleanup: None
});
block
}
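
The invalidation discipline is the crux of the new caching scheme: each `DropData` caches the cleanup block that runs it together with every earlier drop in its scope, so scheduling a drop (or a free) into an enclosing scope makes all cached chains in the inner scopes stale. A stripped-down sketch of that walk (types simplified; an assumption of this sketch):

```rust
struct DropData { cached_block: Option<usize> }
struct Scope { extent: u32, drops: Vec<DropData> }

fn schedule_drop(scopes: &mut [Scope], extent: u32) {
    for scope in scopes.iter_mut().rev() {
        if scope.extent == extent {
            // The new drop chains into the drop scheduled before it, so
            // existing caches in this scope remain valid.
            scope.drops.push(DropData { cached_block: None });
            return;
        }
        // Inner scopes branch into blocks that are now stale: clear them.
        for drop in &mut scope.drops {
            drop.cached_block = None;
        }
    }
    panic!("extent not in scope");
}
```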

View File

@ -42,16 +42,16 @@ impl<'a,'tcx> Builder<'a,'tcx> {
None => {
let (extent, _) = stmt_lists.pop().unwrap();
if let Some(extent) = extent {
this.pop_scope(extent, block);
unpack!(block = this.pop_scope(extent, block));
}
continue
}
};
let Stmt { span, kind } = this.hir.mirror(stmt);
let Stmt { span: _, kind } = this.hir.mirror(stmt);
match kind {
StmtKind::Let { remainder_scope, init_scope, pattern, initializer, stmts } => {
this.push_scope(remainder_scope, block);
this.push_scope(remainder_scope);
stmt_lists.push((Some(remainder_scope), stmts.into_iter()));
unpack!(block = this.in_scope(init_scope, block, move |this| {
// FIXME #30046 ^~~~
@ -72,7 +72,7 @@ impl<'a,'tcx> Builder<'a,'tcx> {
let expr = this.hir.mirror(expr);
let temp = this.temp(expr.ty.clone());
unpack!(block = this.into(&temp, block, expr));
this.cfg.push_drop(block, span, DropKind::Deep, &temp);
unpack!(block = this.build_drop(block, temp));
block.unit()
}));
}

View File

@ -375,7 +375,10 @@ impl<'tcx> Mirror<'tcx> for &'tcx hir::Expr {
hir::ExprType(ref source, _) =>
return source.make_mirror(cx),
hir::ExprBox(ref value) =>
ExprKind::Box { value: value.to_ref() },
ExprKind::Box {
value: value.to_ref(),
value_extents: cx.tcx.region_maps.node_extent(value.id)
},
hir::ExprVec(ref fields) =>
ExprKind::Vec { fields: fields.to_ref() },
hir::ExprTup(ref fields) =>

View File

@ -123,6 +123,7 @@ pub enum ExprKind<'tcx> {
},
Box {
value: ExprRef<'tcx>,
value_extents: CodeExtent,
},
Call {
ty: ty::Ty<'tcx>,

View File

@ -69,9 +69,6 @@ impl<'a, 'tcx> EraseRegions<'a, 'tcx> {
self.erase_regions_lvalue(lvalue);
self.erase_regions_rvalue(rvalue);
}
StatementKind::Drop(_, ref mut lvalue) => {
self.erase_regions_lvalue(lvalue);
}
}
}
@ -93,8 +90,11 @@ impl<'a, 'tcx> EraseRegions<'a, 'tcx> {
self.erase_regions_lvalue(discr);
*switch_ty = self.tcx.erase_regions(switch_ty);
},
Terminator::Call { ref mut func, ref mut args, ref mut kind } => {
if let Some(destination) = kind.destination_mut() {
Terminator::Drop { ref mut value, .. } => {
self.erase_regions_lvalue(value);
}
Terminator::Call { ref mut func, ref mut args, ref mut destination, .. } => {
if let Some((ref mut destination, _)) = *destination {
self.erase_regions_lvalue(destination);
}
self.erase_regions_operand(func);

View File

@ -29,7 +29,7 @@ impl SimplifyCfg {
let mut worklist = vec![START_BLOCK];
while let Some(bb) = worklist.pop() {
for succ in mir.basic_block_data(bb).terminator().successors() {
for succ in mir.basic_block_data(bb).terminator().successors().iter() {
if !seen[succ.index()] {
seen[succ.index()] = true;
worklist.push(*succ);

View File

@ -18,10 +18,11 @@ use trans::base;
use trans::build;
use trans::common::{self, Block, LandingPad};
use trans::debuginfo::DebugLoc;
use trans::Disr;
use trans::foreign;
use trans::glue;
use trans::type_of;
use trans::type_::Type;
use trans::Disr;
use super::MirContext;
use super::operand::OperandValue::{FatPtr, Immediate, Ref};
@ -94,7 +95,39 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
base::build_return_block(bcx.fcx, bcx, return_ty, DebugLoc::None);
}
mir::Terminator::Call { ref func, ref args, ref kind } => {
mir::Terminator::Drop { ref value, target, unwind } => {
let lvalue = self.trans_lvalue(bcx, value);
let ty = lvalue.ty.to_ty(bcx.tcx());
// Double-check that the type actually needs dropping
if !glue::type_needs_drop(bcx.tcx(), ty) {
build::Br(bcx, self.llblock(target), DebugLoc::None);
return;
}
let drop_fn = glue::get_drop_glue(bcx.ccx(), ty);
let drop_ty = glue::get_drop_glue_type(bcx.ccx(), ty);
let llvalue = if drop_ty != ty {
build::PointerCast(bcx, lvalue.llval,
type_of::type_of(bcx.ccx(), drop_ty).ptr_to())
} else {
lvalue.llval
};
if let Some(unwind) = unwind {
let uwbcx = self.bcx(unwind);
let unwind = self.make_landing_pad(uwbcx);
build::Invoke(bcx,
drop_fn,
&[llvalue],
self.llblock(target),
unwind.llbb,
None,
DebugLoc::None);
} else {
build::Call(bcx, drop_fn, &[llvalue], None, DebugLoc::None);
build::Br(bcx, self.llblock(target), DebugLoc::None);
}
}
mir::Terminator::Call { ref func, ref args, ref destination, ref cleanup } => {
// Create the callee. This will always be a fn ptr and hence a kind of scalar.
let callee = self.trans_operand(bcx, func);
let attrs = attributes::from_fn_type(bcx.ccx(), callee.ty);
@ -115,7 +148,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
};
// Prepare the return value destination
let (ret_dest_ty, must_copy_dest) = if let Some(d) = kind.destination() {
let (ret_dest_ty, must_copy_dest) = if let Some((ref d, _)) = *destination {
let dest = self.trans_lvalue(bcx, d);
let ret_ty = dest.ty.to_ty(bcx.tcx());
if !is_foreign && type_of::return_uses_outptr(bcx.ccx(), ret_ty) {
@ -144,9 +177,9 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
}
// Many different ways to call a function are handled here
match (is_foreign, base::avoid_invoke(bcx), kind) {
match (is_foreign, base::avoid_invoke(bcx), cleanup, destination) {
// The two cases below are the only ones to use LLVM's `invoke`.
(false, false, &mir::CallKind::DivergingCleanup(cleanup)) => {
(false, false, &Some(cleanup), &None) => {
let cleanup = self.bcx(cleanup);
let landingpad = self.make_landing_pad(cleanup);
let unreachable_blk = self.unreachable_block();
@ -158,14 +191,13 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
Some(attrs),
debugloc);
},
(false, false, &mir::CallKind::ConvergingCleanup { ref targets, .. }) => {
let cleanup = self.bcx(targets.1);
(false, false, &Some(cleanup), &Some((_, success))) => {
let cleanup = self.bcx(cleanup);
let landingpad = self.make_landing_pad(cleanup);
let (target, postinvoke) = if must_copy_dest {
(bcx.fcx.new_block("", None),
Some(self.bcx(targets.0)))
(bcx.fcx.new_block("", None), Some(self.bcx(success)))
} else {
(self.bcx(targets.0), None)
(self.bcx(success), None)
};
let invokeret = build::Invoke(bcx,
callee.immediate(),
@ -205,19 +237,11 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
build::Br(target, postinvoketarget.llbb, debugloc);
}
},
(false, _, &mir::CallKind::DivergingCleanup(_)) |
(false, _, &mir::CallKind::Diverging) => {
(false, _, _, &None) => {
build::Call(bcx, callee.immediate(), &llargs[..], Some(attrs), debugloc);
build::Unreachable(bcx);
}
(false, _, k@&mir::CallKind::ConvergingCleanup { .. }) |
(false, _, k@&mir::CallKind::Converging { .. }) => {
// FIXME: Bug #20046
let target = match *k {
mir::CallKind::ConvergingCleanup { targets, .. } => targets.0,
mir::CallKind::Converging { target, .. } => target,
_ => unreachable!()
};
(false, _, _, &Some((_, target))) => {
let llret = build::Call(bcx,
callee.immediate(),
&llargs[..],
@ -231,7 +255,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
build::Br(bcx, self.llblock(target), debugloc);
}
// Foreign functions
(true, _, k) => {
(true, _, _, destination) => {
let (dest, _) = ret_dest_ty
.expect("return destination is not set");
bcx = foreign::trans_native_call(bcx,
@ -241,13 +265,9 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
&llargs[..],
arg_tys,
debugloc);
match *k {
mir::CallKind::ConvergingCleanup { targets, .. } =>
build::Br(bcx, self.llblock(targets.0), debugloc),
mir::CallKind::Converging { target, .. } =>
build::Br(bcx, self.llblock(target), debugloc),
_ => ()
};
if let Some((_, target)) = *destination {
build::Br(bcx, self.llblock(target), debugloc);
}
},
}
}
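
In short, the `Drop` translation above picks between LLVM's two call forms: an `unwind` edge forces an `invoke` so that a panicking destructor lands in the cleanup pad, while a plain `call` plus an unconditional branch to `target` suffices otherwise. A schematic sketch, with strings standing in for the actual builder calls (an assumption of this sketch):

```rust
fn emitted_ir(target: &str, unwind: Option<&str>) -> String {
    match unwind {
        // A panic in the drop glue lands in the cleanup landing pad.
        Some(pad) => format!("invoke @drop_glue(%value) to label %{} unwind label %{}", target, pad),
        // No unwind successor: a plain call, then fall through to target.
        None => format!("call @drop_glue(%value)\nbr label %{}", target),
    }
}
```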

View File

@ -82,13 +82,13 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
constant: &mir::Constant<'tcx>)
-> OperandRef<'tcx>
{
let ty = bcx.monomorphize(&constant.ty);
match constant.literal {
mir::Literal::Item { def_id, kind, substs } => {
let substs = bcx.tcx().mk_substs(bcx.monomorphize(&substs));
self.trans_item_ref(bcx, ty, kind, substs, def_id)
self.trans_item_ref(bcx, constant.ty, kind, substs, def_id)
}
mir::Literal::Value { ref value } => {
let ty = bcx.monomorphize(&constant.ty);
self.trans_constval(bcx, value, ty)
}
}

View File

@ -65,7 +65,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
assert!(lvalue.llextra != ptr::null_mut());
lvalue.llextra
}
_ => bcx.sess().bug("unexpected type in get_base_and_len"),
_ => bcx.sess().bug("unexpected type in lvalue_len"),
}
}

View File

@ -195,8 +195,8 @@ fn arg_value_refs<'bcx, 'tcx>(bcx: Block<'bcx, 'tcx>,
mod analyze;
mod block;
mod constant;
mod lvalue;
mod rvalue;
mod operand;
mod statement;
mod did;
mod lvalue;
mod operand;
mod rvalue;
mod statement;

View File

@ -8,11 +8,8 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use rustc::middle::ty::LvaluePreference;
use rustc::mir::repr as mir;
use trans::common::Block;
use trans::debuginfo::DebugLoc;
use trans::glue;
use super::MirContext;
use super::TempRef;
@ -51,20 +48,6 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
}
}
}
mir::StatementKind::Drop(mir::DropKind::Deep, ref lvalue) => {
let tr_lvalue = self.trans_lvalue(bcx, lvalue);
let ty = tr_lvalue.ty.to_ty(bcx.tcx());
glue::drop_ty(bcx, tr_lvalue.llval, ty, DebugLoc::None)
}
mir::StatementKind::Drop(mir::DropKind::Free, ref lvalue) => {
let tr_lvalue = self.trans_lvalue(bcx, lvalue);
let ty = tr_lvalue.ty.to_ty(bcx.tcx());
let content_ty = ty.builtin_deref(true, LvaluePreference::NoPreference);
let content_ty = content_ty.unwrap().ty;
glue::trans_exchange_free_ty(bcx, tr_lvalue.llval, content_ty, DebugLoc::None)
}
}
}
}

View File

@ -0,0 +1,36 @@
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(rustc_attrs)]
// ignore-msvc: FIXME(#30941)
// error-pattern:panic 1
// error-pattern:drop 2
use std::io::{self, Write};
struct Droppable(u32);
impl Drop for Droppable {
fn drop(&mut self) {
if self.0 == 1 {
panic!("panic 1");
} else {
write!(io::stderr(), "drop {}", self.0);
}
}
}
#[rustc_mir]
fn mir() {
let x = Droppable(2);
let y = Droppable(1);
}
fn main() {
mir();
}
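
For the record, the `error-pattern` directives above pin down the intended semantics: locals drop in reverse declaration order, so `y` (`Droppable(1)`) is dropped first and panics, and unwinding must still execute the drop for `x` (`Droppable(2)`) — i.e. the unwind edge of the new `Drop` terminator is taken and the remaining cleanups run.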