Convert Drop statement into terminator

The structure of the old translator, as well as MIR, assumed that drop glue cannot possibly panic and
translated the drops accordingly. However, in the presence of `Drop::drop` this assumption can be
trivially shown to be untrue. As such, Rust code like the following would never print the number 2:

```rust
struct Droppable(u32);
impl Drop for Droppable {
    fn drop(&mut self) {
        if self.0 == 1 { panic!("Droppable(1)") } else { println!("{}", self.0) }
    }
}
fn main() {
    let x = Droppable(2);
    let y = Droppable(1);
}
```

While this behaviour is allowed by the language rules (drops are not guaranteed to run), it is
very counter-intuitive. We fix this in MIR by allowing `Drop` to have a target to take on
divergence and by connecting the drops in such a way that the leftover drops are executed when some
drop unwinds.
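
For illustration only (not part of this commit), here is a self-contained program, using today's
`std::panic::catch_unwind`, that demonstrates the behaviour the unwind edge is meant to produce:
a panic in one destructor no longer skips the remaining drops.

```rust
use std::panic;

struct Droppable(u32);

impl Drop for Droppable {
    fn drop(&mut self) {
        if self.0 == 1 { panic!("Droppable(1)") } else { println!("{}", self.0) }
    }
}

fn main() {
    // `y` is dropped first and panics; with an unwind edge on its drop, the
    // cleanup path still drops `x`, so "2" is printed while the panic keeps
    // propagating out of the closure.
    let result = panic::catch_unwind(|| {
        let _x = Droppable(2);
        let _y = Droppable(1);
    });
    assert!(result.is_err());
}
```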

Note that this commit still does not implement the translator part of the changes necessary for the
grand scheme of things to fully work, so the actual observed behaviour does not change yet. Coming
soon™.

See #14875.
Simonas Kazlauskas 2016-01-30 00:18:47 +02:00
parent 65dd5e6a84
commit 98265d3385
9 changed files with 314 additions and 166 deletions


@@ -262,6 +262,13 @@ pub enum Terminator<'tcx> {
/// `END_BLOCK`.
Return,
/// Drop the Lvalue
Drop {
value: Lvalue<'tcx>,
target: BasicBlock,
unwind: Option<BasicBlock>
},
/// Block ends with a call of a converging function
Call {
/// The function that’s being called
@@ -290,6 +297,8 @@ impl<'tcx> Terminator<'tcx> {
slice::ref_slice(t).into_cow(),
Call { destination: None, cleanup: Some(ref c), .. } => slice::ref_slice(c).into_cow(),
Call { destination: None, cleanup: None, .. } => (&[]).into_cow(),
Drop { target, unwind: Some(unwind), .. } => vec![target, unwind].into_cow(),
Drop { ref target, .. } => slice::ref_slice(target).into_cow(),
}
}
@@ -308,6 +317,8 @@ impl<'tcx> Terminator<'tcx> {
Call { destination: Some((_, ref mut t)), cleanup: None, .. } => vec![t],
Call { destination: None, cleanup: Some(ref mut c), .. } => vec![c],
Call { destination: None, cleanup: None, .. } => vec![],
Drop { ref mut target, unwind: Some(ref mut unwind), .. } => vec![target, unwind],
Drop { ref mut target, .. } => vec![target]
}
}
}
@@ -374,6 +385,7 @@ impl<'tcx> Terminator<'tcx> {
SwitchInt { discr: ref lv, .. } => write!(fmt, "switchInt({:?})", lv),
Return => write!(fmt, "return"),
Resume => write!(fmt, "resume"),
Drop { ref value, .. } => write!(fmt, "drop({:?})", value),
Call { ref func, ref args, ref destination, .. } => {
if let Some((ref destination, _)) = *destination {
try!(write!(fmt, "{:?} = ", destination));
@@ -418,6 +430,8 @@ impl<'tcx> Terminator<'tcx> {
Call { destination: Some(_), cleanup: None, .. } => vec!["return".into_cow()],
Call { destination: None, cleanup: Some(_), .. } => vec!["unwind".into_cow()],
Call { destination: None, cleanup: None, .. } => vec![],
Drop { unwind: None, .. } => vec!["return".into_cow()],
Drop { .. } => vec!["return".into_cow(), "unwind".into_cow()],
}
}
}
@@ -435,15 +449,13 @@ pub struct Statement<'tcx> {
#[derive(Clone, Debug, RustcEncodable, RustcDecodable)]
pub enum StatementKind<'tcx> {
Assign(Lvalue<'tcx>, Rvalue<'tcx>),
Drop(Lvalue<'tcx>),
}
impl<'tcx> Debug for Statement<'tcx> {
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
use self::StatementKind::*;
match self.kind {
Assign(ref lv, ref rv) => write!(fmt, "{:?} = {:?}", lv, rv),
Drop(ref lv) => write!(fmt, "drop {:?}", lv),
Assign(ref lv, ref rv) => write!(fmt, "{:?} = {:?}", lv, rv)
}
}
}


@@ -124,9 +124,6 @@ macro_rules! make_mir_visitor {
ref $($mutability)* rvalue) => {
self.visit_assign(block, lvalue, rvalue);
}
StatementKind::Drop(ref $($mutability)* lvalue) => {
self.visit_lvalue(lvalue, LvalueContext::Drop);
}
}
}
@@ -177,10 +174,16 @@ macro_rules! make_mir_visitor {
Terminator::Return => {
}
Terminator::Drop { ref $($mutability)* value, target, unwind } => {
self.visit_lvalue(value, LvalueContext::Drop);
self.visit_branch(block, target);
unwind.map(|t| self.visit_branch(block, t));
}
Terminator::Call { ref $($mutability)* func,
ref $($mutability)* args,
ref $($mutability)* destination,
ref $($mutability)* cleanup } => {
cleanup } => {
self.visit_operand(func);
for arg in args {
self.visit_operand(arg);


@@ -43,13 +43,6 @@ impl<'tcx> CFG<'tcx> {
self.block_data_mut(block).statements.push(statement);
}
pub fn push_drop(&mut self, block: BasicBlock, span: Span, lvalue: &Lvalue<'tcx>) {
self.push(block, Statement {
span: span,
kind: StatementKind::Drop(lvalue.clone())
});
}
pub fn push_assign(&mut self,
block: BasicBlock,
span: Span,


@@ -188,7 +188,7 @@ impl<'a,'tcx> Builder<'a,'tcx> {
// operators like x[j] = x[i].
let rhs = unpack!(block = this.as_operand(block, rhs));
let lhs = unpack!(block = this.as_lvalue(block, lhs));
this.cfg.push_drop(block, expr_span, &lhs);
unpack!(block = this.build_drop(block, lhs.clone()));
this.cfg.push_assign(block, expr_span, &lhs, Rvalue::Use(rhs));
block.unit()
}


@@ -86,10 +86,10 @@ should go to.
*/
use build::{BlockAnd, BlockAndExtension, Builder};
use build::{BlockAnd, BlockAndExtension, Builder, CFG};
use rustc::middle::region::CodeExtent;
use rustc::middle::lang_items;
use rustc::middle::subst::{Substs, VecPerParamSpace};
use rustc::middle::subst::{Substs, Subst, VecPerParamSpace};
use rustc::middle::ty::{self, Ty};
use rustc::mir::repr::*;
use syntax::codemap::{Span, DUMMY_SP};
@@ -97,9 +97,30 @@ use syntax::parse::token::intern_and_get_ident;
pub struct Scope<'tcx> {
extent: CodeExtent,
drops: Vec<(Span, Lvalue<'tcx>)>,
frees: Vec<(Span, Lvalue<'tcx>, Ty<'tcx>)>,
cached_block: Option<BasicBlock>,
drops: Vec<DropData<'tcx>>,
// A scope may only have one associated free, because:
// 1. We require a `free` to only be scheduled in the scope of `EXPR` in `box EXPR`;
// 2. It only makes sense to have it translated into the diverge-path.
free: Option<FreeData<'tcx>>,
}
struct DropData<'tcx> {
value: Lvalue<'tcx>,
/// The cached block for the cleanups-on-diverge path. This block contains code to run the
/// current drop and all the preceding drops (i.e. those having a lower index in the
/// Scope’s drops array).
cached_block: Option<BasicBlock>
}
struct FreeData<'tcx> {
span: Span,
/// Lvalue containing the allocated box.
value: Lvalue<'tcx>,
/// The type of the item the box was allocated for (i.e. the T in Box<T>).
item_ty: Ty<'tcx>,
/// The cached block containing code to run the free. The block will also execute all the drops
/// in the scope.
cached_block: Option<BasicBlock>
}
#[derive(Clone, Debug)]
@@ -115,7 +136,36 @@ pub struct LoopScope {
pub might_break: bool
}
impl<'tcx> Scope<'tcx> {
/// Invalidate cached blocks in the scope. Should always be run for all inner scopes when a
/// drop is pushed into some scope enclosing a larger extent of code.
fn invalidate_cache(&mut self, only_free: bool) {
if let Some(ref mut freedata) = self.free {
freedata.cached_block = None;
}
if !only_free {
for dropdata in &mut self.drops {
dropdata.cached_block = None;
}
}
}
/// Returns the cached block for this scope.
///
/// Precondition: the caches must be fully filled (i.e. diverge_cleanup is called) in order for
/// this method to work correctly.
fn cached_block(&self) -> Option<BasicBlock> {
if let Some(ref free_data) = self.free {
free_data.cached_block
} else {
self.drops.last().iter().flat_map(|dd| dd.cached_block).next()
}
}
}
impl<'a,'tcx> Builder<'a,'tcx> {
// Adding and removing scopes
// ==========================
/// Start a loop scope, which tracks where `continue` and `break`
/// should branch to. See module comment for more details.
///
@@ -147,9 +197,9 @@ impl<'a,'tcx> Builder<'a,'tcx> {
where F: FnOnce(&mut Builder<'a, 'tcx>) -> BlockAnd<R>
{
debug!("in_scope(extent={:?}, block={:?})", extent, block);
self.push_scope(extent, block);
self.push_scope(extent);
let rv = unpack!(block = f(self));
self.pop_scope(extent, block);
unpack!(block = self.pop_scope(extent, block));
debug!("in_scope: exiting extent={:?} block={:?}", extent, block);
block.and(rv)
}
@@ -158,36 +208,51 @@ impl<'a,'tcx> Builder<'a,'tcx> {
/// scope and call `pop_scope` afterwards. Note that these two
/// calls must be paired; using `in_scope` as a convenience
/// wrapper may be preferable.
pub fn push_scope(&mut self, extent: CodeExtent, block: BasicBlock) {
debug!("push_scope({:?}, {:?})", extent, block);
// push scope, execute `f`, then pop scope again
pub fn push_scope(&mut self, extent: CodeExtent) {
debug!("push_scope({:?})", extent);
self.scopes.push(Scope {
extent: extent.clone(),
drops: vec![],
frees: vec![],
cached_block: None,
free: None
});
}
/// Pops a scope, which should have extent `extent`, adding any
/// drops onto the end of `block` that are needed. This must
/// match 1-to-1 with `push_scope`.
pub fn pop_scope(&mut self, extent: CodeExtent, block: BasicBlock) {
pub fn pop_scope(&mut self, extent: CodeExtent, block: BasicBlock) -> BlockAnd<()> {
debug!("pop_scope({:?}, {:?})", extent, block);
// We need to have `cached_block`s available for all the drops, so we call diverge_cleanup
// to make sure all the `cached_block`s are filled in.
self.diverge_cleanup();
let scope = self.scopes.pop().unwrap();
assert_eq!(scope.extent, extent);
// add in any drops needed on the fallthrough path (any other
// exiting paths, such as those that arise from `break`, will
// have drops already)
for (span, lvalue) in scope.drops {
self.cfg.push_drop(block, span, &lvalue);
}
build_scope_drops(block, &scope, &self.scopes[..], &mut self.cfg)
}
/// Branch out of `block` to `target`, exiting all scopes up to
/// and including `extent`. This will insert whatever drops are
/// needed, as well as tracking this exit for the SEME region. See
/// module comment for details.
pub fn exit_scope(&mut self,
span: Span,
extent: CodeExtent,
mut block: BasicBlock,
target: BasicBlock) {
let scope_count = 1 + self.scopes.iter().rev().position(|scope| scope.extent == extent)
.unwrap_or_else(||{
self.hir.span_bug(span, &format!("extent {:?} does not enclose", extent))
});
for (idx, ref scope) in self.scopes.iter().enumerate().rev().take(scope_count) {
unpack!(block = build_scope_drops(block, scope, &self.scopes[..idx], &mut self.cfg));
}
self.cfg.terminate(block, Terminator::Goto { target: target });
}
// Finding scopes
// ==============
/// Finds the loop scope for a given label. This is used for
/// resolving `break` and `continue`.
pub fn find_loop_scope(&mut self,
@@ -210,30 +275,79 @@ impl<'a,'tcx> Builder<'a,'tcx> {
}.unwrap_or_else(|| hir.span_bug(span, "no enclosing loop scope found?"))
}
/// Branch out of `block` to `target`, exiting all scopes up to
/// and including `extent`. This will insert whatever drops are
/// needed, as well as tracking this exit for the SEME region. See
/// module comment for details.
pub fn exit_scope(&mut self,
span: Span,
extent: CodeExtent,
block: BasicBlock,
target: BasicBlock) {
let Builder { ref mut scopes, ref mut cfg, ref mut hir, .. } = *self;
let scope_count = 1 + scopes.iter().rev().position(|scope| scope.extent == extent)
.unwrap_or_else(||{
hir.span_bug(span, &format!("extent {:?} does not enclose", extent))
});
for scope in scopes.iter_mut().rev().take(scope_count) {
for &(drop_span, ref lvalue) in &scope.drops {
cfg.push_drop(block, drop_span, lvalue);
}
}
cfg.terminate(block, Terminator::Goto { target: target });
pub fn extent_of_innermost_scope(&self) -> CodeExtent {
self.scopes.last().map(|scope| scope.extent).unwrap()
}
pub fn extent_of_outermost_scope(&self) -> CodeExtent {
self.scopes.first().map(|scope| scope.extent).unwrap()
}
// Scheduling drops
// ================
/// Indicates that `lvalue` should be dropped on exit from
/// `extent`.
pub fn schedule_drop(&mut self,
span: Span,
extent: CodeExtent,
lvalue: &Lvalue<'tcx>,
lvalue_ty: Ty<'tcx>) {
if !self.hir.needs_drop(lvalue_ty) {
return
}
for scope in self.scopes.iter_mut().rev() {
if scope.extent == extent {
// We only invalidate the cached block of the free here; the other drops’ cached blocks do
// not become invalid (this drop will branch into them).
scope.invalidate_cache(true);
scope.drops.push(DropData {
value: lvalue.clone(),
cached_block: None
});
return;
} else {
// We must invalidate all the cached_blocks leading up to the scope we’re
// looking for, because all of the blocks in the chain become incorrect.
scope.invalidate_cache(false)
}
}
self.hir.span_bug(span,
&format!("extent {:?} not in scope to drop {:?}", extent, lvalue));
}
/// Schedule dropping of a not-yet-fully-initialised box.
///
/// This cleanup will only be translated into the unwind branch.
/// The extent should be for the `EXPR` inside `box EXPR`.
/// There may only be one “free” scheduled in any given scope.
pub fn schedule_box_free(&mut self,
span: Span,
extent: CodeExtent,
value: &Lvalue<'tcx>,
item_ty: Ty<'tcx>) {
for scope in self.scopes.iter_mut().rev() {
if scope.extent == extent {
assert!(scope.free.is_none(), "scope already has a scheduled free!");
scope.free = Some(FreeData {
span: span,
value: value.clone(),
item_ty: item_ty,
cached_block: None
});
return;
} else {
// We must invalidate all the cached_blocks leading up to the scope we’re looking
// for, because otherwise some/most of the blocks in the chain might become
// incorrect.
scope.invalidate_cache(false);
}
}
self.hir.span_bug(span,
&format!("extent {:?} not in scope to free {:?}", extent, value));
}
// Other
// =====
/// Creates a path that performs all required cleanup for unwinding.
///
/// This path terminates in Resume. Returns the start of the path.
@@ -244,114 +358,113 @@ impl<'a,'tcx> Builder<'a,'tcx> {
return None;
}
let tcx = self.hir.tcx();
let unit_tmp = self.get_unit_temp();
let mut terminator = Terminator::Resume;
let Builder { ref mut scopes, ref mut cfg, ref mut hir, .. } = *self;
let tcx = hir.tcx();
let mut next_block = None;
// Given an array of scopes, we generate these from the outermost scope to the innermost
// one. Thus for array [S0, S1, S2] with corresponding cleanup blocks [B0, B1, B2], we will
// generate B0 <- B1 <- B2 in left-to-right order. The outermost scope (B0) will always
// terminate with a Resume terminator.
for scope in self.scopes.iter_mut().filter(|s| !s.drops.is_empty() || !s.frees.is_empty()) {
if let Some(b) = scope.cached_block {
terminator = Terminator::Goto { target: b };
// generate B0 <- B1 <- B2 in left-to-right order. Control flow of the generated blocks
// always ends up at a block with the Resume terminator.
//
// Similarly to the scopes, we translate drops so:
// * Scheduled free drop is executed first;
// * Drops are executed after the free drop in the decreasing order (decreasing as
// from higher index in drops array to lower index);
//
// NB: We do the building backwards here. We will first produce a block containing the
// Resume terminator (which is executed last for any given chain of cleanups) and then go
// on building the drops from the outermost one to the innermost one. Similar note applies
// to the drops within the scope too.
{
let iter = scopes.iter_mut().filter(|s| !s.drops.is_empty() || s.free.is_some());
for scope in iter {
// Try using the cached free drop if any…
if let Some(FreeData { cached_block: Some(cached_block), .. }) = scope.free {
next_block = Some(cached_block);
continue;
} else {
let mut new_block = self.cfg.start_new_cleanup_block();
self.cfg.terminate(new_block, terminator);
terminator = Terminator::Goto { target: new_block };
for &(span, ref lvalue) in scope.drops.iter().rev() {
self.cfg.push_drop(new_block, span, lvalue);
}
// otherwise look for the cached regular drop (fast path)…
if let Some(&DropData { cached_block: Some(cached_block), .. }) = scope.drops.last() {
next_block = Some(cached_block);
continue;
}
// otherwise build the blocks…
for drop_data in scope.drops.iter_mut() {
// skipping them if they’re already built…
if let Some(cached_block) = drop_data.cached_block {
next_block = Some(cached_block);
continue;
}
let block = cfg.start_new_cleanup_block();
let target = next_block.unwrap_or_else(|| {
let b = cfg.start_new_cleanup_block();
cfg.terminate(b, Terminator::Resume);
b
});
cfg.terminate(block, Terminator::Drop {
value: drop_data.value.clone(),
target: target,
unwind: None
});
drop_data.cached_block = Some(block);
next_block = Some(block);
}
for &(_, ref lvalue, ref item_ty) in scope.frees.iter().rev() {
let item = lang_items::SpannedLangItems::box_free_fn(&tcx.lang_items)
.expect("box_free language item required");
let substs = tcx.mk_substs(Substs::new(
VecPerParamSpace::new(vec![], vec![], vec![item_ty]),
VecPerParamSpace::new(vec![], vec![], vec![])
));
let func = Constant {
span: item.1,
ty: tcx.lookup_item_type(item.0).ty,
if let Some(ref mut free_data) = scope.free {
// The free was not cached yet. It must be translated last and will be executed
// first.
let free_func = tcx.lang_items.box_free_fn()
.expect("box_free language item is missing");
let substs = tcx.mk_substs(Substs::new(
VecPerParamSpace::new(vec![], vec![], vec![free_data.item_ty]),
VecPerParamSpace::new(vec![], vec![], vec![])
));
let block = cfg.start_new_cleanup_block();
let target = next_block.unwrap_or_else(|| {
let b = cfg.start_new_cleanup_block();
cfg.terminate(b, Terminator::Resume);
b
});
cfg.terminate(block, Terminator::Call {
func: Operand::Constant(Constant {
span: free_data.span,
ty: tcx.lookup_item_type(free_func).ty.subst(tcx, substs),
literal: Literal::Item {
def_id: item.0,
def_id: free_func,
kind: ItemKind::Function,
substs: substs
}
};
let old_block = new_block;
new_block = self.cfg.start_new_cleanup_block();
self.cfg.terminate(new_block, Terminator::Call {
func: Operand::Constant(func),
args: vec![Operand::Consume(lvalue.clone())],
destination: Some((unit_tmp.clone(), old_block)),
cleanup: None // we’re already doing divergence cleanups
});
terminator = Terminator::Goto { target: new_block };
}
scope.cached_block = Some(new_block);
}),
args: vec![Operand::Consume(free_data.value.clone())],
destination: Some((unit_tmp.clone(), target)),
cleanup: None
});
free_data.cached_block = Some(block);
next_block = Some(block);
}
}
// Return the innermost cached block, most likely the one we just generated.
// Note that if there are no cleanups in scope we return None.
self.scopes.iter().rev().flat_map(|b| b.cached_block).next()
}
/// Indicates that `lvalue` should be dropped on exit from
/// `extent`.
pub fn schedule_drop(&mut self,
span: Span,
extent: CodeExtent,
lvalue: &Lvalue<'tcx>,
lvalue_ty: Ty<'tcx>) {
if self.hir.needs_drop(lvalue_ty) {
for scope in self.scopes.iter_mut().rev() {
// We must invalidate all the cached_blocks leading up to the scope we’re looking
// for, because otherwise some/most of the blocks in the chain might become
// incorrect (i.e. they still are pointing at old cached_block).
scope.cached_block = None;
if scope.extent == extent {
scope.drops.push((span, lvalue.clone()));
return;
}
}
self.hir.span_bug(span,
&format!("extent {:?} not in scope to drop {:?}", extent, lvalue));
}
scopes.iter().rev().flat_map(|x| x.cached_block()).next()
}
/// Schedule dropping of a not yet fully initialised box. This cleanup will (and should) only
/// be translated into unwind branch. The extent should be for the `EXPR` inside `box EXPR`.
pub fn schedule_box_free(&mut self,
span: Span,
extent: CodeExtent,
lvalue: &Lvalue<'tcx>,
item_ty: Ty<'tcx>) {
for scope in self.scopes.iter_mut().rev() {
// We must invalidate all the cached_blocks leading up to the scope we’re looking
// for, because otherwise some/most of the blocks in the chain might become
// incorrect (i.e. they still are pointing at old cached_block).
scope.cached_block = None;
if scope.extent == extent {
scope.frees.push((span, lvalue.clone(), item_ty));
return;
}
}
self.hir.span_bug(span,
&format!("extent {:?} not in scope to drop {:?}", extent, lvalue));
/// Utility function for *non*-scope code to build their own drops
pub fn build_drop(&mut self, block: BasicBlock, value: Lvalue<'tcx>) -> BlockAnd<()> {
let next_target = self.cfg.start_new_block();
let diverge_target = self.diverge_cleanup();
self.cfg.terminate(block, Terminator::Drop {
value: value,
target: next_target,
unwind: diverge_target,
});
next_target.unit()
}
pub fn extent_of_innermost_scope(&self) -> CodeExtent {
self.scopes.last().map(|scope| scope.extent).unwrap()
}
pub fn extent_of_outermost_scope(&self) -> CodeExtent {
self.scopes.first().map(|scope| scope.extent).unwrap()
}
// Panicking
// =========
// FIXME: should be moved into their own module
pub fn panic_bounds_check(&mut self,
block: BasicBlock,
index: Operand<'tcx>,
@@ -456,3 +569,30 @@ impl<'a,'tcx> Builder<'a,'tcx> {
}
}
/// Builds drops for pop_scope and exit_scope.
fn build_scope_drops<'tcx>(mut block: BasicBlock,
scope: &Scope<'tcx>,
earlier_scopes: &[Scope<'tcx>],
cfg: &mut CFG<'tcx>)
-> BlockAnd<()> {
let mut iter = scope.drops.iter().rev().peekable();
while let Some(drop_data) = iter.next() {
// Try to find the next block with its cached block for us to diverge into in case the
// drop panics.
let on_diverge = iter.peek().iter().flat_map(|dd| dd.cached_block.into_iter()).next();
// If there’s no `cached_block`s within the current scope, we must look for one in the
// enclosing scope.
let on_diverge = on_diverge.or_else(||{
earlier_scopes.iter().rev().flat_map(|s| s.cached_block()).next()
});
let next = cfg.start_new_block();
cfg.terminate(block, Terminator::Drop {
value: drop_data.value.clone(),
target: next,
unwind: on_diverge
});
block = next;
}
block.unit()
}
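
To make the chaining above easier to follow, here is a small standalone sketch (not the compiler's
code; block layout, names and the two-drop example are invented for illustration) of what
`diverge_cleanup` and `build_scope_drops` conceptually produce: an unwind chain built backwards
ending in Resume, plus a normal path whose `Drop` terminators point their `unwind` edge at the
cleanup block covering the remaining drops.

```rust
// Toy model of the drop chaining; not the real MIR builder.
#[derive(Debug, Clone, Copy)]
struct BasicBlock(usize);

#[derive(Debug)]
enum Terminator {
    Drop { value: &'static str, target: BasicBlock, unwind: Option<BasicBlock> },
    Resume,
}

#[derive(Default)]
struct Cfg {
    blocks: Vec<Option<Terminator>>,
}

impl Cfg {
    fn new_block(&mut self) -> BasicBlock {
        self.blocks.push(None);
        BasicBlock(self.blocks.len() - 1)
    }
    fn terminate(&mut self, block: BasicBlock, term: Terminator) {
        self.blocks[block.0] = Some(term);
    }
}

fn main() {
    // Drops are scheduled in declaration order: `x` first, then `y`.
    let drops = ["x", "y"];
    let mut cfg = Cfg::default();

    // diverge_cleanup (sketch): build the unwind chain backwards, ending in Resume.
    // cached[i] drops drops[i], then drops[i - 1], ..., then resumes unwinding.
    let resume = cfg.new_block();
    cfg.terminate(resume, Terminator::Resume);
    let mut cached = Vec::new();
    let mut target = resume;
    for &value in &drops {
        let b = cfg.new_block();
        cfg.terminate(b, Terminator::Drop { value, target, unwind: None });
        cached.push(b);
        target = b;
    }

    // build_scope_drops (sketch): the normal path drops in reverse declaration
    // order; dropping drops[i] unwinds into cached[i - 1], which runs the rest.
    let mut block = cfg.new_block();
    for i in (0..drops.len()).rev() {
        let next = cfg.new_block();
        let unwind = if i > 0 { Some(cached[i - 1]) } else { None };
        cfg.terminate(block, Terminator::Drop { value: drops[i], target: next, unwind });
        block = next;
    }

    for (i, term) in cfg.blocks.iter().enumerate() {
        println!("bb{}: {:?}", i, term);
    }
}
```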


@@ -42,16 +42,16 @@ impl<'a,'tcx> Builder<'a,'tcx> {
None => {
let (extent, _) = stmt_lists.pop().unwrap();
if let Some(extent) = extent {
this.pop_scope(extent, block);
unpack!(block = this.pop_scope(extent, block));
}
continue
}
};
let Stmt { span, kind } = this.hir.mirror(stmt);
let Stmt { span: _, kind } = this.hir.mirror(stmt);
match kind {
StmtKind::Let { remainder_scope, init_scope, pattern, initializer, stmts } => {
this.push_scope(remainder_scope, block);
this.push_scope(remainder_scope);
stmt_lists.push((Some(remainder_scope), stmts.into_iter()));
unpack!(block = this.in_scope(init_scope, block, move |this| {
// FIXME #30046 ^~~~
@@ -72,7 +72,7 @@ impl<'a,'tcx> Builder<'a,'tcx> {
let expr = this.hir.mirror(expr);
let temp = this.temp(expr.ty.clone());
unpack!(block = this.into(&temp, block, expr));
this.cfg.push_drop(block, span, &temp);
unpack!(block = this.build_drop(block, temp));
block.unit()
}));
}


@@ -69,9 +69,6 @@ impl<'a, 'tcx> EraseRegions<'a, 'tcx> {
self.erase_regions_lvalue(lvalue);
self.erase_regions_rvalue(rvalue);
}
StatementKind::Drop(ref mut lvalue) => {
self.erase_regions_lvalue(lvalue);
}
}
}
@@ -93,6 +90,9 @@ impl<'a, 'tcx> EraseRegions<'a, 'tcx> {
self.erase_regions_lvalue(discr);
*switch_ty = self.tcx.erase_regions(switch_ty);
},
Terminator::Drop { ref mut value, .. } => {
self.erase_regions_lvalue(value);
}
Terminator::Call { ref mut func, ref mut args, ref mut destination, .. } => {
if let Some((ref mut destination, _)) = *destination {
self.erase_regions_lvalue(destination);


@@ -18,10 +18,11 @@ use trans::base;
use trans::build;
use trans::common::{self, Block, LandingPad};
use trans::debuginfo::DebugLoc;
use trans::Disr;
use trans::foreign;
use trans::glue;
use trans::type_of;
use trans::type_::Type;
use trans::Disr;
use super::MirContext;
use super::operand::OperandValue::{FatPtr, Immediate, Ref};
@@ -94,6 +95,13 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
base::build_return_block(bcx.fcx, bcx, return_ty, DebugLoc::None);
}
mir::Terminator::Drop { ref value, target, unwind: _ } => {
let lvalue = self.trans_lvalue(bcx, value);
// FIXME: this does not account for possibility of unwinding (and totally should).
glue::drop_ty(bcx, lvalue.llval, lvalue.ty.to_ty(bcx.tcx()), DebugLoc::None);
build::Br(bcx, self.llblock(target), DebugLoc::None);
}
mir::Terminator::Call { ref func, ref args, ref destination, ref cleanup } => {
// Create the callee. This will always be a fn ptr and hence a kind of scalar.
let callee = self.trans_operand(bcx, func);


@@ -10,8 +10,6 @@
use rustc::mir::repr as mir;
use trans::common::Block;
use trans::debuginfo::DebugLoc;
use trans::glue;
use super::MirContext;
use super::TempRef;
@@ -50,12 +48,6 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
}
}
}
mir::StatementKind::Drop(ref lvalue) => {
let tr_lvalue = self.trans_lvalue(bcx, lvalue);
let ty = tr_lvalue.ty.to_ty(bcx.tcx());
glue::drop_ty(bcx, tr_lvalue.llval, ty, DebugLoc::None)
}
}
}
}