From 432460a6fc92e8baecbc4fa175345e78232fe2ed Mon Sep 17 00:00:00 2001 From: Simonas Kazlauskas Date: Thu, 28 Jan 2016 23:59:00 +0200 Subject: [PATCH 1/7] Synthesize calls to box_free language item This gets rid of Drop(Free, _) MIR construct by synthesizing a call to language item which takes care of dropping instead. --- src/doc/book/lang-items.md | 6 ++ src/liballoc/heap.rs | 13 ++++ src/librustc/middle/dead.rs | 2 +- src/librustc/middle/lang_items.rs | 8 +- src/librustc/middle/reachable.rs | 2 +- src/librustc/mir/repr.rs | 12 +-- src/librustc/mir/visit.rs | 2 +- src/librustc_metadata/encoder.rs | 2 +- src/librustc_mir/build/cfg.rs | 11 ++- src/librustc_mir/build/expr/as_rvalue.rs | 25 +++---- src/librustc_mir/build/expr/as_temp.rs | 2 +- src/librustc_mir/build/expr/into.rs | 2 +- src/librustc_mir/build/matches/mod.rs | 2 +- src/librustc_mir/build/scope.rs | 82 +++++++++++++++++---- src/librustc_mir/build/stmt.rs | 2 +- src/librustc_mir/hair/cx/expr.rs | 5 +- src/librustc_mir/hair/mod.rs | 1 + src/librustc_mir/transform/erase_regions.rs | 2 +- src/librustc_trans/trans/mir/constant.rs | 4 +- src/librustc_trans/trans/mir/statement.rs | 11 +-- 20 files changed, 127 insertions(+), 69 deletions(-) diff --git a/src/doc/book/lang-items.md b/src/doc/book/lang-items.md index e492bd3e782..b948567ac5b 100644 --- a/src/doc/book/lang-items.md +++ b/src/doc/book/lang-items.md @@ -39,11 +39,17 @@ unsafe fn allocate(size: usize, _align: usize) -> *mut u8 { p } + #[lang = "exchange_free"] unsafe fn deallocate(ptr: *mut u8, _size: usize, _align: usize) { libc::free(ptr as *mut libc::c_void) } +#[lang = "box_free"] +unsafe fn box_free(ptr: *mut T) { + deallocate(ptr as *mut u8, ::core::mem::size_of::(), ::core::mem::align_of::()); +} + #[start] fn main(argc: isize, argv: *const *const u8) -> isize { let x = box 1; diff --git a/src/liballoc/heap.rs b/src/liballoc/heap.rs index 7e7e3c619cb..08b403a60f3 100644 --- a/src/liballoc/heap.rs +++ b/src/liballoc/heap.rs @@ -16,6 +16,8 @@ 
issue = "27700")] use core::{isize, usize}; +#[cfg(not(test))] +use core::intrinsics::{size_of, min_align_of}; #[allow(improper_ctypes)] extern "C" { @@ -147,6 +149,17 @@ unsafe fn exchange_free(ptr: *mut u8, old_size: usize, align: usize) { deallocate(ptr, old_size, align); } +#[cfg(not(test))] +#[lang = "box_free"] +#[inline] +unsafe fn box_free(ptr: *mut T) { + let size = size_of::(); + // We do not allocate for Box when T is ZST, so deallocation is also not necessary. + if size != 0 { + deallocate(ptr as *mut u8, size, min_align_of::()); + } +} + #[cfg(test)] mod tests { extern crate test; diff --git a/src/librustc/middle/dead.rs b/src/librustc/middle/dead.rs index 1fa4093853b..7ed931265f2 100644 --- a/src/librustc/middle/dead.rs +++ b/src/librustc/middle/dead.rs @@ -435,7 +435,7 @@ impl<'a, 'tcx> DeadVisitor<'a, 'tcx> { let is_named = node.name().is_some(); let field_type = self.tcx.node_id_to_type(node.id); let is_marker_field = match field_type.ty_to_def_id() { - Some(def_id) => self.tcx.lang_items.items().any(|(_, item)| *item == Some(def_id)), + Some(def_id) => self.tcx.lang_items.items().iter().any(|item| *item == Some(def_id)), _ => false }; is_named diff --git a/src/librustc/middle/lang_items.rs b/src/librustc/middle/lang_items.rs index 6e57d5dd1ba..b445f549ba2 100644 --- a/src/librustc/middle/lang_items.rs +++ b/src/librustc/middle/lang_items.rs @@ -36,9 +36,6 @@ use syntax::parse::token::InternedString; use rustc_front::intravisit::Visitor; use rustc_front::hir; -use std::iter::Enumerate; -use std::slice; - // The actual lang items defined come at the end of this file in one handy table. // So you probably just want to nip down to the end. macro_rules! lets_do_this { @@ -69,8 +66,8 @@ impl LanguageItems { } } - pub fn items<'a>(&'a self) -> Enumerate>> { - self.items.iter().enumerate() + pub fn items(&self) -> &[Option] { + &*self.items } pub fn item_name(index: usize) -> &'static str { @@ -334,6 +331,7 @@ lets_do_this! 
{ ExchangeMallocFnLangItem, "exchange_malloc", exchange_malloc_fn; ExchangeFreeFnLangItem, "exchange_free", exchange_free_fn; + BoxFreeFnLangItem, "box_free", box_free_fn; StrDupUniqFnLangItem, "strdup_uniq", strdup_uniq_fn; StartFnLangItem, "start", start_fn; diff --git a/src/librustc/middle/reachable.rs b/src/librustc/middle/reachable.rs index 6373bfbc55e..6dd447e3b68 100644 --- a/src/librustc/middle/reachable.rs +++ b/src/librustc/middle/reachable.rs @@ -362,7 +362,7 @@ pub fn find_reachable(tcx: &ty::ctxt, for (id, _) in &access_levels.map { reachable_context.worklist.push(*id); } - for (_, item) in tcx.lang_items.items() { + for item in tcx.lang_items.items().iter() { if let Some(did) = *item { if let Some(node_id) = tcx.map.as_local_node_id(did) { reachable_context.worklist.push(node_id); diff --git a/src/librustc/mir/repr.rs b/src/librustc/mir/repr.rs index 988bf2c76a8..b9a94ad0068 100644 --- a/src/librustc/mir/repr.rs +++ b/src/librustc/mir/repr.rs @@ -490,14 +490,7 @@ pub struct Statement<'tcx> { #[derive(Clone, Debug, RustcEncodable, RustcDecodable)] pub enum StatementKind<'tcx> { Assign(Lvalue<'tcx>, Rvalue<'tcx>), - Drop(DropKind, Lvalue<'tcx>), -} - -#[derive(Copy, Clone, Debug, PartialEq, Eq, RustcEncodable, RustcDecodable)] -pub enum DropKind { - /// free a partially constructed box, should go away eventually - Free, - Deep + Drop(Lvalue<'tcx>), } impl<'tcx> Debug for Statement<'tcx> { @@ -505,8 +498,7 @@ impl<'tcx> Debug for Statement<'tcx> { use self::StatementKind::*; match self.kind { Assign(ref lv, ref rv) => write!(fmt, "{:?} = {:?}", lv, rv), - Drop(DropKind::Free, ref lv) => write!(fmt, "free {:?}", lv), - Drop(DropKind::Deep, ref lv) => write!(fmt, "drop {:?}", lv), + Drop(ref lv) => write!(fmt, "drop {:?}", lv), } } } diff --git a/src/librustc/mir/visit.rs b/src/librustc/mir/visit.rs index a38ef078c6f..494eac44b1a 100644 --- a/src/librustc/mir/visit.rs +++ b/src/librustc/mir/visit.rs @@ -124,7 +124,7 @@ macro_rules! 
make_mir_visitor { ref $($mutability)* rvalue) => { self.visit_assign(block, lvalue, rvalue); } - StatementKind::Drop(_, ref $($mutability)* lvalue) => { + StatementKind::Drop(ref $($mutability)* lvalue) => { self.visit_lvalue(lvalue, LvalueContext::Drop); } } diff --git a/src/librustc_metadata/encoder.rs b/src/librustc_metadata/encoder.rs index 1e50868f664..8df72016078 100644 --- a/src/librustc_metadata/encoder.rs +++ b/src/librustc_metadata/encoder.rs @@ -1649,7 +1649,7 @@ fn encode_crate_deps(rbml_w: &mut Encoder, cstore: &cstore::CStore) { fn encode_lang_items(ecx: &EncodeContext, rbml_w: &mut Encoder) { rbml_w.start_tag(tag_lang_items); - for (i, &opt_def_id) in ecx.tcx.lang_items.items() { + for (i, &opt_def_id) in ecx.tcx.lang_items.items().iter().enumerate() { if let Some(def_id) = opt_def_id { if def_id.is_local() { rbml_w.start_tag(tag_lang_items_item); diff --git a/src/librustc_mir/build/cfg.rs b/src/librustc_mir/build/cfg.rs index 523ac85cdc5..5be2b8394ee 100644 --- a/src/librustc_mir/build/cfg.rs +++ b/src/librustc_mir/build/cfg.rs @@ -32,16 +32,21 @@ impl<'tcx> CFG<'tcx> { BasicBlock::new(node_index) } + pub fn start_new_cleanup_block(&mut self) -> BasicBlock { + let bb = self.start_new_block(); + self.block_data_mut(bb).is_cleanup = true; + bb + } + pub fn push(&mut self, block: BasicBlock, statement: Statement<'tcx>) { debug!("push({:?}, {:?})", block, statement); self.block_data_mut(block).statements.push(statement); } - pub fn push_drop(&mut self, block: BasicBlock, span: Span, - kind: DropKind, lvalue: &Lvalue<'tcx>) { + pub fn push_drop(&mut self, block: BasicBlock, span: Span, lvalue: &Lvalue<'tcx>) { self.push(block, Statement { span: span, - kind: StatementKind::Drop(kind, lvalue.clone()) + kind: StatementKind::Drop(lvalue.clone()) }); } diff --git a/src/librustc_mir/build/expr/as_rvalue.rs b/src/librustc_mir/build/expr/as_rvalue.rs index e556d707a2d..dea2d750b98 100644 --- a/src/librustc_mir/build/expr/as_rvalue.rs +++ 
b/src/librustc_mir/build/expr/as_rvalue.rs @@ -59,25 +59,18 @@ impl<'a,'tcx> Builder<'a,'tcx> { let arg = unpack!(block = this.as_operand(block, arg)); block.and(Rvalue::UnaryOp(op, arg)) } - ExprKind::Box { value } => { + ExprKind::Box { value, value_extents } => { let value = this.hir.mirror(value); let result = this.temp(expr.ty); - // to start, malloc some memory of suitable type (thus far, uninitialized): - let rvalue = Rvalue::Box(value.ty); - this.cfg.push_assign(block, expr_span, &result, rvalue); - - // schedule a shallow free of that memory, lest we unwind: - let extent = this.extent_of_innermost_scope(); - this.schedule_drop(expr_span, extent, DropKind::Free, &result, value.ty); - - // initialize the box contents: - let contents = result.clone().deref(); - unpack!(block = this.into(&contents, block, value)); - - // now that the result is fully initialized, cancel the drop - // by "using" the result (which is linear): - block.and(Rvalue::Use(Operand::Consume(result))) + this.cfg.push_assign(block, expr_span, &result, Rvalue::Box(value.ty)); + this.in_scope(value_extents, block, |this| { + // schedule a shallow free of that memory, lest we unwind: + this.schedule_box_free(expr_span, value_extents, &result, value.ty); + // initialize the box contents: + unpack!(block = this.into(&result.clone().deref(), block, value)); + block.and(Rvalue::Use(Operand::Consume(result))) + }) } ExprKind::Cast { source } => { let source = unpack!(block = this.as_operand(block, source)); diff --git a/src/librustc_mir/build/expr/as_temp.rs b/src/librustc_mir/build/expr/as_temp.rs index 53f8090ad0f..27c374e1ac2 100644 --- a/src/librustc_mir/build/expr/as_temp.rs +++ b/src/librustc_mir/build/expr/as_temp.rs @@ -41,7 +41,7 @@ impl<'a,'tcx> Builder<'a,'tcx> { this.hir.span_bug(expr.span, "no temp_lifetime for expr"); } }; - this.schedule_drop(expr.span, temp_lifetime, DropKind::Deep, &temp, expr_ty); + this.schedule_drop(expr.span, temp_lifetime, &temp, expr_ty); // Careful here not 
to cause an infinite cycle. If we always // called `into`, then for lvalues like `x.f`, it would diff --git a/src/librustc_mir/build/expr/into.rs b/src/librustc_mir/build/expr/into.rs index 4261efe8215..7ecd8a26507 100644 --- a/src/librustc_mir/build/expr/into.rs +++ b/src/librustc_mir/build/expr/into.rs @@ -188,7 +188,7 @@ impl<'a,'tcx> Builder<'a,'tcx> { // operators like x[j] = x[i]. let rhs = unpack!(block = this.as_operand(block, rhs)); let lhs = unpack!(block = this.as_lvalue(block, lhs)); - this.cfg.push_drop(block, expr_span, DropKind::Deep, &lhs); + this.cfg.push_drop(block, expr_span, &lhs); this.cfg.push_assign(block, expr_span, &lhs, Rvalue::Use(rhs)); block.unit() } diff --git a/src/librustc_mir/build/matches/mod.rs b/src/librustc_mir/build/matches/mod.rs index c2c87fcbd20..e6430b7d634 100644 --- a/src/librustc_mir/build/matches/mod.rs +++ b/src/librustc_mir/build/matches/mod.rs @@ -601,7 +601,7 @@ impl<'a,'tcx> Builder<'a,'tcx> { ty: var_ty.clone(), }); let index = index as u32; - self.schedule_drop(span, var_extent, DropKind::Deep, &Lvalue::Var(index), var_ty); + self.schedule_drop(span, var_extent, &Lvalue::Var(index), var_ty); self.var_indices.insert(var_id, index); debug!("declare_binding: index={:?}", index); diff --git a/src/librustc_mir/build/scope.rs b/src/librustc_mir/build/scope.rs index dae525c84d0..35153bc8a90 100644 --- a/src/librustc_mir/build/scope.rs +++ b/src/librustc_mir/build/scope.rs @@ -89,7 +89,7 @@ should go to. 
use build::{BlockAnd, BlockAndExtension, Builder}; use rustc::middle::region::CodeExtent; use rustc::middle::lang_items; -use rustc::middle::subst::Substs; +use rustc::middle::subst::{Substs, VecPerParamSpace}; use rustc::middle::ty::{self, Ty}; use rustc::mir::repr::*; use syntax::codemap::{Span, DUMMY_SP}; @@ -97,7 +97,8 @@ use syntax::parse::token::intern_and_get_ident; pub struct Scope<'tcx> { extent: CodeExtent, - drops: Vec<(DropKind, Span, Lvalue<'tcx>)>, + drops: Vec<(Span, Lvalue<'tcx>)>, + frees: Vec<(Span, Lvalue<'tcx>, Ty<'tcx>)>, cached_block: Option, } @@ -164,6 +165,7 @@ impl<'a,'tcx> Builder<'a,'tcx> { self.scopes.push(Scope { extent: extent.clone(), drops: vec![], + frees: vec![], cached_block: None, }); } @@ -180,8 +182,8 @@ impl<'a,'tcx> Builder<'a,'tcx> { // add in any drops needed on the fallthrough path (any other // exiting paths, such as those that arise from `break`, will // have drops already) - for (kind, span, lvalue) in scope.drops { - self.cfg.push_drop(block, span, kind, &lvalue); + for (span, lvalue) in scope.drops { + self.cfg.push_drop(block, span, &lvalue); } } @@ -225,8 +227,8 @@ impl<'a,'tcx> Builder<'a,'tcx> { }); for scope in scopes.iter_mut().rev().take(scope_count) { - for &(kind, drop_span, ref lvalue) in &scope.drops { - cfg.push_drop(block, drop_span, kind, lvalue); + for &(drop_span, ref lvalue) in &scope.drops { + cfg.push_drop(block, drop_span, lvalue); } } cfg.terminate(block, Terminator::Goto { target: target }); @@ -242,23 +244,55 @@ impl<'a,'tcx> Builder<'a,'tcx> { return None; } + let tcx = self.hir.tcx(); + let unit_tmp = self.get_unit_temp(); let mut terminator = Terminator::Resume; // Given an array of scopes, we generate these from the outermost scope to the innermost // one. Thus for array [S0, S1, S2] with corresponding cleanup blocks [B0, B1, B2], we will // generate B0 <- B1 <- B2 in left-to-right order. The outermost scope (B0) will always // terminate with a Resume terminator. 
- for scope in self.scopes.iter_mut().filter(|s| !s.drops.is_empty()) { + for scope in self.scopes.iter_mut().filter(|s| !s.drops.is_empty() || !s.frees.is_empty()) { if let Some(b) = scope.cached_block { terminator = Terminator::Goto { target: b }; continue; } else { - let new_block = self.cfg.start_new_block(); - self.cfg.block_data_mut(new_block).is_cleanup = true; + let mut new_block = self.cfg.start_new_cleanup_block(); self.cfg.terminate(new_block, terminator); terminator = Terminator::Goto { target: new_block }; - for &(kind, span, ref lvalue) in scope.drops.iter().rev() { - self.cfg.push_drop(new_block, span, kind, lvalue); + + for &(span, ref lvalue) in scope.drops.iter().rev() { + self.cfg.push_drop(new_block, span, lvalue); } + + for &(_, ref lvalue, ref item_ty) in scope.frees.iter().rev() { + let item = lang_items::SpannedLangItems::box_free_fn(&tcx.lang_items) + .expect("box_free language item required"); + let substs = tcx.mk_substs(Substs::new( + VecPerParamSpace::new(vec![], vec![], vec![item_ty]), + VecPerParamSpace::new(vec![], vec![], vec![]) + )); + let func = Constant { + span: item.1, + ty: tcx.lookup_item_type(item.0).ty.subst(substs), + literal: Literal::Item { + def_id: item.0, + kind: ItemKind::Function, + substs: substs + } + }; + let old_block = new_block; + new_block = self.cfg.start_new_cleanup_block(); + self.cfg.terminate(new_block, Terminator::Call { + func: Operand::Constant(func), + args: vec![Operand::Consume(lvalue.clone())], + kind: CallKind::Converging { + target: old_block, + destination: unit_tmp.clone() + } + }); + terminator = Terminator::Goto { target: new_block }; + } + scope.cached_block = Some(new_block); } } @@ -272,7 +306,6 @@ impl<'a,'tcx> Builder<'a,'tcx> { pub fn schedule_drop(&mut self, span: Span, extent: CodeExtent, - kind: DropKind, lvalue: &Lvalue<'tcx>, lvalue_ty: Ty<'tcx>) { if self.hir.needs_drop(lvalue_ty) { @@ -282,7 +315,7 @@ impl<'a,'tcx> Builder<'a,'tcx> { // incorrect (i.e. 
they still are pointing at old cached_block). scope.cached_block = None; if scope.extent == extent { - scope.drops.push((kind, span, lvalue.clone())); + scope.drops.push((span, lvalue.clone())); return; } } @@ -291,6 +324,27 @@ impl<'a,'tcx> Builder<'a,'tcx> { } } + /// Schedule dropping of a not yet fully initialised box. This cleanup will (and should) only + /// be translated into unwind branch. The extent should be for the `EXPR` inside `box EXPR`. + pub fn schedule_box_free(&mut self, + span: Span, + extent: CodeExtent, + lvalue: &Lvalue<'tcx>, + item_ty: Ty<'tcx>) { + for scope in self.scopes.iter_mut().rev() { + // We must invalidate all the cached_blocks leading up to the scope we’re looking + // for, because otherwise some/most of the blocks in the chain might become + // incorrect (i.e. they still are pointing at old cached_block). + scope.cached_block = None; + if scope.extent == extent { + scope.frees.push((span, lvalue.clone(), item_ty)); + return; + } + } + self.hir.span_bug(span, + &format!("extent {:?} not in scope to drop {:?}", extent, lvalue)); + } + pub fn extent_of_innermost_scope(&self) -> CodeExtent { self.scopes.last().map(|scope| scope.extent).unwrap() } @@ -299,6 +353,7 @@ impl<'a,'tcx> Builder<'a,'tcx> { self.scopes.first().map(|scope| scope.extent).unwrap() } + pub fn panic_bounds_check(&mut self, block: BasicBlock, index: Operand<'tcx>, @@ -405,4 +460,5 @@ impl<'a,'tcx> Builder<'a,'tcx> { literal: self.hir.usize_literal(span_lines.line) }) } + } diff --git a/src/librustc_mir/build/stmt.rs b/src/librustc_mir/build/stmt.rs index c70b22893ae..3a50acec94f 100644 --- a/src/librustc_mir/build/stmt.rs +++ b/src/librustc_mir/build/stmt.rs @@ -72,7 +72,7 @@ impl<'a,'tcx> Builder<'a,'tcx> { let expr = this.hir.mirror(expr); let temp = this.temp(expr.ty.clone()); unpack!(block = this.into(&temp, block, expr)); - this.cfg.push_drop(block, span, DropKind::Deep, &temp); + this.cfg.push_drop(block, span, &temp); block.unit() })); } diff --git 
a/src/librustc_mir/hair/cx/expr.rs b/src/librustc_mir/hair/cx/expr.rs index d8a1930fd5c..f9a9c99f63d 100644 --- a/src/librustc_mir/hair/cx/expr.rs +++ b/src/librustc_mir/hair/cx/expr.rs @@ -375,7 +375,10 @@ impl<'tcx> Mirror<'tcx> for &'tcx hir::Expr { hir::ExprType(ref source, _) => return source.make_mirror(cx), hir::ExprBox(ref value) => - ExprKind::Box { value: value.to_ref() }, + ExprKind::Box { + value: value.to_ref(), + value_extents: cx.tcx.region_maps.node_extent(value.id) + }, hir::ExprVec(ref fields) => ExprKind::Vec { fields: fields.to_ref() }, hir::ExprTup(ref fields) => diff --git a/src/librustc_mir/hair/mod.rs b/src/librustc_mir/hair/mod.rs index b356e3886d1..0891c2845b0 100644 --- a/src/librustc_mir/hair/mod.rs +++ b/src/librustc_mir/hair/mod.rs @@ -123,6 +123,7 @@ pub enum ExprKind<'tcx> { }, Box { value: ExprRef<'tcx>, + value_extents: CodeExtent, }, Call { ty: ty::Ty<'tcx>, diff --git a/src/librustc_mir/transform/erase_regions.rs b/src/librustc_mir/transform/erase_regions.rs index 66604786d46..3cd1bd76c54 100644 --- a/src/librustc_mir/transform/erase_regions.rs +++ b/src/librustc_mir/transform/erase_regions.rs @@ -69,7 +69,7 @@ impl<'a, 'tcx> EraseRegions<'a, 'tcx> { self.erase_regions_lvalue(lvalue); self.erase_regions_rvalue(rvalue); } - StatementKind::Drop(_, ref mut lvalue) => { + StatementKind::Drop(ref mut lvalue) => { self.erase_regions_lvalue(lvalue); } } diff --git a/src/librustc_trans/trans/mir/constant.rs b/src/librustc_trans/trans/mir/constant.rs index 84cc87e9b13..3b763599f77 100644 --- a/src/librustc_trans/trans/mir/constant.rs +++ b/src/librustc_trans/trans/mir/constant.rs @@ -82,13 +82,13 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { constant: &mir::Constant<'tcx>) -> OperandRef<'tcx> { - let ty = bcx.monomorphize(&constant.ty); match constant.literal { mir::Literal::Item { def_id, kind, substs } => { let substs = bcx.tcx().mk_substs(bcx.monomorphize(&substs)); - self.trans_item_ref(bcx, ty, kind, substs, def_id) + 
self.trans_item_ref(bcx, constant.ty, kind, substs, def_id) } mir::Literal::Value { ref value } => { + let ty = bcx.monomorphize(&constant.ty); self.trans_constval(bcx, value, ty) } } diff --git a/src/librustc_trans/trans/mir/statement.rs b/src/librustc_trans/trans/mir/statement.rs index dae0d3b55c0..cd186de7ca0 100644 --- a/src/librustc_trans/trans/mir/statement.rs +++ b/src/librustc_trans/trans/mir/statement.rs @@ -8,7 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use rustc::middle::ty::LvaluePreference; use rustc::mir::repr as mir; use trans::common::Block; use trans::debuginfo::DebugLoc; @@ -52,19 +51,11 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { } } - mir::StatementKind::Drop(mir::DropKind::Deep, ref lvalue) => { + mir::StatementKind::Drop(ref lvalue) => { let tr_lvalue = self.trans_lvalue(bcx, lvalue); let ty = tr_lvalue.ty.to_ty(bcx.tcx()); glue::drop_ty(bcx, tr_lvalue.llval, ty, DebugLoc::None) } - - mir::StatementKind::Drop(mir::DropKind::Free, ref lvalue) => { - let tr_lvalue = self.trans_lvalue(bcx, lvalue); - let ty = tr_lvalue.ty.to_ty(bcx.tcx()); - let content_ty = ty.builtin_deref(true, LvaluePreference::NoPreference); - let content_ty = content_ty.unwrap().ty; - glue::trans_exchange_free_ty(bcx, tr_lvalue.llval, content_ty, DebugLoc::None) - } } } } From 02365fe753708534229ff780e80133e256f3bbe3 Mon Sep 17 00:00:00 2001 From: Simonas Kazlauskas Date: Fri, 29 Jan 2016 19:06:23 +0200 Subject: [PATCH 2/7] Change successor{,_mut} to return a Vec This helps to avoid the unpleasant restriction of being unable to have multiple successors in non-contiguous block of memory. 
--- src/librustc/mir/repr.rs | 53 +++++++++++----------- src/librustc_mir/build/cfg.rs | 1 - src/librustc_mir/transform/simplify_cfg.rs | 2 +- 3 files changed, 28 insertions(+), 28 deletions(-) diff --git a/src/librustc/mir/repr.rs b/src/librustc/mir/repr.rs index b9a94ad0068..ed6c4e80c2d 100644 --- a/src/librustc/mir/repr.rs +++ b/src/librustc/mir/repr.rs @@ -8,21 +8,20 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use graphviz::IntoCow; use middle::const_eval::ConstVal; use middle::def_id::DefId; use middle::subst::Substs; use middle::ty::{self, AdtDef, ClosureSubsts, FnOutput, Region, Ty}; use rustc_back::slice; -use rustc_data_structures::tuple_slice::TupleSlice; use rustc_front::hir::InlineAsm; -use syntax::ast::{self, Name}; -use syntax::codemap::Span; -use graphviz::IntoCow; use std::ascii; -use std::borrow::Cow; +use std::borrow::{Cow}; use std::fmt::{self, Debug, Formatter, Write}; use std::{iter, u32}; use std::ops::{Index, IndexMut}; +use syntax::ast::{self, Name}; +use syntax::codemap::Span; /// Lowered representation of a single function. #[derive(Clone, RustcEncodable, RustcDecodable)] @@ -335,29 +334,31 @@ impl<'tcx> CallKind<'tcx> { } impl<'tcx> Terminator<'tcx> { - pub fn successors(&self) -> &[BasicBlock] { + pub fn successors(&self) -> Cow<[BasicBlock]> { use self::Terminator::*; match *self { - Goto { target: ref b } => slice::ref_slice(b), - If { targets: ref b, .. } => b.as_slice(), - Switch { targets: ref b, .. } => b, - SwitchInt { targets: ref b, .. } => b, - Resume => &[], - Return => &[], - Call { ref kind, .. } => kind.successors(), + Goto { target: ref b } => slice::ref_slice(b).into_cow(), + If { targets: (b1, b2), .. } => vec![b1, b2].into_cow(), + Switch { targets: ref b, .. } => b[..].into_cow(), + SwitchInt { targets: ref b, .. } => b[..].into_cow(), + Resume => (&[]).into_cow(), + Return => (&[]).into_cow(), + Call { ref kind, .. 
} => kind.successors()[..].into_cow(), } } - pub fn successors_mut(&mut self) -> &mut [BasicBlock] { + // FIXME: no mootable cow. I’m honestly not sure what a “cow” between `&mut [BasicBlock]` and + // `Vec<&mut BasicBlock>` would look like in the first place. + pub fn successors_mut(&mut self) -> Vec<&mut BasicBlock> { use self::Terminator::*; match *self { - Goto { target: ref mut b } => slice::mut_ref_slice(b), - If { targets: ref mut b, .. } => b.as_mut_slice(), - Switch { targets: ref mut b, .. } => b, - SwitchInt { targets: ref mut b, .. } => b, - Resume => &mut [], - Return => &mut [], - Call { ref mut kind, .. } => kind.successors_mut(), + Goto { target: ref mut b } => vec![b], + If { targets: (ref mut b1, ref mut b2), .. } => vec![b1, b2], + Switch { targets: ref mut b, .. } => b.iter_mut().collect(), + SwitchInt { targets: ref mut b, .. } => b.iter_mut().collect(), + Resume => Vec::new(), + Return => Vec::new(), + Call { ref mut kind, .. } => kind.successors_mut().iter_mut().collect(), } } } @@ -445,12 +446,12 @@ impl<'tcx> Terminator<'tcx> { use self::Terminator::*; match *self { Return | Resume => vec![], - Goto { .. } => vec!["".into_cow()], - If { .. } => vec!["true".into_cow(), "false".into_cow()], + Goto { .. } => vec!["".into()], + If { .. } => vec!["true".into(), "false".into()], Switch { ref adt_def, .. } => { adt_def.variants .iter() - .map(|variant| variant.name.to_string().into_cow()) + .map(|variant| variant.name.to_string().into()) .collect() } SwitchInt { ref values, .. } => { @@ -458,9 +459,9 @@ impl<'tcx> Terminator<'tcx> { .map(|const_val| { let mut buf = String::new(); fmt_const_val(&mut buf, const_val).unwrap(); - buf.into_cow() + buf.into() }) - .chain(iter::once(String::from("otherwise").into_cow())) + .chain(iter::once(String::from("otherwise").into())) .collect() } Call { ref kind, .. 
} => match *kind { diff --git a/src/librustc_mir/build/cfg.rs b/src/librustc_mir/build/cfg.rs index 5be2b8394ee..365fca23dda 100644 --- a/src/librustc_mir/build/cfg.rs +++ b/src/librustc_mir/build/cfg.rs @@ -86,4 +86,3 @@ impl<'tcx> CFG<'tcx> { self.block_data_mut(block).terminator = Some(terminator); } } - diff --git a/src/librustc_mir/transform/simplify_cfg.rs b/src/librustc_mir/transform/simplify_cfg.rs index 7a5a00a8d56..9d4c73b90f8 100644 --- a/src/librustc_mir/transform/simplify_cfg.rs +++ b/src/librustc_mir/transform/simplify_cfg.rs @@ -29,7 +29,7 @@ impl SimplifyCfg { let mut worklist = vec![START_BLOCK]; while let Some(bb) = worklist.pop() { - for succ in mir.basic_block_data(bb).terminator().successors() { + for succ in mir.basic_block_data(bb).terminator().successors().iter() { if !seen[succ.index()] { seen[succ.index()] = true; worklist.push(*succ); From 65dd5e6a84aa8d9af477f21fa9797e3d0774a2c1 Mon Sep 17 00:00:00 2001 From: Simonas Kazlauskas Date: Fri, 29 Jan 2016 20:42:02 +0200 Subject: [PATCH 3/7] Remove the CallKind We used to have CallKind only because there was a requirement to have all successors in a contiguous memory block. Now that the requirement is gone, remove the CallKind and instead just have the necessary information inline. Awesome! 
--- src/librustc/mir/repr.rs | 96 +++++---------------- src/librustc/mir/visit.rs | 21 ++--- src/librustc_mir/build/expr/into.rs | 16 ++-- src/librustc_mir/build/scope.rs | 20 ++--- src/librustc_mir/transform/erase_regions.rs | 4 +- src/librustc_trans/trans/mir/block.rs | 41 +++------ 6 files changed, 53 insertions(+), 145 deletions(-) diff --git a/src/librustc/mir/repr.rs b/src/librustc/mir/repr.rs index ed6c4e80c2d..59d00b436c8 100644 --- a/src/librustc/mir/repr.rs +++ b/src/librustc/mir/repr.rs @@ -268,71 +268,13 @@ pub enum Terminator<'tcx> { func: Operand<'tcx>, /// Arguments the function is called with args: Vec>, - /// The kind of call with associated information - kind: CallKind<'tcx>, + /// Destination for the return value. If some, the call is converging. + destination: Option<(Lvalue<'tcx>, BasicBlock)>, + /// Cleanups to be done if the call unwinds. + cleanup: Option }, } -#[derive(Clone, RustcEncodable, RustcDecodable)] -pub enum CallKind<'tcx> { - /// Diverging function without associated cleanup - Diverging, - /// Diverging function with associated cleanup - DivergingCleanup(BasicBlock), - /// Converging function without associated cleanup - Converging { - /// Destination where the call result is written - destination: Lvalue<'tcx>, - /// Block to branch into on successful return - target: BasicBlock, - }, - ConvergingCleanup { - /// Destination where the call result is written - destination: Lvalue<'tcx>, - /// First target is branched to on successful return. - /// Second block contains the cleanups to do on unwind. - targets: (BasicBlock, BasicBlock) - } -} - -impl<'tcx> CallKind<'tcx> { - pub fn successors(&self) -> &[BasicBlock] { - match *self { - CallKind::Diverging => &[], - CallKind::DivergingCleanup(ref b) | - CallKind::Converging { target: ref b, .. } => slice::ref_slice(b), - CallKind::ConvergingCleanup { ref targets, .. 
} => targets.as_slice(), - } - } - - pub fn successors_mut(&mut self) -> &mut [BasicBlock] { - match *self { - CallKind::Diverging => &mut [], - CallKind::DivergingCleanup(ref mut b) | - CallKind::Converging { target: ref mut b, .. } => slice::mut_ref_slice(b), - CallKind::ConvergingCleanup { ref mut targets, .. } => targets.as_mut_slice(), - } - } - - pub fn destination(&self) -> Option<&Lvalue<'tcx>> { - match *self { - CallKind::Converging { ref destination, .. } | - CallKind::ConvergingCleanup { ref destination, .. } => Some(destination), - CallKind::Diverging | - CallKind::DivergingCleanup(_) => None - } - } - - pub fn destination_mut(&mut self) -> Option<&mut Lvalue<'tcx>> { - match *self { - CallKind::Converging { ref mut destination, .. } | - CallKind::ConvergingCleanup { ref mut destination, .. } => Some(destination), - CallKind::Diverging | - CallKind::DivergingCleanup(_) => None - } - } -} - impl<'tcx> Terminator<'tcx> { pub fn successors(&self) -> Cow<[BasicBlock]> { use self::Terminator::*; @@ -343,7 +285,11 @@ impl<'tcx> Terminator<'tcx> { SwitchInt { targets: ref b, .. } => b[..].into_cow(), Resume => (&[]).into_cow(), Return => (&[]).into_cow(), - Call { ref kind, .. } => kind.successors()[..].into_cow(), + Call { destination: Some((_, t)), cleanup: Some(c), .. } => vec![t, c].into_cow(), + Call { destination: Some((_, ref t)), cleanup: None, .. } => + slice::ref_slice(t).into_cow(), + Call { destination: None, cleanup: Some(ref c), .. } => slice::ref_slice(c).into_cow(), + Call { destination: None, cleanup: None, .. } => (&[]).into_cow(), } } @@ -358,7 +304,10 @@ impl<'tcx> Terminator<'tcx> { SwitchInt { targets: ref mut b, .. } => b.iter_mut().collect(), Resume => Vec::new(), Return => Vec::new(), - Call { ref mut kind, .. } => kind.successors_mut().iter_mut().collect(), + Call { destination: Some((_, ref mut t)), cleanup: Some(ref mut c), .. } => vec![t, c], + Call { destination: Some((_, ref mut t)), cleanup: None, .. 
} => vec![t], + Call { destination: None, cleanup: Some(ref mut c), .. } => vec![c], + Call { destination: None, cleanup: None, .. } => vec![], } } } @@ -425,8 +374,8 @@ impl<'tcx> Terminator<'tcx> { SwitchInt { discr: ref lv, .. } => write!(fmt, "switchInt({:?})", lv), Return => write!(fmt, "return"), Resume => write!(fmt, "resume"), - Call { ref kind, ref func, ref args } => { - if let Some(destination) = kind.destination() { + Call { ref func, ref args, ref destination, .. } => { + if let Some((ref destination, _)) = *destination { try!(write!(fmt, "{:?} = ", destination)); } try!(write!(fmt, "{:?}(", func)); @@ -464,16 +413,11 @@ impl<'tcx> Terminator<'tcx> { .chain(iter::once(String::from("otherwise").into())) .collect() } - Call { ref kind, .. } => match *kind { - CallKind::Diverging => - vec![], - CallKind::DivergingCleanup(..) => - vec!["unwind".into_cow()], - CallKind::Converging { .. } => - vec!["return".into_cow()], - CallKind::ConvergingCleanup { .. } => - vec!["return".into_cow(), "unwind".into_cow()], - }, + Call { destination: Some(_), cleanup: Some(_), .. } => + vec!["return".into_cow(), "unwind".into_cow()], + Call { destination: Some(_), cleanup: None, .. } => vec!["return".into_cow()], + Call { destination: None, cleanup: Some(_), .. } => vec!["unwind".into_cow()], + Call { destination: None, cleanup: None, .. } => vec![], } } } diff --git a/src/librustc/mir/visit.rs b/src/librustc/mir/visit.rs index 494eac44b1a..73dd0de1c04 100644 --- a/src/librustc/mir/visit.rs +++ b/src/librustc/mir/visit.rs @@ -179,28 +179,17 @@ macro_rules! make_mir_visitor { Terminator::Call { ref $($mutability)* func, ref $($mutability)* args, - ref $($mutability)* kind } => { + ref $($mutability)* destination, + ref $($mutability)* cleanup } => { self.visit_operand(func); for arg in args { self.visit_operand(arg); } - match *kind { - CallKind::Converging { - ref $($mutability)* destination, - .. - } | - CallKind::ConvergingCleanup { - ref $($mutability)* destination, - .. 
- } => { - self.visit_lvalue(destination, LvalueContext::Store); - } - CallKind::Diverging | - CallKind::DivergingCleanup(_) => {} - } - for &target in kind.successors() { + if let Some((ref $($mutability)* destination, target)) = *destination { + self.visit_lvalue(destination, LvalueContext::Store); self.visit_branch(block, target); } + cleanup.map(|t| self.visit_branch(block, t)); } } } diff --git a/src/librustc_mir/build/expr/into.rs b/src/librustc_mir/build/expr/into.rs index 7ecd8a26507..3b453a214b6 100644 --- a/src/librustc_mir/build/expr/into.rs +++ b/src/librustc_mir/build/expr/into.rs @@ -253,17 +253,11 @@ impl<'a,'tcx> Builder<'a,'tcx> { this.cfg.terminate(block, Terminator::Call { func: fun, args: args, - kind: match (cleanup, diverges) { - (None, true) => CallKind::Diverging, - (Some(c), true) => CallKind::DivergingCleanup(c), - (None, false) => CallKind::Converging { - destination: destination.clone(), - target: success - }, - (Some(c), false) => CallKind::ConvergingCleanup { - destination: destination.clone(), - targets: (success, c) - } + cleanup: cleanup, + destination: if diverges { + None + } else { + Some ((destination.clone(), success)) } }); success.unit() diff --git a/src/librustc_mir/build/scope.rs b/src/librustc_mir/build/scope.rs index 35153bc8a90..fef6837f3f7 100644 --- a/src/librustc_mir/build/scope.rs +++ b/src/librustc_mir/build/scope.rs @@ -273,7 +273,7 @@ impl<'a,'tcx> Builder<'a,'tcx> { )); let func = Constant { span: item.1, - ty: tcx.lookup_item_type(item.0).ty.subst(substs), + ty: tcx.lookup_item_type(item.0).ty, literal: Literal::Item { def_id: item.0, kind: ItemKind::Function, @@ -285,10 +285,8 @@ impl<'a,'tcx> Builder<'a,'tcx> { self.cfg.terminate(new_block, Terminator::Call { func: Operand::Constant(func), args: vec![Operand::Consume(lvalue.clone())], - kind: CallKind::Converging { - target: old_block, - destination: unit_tmp.clone() - } + destination: Some((unit_tmp.clone(), old_block)), + cleanup: None // we’re already doing 
divergence cleanups }); terminator = Terminator::Goto { target: new_block }; } @@ -383,10 +381,8 @@ impl<'a,'tcx> Builder<'a,'tcx> { self.cfg.terminate(block, Terminator::Call { func: Operand::Constant(func), args: vec![Operand::Consume(tuple_ref), index, len], - kind: match cleanup { - None => CallKind::Diverging, - Some(c) => CallKind::DivergingCleanup(c) - } + destination: None, + cleanup: cleanup, }); } @@ -423,10 +419,8 @@ impl<'a,'tcx> Builder<'a,'tcx> { self.cfg.terminate(block, Terminator::Call { func: Operand::Constant(func), args: vec![Operand::Consume(tuple_ref)], - kind: match cleanup { - None => CallKind::Diverging, - Some(c) => CallKind::DivergingCleanup(c) - } + cleanup: cleanup, + destination: None, }); } diff --git a/src/librustc_mir/transform/erase_regions.rs b/src/librustc_mir/transform/erase_regions.rs index 3cd1bd76c54..00162be816b 100644 --- a/src/librustc_mir/transform/erase_regions.rs +++ b/src/librustc_mir/transform/erase_regions.rs @@ -93,8 +93,8 @@ impl<'a, 'tcx> EraseRegions<'a, 'tcx> { self.erase_regions_lvalue(discr); *switch_ty = self.tcx.erase_regions(switch_ty); }, - Terminator::Call { ref mut func, ref mut args, ref mut kind } => { - if let Some(destination) = kind.destination_mut() { + Terminator::Call { ref mut func, ref mut args, ref mut destination, .. } => { + if let Some((ref mut destination, _)) = *destination { self.erase_regions_lvalue(destination); } self.erase_regions_operand(func); diff --git a/src/librustc_trans/trans/mir/block.rs b/src/librustc_trans/trans/mir/block.rs index 5651710aa80..c1ab55f9f53 100644 --- a/src/librustc_trans/trans/mir/block.rs +++ b/src/librustc_trans/trans/mir/block.rs @@ -94,7 +94,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { base::build_return_block(bcx.fcx, bcx, return_ty, DebugLoc::None); } - mir::Terminator::Call { ref func, ref args, ref kind } => { + mir::Terminator::Call { ref func, ref args, ref destination, ref cleanup } => { // Create the callee. 
This will always be a fn ptr and hence a kind of scalar. let callee = self.trans_operand(bcx, func); let attrs = attributes::from_fn_type(bcx.ccx(), callee.ty); @@ -115,7 +115,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { }; // Prepare the return value destination - let (ret_dest_ty, must_copy_dest) = if let Some(d) = kind.destination() { + let (ret_dest_ty, must_copy_dest) = if let Some((ref d, _)) = *destination { let dest = self.trans_lvalue(bcx, d); let ret_ty = dest.ty.to_ty(bcx.tcx()); if !is_foreign && type_of::return_uses_outptr(bcx.ccx(), ret_ty) { @@ -144,9 +144,9 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { } // Many different ways to call a function handled here - match (is_foreign, base::avoid_invoke(bcx), kind) { + match (is_foreign, base::avoid_invoke(bcx), cleanup, destination) { // The two cases below are the only ones to use LLVM’s `invoke`. - (false, false, &mir::CallKind::DivergingCleanup(cleanup)) => { + (false, false, &Some(cleanup), &None) => { let cleanup = self.bcx(cleanup); let landingpad = self.make_landing_pad(cleanup); let unreachable_blk = self.unreachable_block(); @@ -158,14 +158,13 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { Some(attrs), debugloc); }, - (false, false, &mir::CallKind::ConvergingCleanup { ref targets, .. 
}) => { - let cleanup = self.bcx(targets.1); + (false, false, &Some(cleanup), &Some((_, success))) => { + let cleanup = self.bcx(cleanup); let landingpad = self.make_landing_pad(cleanup); let (target, postinvoke) = if must_copy_dest { - (bcx.fcx.new_block("", None), - Some(self.bcx(targets.0))) + (bcx.fcx.new_block("", None), Some(self.bcx(success))) } else { - (self.bcx(targets.0), None) + (self.bcx(success), None) }; let invokeret = build::Invoke(bcx, callee.immediate(), @@ -205,19 +204,11 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { build::Br(target, postinvoketarget.llbb, debugloc); } }, - (false, _, &mir::CallKind::DivergingCleanup(_)) | - (false, _, &mir::CallKind::Diverging) => { + (false, _, _, &None) => { build::Call(bcx, callee.immediate(), &llargs[..], Some(attrs), debugloc); build::Unreachable(bcx); } - (false, _, k@&mir::CallKind::ConvergingCleanup { .. }) | - (false, _, k@&mir::CallKind::Converging { .. }) => { - // FIXME: Bug #20046 - let target = match *k { - mir::CallKind::ConvergingCleanup { targets, .. } => targets.0, - mir::CallKind::Converging { target, .. } => target, - _ => unreachable!() - }; + (false, _, _, &Some((_, target))) => { let llret = build::Call(bcx, callee.immediate(), &llargs[..], @@ -231,7 +222,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { build::Br(bcx, self.llblock(target), debugloc); } // Foreign functions - (true, _, k) => { + (true, _, _, destination) => { let (dest, _) = ret_dest_ty .expect("return destination is not set"); bcx = foreign::trans_native_call(bcx, @@ -241,13 +232,9 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { &llargs[..], arg_tys, debugloc); - match *k { - mir::CallKind::ConvergingCleanup { targets, .. } => - build::Br(bcx, self.llblock(targets.0), debugloc), - mir::CallKind::Converging { target, .. 
} => - build::Br(bcx, self.llblock(target), debugloc), - _ => () - }; + if let Some((_, target)) = *destination { + build::Br(bcx, self.llblock(target), debugloc); + } }, } } From 98265d338556e32c8ab1f89e14c0b3fab6e52001 Mon Sep 17 00:00:00 2001 From: Simonas Kazlauskas Date: Sat, 30 Jan 2016 00:18:47 +0200 Subject: [PATCH 4/7] Convert Drop statement into terminator MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The structure of the old translator as well as MIR assumed that drop glue cannot possibly panic and translated the drops accordingly. However, in presence of `Drop::drop` this assumption can be trivially shown to be untrue. As such, the Rust code like the following would never print number 2: ```rust struct Droppable(u32); impl Drop for Droppable { fn drop(&mut self) { if self.0 == 1 { panic!("Droppable(1)") } else { println!("{}", self.0) } } } fn main() { let x = Droppable(2); let y = Droppable(1); } ``` While the behaviour is allowed according to the language rules (we allow drops to not run), that’s a very counter-intuitive behaviour. We fix this in MIR by allowing `Drop` to have a target to take on divergence and connect the drops in such a way that the leftover drops are executed when some drop unwinds. Note that this commit still does not implement the translator part of changes necessary for the grand scheme of things to fully work, so the actual observed behaviour does not change yet. Coming soon™. See #14875. 
--- src/librustc/mir/repr.rs | 18 +- src/librustc/mir/visit.rs | 11 +- src/librustc_mir/build/cfg.rs | 7 - src/librustc_mir/build/expr/into.rs | 2 +- src/librustc_mir/build/scope.rs | 410 +++++++++++++------- src/librustc_mir/build/stmt.rs | 8 +- src/librustc_mir/transform/erase_regions.rs | 6 +- src/librustc_trans/trans/mir/block.rs | 10 +- src/librustc_trans/trans/mir/statement.rs | 8 - 9 files changed, 314 insertions(+), 166 deletions(-) diff --git a/src/librustc/mir/repr.rs b/src/librustc/mir/repr.rs index 59d00b436c8..783c58469a1 100644 --- a/src/librustc/mir/repr.rs +++ b/src/librustc/mir/repr.rs @@ -262,6 +262,13 @@ pub enum Terminator<'tcx> { /// `END_BLOCK`. Return, + /// Drop the Lvalue + Drop { + value: Lvalue<'tcx>, + target: BasicBlock, + unwind: Option + }, + /// Block ends with a call of a converging function Call { /// The function that’s being called @@ -290,6 +297,8 @@ impl<'tcx> Terminator<'tcx> { slice::ref_slice(t).into_cow(), Call { destination: None, cleanup: Some(ref c), .. } => slice::ref_slice(c).into_cow(), Call { destination: None, cleanup: None, .. } => (&[]).into_cow(), + Drop { target, unwind: Some(unwind), .. } => vec![target, unwind].into_cow(), + Drop { ref target, .. } => slice::ref_slice(target).into_cow(), } } @@ -308,6 +317,8 @@ impl<'tcx> Terminator<'tcx> { Call { destination: Some((_, ref mut t)), cleanup: None, .. } => vec![t], Call { destination: None, cleanup: Some(ref mut c), .. } => vec![c], Call { destination: None, cleanup: None, .. } => vec![], + Drop { ref mut target, unwind: Some(ref mut unwind), .. } => vec![target, unwind], + Drop { ref mut target, .. } => vec![target] } } } @@ -374,6 +385,7 @@ impl<'tcx> Terminator<'tcx> { SwitchInt { discr: ref lv, .. } => write!(fmt, "switchInt({:?})", lv), Return => write!(fmt, "return"), Resume => write!(fmt, "resume"), + Drop { ref value, .. } => write!(fmt, "drop({:?})", value), Call { ref func, ref args, ref destination, .. 
} => { if let Some((ref destination, _)) = *destination { try!(write!(fmt, "{:?} = ", destination)); @@ -418,6 +430,8 @@ impl<'tcx> Terminator<'tcx> { Call { destination: Some(_), cleanup: None, .. } => vec!["return".into_cow()], Call { destination: None, cleanup: Some(_), .. } => vec!["unwind".into_cow()], Call { destination: None, cleanup: None, .. } => vec![], + Drop { unwind: None, .. } => vec!["return".into_cow()], + Drop { .. } => vec!["return".into_cow(), "unwind".into_cow()], } } } @@ -435,15 +449,13 @@ pub struct Statement<'tcx> { #[derive(Clone, Debug, RustcEncodable, RustcDecodable)] pub enum StatementKind<'tcx> { Assign(Lvalue<'tcx>, Rvalue<'tcx>), - Drop(Lvalue<'tcx>), } impl<'tcx> Debug for Statement<'tcx> { fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { use self::StatementKind::*; match self.kind { - Assign(ref lv, ref rv) => write!(fmt, "{:?} = {:?}", lv, rv), - Drop(ref lv) => write!(fmt, "drop {:?}", lv), + Assign(ref lv, ref rv) => write!(fmt, "{:?} = {:?}", lv, rv) } } } diff --git a/src/librustc/mir/visit.rs b/src/librustc/mir/visit.rs index 73dd0de1c04..fb4e0e97054 100644 --- a/src/librustc/mir/visit.rs +++ b/src/librustc/mir/visit.rs @@ -124,9 +124,6 @@ macro_rules! make_mir_visitor { ref $($mutability)* rvalue) => { self.visit_assign(block, lvalue, rvalue); } - StatementKind::Drop(ref $($mutability)* lvalue) => { - self.visit_lvalue(lvalue, LvalueContext::Drop); - } } } @@ -177,10 +174,16 @@ macro_rules! 
make_mir_visitor { Terminator::Return => { } + Terminator::Drop { ref $($mutability)* value, target, unwind } => { + self.visit_lvalue(value, LvalueContext::Drop); + self.visit_branch(block, target); + unwind.map(|t| self.visit_branch(block, t)); + } + Terminator::Call { ref $($mutability)* func, ref $($mutability)* args, ref $($mutability)* destination, - ref $($mutability)* cleanup } => { + cleanup } => { self.visit_operand(func); for arg in args { self.visit_operand(arg); diff --git a/src/librustc_mir/build/cfg.rs b/src/librustc_mir/build/cfg.rs index 365fca23dda..c7147d111aa 100644 --- a/src/librustc_mir/build/cfg.rs +++ b/src/librustc_mir/build/cfg.rs @@ -43,13 +43,6 @@ impl<'tcx> CFG<'tcx> { self.block_data_mut(block).statements.push(statement); } - pub fn push_drop(&mut self, block: BasicBlock, span: Span, lvalue: &Lvalue<'tcx>) { - self.push(block, Statement { - span: span, - kind: StatementKind::Drop(lvalue.clone()) - }); - } - pub fn push_assign(&mut self, block: BasicBlock, span: Span, diff --git a/src/librustc_mir/build/expr/into.rs b/src/librustc_mir/build/expr/into.rs index 3b453a214b6..f50e5689df0 100644 --- a/src/librustc_mir/build/expr/into.rs +++ b/src/librustc_mir/build/expr/into.rs @@ -188,7 +188,7 @@ impl<'a,'tcx> Builder<'a,'tcx> { // operators like x[j] = x[i]. let rhs = unpack!(block = this.as_operand(block, rhs)); let lhs = unpack!(block = this.as_lvalue(block, lhs)); - this.cfg.push_drop(block, expr_span, &lhs); + unpack!(block = this.build_drop(block, lhs.clone())); this.cfg.push_assign(block, expr_span, &lhs, Rvalue::Use(rhs)); block.unit() } diff --git a/src/librustc_mir/build/scope.rs b/src/librustc_mir/build/scope.rs index fef6837f3f7..33397b483be 100644 --- a/src/librustc_mir/build/scope.rs +++ b/src/librustc_mir/build/scope.rs @@ -86,10 +86,10 @@ should go to. 
*/ -use build::{BlockAnd, BlockAndExtension, Builder}; +use build::{BlockAnd, BlockAndExtension, Builder, CFG}; use rustc::middle::region::CodeExtent; use rustc::middle::lang_items; -use rustc::middle::subst::{Substs, VecPerParamSpace}; +use rustc::middle::subst::{Substs, Subst, VecPerParamSpace}; use rustc::middle::ty::{self, Ty}; use rustc::mir::repr::*; use syntax::codemap::{Span, DUMMY_SP}; @@ -97,9 +97,30 @@ use syntax::parse::token::intern_and_get_ident; pub struct Scope<'tcx> { extent: CodeExtent, - drops: Vec<(Span, Lvalue<'tcx>)>, - frees: Vec<(Span, Lvalue<'tcx>, Ty<'tcx>)>, - cached_block: Option, + drops: Vec>, + // A scope may only have one associated free, because: + // 1. We require a `free` to only be scheduled in the scope of `EXPR` in `box EXPR`; + // 2. It only makes sense to have it translated into the diverge-path. + free: Option>, +} + +struct DropData<'tcx> { + value: Lvalue<'tcx>, + /// The cached block for the cleanups-on-diverge path. This block contains code to run the + /// current drop and all the preceding drops (i.e. those having lower index in Drop’s + /// Scope drop array) + cached_block: Option +} + +struct FreeData<'tcx> { + span: Span, + /// Lvalue containing the allocated box. + value: Lvalue<'tcx>, + /// type of item for which the box was allocated for (i.e. the T in Box). + item_ty: Ty<'tcx>, + /// The cached block containing code to run the free. The block will also execute all the drops + /// in the scope. + cached_block: Option } #[derive(Clone, Debug)] @@ -115,7 +136,36 @@ pub struct LoopScope { pub might_break: bool } +impl<'tcx> Scope<'tcx> { + /// Invalidate cached blocks in the scope. Should always be run for all inner scopes when a + /// drop is pushed into some scope enclosing a larger extent of code. 
+ fn invalidate_cache(&mut self, only_free: bool) { + if let Some(ref mut freedata) = self.free { + freedata.cached_block = None; + } + if !only_free { + for dropdata in &mut self.drops { + dropdata.cached_block = None; + } + } + } + + /// Returns the cached block for this scope. + /// + /// Precondition: the caches must be fully filled (i.e. diverge_cleanup is called) in order for + /// this method to work correctly. + fn cached_block(&self) -> Option { + if let Some(ref free_data) = self.free { + free_data.cached_block + } else { + self.drops.last().iter().flat_map(|dd| dd.cached_block).next() + } + } +} + impl<'a,'tcx> Builder<'a,'tcx> { + // Adding and removing scopes + // ========================== /// Start a loop scope, which tracks where `continue` and `break` /// should branch to. See module comment for more details. /// @@ -147,9 +197,9 @@ impl<'a,'tcx> Builder<'a,'tcx> { where F: FnOnce(&mut Builder<'a, 'tcx>) -> BlockAnd { debug!("in_scope(extent={:?}, block={:?})", extent, block); - self.push_scope(extent, block); + self.push_scope(extent); let rv = unpack!(block = f(self)); - self.pop_scope(extent, block); + unpack!(block = self.pop_scope(extent, block)); debug!("in_scope: exiting extent={:?} block={:?}", extent, block); block.and(rv) } @@ -158,36 +208,51 @@ impl<'a,'tcx> Builder<'a,'tcx> { /// scope and call `pop_scope` afterwards. Note that these two /// calls must be paired; using `in_scope` as a convenience /// wrapper maybe preferable. - pub fn push_scope(&mut self, extent: CodeExtent, block: BasicBlock) { - debug!("push_scope({:?}, {:?})", extent, block); - - // push scope, execute `f`, then pop scope again + pub fn push_scope(&mut self, extent: CodeExtent) { + debug!("push_scope({:?})", extent); self.scopes.push(Scope { extent: extent.clone(), drops: vec![], - frees: vec![], - cached_block: None, + free: None }); } /// Pops a scope, which should have extent `extent`, adding any /// drops onto the end of `block` that are needed. 
This must /// match 1-to-1 with `push_scope`. - pub fn pop_scope(&mut self, extent: CodeExtent, block: BasicBlock) { + pub fn pop_scope(&mut self, extent: CodeExtent, block: BasicBlock) -> BlockAnd<()> { debug!("pop_scope({:?}, {:?})", extent, block); + // We need to have `cached_block`s available for all the drops, so we call diverge_cleanup + // to make sure all the `cached_block`s are filled in. + self.diverge_cleanup(); let scope = self.scopes.pop().unwrap(); - assert_eq!(scope.extent, extent); - - // add in any drops needed on the fallthrough path (any other - // exiting paths, such as those that arise from `break`, will - // have drops already) - for (span, lvalue) in scope.drops { - self.cfg.push_drop(block, span, &lvalue); - } + build_scope_drops(block, &scope, &self.scopes[..], &mut self.cfg) } + /// Branch out of `block` to `target`, exiting all scopes up to + /// and including `extent`. This will insert whatever drops are + /// needed, as well as tracking this exit for the SEME region. See + /// module comment for details. + pub fn exit_scope(&mut self, + span: Span, + extent: CodeExtent, + mut block: BasicBlock, + target: BasicBlock) { + let scope_count = 1 + self.scopes.iter().rev().position(|scope| scope.extent == extent) + .unwrap_or_else(||{ + self.hir.span_bug(span, &format!("extent {:?} does not enclose", extent)) + }); + + for (idx, ref scope) in self.scopes.iter().enumerate().rev().take(scope_count) { + unpack!(block = build_scope_drops(block, scope, &self.scopes[..idx], &mut self.cfg)); + } + self.cfg.terminate(block, Terminator::Goto { target: target }); + } + + // Finding scopes + // ============== /// Finds the loop scope for a given label. This is used for /// resolving `break` and `continue`. 
pub fn find_loop_scope(&mut self, @@ -210,30 +275,79 @@ impl<'a,'tcx> Builder<'a,'tcx> { }.unwrap_or_else(|| hir.span_bug(span, "no enclosing loop scope found?")) } - /// Branch out of `block` to `target`, exiting all scopes up to - /// and including `extent`. This will insert whatever drops are - /// needed, as well as tracking this exit for the SEME region. See - /// module comment for details. - pub fn exit_scope(&mut self, - span: Span, - extent: CodeExtent, - block: BasicBlock, - target: BasicBlock) { - let Builder { ref mut scopes, ref mut cfg, ref mut hir, .. } = *self; - - let scope_count = 1 + scopes.iter().rev().position(|scope| scope.extent == extent) - .unwrap_or_else(||{ - hir.span_bug(span, &format!("extent {:?} does not enclose", extent)) - }); - - for scope in scopes.iter_mut().rev().take(scope_count) { - for &(drop_span, ref lvalue) in &scope.drops { - cfg.push_drop(block, drop_span, lvalue); - } - } - cfg.terminate(block, Terminator::Goto { target: target }); + pub fn extent_of_innermost_scope(&self) -> CodeExtent { + self.scopes.last().map(|scope| scope.extent).unwrap() } + pub fn extent_of_outermost_scope(&self) -> CodeExtent { + self.scopes.first().map(|scope| scope.extent).unwrap() + } + + // Scheduling drops + // ================ + /// Indicates that `lvalue` should be dropped on exit from + /// `extent`. + pub fn schedule_drop(&mut self, + span: Span, + extent: CodeExtent, + lvalue: &Lvalue<'tcx>, + lvalue_ty: Ty<'tcx>) { + if !self.hir.needs_drop(lvalue_ty) { + return + } + for scope in self.scopes.iter_mut().rev() { + if scope.extent == extent { + // We only invalidate cached block of free here; all other drops’ cached blocks to + // not become invalid (this drop will branch into them). 
+ scope.invalidate_cache(true); + scope.drops.push(DropData { + value: lvalue.clone(), + cached_block: None + }); + return; + } else { + // We must invalidate all the cached_blocks leading up to the scope we’re + // looking for, because all of the blocks in the chain become incorrect. + scope.invalidate_cache(false) + } + } + self.hir.span_bug(span, + &format!("extent {:?} not in scope to drop {:?}", extent, lvalue)); + } + + /// Schedule dropping of a not-yet-fully-initialised box. + /// + /// This cleanup will only be translated into unwind branch. + /// The extent should be for the `EXPR` inside `box EXPR`. + /// There may only be one “free” scheduled in any given scope. + pub fn schedule_box_free(&mut self, + span: Span, + extent: CodeExtent, + value: &Lvalue<'tcx>, + item_ty: Ty<'tcx>) { + for scope in self.scopes.iter_mut().rev() { + if scope.extent == extent { + assert!(scope.free.is_none(), "scope already has a scheduled free!"); + scope.free = Some(FreeData { + span: span, + value: value.clone(), + item_ty: item_ty, + cached_block: None + }); + return; + } else { + // We must invalidate all the cached_blocks leading up to the scope we’re looking + // for, because otherwise some/most of the blocks in the chain might become + // incorrect. + scope.invalidate_cache(false); + } + } + self.hir.span_bug(span, + &format!("extent {:?} not in scope to free {:?}", extent, value)); + } + + // Other + // ===== /// Creates a path that performs all required cleanup for unwinding. /// /// This path terminates in Resume. Returns the start of the path. @@ -244,114 +358,113 @@ impl<'a,'tcx> Builder<'a,'tcx> { return None; } - let tcx = self.hir.tcx(); let unit_tmp = self.get_unit_temp(); - let mut terminator = Terminator::Resume; + let Builder { ref mut scopes, ref mut cfg, ref mut hir, .. } = *self; + + let tcx = hir.tcx(); + let mut next_block = None; + // Given an array of scopes, we generate these from the outermost scope to the innermost // one. 
Thus for array [S0, S1, S2] with corresponding cleanup blocks [B0, B1, B2], we will - // generate B0 <- B1 <- B2 in left-to-right order. The outermost scope (B0) will always - // terminate with a Resume terminator. - for scope in self.scopes.iter_mut().filter(|s| !s.drops.is_empty() || !s.frees.is_empty()) { - if let Some(b) = scope.cached_block { - terminator = Terminator::Goto { target: b }; + // generate B0 <- B1 <- B2 in left-to-right order. Control flow of the generated blocks + // always ends up at a block with the Resume terminator. + // + // Similarly to the scopes, we translate drops so: + // * Scheduled free drop is executed first; + // * Drops are executed after the free drop in the decreasing order (decreasing as + // from higher index in drops array to lower index); + // + // NB: We do the building backwards here. We will first produce a block containing the + // Resume terminator (which is executed last for any given chain of cleanups) and then go + // on building the drops from the outermost one to the innermost one. Similar note applies + // to the drops within the scope too. + { + let iter = scopes.iter_mut().filter(|s| !s.drops.is_empty() || s.free.is_some()); + for scope in iter { + // Try using the cached free drop if any… + if let Some(FreeData { cached_block: Some(cached_block), .. }) = scope.free { + next_block = Some(cached_block); continue; - } else { - let mut new_block = self.cfg.start_new_cleanup_block(); - self.cfg.terminate(new_block, terminator); - terminator = Terminator::Goto { target: new_block }; - - for &(span, ref lvalue) in scope.drops.iter().rev() { - self.cfg.push_drop(new_block, span, lvalue); + } + // otherwise look for the cached regular drop (fast path)… + if let Some(&DropData { cached_block: Some(cached_block), .. 
}) = scope.drops.last() { + next_block = Some(cached_block); + continue; + } + // otherwise build the blocks… + for drop_data in scope.drops.iter_mut() { + // skipping them if they’re already built… + if let Some(cached_block) = drop_data.cached_block { + next_block = Some(cached_block); + continue; } + let block = cfg.start_new_cleanup_block(); + let target = next_block.unwrap_or_else(|| { + let b = cfg.start_new_cleanup_block(); + cfg.terminate(b, Terminator::Resume); + b + }); + cfg.terminate(block, Terminator::Drop { + value: drop_data.value.clone(), + target: target, + unwind: None + }); + drop_data.cached_block = Some(block); + next_block = Some(block); + } - for &(_, ref lvalue, ref item_ty) in scope.frees.iter().rev() { - let item = lang_items::SpannedLangItems::box_free_fn(&tcx.lang_items) - .expect("box_free language item required"); - let substs = tcx.mk_substs(Substs::new( - VecPerParamSpace::new(vec![], vec![], vec![item_ty]), - VecPerParamSpace::new(vec![], vec![], vec![]) - )); - let func = Constant { - span: item.1, - ty: tcx.lookup_item_type(item.0).ty, + if let Some(ref mut free_data) = scope.free { + // The free was not cached yet. It must be translated the last and will be executed + // first. 
+ let free_func = tcx.lang_items.box_free_fn() + .expect("box_free language item is missing"); + let substs = tcx.mk_substs(Substs::new( + VecPerParamSpace::new(vec![], vec![], vec![free_data.item_ty]), + VecPerParamSpace::new(vec![], vec![], vec![]) + )); + let block = cfg.start_new_cleanup_block(); + let target = next_block.unwrap_or_else(|| { + let b = cfg.start_new_cleanup_block(); + cfg.terminate(b, Terminator::Resume); + b + }); + cfg.terminate(block, Terminator::Call { + func: Operand::Constant(Constant { + span: free_data.span, + ty: tcx.lookup_item_type(free_func).ty.subst(tcx, substs), literal: Literal::Item { - def_id: item.0, + def_id: free_func, kind: ItemKind::Function, substs: substs } - }; - let old_block = new_block; - new_block = self.cfg.start_new_cleanup_block(); - self.cfg.terminate(new_block, Terminator::Call { - func: Operand::Constant(func), - args: vec![Operand::Consume(lvalue.clone())], - destination: Some((unit_tmp.clone(), old_block)), - cleanup: None // we’re already doing divergence cleanups - }); - terminator = Terminator::Goto { target: new_block }; - } - - scope.cached_block = Some(new_block); + }), + args: vec![Operand::Consume(free_data.value.clone())], + destination: Some((unit_tmp.clone(), target)), + cleanup: None + }); + free_data.cached_block = Some(block); + next_block = Some(block); } } - // Return the innermost cached block, most likely the one we just generated. - // Note that if there are no cleanups in scope we return None. - self.scopes.iter().rev().flat_map(|b| b.cached_block).next() - } - - /// Indicates that `lvalue` should be dropped on exit from - /// `extent`. 
- pub fn schedule_drop(&mut self, - span: Span, - extent: CodeExtent, - lvalue: &Lvalue<'tcx>, - lvalue_ty: Ty<'tcx>) { - if self.hir.needs_drop(lvalue_ty) { - for scope in self.scopes.iter_mut().rev() { - // We must invalidate all the cached_blocks leading up to the scope we’re looking - // for, because otherwise some/most of the blocks in the chain might become - // incorrect (i.e. they still are pointing at old cached_block). - scope.cached_block = None; - if scope.extent == extent { - scope.drops.push((span, lvalue.clone())); - return; - } - } - self.hir.span_bug(span, - &format!("extent {:?} not in scope to drop {:?}", extent, lvalue)); } + scopes.iter().rev().flat_map(|x| x.cached_block()).next() } - /// Schedule dropping of a not yet fully initialised box. This cleanup will (and should) only - /// be translated into unwind branch. The extent should be for the `EXPR` inside `box EXPR`. - pub fn schedule_box_free(&mut self, - span: Span, - extent: CodeExtent, - lvalue: &Lvalue<'tcx>, - item_ty: Ty<'tcx>) { - for scope in self.scopes.iter_mut().rev() { - // We must invalidate all the cached_blocks leading up to the scope we’re looking - // for, because otherwise some/most of the blocks in the chain might become - // incorrect (i.e. they still are pointing at old cached_block). 
- scope.cached_block = None; - if scope.extent == extent { - scope.frees.push((span, lvalue.clone(), item_ty)); - return; - } - } - self.hir.span_bug(span, - &format!("extent {:?} not in scope to drop {:?}", extent, lvalue)); + /// Utility function for *non*-scope code to build their own drops + pub fn build_drop(&mut self, block: BasicBlock, value: Lvalue<'tcx>) -> BlockAnd<()> { + let next_target = self.cfg.start_new_block(); + let diverge_target = self.diverge_cleanup(); + self.cfg.terminate(block, Terminator::Drop { + value: value, + target: next_target, + unwind: diverge_target, + }); + next_target.unit() } - pub fn extent_of_innermost_scope(&self) -> CodeExtent { - self.scopes.last().map(|scope| scope.extent).unwrap() - } - - pub fn extent_of_outermost_scope(&self) -> CodeExtent { - self.scopes.first().map(|scope| scope.extent).unwrap() - } - - + // Panicking + // ========= + // FIXME: should be moved into their own module pub fn panic_bounds_check(&mut self, block: BasicBlock, index: Operand<'tcx>, @@ -456,3 +569,30 @@ impl<'a,'tcx> Builder<'a,'tcx> { } } + +/// Builds drops for pop_scope and exit_scope. +fn build_scope_drops<'tcx>(mut block: BasicBlock, + scope: &Scope<'tcx>, + earlier_scopes: &[Scope<'tcx>], + cfg: &mut CFG<'tcx>) + -> BlockAnd<()> { + let mut iter = scope.drops.iter().rev().peekable(); + while let Some(drop_data) = iter.next() { + // Try to find the next block with its cached block for us to diverge into in case the + // drop panics. + let on_diverge = iter.peek().iter().flat_map(|dd| dd.cached_block.into_iter()).next(); + // If there’s no `cached_block`s within current scope, we must look for one in the + // enclosing scope. 
+ let on_diverge = on_diverge.or_else(||{ + earlier_scopes.iter().rev().flat_map(|s| s.cached_block()).next() + }); + let next = cfg.start_new_block(); + cfg.terminate(block, Terminator::Drop { + value: drop_data.value.clone(), + target: next, + unwind: on_diverge + }); + block = next; + } + block.unit() +} diff --git a/src/librustc_mir/build/stmt.rs b/src/librustc_mir/build/stmt.rs index 3a50acec94f..6c0f1c7081b 100644 --- a/src/librustc_mir/build/stmt.rs +++ b/src/librustc_mir/build/stmt.rs @@ -42,16 +42,16 @@ impl<'a,'tcx> Builder<'a,'tcx> { None => { let (extent, _) = stmt_lists.pop().unwrap(); if let Some(extent) = extent { - this.pop_scope(extent, block); + unpack!(block = this.pop_scope(extent, block)); } continue } }; - let Stmt { span, kind } = this.hir.mirror(stmt); + let Stmt { span: _, kind } = this.hir.mirror(stmt); match kind { StmtKind::Let { remainder_scope, init_scope, pattern, initializer, stmts } => { - this.push_scope(remainder_scope, block); + this.push_scope(remainder_scope); stmt_lists.push((Some(remainder_scope), stmts.into_iter())); unpack!(block = this.in_scope(init_scope, block, move |this| { // FIXME #30046 ^~~~ @@ -72,7 +72,7 @@ impl<'a,'tcx> Builder<'a,'tcx> { let expr = this.hir.mirror(expr); let temp = this.temp(expr.ty.clone()); unpack!(block = this.into(&temp, block, expr)); - this.cfg.push_drop(block, span, &temp); + unpack!(block = this.build_drop(block, temp)); block.unit() })); } diff --git a/src/librustc_mir/transform/erase_regions.rs b/src/librustc_mir/transform/erase_regions.rs index 00162be816b..d0dbdeb033d 100644 --- a/src/librustc_mir/transform/erase_regions.rs +++ b/src/librustc_mir/transform/erase_regions.rs @@ -69,9 +69,6 @@ impl<'a, 'tcx> EraseRegions<'a, 'tcx> { self.erase_regions_lvalue(lvalue); self.erase_regions_rvalue(rvalue); } - StatementKind::Drop(ref mut lvalue) => { - self.erase_regions_lvalue(lvalue); - } } } @@ -93,6 +90,9 @@ impl<'a, 'tcx> EraseRegions<'a, 'tcx> { self.erase_regions_lvalue(discr); 
*switch_ty = self.tcx.erase_regions(switch_ty); }, + Terminator::Drop { ref mut value, .. } => { + self.erase_regions_lvalue(value); + } Terminator::Call { ref mut func, ref mut args, ref mut destination, .. } => { if let Some((ref mut destination, _)) = *destination { self.erase_regions_lvalue(destination); diff --git a/src/librustc_trans/trans/mir/block.rs b/src/librustc_trans/trans/mir/block.rs index c1ab55f9f53..bb43c5ae97e 100644 --- a/src/librustc_trans/trans/mir/block.rs +++ b/src/librustc_trans/trans/mir/block.rs @@ -18,10 +18,11 @@ use trans::base; use trans::build; use trans::common::{self, Block, LandingPad}; use trans::debuginfo::DebugLoc; +use trans::Disr; use trans::foreign; +use trans::glue; use trans::type_of; use trans::type_::Type; -use trans::Disr; use super::MirContext; use super::operand::OperandValue::{FatPtr, Immediate, Ref}; @@ -94,6 +95,13 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { base::build_return_block(bcx.fcx, bcx, return_ty, DebugLoc::None); } + mir::Terminator::Drop { ref value, target, unwind: _ } => { + let lvalue = self.trans_lvalue(bcx, value); + // FIXME: this does not account for possibility of unwinding (and totally should). + glue::drop_ty(bcx, lvalue.llval, lvalue.ty.to_ty(bcx.tcx()), DebugLoc::None); + build::Br(bcx, self.llblock(target), DebugLoc::None); + } + mir::Terminator::Call { ref func, ref args, ref destination, ref cleanup } => { // Create the callee. This will always be a fn ptr and hence a kind of scalar. 
let callee = self.trans_operand(bcx, func); diff --git a/src/librustc_trans/trans/mir/statement.rs b/src/librustc_trans/trans/mir/statement.rs index cd186de7ca0..fc888564737 100644 --- a/src/librustc_trans/trans/mir/statement.rs +++ b/src/librustc_trans/trans/mir/statement.rs @@ -10,8 +10,6 @@ use rustc::mir::repr as mir; use trans::common::Block; -use trans::debuginfo::DebugLoc; -use trans::glue; use super::MirContext; use super::TempRef; @@ -50,12 +48,6 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { } } } - - mir::StatementKind::Drop(ref lvalue) => { - let tr_lvalue = self.trans_lvalue(bcx, lvalue); - let ty = tr_lvalue.ty.to_ty(bcx.tcx()); - glue::drop_ty(bcx, tr_lvalue.llval, ty, DebugLoc::None) - } } } } From ebf6341d1dfe57a7ad6f7f87501b018ffe24bba8 Mon Sep 17 00:00:00 2001 From: Simonas Kazlauskas Date: Sat, 30 Jan 2016 19:32:50 +0200 Subject: [PATCH 5/7] Translation part of drop panic recovery With this commit we now finally execute all the leftover drops once some drop panics for some reason! --- src/librustc_trans/trans/mir/block.rs | 33 ++++++++++++++++++++++--- src/librustc_trans/trans/mir/lvalue.rs | 2 +- src/librustc_trans/trans/mir/mod.rs | 8 +++--- src/test/run-fail/mir_drop_panics.rs | 34 ++++++++++++++++++++++++++ 4 files changed, 68 insertions(+), 9 deletions(-) create mode 100644 src/test/run-fail/mir_drop_panics.rs diff --git a/src/librustc_trans/trans/mir/block.rs b/src/librustc_trans/trans/mir/block.rs index bb43c5ae97e..5be585c4189 100644 --- a/src/librustc_trans/trans/mir/block.rs +++ b/src/librustc_trans/trans/mir/block.rs @@ -95,11 +95,36 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { base::build_return_block(bcx.fcx, bcx, return_ty, DebugLoc::None); } - mir::Terminator::Drop { ref value, target, unwind: _ } => { + mir::Terminator::Drop { ref value, target, unwind } => { let lvalue = self.trans_lvalue(bcx, value); - // FIXME: this does not account for possibility of unwinding (and totally should). 
- glue::drop_ty(bcx, lvalue.llval, lvalue.ty.to_ty(bcx.tcx()), DebugLoc::None); - build::Br(bcx, self.llblock(target), DebugLoc::None); + let ty = lvalue.ty.to_ty(bcx.tcx()); + // Double check for necessity to drop + if !glue::type_needs_drop(bcx.tcx(), ty) { + build::Br(bcx, self.llblock(target), DebugLoc::None); + return; + } + let drop_fn = glue::get_drop_glue(bcx.ccx(), ty); + let drop_ty = glue::get_drop_glue_type(bcx.ccx(), ty); + let llvalue = if drop_ty != ty { + build::PointerCast(bcx, lvalue.llval, + type_of::type_of(bcx.ccx(), drop_ty).ptr_to()) + } else { + lvalue.llval + }; + if let Some(unwind) = unwind { + let uwbcx = self.bcx(unwind); + let unwind = self.make_landing_pad(uwbcx); + build::Invoke(bcx, + drop_fn, + &[llvalue], + self.llblock(target), + unwind.llbb, + None, + DebugLoc::None); + } else { + build::Call(bcx, drop_fn, &[llvalue], None, DebugLoc::None); + build::Br(bcx, self.llblock(target), DebugLoc::None); + } } mir::Terminator::Call { ref func, ref args, ref destination, ref cleanup } => { diff --git a/src/librustc_trans/trans/mir/lvalue.rs b/src/librustc_trans/trans/mir/lvalue.rs index a6ba069742d..d994f1ea7b0 100644 --- a/src/librustc_trans/trans/mir/lvalue.rs +++ b/src/librustc_trans/trans/mir/lvalue.rs @@ -65,7 +65,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { assert!(lvalue.llextra != ptr::null_mut()); lvalue.llextra } - _ => bcx.sess().bug("unexpected type in get_base_and_len"), + _ => bcx.sess().bug("unexpected type in lvalue_len"), } } diff --git a/src/librustc_trans/trans/mir/mod.rs b/src/librustc_trans/trans/mir/mod.rs index cfe48af35ca..b19ecc45a4e 100644 --- a/src/librustc_trans/trans/mir/mod.rs +++ b/src/librustc_trans/trans/mir/mod.rs @@ -195,8 +195,8 @@ fn arg_value_refs<'bcx, 'tcx>(bcx: Block<'bcx, 'tcx>, mod analyze; mod block; mod constant; -mod lvalue; -mod rvalue; -mod operand; -mod statement; mod did; +mod lvalue; +mod operand; +mod rvalue; +mod statement; diff --git a/src/test/run-fail/mir_drop_panics.rs 
b/src/test/run-fail/mir_drop_panics.rs new file mode 100644 index 00000000000..9868ff4c241 --- /dev/null +++ b/src/test/run-fail/mir_drop_panics.rs @@ -0,0 +1,34 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. +#![feature(rustc_attrs)] +// error-pattern:panic 1 +// error-pattern:drop 2 +use std::io::{self, Write}; + +struct Droppable(u32); +impl Drop for Droppable { + fn drop(&mut self) { + if self.0 == 1 { + panic!("panic 1"); + } else { + write!(io::stderr(), "drop {}", self.0); + } + } +} + +#[rustc_mir] +fn mir() { + let x = Droppable(2); + let y = Droppable(1); +} + +fn main() { + mir(); +} From 5638847ae30773dfd36dbeb57c8293a854f5075a Mon Sep 17 00:00:00 2001 From: Simonas Kazlauskas Date: Tue, 2 Feb 2016 22:50:26 +0200 Subject: [PATCH 6/7] Address nits on build/scope.rs --- src/librustc_mir/build/scope.rs | 242 +++++++++++++++++++------------- 1 file changed, 141 insertions(+), 101 deletions(-) diff --git a/src/librustc_mir/build/scope.rs b/src/librustc_mir/build/scope.rs index 33397b483be..27654127a6c 100644 --- a/src/librustc_mir/build/scope.rs +++ b/src/librustc_mir/build/scope.rs @@ -101,11 +101,18 @@ pub struct Scope<'tcx> { // A scope may only have one associated free, because: // 1. We require a `free` to only be scheduled in the scope of `EXPR` in `box EXPR`; // 2. It only makes sense to have it translated into the diverge-path. + // + // This kind of drop will be run *after* all the regular drops scheduled onto this scope, + // because drops may have dependencies on the allocated memory. + // + // This is expected to go away once `box EXPR` becomes a sugar for placement protocol and gets + // desugared in some earlier stage. 
free: Option>, } struct DropData<'tcx> { value: Lvalue<'tcx>, + // NB: per-drop “cache” is necessary for the build_scope_drops function below. /// The cached block for the cleanups-on-diverge path. This block contains code to run the /// current drop and all the preceding drops (i.e. those having lower index in Drop’s /// Scope drop array) @@ -137,17 +144,17 @@ pub struct LoopScope { } impl<'tcx> Scope<'tcx> { - /// Invalidate cached blocks in the scope. Should always be run for all inner scopes when a - /// drop is pushed into some scope enclosing a larger extent of code. - fn invalidate_cache(&mut self, only_free: bool) { + /// Invalidate all the cached blocks in the scope. + /// + /// Should always be run for all inner scopes when a drop is pushed into some scope enclosing a + /// larger extent of code. + fn invalidate_cache(&mut self) { + for dropdata in &mut self.drops { + dropdata.cached_block = None; + } if let Some(ref mut freedata) = self.free { freedata.cached_block = None; } - if !only_free { - for dropdata in &mut self.drops { - dropdata.cached_block = None; - } - } } /// Returns the cached block for this scope. @@ -155,10 +162,12 @@ impl<'tcx> Scope<'tcx> { /// Precondition: the caches must be fully filled (i.e. diverge_cleanup is called) in order for /// this method to work correctly. fn cached_block(&self) -> Option { - if let Some(ref free_data) = self.free { - free_data.cached_block + if let Some(data) = self.drops.last() { + Some(data.cached_block.expect("drop cache is not filled")) + } else if let Some(ref data) = self.free { + Some(data.cached_block.expect("free cache is not filled")) } else { - self.drops.last().iter().flat_map(|dd| dd.cached_block).next() + None } } } @@ -297,9 +306,8 @@ impl<'a,'tcx> Builder<'a,'tcx> { } for scope in self.scopes.iter_mut().rev() { if scope.extent == extent { - // We only invalidate cached block of free here; all other drops’ cached blocks to - // not become invalid (this drop will branch into them). 
- scope.invalidate_cache(true); + // No need to invalidate any caches here. The just-scheduled drop will branch into + // the drop that comes before it in the vector. scope.drops.push(DropData { value: lvalue.clone(), cached_block: None @@ -307,8 +315,8 @@ impl<'a,'tcx> Builder<'a,'tcx> { return; } else { // We must invalidate all the cached_blocks leading up to the scope we’re - // looking for, because all of the blocks in the chain become incorrect. - scope.invalidate_cache(false) + // looking for, because all of the blocks in the chain will become incorrect. + scope.invalidate_cache() } } self.hir.span_bug(span, @@ -328,6 +336,9 @@ impl<'a,'tcx> Builder<'a,'tcx> { for scope in self.scopes.iter_mut().rev() { if scope.extent == extent { assert!(scope.free.is_none(), "scope already has a scheduled free!"); + // We also must invalidate the caches in the scope for which the free is scheduled + // because the drops must branch into the free we schedule here. + scope.invalidate_cache(); scope.free = Some(FreeData { span: span, value: value.clone(), @@ -337,9 +348,9 @@ impl<'a,'tcx> Builder<'a,'tcx> { return; } else { // We must invalidate all the cached_blocks leading up to the scope we’re looking - // for, because otherwise some/most of the blocks in the chain might become + // for, because otherwise some/most of the blocks in the chain will become // incorrect. - scope.invalidate_cache(false); + scope.invalidate_cache(); } } self.hir.span_bug(span, @@ -357,95 +368,20 @@ impl<'a,'tcx> Builder<'a,'tcx> { if self.scopes.is_empty() { return None; } - - let unit_tmp = self.get_unit_temp(); - let Builder { ref mut scopes, ref mut cfg, ref mut hir, .. } = *self; - - let tcx = hir.tcx(); + let unit_temp = self.get_unit_temp(); + let Builder { ref mut hir, ref mut cfg, ref mut scopes, .. } = *self; let mut next_block = None; // Given an array of scopes, we generate these from the outermost scope to the innermost // one. 
Thus for array [S0, S1, S2] with corresponding cleanup blocks [B0, B1, B2], we will // generate B0 <- B1 <- B2 in left-to-right order. Control flow of the generated blocks // always ends up at a block with the Resume terminator. - // - // Similarly to the scopes, we translate drops so: - // * Scheduled free drop is executed first; - // * Drops are executed after the free drop in the decreasing order (decreasing as - // from higher index in drops array to lower index); - // - // NB: We do the building backwards here. We will first produce a block containing the - // Resume terminator (which is executed last for any given chain of cleanups) and then go - // on building the drops from the outermost one to the innermost one. Similar note applies - // to the drops within the scope too. - { - let iter = scopes.iter_mut().filter(|s| !s.drops.is_empty() || s.free.is_some()); - for scope in iter { - // Try using the cached free drop if any… - if let Some(FreeData { cached_block: Some(cached_block), .. }) = scope.free { - next_block = Some(cached_block); - continue; - } - // otherwise look for the cached regular drop (fast path)… - if let Some(&DropData { cached_block: Some(cached_block), .. }) = scope.drops.last() { - next_block = Some(cached_block); - continue; - } - // otherwise build the blocks… - for drop_data in scope.drops.iter_mut() { - // skipping them if they’re already built… - if let Some(cached_block) = drop_data.cached_block { - next_block = Some(cached_block); - continue; - } - let block = cfg.start_new_cleanup_block(); - let target = next_block.unwrap_or_else(|| { - let b = cfg.start_new_cleanup_block(); - cfg.terminate(b, Terminator::Resume); - b - }); - cfg.terminate(block, Terminator::Drop { - value: drop_data.value.clone(), - target: target, - unwind: None - }); - drop_data.cached_block = Some(block); - next_block = Some(block); - } - - if let Some(ref mut free_data) = scope.free { - // The free was not cached yet. 
It must be translated the last and will be executed - // first. - let free_func = tcx.lang_items.box_free_fn() - .expect("box_free language item is missing"); - let substs = tcx.mk_substs(Substs::new( - VecPerParamSpace::new(vec![], vec![], vec![free_data.item_ty]), - VecPerParamSpace::new(vec![], vec![], vec![]) - )); - let block = cfg.start_new_cleanup_block(); - let target = next_block.unwrap_or_else(|| { - let b = cfg.start_new_cleanup_block(); - cfg.terminate(b, Terminator::Resume); - b - }); - cfg.terminate(block, Terminator::Call { - func: Operand::Constant(Constant { - span: free_data.span, - ty: tcx.lookup_item_type(free_func).ty.subst(tcx, substs), - literal: Literal::Item { - def_id: free_func, - kind: ItemKind::Function, - substs: substs - } - }), - args: vec![Operand::Consume(free_data.value.clone())], - destination: Some((unit_tmp.clone(), target)), - cleanup: None - }); - free_data.cached_block = Some(block); - next_block = Some(block); - } - } + for scope in scopes.iter_mut().filter(|s| !s.drops.is_empty() || s.free.is_some()) { + next_block = Some(build_diverge_scope(hir.tcx(), + cfg, + unit_temp.clone(), + scope, + next_block)); } scopes.iter().rev().flat_map(|x| x.cached_block()).next() } @@ -462,6 +398,7 @@ impl<'a,'tcx> Builder<'a,'tcx> { next_target.unit() } + // Panicking // ========= // FIXME: should be moved into their own module @@ -596,3 +533,106 @@ fn build_scope_drops<'tcx>(mut block: BasicBlock, } block.unit() } + +fn build_diverge_scope<'tcx>(tcx: &ty::ctxt<'tcx>, + cfg: &mut CFG<'tcx>, + unit_temp: Lvalue<'tcx>, + scope: &mut Scope<'tcx>, + target: Option) + -> BasicBlock { + debug_assert!(!scope.drops.is_empty() || scope.free.is_some()); + + // First, we build the drops, iterating the drops array in reverse. We do that so that as soon + // as we find a `cached_block`, we know that we’re finished and don’t need to do anything else. 
+ let mut previous = None; + let mut last_drop_block = None; + for drop_data in scope.drops.iter_mut().rev() { + if let Some(cached_block) = drop_data.cached_block { + if let Some((previous_block, previous_value)) = previous { + cfg.terminate(previous_block, Terminator::Drop { + value: previous_value, + target: cached_block, + unwind: None + }); + return last_drop_block.unwrap(); + } else { + return cached_block; + } + } else { + let block = cfg.start_new_cleanup_block(); + drop_data.cached_block = Some(block); + if let Some((previous_block, previous_value)) = previous { + cfg.terminate(previous_block, Terminator::Drop { + value: previous_value, + target: block, + unwind: None + }); + } else { + last_drop_block = Some(block); + } + previous = Some((block, drop_data.value.clone())); + } + } + + // Prepare the end target for this chain. + let mut target = target.unwrap_or_else(||{ + let b = cfg.start_new_cleanup_block(); + cfg.terminate(b, Terminator::Resume); + b + }); + + // Then, build the free branching into the prepared target. + if let Some(ref mut free_data) = scope.free { + target = if let Some(cached_block) = free_data.cached_block { + cached_block + } else { + let t = build_free(tcx, cfg, unit_temp, free_data, target); + free_data.cached_block = Some(t); + t + } + }; + + if let Some((previous_block, previous_value)) = previous { + // Finally, branch into that just-built `target` from the `previous_block`. + cfg.terminate(previous_block, Terminator::Drop { + value: previous_value, + target: target, + unwind: None + }); + last_drop_block.unwrap() + } else { + // If `previous.is_none()`, there were no drops in this scope – we return the + // target. 
+ target + } +} + +fn build_free<'tcx>(tcx: &ty::ctxt<'tcx>, + cfg: &mut CFG<'tcx>, + unit_temp: Lvalue<'tcx>, + data: &FreeData<'tcx>, + target: BasicBlock) + -> BasicBlock { + let free_func = tcx.lang_items.box_free_fn() + .expect("box_free language item is missing"); + let substs = tcx.mk_substs(Substs::new( + VecPerParamSpace::new(vec![], vec![], vec![data.item_ty]), + VecPerParamSpace::new(vec![], vec![], vec![]) + )); + let block = cfg.start_new_cleanup_block(); + cfg.terminate(block, Terminator::Call { + func: Operand::Constant(Constant { + span: data.span, + ty: tcx.lookup_item_type(free_func).ty.subst(tcx, substs), + literal: Literal::Item { + def_id: free_func, + kind: ItemKind::Function, + substs: substs + } + }), + args: vec![Operand::Consume(data.value.clone())], + destination: Some((unit_temp, target)), + cleanup: None + }); + block +} From caf62ef9846edfd41177b883181c5c045ca69859 Mon Sep 17 00:00:00 2001 From: Simonas Kazlauskas Date: Sat, 6 Feb 2016 02:31:19 +0200 Subject: [PATCH 7/7] Ignore a test on MSVC The MSVC SEH is still not implemented, so we go ahead and ignore it. --- src/test/run-fail/mir_drop_panics.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/test/run-fail/mir_drop_panics.rs b/src/test/run-fail/mir_drop_panics.rs index 9868ff4c241..df20700e016 100644 --- a/src/test/run-fail/mir_drop_panics.rs +++ b/src/test/run-fail/mir_drop_panics.rs @@ -8,6 +8,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. #![feature(rustc_attrs)] + +// ignore-msvc: FIXME(#30941) // error-pattern:panic 1 // error-pattern:drop 2 use std::io::{self, Write};