From 9da2aaccfe7dd3452dd066bbc3829af6bd76ace4 Mon Sep 17 00:00:00 2001 From: Ariel Ben-Yehuda Date: Thu, 11 May 2017 01:02:52 +0300 Subject: [PATCH 01/11] translate array drop glue using MIR This fixes leakage on panic with arrays & slices. I am using a C-style for-loop instead of a pointer-based loop because that would be ugly-er to implement. --- src/librustc/ty/util.rs | 9 ++ src/librustc_mir/util/elaborate_drops.rs | 135 ++++++++++++++++++++++- src/librustc_trans/collector.rs | 12 +- src/librustc_trans/mir/block.rs | 31 +----- src/test/run-pass/dynamic-drop.rs | 11 ++ 5 files changed, 152 insertions(+), 46 deletions(-) diff --git a/src/librustc/ty/util.rs b/src/librustc/ty/util.rs index 01fed11fc97..c36a77736ab 100644 --- a/src/librustc/ty/util.rs +++ b/src/librustc/ty/util.rs @@ -584,6 +584,15 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { bug!("empty_substs_for_def_id: {:?} has type parameters", item_def_id) }) } + + pub fn const_usize(&self, val: usize) -> ConstInt { + match self.sess.target.uint_type { + ast::UintTy::U16 => ConstInt::Usize(ConstUsize::Us16(val as u16)), + ast::UintTy::U32 => ConstInt::Usize(ConstUsize::Us32(val as u32)), + ast::UintTy::U64 => ConstInt::Usize(ConstUsize::Us64(val as u64)), + _ => bug!(), + } + } } pub struct TypeIdHasher<'a, 'gcx: 'a+'tcx, 'tcx: 'a, W> { diff --git a/src/librustc_mir/util/elaborate_drops.rs b/src/librustc_mir/util/elaborate_drops.rs index 585840ce1e5..4569569c820 100644 --- a/src/librustc_mir/util/elaborate_drops.rs +++ b/src/librustc_mir/util/elaborate_drops.rs @@ -11,7 +11,7 @@ use std::fmt; use rustc::hir; use rustc::mir::*; -use rustc::middle::const_val::ConstInt; +use rustc::middle::const_val::{ConstInt, ConstVal}; use rustc::middle::lang_items; use rustc::ty::{self, Ty}; use rustc::ty::subst::{Kind, Substs}; @@ -535,6 +535,114 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> }) } + /// create a loop that drops an array: + /// + /// loop-block: + /// can_go = index < len + /// if can_go then drop-block else succ + /// drop-block: + /// ptr = &mut LV[len] + /// index = index + 1 + /// drop(ptr) + fn drop_loop(&mut self, + unwind: Option, + succ: BasicBlock, + index: &Lvalue<'tcx>, + length: &Lvalue<'tcx>, + ety: Ty<'tcx>, + is_cleanup: bool) + -> BasicBlock + { + let use_ = |lv: &Lvalue<'tcx>| Operand::Consume(lv.clone()); + let tcx = self.tcx(); + + let ref_ty = tcx.mk_ref(tcx.types.re_erased, ty::TypeAndMut { + ty: ety, + mutbl: hir::Mutability::MutMutable + }); + let ptr = &Lvalue::Local(self.new_temp(ref_ty)); + let can_go = &Lvalue::Local(self.new_temp(tcx.types.bool)); + + let one = self.constant_usize(1); + let drop_block = self.elaborator.patch().new_block(BasicBlockData { + statements: vec![ + Statement { source_info: self.source_info, kind: StatementKind::Assign( + ptr.clone(), Rvalue::Ref( + tcx.types.re_erased, BorrowKind::Mut, + self.lvalue.clone().index(use_(index)) + ), + )}, + Statement { source_info: self.source_info, kind: StatementKind::Assign( + index.clone(), Rvalue::BinaryOp(BinOp::Add, use_(index), one) + )}, + ], + is_cleanup, + terminator: Some(Terminator { + source_info: self.source_info, + kind: TerminatorKind::Resume, + }) + }); + + let loop_block = self.elaborator.patch().new_block(BasicBlockData { + statements: vec![ + Statement { source_info: self.source_info, kind: StatementKind::Assign( + can_go.clone(), Rvalue::BinaryOp(BinOp::Lt, use_(index), use_(length)) + )}, + ], + is_cleanup, + terminator: Some(Terminator { + source_info: self.source_info, + kind: TerminatorKind::if_(tcx, use_(can_go), 
drop_block, succ) + }) + }); + + self.elaborator.patch().patch_terminator(drop_block, TerminatorKind::Drop { + location: ptr.clone().deref(), + target: loop_block, + unwind: unwind + }); + + loop_block + } + + fn open_drop_for_array(&mut self, ety: Ty<'tcx>) -> BasicBlock { + debug!("open_drop_for_array({:?})", ety); + // FIXME: using an index instead of a pointer to avoid + // special-casing ZSTs. + let tcx = self.tcx(); + let index = &Lvalue::Local(self.new_temp(tcx.types.usize)); + let length = &Lvalue::Local(self.new_temp(tcx.types.usize)); + + let unwind = self.unwind.map(|unwind| { + self.drop_loop(None, unwind, index, length, ety, true) + }); + + let is_cleanup = self.is_cleanup; + let succ = self.succ; // FIXME(#6393) + let loop_block = self.drop_loop(unwind, succ, index, length, ety, is_cleanup); + + let zero = self.constant_usize(0); + let drop_block = self.elaborator.patch().new_block(BasicBlockData { + statements: vec![ + Statement { source_info: self.source_info, kind: StatementKind::Assign( + length.clone(), Rvalue::Len(self.lvalue.clone()) + )}, + Statement { source_info: self.source_info, kind: StatementKind::Assign( + index.clone(), Rvalue::Use(zero), + )}, + ], + is_cleanup, + terminator: Some(Terminator { + source_info: self.source_info, + kind: TerminatorKind::Goto { target: loop_block } + }) + }); + + // FIXME(#34708): handle partially-dropped array/slice elements. + self.drop_flag_test_and_reset_block( + is_cleanup, Some(DropFlagMode::Deep), drop_block, succ) + } + /// The slow-path - create an "open", elaborated drop for a type /// which is moved-out-of only partially, and patch `bb` to a jump /// to it. This must not be called on ADTs with a destructor, @@ -564,10 +672,8 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> ty::TyDynamic(..) => { self.complete_drop(is_cleanup, Some(DropFlagMode::Deep), succ) } - ty::TyArray(..) | ty::TySlice(..) => { - // FIXME(#34708): handle partially-dropped - // array/slice elements. 
- self.complete_drop(is_cleanup, Some(DropFlagMode::Deep), succ) + ty::TyArray(ety, _) | ty::TySlice(ety) => { + self.open_drop_for_array(ety) } _ => bug!("open drop from non-ADT `{:?}`", ty) } @@ -588,6 +694,17 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> debug!("complete_drop({:?},{:?})", self, drop_mode); let drop_block = self.drop_block(is_cleanup, succ); + self.drop_flag_test_and_reset_block(is_cleanup, drop_mode, drop_block, succ) + } + + fn drop_flag_test_and_reset_block(&mut self, + is_cleanup: bool, + drop_mode: Option, + drop_block: BasicBlock, + succ: BasicBlock) -> BasicBlock + { + debug!("drop_flag_test_and_reset_block({:?},{:?})", self, drop_mode); + if let Some(mode) = drop_mode { let block_start = Location { block: drop_block, statement_index: 0 }; self.elaborator.clear_drop_flag(block_start, self.path, mode); @@ -691,4 +808,12 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> let mir = self.elaborator.mir(); self.elaborator.patch().terminator_loc(mir, bb) } + + fn constant_usize(&self, val: usize) -> Operand<'tcx> { + Operand::Constant(box Constant { + span: self.source_info.span, + ty: self.tcx().types.usize, + literal: Literal::Value { value: ConstVal::Integral(self.tcx().const_usize(val)) } + }) + } } diff --git a/src/librustc_trans/collector.rs b/src/librustc_trans/collector.rs index 5f8b79a994a..429e7b01610 100644 --- a/src/librustc_trans/collector.rs +++ b/src/librustc_trans/collector.rs @@ -612,17 +612,7 @@ fn visit_instance_use<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, output.push(create_fn_trans_item(instance)); } } - ty::InstanceDef::DropGlue(_, Some(ty)) => { - match ty.sty { - ty::TyArray(ety, _) | - ty::TySlice(ety) - if is_direct_call => - { - // drop of arrays/slices is translated in-line. - visit_drop_use(scx, ety, false, output); - } - _ => {} - }; + ty::InstanceDef::DropGlue(_, Some(_)) => { output.push(create_fn_trans_item(instance)); } ty::InstanceDef::ClosureOnceShim { .. } | diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index a3fa1279ffb..724ff2f2134 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -20,13 +20,12 @@ use base::{self, Lifetime}; use callee; use builder::Builder; use common::{self, Funclet}; -use common::{C_bool, C_str_slice, C_struct, C_u32, C_uint, C_undef}; +use common::{C_bool, C_str_slice, C_struct, C_u32, C_undef}; use consts; use machine::llalign_of_min; use meth; use monomorphize; use type_of; -use tvec; use type_::Type; use rustc_data_structures::indexed_vec::IndexVec; @@ -222,34 +221,6 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let (drop_fn, need_extra) = match ty.sty { ty::TyDynamic(..) 
=> (meth::DESTRUCTOR.get_fn(&bcx, lvalue.llextra), false), - ty::TyArray(ety, _) | ty::TySlice(ety) => { - // FIXME: handle panics - let drop_fn = monomorphize::resolve_drop_in_place( - bcx.ccx.shared(), ety); - let drop_fn = callee::get_fn(bcx.ccx, drop_fn); - let bcx = tvec::slice_for_each( - &bcx, - lvalue.project_index(&bcx, C_uint(bcx.ccx, 0u64)), - ety, - lvalue.len(bcx.ccx), - |bcx, llval, loop_bb| { - self.set_debug_loc(&bcx, terminator.source_info); - if let Some(unwind) = unwind { - bcx.invoke( - drop_fn, - &[llval], - loop_bb, - llblock(self, unwind), - cleanup_bundle - ); - } else { - bcx.call(drop_fn, &[llval], cleanup_bundle); - bcx.br(loop_bb); - } - }); - funclet_br(self, bcx, target); - return - } _ => (callee::get_fn(bcx.ccx, drop_fn), lvalue.has_extra()) }; let args = &[lvalue.llval, lvalue.llextra][..1 + need_extra as usize]; diff --git a/src/test/run-pass/dynamic-drop.rs b/src/test/run-pass/dynamic-drop.rs index a2cca206409..26e5fe987ee 100644 --- a/src/test/run-pass/dynamic-drop.rs +++ b/src/test/run-pass/dynamic-drop.rs @@ -125,6 +125,14 @@ fn union1(a: &Allocator) { } } +fn array_simple(a: &Allocator) { + let _x = [a.alloc(), a.alloc(), a.alloc(), a.alloc()]; +} + +fn vec_simple(a: &Allocator) { + let _x = vec![a.alloc(), a.alloc(), a.alloc(), a.alloc()]; +} + fn run_test(mut f: F) where F: FnMut(&Allocator) { @@ -171,5 +179,8 @@ fn main() { run_test(|a| assignment1(a, false)); run_test(|a| assignment1(a, true)); + run_test(|a| array_simple(a)); + run_test(|a| vec_simple(a)); + run_test_nopanic(|a| union1(a)); } From 24c1a07c729911997397b47c7fbc3fa0a657a3aa Mon Sep 17 00:00:00 2001 From: Ariel Ben-Yehuda Date: Thu, 11 May 2017 02:01:25 +0300 Subject: [PATCH 02/11] refactor trans::mir::block to trans all calls through the same code --- src/librustc_trans/abi.rs | 12 ++- src/librustc_trans/mir/block.rs | 139 +++++++++++++++++--------------- 2 files changed, 84 insertions(+), 67 deletions(-) diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index 9b94a3b2f23..120f201a9c8 100644 --- a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -11,7 +11,7 @@ use llvm::{self, ValueRef, AttributePlace}; use base; use builder::Builder; -use common::{type_is_fat_ptr, C_uint}; +use common::{instance_ty, ty_fn_sig, type_is_fat_ptr, C_uint}; use context::CrateContext; use cabi_x86; use cabi_x86_64; @@ -610,6 +610,14 @@ pub struct FnType<'tcx> { } impl<'a, 'tcx> FnType<'tcx> { + pub fn of_instance(ccx: &CrateContext<'a, 'tcx>, instance: &ty::Instance<'tcx>) + -> Self { + let fn_ty = instance_ty(ccx.shared(), &instance); + let sig = ty_fn_sig(ccx, fn_ty); + let sig = ccx.tcx().erase_late_bound_regions_and_normalize(&sig); + Self::new(ccx, sig, &[]) + } + pub fn new(ccx: &CrateContext<'a, 'tcx>, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> FnType<'tcx> { @@ -631,6 +639,8 @@ impl<'a, 'tcx> FnType<'tcx> { pub fn unadjusted(ccx: &CrateContext<'a, 'tcx>, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> FnType<'tcx> { + debug!("FnType::unadjusted({:?}, {:?})", sig, extra_args); + use self::Abi::*; let cconv = match ccx.sess().target.target.adjust_abi(sig.abi) { RustIntrinsic | PlatformIntrinsic | diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index 724ff2f2134..f6c8ee0c825 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -53,7 +53,23 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { _ => funclets[bb].as_ref(), }; + for statement in &data.statements { + bcx = self.trans_statement(bcx, statement); + } + 
+ self.trans_terminator(bcx, bb, data.terminator(), funclet); + } + + fn trans_terminator(&mut self, + mut bcx: Builder<'a, 'tcx>, + bb: mir::BasicBlock, + terminator: &mir::Terminator<'tcx>, + funclet: Option<&Funclet>) + { + debug!("trans_terminator: {:?}", terminator); + // Create the cleanup bundle, if needed. + let tcx = bcx.tcx(); let cleanup_pad = funclet.map(|lp| lp.cleanuppad()); let cleanup_bundle = funclet.map(|l| l.bundle()); @@ -104,12 +120,53 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } }; - for statement in &data.statements { - bcx = self.trans_statement(bcx, statement); - } + let do_call = | + this: &mut Self, + bcx: Builder<'a, 'tcx>, + fn_ty: FnType<'tcx>, + fn_ptr: ValueRef, + llargs: &[ValueRef], + destination: Option<(ReturnDest, ty::Ty<'tcx>, mir::BasicBlock)>, + cleanup: Option + | { + if let Some(cleanup) = cleanup { + let ret_bcx = if let Some((_, _, target)) = destination { + this.blocks[target] + } else { + this.unreachable_block() + }; + let invokeret = bcx.invoke(fn_ptr, + &llargs, + ret_bcx, + llblock(this, cleanup), + cleanup_bundle); + fn_ty.apply_attrs_callsite(invokeret); - let terminator = data.terminator(); - debug!("trans_block: terminator: {:?}", terminator); + if let Some((ret_dest, ret_ty, target)) = destination { + let ret_bcx = this.get_builder(target); + this.set_debug_loc(&ret_bcx, terminator.source_info); + let op = OperandRef { + val: Immediate(invokeret), + ty: ret_ty, + }; + this.store_return(&ret_bcx, ret_dest, &fn_ty.ret, op); + } + } else { + let llret = bcx.call(fn_ptr, &llargs, cleanup_bundle); + fn_ty.apply_attrs_callsite(llret); + + if let Some((ret_dest, ret_ty, target)) = destination { + let op = OperandRef { + val: Immediate(llret), + ty: ret_ty, + }; + this.store_return(&bcx, ret_dest, &fn_ty.ret, op); + funclet_br(this, bcx, target); + } else { + bcx.unreachable(); + } + } + }; let span = terminator.source_info.span; self.set_debug_loc(&bcx, terminator.source_info); @@ -218,24 +275,16 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } let lvalue = self.trans_lvalue(&bcx, location); + let fn_ty = FnType::of_instance(bcx.ccx, &drop_fn); let (drop_fn, need_extra) = match ty.sty { ty::TyDynamic(..) => (meth::DESTRUCTOR.get_fn(&bcx, lvalue.llextra), false), _ => (callee::get_fn(bcx.ccx, drop_fn), lvalue.has_extra()) }; let args = &[lvalue.llval, lvalue.llextra][..1 + need_extra as usize]; - if let Some(unwind) = unwind { - bcx.invoke( - drop_fn, - args, - self.blocks[target], - llblock(self, unwind), - cleanup_bundle - ); - } else { - bcx.call(drop_fn, args, cleanup_bundle); - funclet_br(self, bcx, target); - } + do_call(self, bcx, fn_ty, drop_fn, args, + Some((ReturnDest::Nothing, tcx.mk_nil(), target)), + unwind); } mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, cleanup } => { @@ -342,26 +391,18 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // Obtain the panic entry point. let def_id = common::langcall(bcx.tcx(), Some(span), "", lang_item); let instance = ty::Instance::mono(bcx.tcx(), def_id); + let fn_ty = FnType::of_instance(bcx.ccx, &instance); let llfn = callee::get_fn(bcx.ccx, instance); // Translate the actual panic invoke/call. - if let Some(unwind) = cleanup { - bcx.invoke(llfn, - &args, - self.unreachable_block(), - llblock(self, unwind), - cleanup_bundle); - } else { - bcx.call(llfn, &args, cleanup_bundle); - bcx.unreachable(); - } + do_call(self, bcx, fn_ty, llfn, &args, None, cleanup); } mir::TerminatorKind::DropAndReplace { .. 
} => { - bug!("undesugared DropAndReplace in trans: {:?}", data); + bug!("undesugared DropAndReplace in trans: {:?}", terminator); } - mir::TerminatorKind::Call { ref func, ref args, ref destination, ref cleanup } => { + mir::TerminatorKind::Call { ref func, ref args, ref destination, cleanup } => { // Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar. let callee = self.trans_operand(&bcx, func); @@ -514,43 +555,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { _ => span_bug!(span, "no llfn for call"), }; - // Many different ways to call a function handled here - if let &Some(cleanup) = cleanup { - let ret_bcx = if let Some((_, target)) = *destination { - self.blocks[target] - } else { - self.unreachable_block() - }; - let invokeret = bcx.invoke(fn_ptr, - &llargs, - ret_bcx, - llblock(self, cleanup), - cleanup_bundle); - fn_ty.apply_attrs_callsite(invokeret); - - if let Some((_, target)) = *destination { - let ret_bcx = self.get_builder(target); - self.set_debug_loc(&ret_bcx, terminator.source_info); - let op = OperandRef { - val: Immediate(invokeret), - ty: sig.output(), - }; - self.store_return(&ret_bcx, ret_dest, &fn_ty.ret, op); - } - } else { - let llret = bcx.call(fn_ptr, &llargs, cleanup_bundle); - fn_ty.apply_attrs_callsite(llret); - if let Some((_, target)) = *destination { - let op = OperandRef { - val: Immediate(llret), - ty: sig.output(), - }; - self.store_return(&bcx, ret_dest, &fn_ty.ret, op); - funclet_br(self, bcx, target); - } else { - bcx.unreachable(); - } - } + do_call(self, bcx, fn_ty, fn_ptr, &llargs, + destination.as_ref().map(|&(_, target)| (ret_dest, sig.output(), target)), + cleanup); } } } From c6d0b5bdd81a2b87351afb587e02c557e9e6355e Mon Sep 17 00:00:00 2001 From: Ariel Ben-Yehuda Date: Fri, 12 May 2017 15:00:36 +0300 Subject: [PATCH 03/11] address review comments --- src/librustc/ty/util.rs | 2 +- .../borrowck/mir/elaborate_drops.rs | 13 +- src/librustc_mir/shim.rs | 3 +- src/librustc_mir/util/elaborate_drops.rs | 247 +++++++++--------- 4 files changed, 138 insertions(+), 127 deletions(-) diff --git a/src/librustc/ty/util.rs b/src/librustc/ty/util.rs index c36a77736ab..d69494206c5 100644 --- a/src/librustc/ty/util.rs +++ b/src/librustc/ty/util.rs @@ -585,7 +585,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { }) } - pub fn const_usize(&self, val: usize) -> ConstInt { + pub fn const_usize(&self, val: u16) -> ConstInt { match self.sess.target.uint_type { ast::UintTy::U16 => ConstInt::Usize(ConstUsize::Us16(val as u16)), ast::UintTy::U32 => ConstInt::Usize(ConstUsize::Us32(val as u32)), diff --git a/src/librustc_borrowck/borrowck/mir/elaborate_drops.rs b/src/librustc_borrowck/borrowck/mir/elaborate_drops.rs index e0d86ff23f8..b03d34819f6 100644 --- a/src/librustc_borrowck/borrowck/mir/elaborate_drops.rs +++ b/src/librustc_borrowck/borrowck/mir/elaborate_drops.rs @@ -22,7 +22,7 @@ use rustc::util::nodemap::FxHashMap; use rustc_data_structures::indexed_set::IdxSetBuf; use rustc_data_structures::indexed_vec::Idx; use rustc_mir::util::patch::MirPatch; -use rustc_mir::util::elaborate_drops::{DropFlagState, elaborate_drop}; +use rustc_mir::util::elaborate_drops::{DropFlagState, Unwind, elaborate_drop}; use rustc_mir::util::elaborate_drops::{DropElaborator, DropStyle, DropFlagMode}; use syntax::ast; use syntax_pos::Span; @@ -399,14 +399,13 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> { ctxt: self }, terminator.source_info, - data.is_cleanup, location, path, target, if data.is_cleanup { - None + Unwind::InCleanup } else { - 
Some(Option::unwrap_or(unwind, resume_block)) + Unwind::To(Option::unwrap_or(unwind, resume_block)) }, bb) } @@ -455,6 +454,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> { let bb = loc.block; let data = &self.mir[bb]; let terminator = data.terminator(); + assert!(!data.is_cleanup, "DropAndReplace in unwind path not supported"); let assign = Statement { kind: StatementKind::Assign(location.clone(), Rvalue::Use(value.clone())), @@ -477,7 +477,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> { kind: TerminatorKind::Goto { target: target }, ..*terminator }), - is_cleanup: data.is_cleanup, + is_cleanup: false, }); match self.move_data().rev_lookup.find(location) { @@ -491,11 +491,10 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> { ctxt: self }, terminator.source_info, - data.is_cleanup, location, path, target, - Some(unwind), + Unwind::To(unwind), bb); on_all_children_bits(self.tcx, self.mir, self.move_data(), path, |child| { self.set_drop_flag(Location { block: target, statement_index: 0 }, diff --git a/src/librustc_mir/shim.rs b/src/librustc_mir/shim.rs index 428685d7f50..54779cbe301 100644 --- a/src/librustc_mir/shim.rs +++ b/src/librustc_mir/shim.rs @@ -198,11 +198,10 @@ fn build_drop_shim<'a, 'tcx>(tcx: ty::TyCtxt<'a, 'tcx, 'tcx>, elaborate_drops::elaborate_drop( &mut elaborator, source_info, - false, &dropee, (), return_block, - Some(resume_block), + elaborate_drops::Unwind::To(resume_block), START_BLOCK ); elaborator.patch diff --git a/src/librustc_mir/util/elaborate_drops.rs b/src/librustc_mir/util/elaborate_drops.rs index 4569569c820..afec04dafe3 100644 --- a/src/librustc_mir/util/elaborate_drops.rs +++ b/src/librustc_mir/util/elaborate_drops.rs @@ -50,6 +50,35 @@ pub enum DropFlagMode { Deep } +#[derive(Copy, Clone, Debug)] +pub enum Unwind { + To(BasicBlock), + InCleanup +} + +impl Unwind { + fn is_cleanup(self) -> bool { + match self { + Unwind::To(..) 
=> false, + Unwind::InCleanup => true + } + } + + fn into_option(self) -> Option { + match self { + Unwind::To(bb) => Some(bb), + Unwind::InCleanup => None, + } + } + + fn map(self, f: F) -> Self where F: FnOnce(BasicBlock) -> BasicBlock { + match self { + Unwind::To(bb) => Unwind::To(f(bb)), + Unwind::InCleanup => Unwind::InCleanup + } + } +} + pub trait DropElaborator<'a, 'tcx: 'a> : fmt::Debug { type Path : Copy + fmt::Debug; @@ -75,28 +104,25 @@ struct DropCtxt<'l, 'b: 'l, 'tcx: 'b, D> elaborator: &'l mut D, source_info: SourceInfo, - is_cleanup: bool, lvalue: &'l Lvalue<'tcx>, path: D::Path, succ: BasicBlock, - unwind: Option, + unwind: Unwind, } pub fn elaborate_drop<'b, 'tcx, D>( elaborator: &mut D, source_info: SourceInfo, - is_cleanup: bool, lvalue: &Lvalue<'tcx>, path: D::Path, succ: BasicBlock, - unwind: Option, + unwind: Unwind, bb: BasicBlock) where D: DropElaborator<'b, 'tcx> { - assert_eq!(unwind.is_none(), is_cleanup); DropCtxt { - elaborator, source_info, is_cleanup, lvalue, path, succ, unwind + elaborator, source_info, lvalue, path, succ, unwind }.elaborate_drop(bb) } @@ -145,14 +171,13 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> self.elaborator.patch().patch_terminator(bb, TerminatorKind::Drop { location: self.lvalue.clone(), target: self.succ, - unwind: self.unwind + unwind: self.unwind.into_option(), }); } DropStyle::Conditional => { - let is_cleanup = self.is_cleanup; // FIXME(#6393) + let unwind = self.unwind; // FIXME(#6393) let succ = self.succ; - let drop_bb = self.complete_drop( - is_cleanup, Some(DropFlagMode::Deep), succ); + let drop_bb = self.complete_drop(Some(DropFlagMode::Deep), succ, unwind); self.elaborator.patch().patch_terminator(bb, TerminatorKind::Goto { target: drop_bb }); @@ -189,11 +214,10 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> } fn drop_subpath(&mut self, - is_cleanup: bool, lvalue: &Lvalue<'tcx>, path: Option, succ: BasicBlock, - unwind: Option) + unwind: Unwind) -> BasicBlock { if let Some(path) = path { @@ -202,7 +226,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> DropCtxt { elaborator: self.elaborator, source_info: self.source_info, - path, lvalue, succ, unwind, is_cleanup + path, lvalue, succ, unwind, }.elaborated_drop_block() } else { debug!("drop_subpath: for rest field {:?}", lvalue); @@ -210,11 +234,11 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> DropCtxt { elaborator: self.elaborator, source_info: self.source_info, - lvalue, succ, unwind, is_cleanup, + lvalue, succ, unwind, // Using `self.path` here to condition the drop on // our own drop flag. path: self.path - }.complete_drop(is_cleanup, None, succ) + }.complete_drop(None, succ, unwind) } } @@ -222,24 +246,15 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> /// the list of steps in it in reverse order. /// /// `unwind_ladder` is such a list of steps in reverse order, - /// which is called instead of the next step if the drop unwinds - /// (the first field is never reached). If it is `None`, all - /// unwind targets are left blank. - fn drop_halfladder<'a>(&mut self, - unwind_ladder: Option<&[BasicBlock]>, - succ: BasicBlock, - fields: &[(Lvalue<'tcx>, Option)], - is_cleanup: bool) - -> Vec + /// which is called if the matching step of the drop glue panics. 
+ fn drop_halfladder(&mut self, + unwind_ladder: &[Unwind], + succ: BasicBlock, + fields: &[(Lvalue<'tcx>, Option)]) + -> Vec { - let mut unwind_succ = if is_cleanup { - None - } else { - self.unwind - }; - let goto = TerminatorKind::Goto { target: succ }; - let mut succ = self.new_block(is_cleanup, goto); + let mut succ = self.new_block(unwind_ladder[0], goto); // Always clear the "master" drop flag at the bottom of the // ladder. This is needed because the "master" drop flag @@ -248,9 +263,8 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> let succ_loc = Location { block: succ, statement_index: 0 }; self.elaborator.clear_drop_flag(succ_loc, self.path, DropFlagMode::Shallow); - fields.iter().rev().enumerate().map(|(i, &(ref lv, path))| { - succ = self.drop_subpath(is_cleanup, lv, path, succ, unwind_succ); - unwind_succ = unwind_ladder.as_ref().map(|p| p[i]); + fields.iter().rev().zip(unwind_ladder).map(|(&(ref lv, path), &unwind_succ)| { + succ = self.drop_subpath(lv, path, succ, unwind_succ); succ }).collect() } @@ -271,7 +285,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> /// ELAB(drop location.2 [target=`self.unwind`]) fn drop_ladder<'a>(&mut self, fields: Vec<(Lvalue<'tcx>, Option)>) - -> (BasicBlock, Option) + -> (BasicBlock, Unwind) { debug!("drop_ladder({:?}, {:?})", self, fields); @@ -282,21 +296,21 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> debug!("drop_ladder - fields needing drop: {:?}", fields); - let unwind_ladder = if self.is_cleanup { - None + let unwind_ladder = vec![Unwind::InCleanup; fields.len() + 1]; + let unwind_ladder: Vec<_> = if let Unwind::To(target) = self.unwind { + let halfladder = self.drop_halfladder(&unwind_ladder, target, &fields); + Some(self.unwind).into_iter().chain(halfladder.into_iter().map(Unwind::To)) + .collect() } else { - let unwind = self.unwind.unwrap(); // FIXME(#6393) - Some(self.drop_halfladder(None, unwind, &fields, true)) + unwind_ladder }; let succ = self.succ; // FIXME(#6393) - let is_cleanup = self.is_cleanup; let normal_ladder = - self.drop_halfladder(unwind_ladder.as_ref().map(|x| &**x), - succ, &fields, is_cleanup); + self.drop_halfladder(&unwind_ladder, succ, &fields); (normal_ladder.last().cloned().unwrap_or(succ), - unwind_ladder.and_then(|l| l.last().cloned()).or(self.unwind)) + unwind_ladder.last().cloned().unwrap_or(self.unwind)) } fn open_drop_for_tuple<'a>(&mut self, tys: &[Ty<'tcx>]) @@ -320,13 +334,13 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> let interior_path = self.elaborator.deref_subpath(self.path); let succ = self.succ; // FIXME(#6393) - let is_cleanup = self.is_cleanup; - let succ = self.box_free_block(ty, succ, is_cleanup); - let unwind_succ = self.unwind.map(|u| { - self.box_free_block(ty, u, true) + let unwind = self.unwind; + let succ = self.box_free_block(ty, succ, unwind); + let unwind_succ = self.unwind.map(|unwind| { + self.box_free_block(ty, unwind, Unwind::InCleanup) }); - self.drop_subpath(is_cleanup, &interior, interior_path, succ, unwind_succ) + self.drop_subpath(&interior, interior_path, succ, unwind_succ) } fn open_drop_for_adt<'a>(&mut self, adt: &'tcx ty::AdtDef, substs: &'tcx Substs<'tcx>) @@ -339,7 +353,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> source_info: self.source_info, kind: TerminatorKind::Unreachable }), - is_cleanup: self.is_cleanup + is_cleanup: self.unwind.is_cleanup() }); } @@ -358,7 +372,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> fn open_drop_for_adt_contents<'a>(&mut self, adt: &'tcx ty::AdtDef, substs: &'tcx Substs<'tcx>) - -> (BasicBlock, 
Option) { + -> (BasicBlock, Unwind) { match adt.variants.len() { 1 => { let fields = self.move_paths_for_fields( @@ -370,13 +384,12 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> self.drop_ladder(fields) } _ => { - let is_cleanup = self.is_cleanup; let succ = self.succ; let unwind = self.unwind; // FIXME(#6393) let mut values = Vec::with_capacity(adt.variants.len()); let mut normal_blocks = Vec::with_capacity(adt.variants.len()); - let mut unwind_blocks = if is_cleanup { + let mut unwind_blocks = if unwind.is_cleanup() { None } else { Some(Vec::with_capacity(adt.variants.len())) @@ -396,7 +409,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> &adt.variants[variant_index], substs); values.push(discr); - if let Some(ref mut unwind_blocks) = unwind_blocks { + if let Unwind::To(unwind) = unwind { // We can't use the half-ladder from the original // drop ladder, because this breaks the // "funclet can't have 2 successor funclets" @@ -415,12 +428,11 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> // I want to minimize the divergence between MSVC // and non-MSVC. - let unwind = unwind.unwrap(); - let halfladder = self.drop_halfladder( - None, unwind, &fields, true); - unwind_blocks.push( - halfladder.last().cloned().unwrap_or(unwind) - ); + let unwind_blocks = unwind_blocks.as_mut().unwrap(); + let unwind_ladder = vec![Unwind::InCleanup; fields.len() + 1]; + let halfladder = + self.drop_halfladder(&unwind_ladder, unwind, &fields); + unwind_blocks.push(halfladder.last().cloned().unwrap_or(unwind)); } let (normal, _) = self.drop_ladder(fields); normal_blocks.push(normal); @@ -428,14 +440,16 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> // variant not found - drop the entire enum if let None = otherwise { otherwise = Some(self.complete_drop( - is_cleanup, Some(DropFlagMode::Shallow), - succ)); - unwind_otherwise = unwind.map(|unwind| self.complete_drop( - true, - Some(DropFlagMode::Shallow), - unwind - )); + succ, + unwind)); + if let Unwind::To(unwind) = unwind { + unwind_otherwise = Some(self.complete_drop( + Some(DropFlagMode::Shallow), + unwind, + Unwind::InCleanup + )); + } } } } @@ -448,10 +462,10 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> values.pop(); } - (self.adt_switch_block(is_cleanup, adt, normal_blocks, &values, succ), - unwind_blocks.map(|unwind_blocks| { + (self.adt_switch_block(adt, normal_blocks, &values, succ, unwind), + unwind.map(|unwind| { self.adt_switch_block( - is_cleanup, adt, unwind_blocks, &values, unwind.unwrap() + adt, unwind_blocks.unwrap(), &values, unwind, Unwind::InCleanup ) })) } @@ -459,11 +473,11 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> } fn adt_switch_block(&mut self, - is_cleanup: bool, adt: &'tcx ty::AdtDef, blocks: Vec, values: &[ConstInt], - succ: BasicBlock) + succ: BasicBlock, + unwind: Unwind) -> BasicBlock { // If there are multiple variants, then if something // is present within the enum the discriminant, tracked @@ -491,12 +505,12 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> targets: blocks, } }), - is_cleanup: is_cleanup, + is_cleanup: unwind.is_cleanup(), }); - self.drop_flag_test_block(is_cleanup, switch_block, succ) + self.drop_flag_test_block(switch_block, succ, unwind) } - fn destructor_call_block<'a>(&mut self, (succ, unwind): (BasicBlock, Option)) + fn destructor_call_block<'a>(&mut self, (succ, unwind): (BasicBlock, Unwind)) -> BasicBlock { debug!("destructor_call_block({:?}, {:?})", self, succ); @@ -527,11 +541,11 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> self.source_info.span), args: 
vec![Operand::Consume(Lvalue::Local(ref_lvalue))], destination: Some((unit_temp, succ)), - cleanup: unwind, + cleanup: unwind.into_option(), }, source_info: self.source_info }), - is_cleanup: self.is_cleanup, + is_cleanup: unwind.is_cleanup(), }) } @@ -541,16 +555,15 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> /// can_go = index < len /// if can_go then drop-block else succ /// drop-block: - /// ptr = &mut LV[len] + /// ptr = &mut LV[index] /// index = index + 1 /// drop(ptr) fn drop_loop(&mut self, - unwind: Option, succ: BasicBlock, index: &Lvalue<'tcx>, length: &Lvalue<'tcx>, ety: Ty<'tcx>, - is_cleanup: bool) + unwind: Unwind) -> BasicBlock { let use_ = |lv: &Lvalue<'tcx>| Operand::Consume(lv.clone()); @@ -576,10 +589,11 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> index.clone(), Rvalue::BinaryOp(BinOp::Add, use_(index), one) )}, ], - is_cleanup, + is_cleanup: unwind.is_cleanup(), terminator: Some(Terminator { source_info: self.source_info, - kind: TerminatorKind::Resume, + // this gets overwritten by drop elaboration. + kind: TerminatorKind::Unreachable, }) }); @@ -589,7 +603,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> can_go.clone(), Rvalue::BinaryOp(BinOp::Lt, use_(index), use_(length)) )}, ], - is_cleanup, + is_cleanup: unwind.is_cleanup(), terminator: Some(Terminator { source_info: self.source_info, kind: TerminatorKind::if_(tcx, use_(can_go), drop_block, succ) @@ -599,7 +613,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> self.elaborator.patch().patch_terminator(drop_block, TerminatorKind::Drop { location: ptr.clone().deref(), target: loop_block, - unwind: unwind + unwind: unwind.into_option() }); loop_block @@ -614,12 +628,11 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> let length = &Lvalue::Local(self.new_temp(tcx.types.usize)); let unwind = self.unwind.map(|unwind| { - self.drop_loop(None, unwind, index, length, ety, true) + self.drop_loop(unwind, index, length, ety, Unwind::InCleanup) }); - let is_cleanup = self.is_cleanup; let succ = self.succ; // FIXME(#6393) - let loop_block = self.drop_loop(unwind, succ, index, length, ety, is_cleanup); + let loop_block = self.drop_loop(succ, index, length, ety, unwind); let zero = self.constant_usize(0); let drop_block = self.elaborator.patch().new_block(BasicBlockData { @@ -631,7 +644,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> index.clone(), Rvalue::Use(zero), )}, ], - is_cleanup, + is_cleanup: unwind.is_cleanup(), terminator: Some(Terminator { source_info: self.source_info, kind: TerminatorKind::Goto { target: loop_block } @@ -640,7 +653,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> // FIXME(#34708): handle partially-dropped array/slice elements. self.drop_flag_test_and_reset_block( - is_cleanup, Some(DropFlagMode::Deep), drop_block, succ) + Some(DropFlagMode::Deep), drop_block, succ, unwind) } /// The slow-path - create an "open", elaborated drop for a type @@ -653,8 +666,6 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> /// ADT, both in the success case or if one of the destructors fail. fn open_drop<'a>(&mut self) -> BasicBlock { let ty = self.lvalue_ty(self.lvalue); - let is_cleanup = self.is_cleanup; // FIXME(#6393) - let succ = self.succ; match ty.sty { ty::TyClosure(def_id, substs) => { let tys : Vec<_> = substs.upvar_tys(def_id, self.tcx()).collect(); @@ -670,7 +681,9 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> self.open_drop_for_adt(def, substs) } ty::TyDynamic(..) 
=> { - self.complete_drop(is_cleanup, Some(DropFlagMode::Deep), succ) + let unwind = self.unwind; // FIXME(#6393) + let succ = self.succ; + self.complete_drop(Some(DropFlagMode::Deep), succ, unwind) } ty::TyArray(ety, _) | ty::TySlice(ety) => { self.open_drop_for_array(ety) @@ -687,21 +700,21 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> /// if let Some(mode) = mode: FLAG(self.path)[mode] = false /// drop(self.lv) fn complete_drop<'a>(&mut self, - is_cleanup: bool, drop_mode: Option, - succ: BasicBlock) -> BasicBlock + succ: BasicBlock, + unwind: Unwind) -> BasicBlock { debug!("complete_drop({:?},{:?})", self, drop_mode); - let drop_block = self.drop_block(is_cleanup, succ); - self.drop_flag_test_and_reset_block(is_cleanup, drop_mode, drop_block, succ) + let drop_block = self.drop_block(succ, unwind); + self.drop_flag_test_and_reset_block(drop_mode, drop_block, succ, unwind) } fn drop_flag_test_and_reset_block(&mut self, - is_cleanup: bool, drop_mode: Option, drop_block: BasicBlock, - succ: BasicBlock) -> BasicBlock + succ: BasicBlock, + unwind: Unwind) -> BasicBlock { debug!("drop_flag_test_and_reset_block({:?},{:?})", self, drop_mode); @@ -710,14 +723,14 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> self.elaborator.clear_drop_flag(block_start, self.path, mode); } - self.drop_flag_test_block(is_cleanup, drop_block, succ) + self.drop_flag_test_block(drop_block, succ, unwind) } fn elaborated_drop_block<'a>(&mut self) -> BasicBlock { debug!("elaborated_drop_block({:?})", self); - let is_cleanup = self.is_cleanup; // FIXME(#6393) + let unwind = self.unwind; // FIXME(#6393) let succ = self.succ; - let blk = self.drop_block(is_cleanup, succ); + let blk = self.drop_block(succ, unwind); self.elaborate_drop(blk); blk } @@ -726,17 +739,17 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> &mut self, ty: Ty<'tcx>, target: BasicBlock, - is_cleanup: bool + unwind: Unwind, ) -> BasicBlock { - let block = self.unelaborated_free_block(ty, target, is_cleanup); - self.drop_flag_test_block(is_cleanup, block, target) + let block = self.unelaborated_free_block(ty, target, unwind); + self.drop_flag_test_block(block, target, unwind) } fn unelaborated_free_block<'a>( &mut self, ty: Ty<'tcx>, target: BasicBlock, - is_cleanup: bool + unwind: Unwind ) -> BasicBlock { let tcx = self.tcx(); let unit_temp = Lvalue::Local(self.new_temp(tcx.mk_nil())); @@ -749,31 +762,31 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> destination: Some((unit_temp, target)), cleanup: None }; // FIXME(#6393) - let free_block = self.new_block(is_cleanup, call); + let free_block = self.new_block(unwind, call); let block_start = Location { block: free_block, statement_index: 0 }; self.elaborator.clear_drop_flag(block_start, self.path, DropFlagMode::Shallow); free_block } - fn drop_block<'a>(&mut self, is_cleanup: bool, succ: BasicBlock) -> BasicBlock { + fn drop_block<'a>(&mut self, target: BasicBlock, unwind: Unwind) -> BasicBlock { let block = TerminatorKind::Drop { location: self.lvalue.clone(), - target: succ, - unwind: if is_cleanup { None } else { self.unwind } + target: target, + unwind: unwind.into_option() }; - self.new_block(is_cleanup, block) + self.new_block(unwind, block) } fn drop_flag_test_block(&mut self, - is_cleanup: bool, on_set: BasicBlock, - on_unset: BasicBlock) + on_unset: BasicBlock, + unwind: Unwind) -> BasicBlock { let style = self.elaborator.drop_style(self.path, DropFlagMode::Shallow); - debug!("drop_flag_test_block({:?},{:?},{:?}) - {:?}", - self, is_cleanup, on_set, style); + 
debug!("drop_flag_test_block({:?},{:?},{:?},{:?}) - {:?}", + self, on_set, on_unset, unwind, style); match style { DropStyle::Dead => on_unset, @@ -781,13 +794,13 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> DropStyle::Conditional | DropStyle::Open => { let flag = self.elaborator.get_drop_flag(self.path).unwrap(); let term = TerminatorKind::if_(self.tcx(), flag, on_set, on_unset); - self.new_block(is_cleanup, term) + self.new_block(unwind, term) } } } fn new_block<'a>(&mut self, - is_cleanup: bool, + unwind: Unwind, k: TerminatorKind<'tcx>) -> BasicBlock { @@ -796,7 +809,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> terminator: Some(Terminator { source_info: self.source_info, kind: k }), - is_cleanup: is_cleanup + is_cleanup: unwind.is_cleanup() }) } @@ -809,7 +822,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> self.elaborator.patch().terminator_loc(mir, bb) } - fn constant_usize(&self, val: usize) -> Operand<'tcx> { + fn constant_usize(&self, val: u16) -> Operand<'tcx> { Operand::Constant(box Constant { span: self.source_info.span, ty: self.tcx().types.usize, From 68b7475dc04d4429d4bfb4837a902090915b6584 Mon Sep 17 00:00:00 2001 From: Ariel Ben-Yehuda Date: Mon, 15 May 2017 15:22:59 +0300 Subject: [PATCH 04/11] move "ADT master drop flag" logic to `open_drop_for_adt_contents` Fixes #41888. --- src/librustc_mir/util/elaborate_drops.rs | 275 ++++++++++++----------- src/test/mir-opt/issue-41888.rs | 186 +++++++++++++++ src/test/run-pass/dynamic-drop.rs | 25 +++ 3 files changed, 352 insertions(+), 134 deletions(-) create mode 100644 src/test/mir-opt/issue-41888.rs diff --git a/src/librustc_mir/util/elaborate_drops.rs b/src/librustc_mir/util/elaborate_drops.rs index afec04dafe3..3ec27db60c2 100644 --- a/src/librustc_mir/util/elaborate_drops.rs +++ b/src/librustc_mir/util/elaborate_drops.rs @@ -243,30 +243,37 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> } /// Create one-half of the drop ladder for a list of fields, and return - /// the list of steps in it in reverse order. + /// the list of steps in it in reverse order, with the first step + /// dropping 0 fields and so on. /// /// `unwind_ladder` is such a list of steps in reverse order, /// which is called if the matching step of the drop glue panics. fn drop_halfladder(&mut self, unwind_ladder: &[Unwind], - succ: BasicBlock, + mut succ: BasicBlock, fields: &[(Lvalue<'tcx>, Option)]) -> Vec { - let goto = TerminatorKind::Goto { target: succ }; - let mut succ = self.new_block(unwind_ladder[0], goto); + Some(succ).into_iter().chain( + fields.iter().rev().zip(unwind_ladder) + .map(|(&(ref lv, path), &unwind_succ)| { + succ = self.drop_subpath(lv, path, succ, unwind_succ); + succ + }) + ).collect() + } - // Always clear the "master" drop flag at the bottom of the - // ladder. This is needed because the "master" drop flag - // protects the ADT's discriminant, which is invalidated - // after the ADT is dropped. - let succ_loc = Location { block: succ, statement_index: 0 }; - self.elaborator.clear_drop_flag(succ_loc, self.path, DropFlagMode::Shallow); - - fields.iter().rev().zip(unwind_ladder).map(|(&(ref lv, path), &unwind_succ)| { - succ = self.drop_subpath(lv, path, succ, unwind_succ); - succ - }).collect() + fn drop_ladder_bottom(&mut self) -> (BasicBlock, Unwind) { + // Clear the "master" drop flag at the end. This is needed + // because the "master" drop protects the ADT's discriminant, + // which is invalidated after the ADT is dropped. 
+ let (succ, unwind) = (self.succ, self.unwind); // FIXME(#6393) + ( + self.drop_flag_reset_block(DropFlagMode::Shallow, succ, unwind), + unwind.map(|unwind| { + self.drop_flag_reset_block(DropFlagMode::Shallow, unwind, Unwind::InCleanup) + }) + ) } /// Create a full drop ladder, consisting of 2 connected half-drop-ladders @@ -283,8 +290,13 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> /// ELAB(drop location.1 [target=.c2]) /// .c2: /// ELAB(drop location.2 [target=`self.unwind`]) + /// + /// NOTE: this does not clear the master drop flag, so you need + /// to point succ/unwind on a `drop_ladder_bottom`. fn drop_ladder<'a>(&mut self, - fields: Vec<(Lvalue<'tcx>, Option)>) + fields: Vec<(Lvalue<'tcx>, Option)>, + succ: BasicBlock, + unwind: Unwind) -> (BasicBlock, Unwind) { debug!("drop_ladder({:?}, {:?})", self, fields); @@ -297,20 +309,17 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> debug!("drop_ladder - fields needing drop: {:?}", fields); let unwind_ladder = vec![Unwind::InCleanup; fields.len() + 1]; - let unwind_ladder: Vec<_> = if let Unwind::To(target) = self.unwind { + let unwind_ladder: Vec<_> = if let Unwind::To(target) = unwind { let halfladder = self.drop_halfladder(&unwind_ladder, target, &fields); - Some(self.unwind).into_iter().chain(halfladder.into_iter().map(Unwind::To)) - .collect() + halfladder.into_iter().map(Unwind::To).collect() } else { unwind_ladder }; - let succ = self.succ; // FIXME(#6393) let normal_ladder = self.drop_halfladder(&unwind_ladder, succ, &fields); - (normal_ladder.last().cloned().unwrap_or(succ), - unwind_ladder.last().cloned().unwrap_or(self.unwind)) + (*normal_ladder.last().unwrap(), *unwind_ladder.last().unwrap()) } fn open_drop_for_tuple<'a>(&mut self, tys: &[Ty<'tcx>]) @@ -323,7 +332,8 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> self.elaborator.field_subpath(self.path, Field::new(i))) }).collect(); - self.drop_ladder(fields).0 + let (succ, unwind) = self.drop_ladder_bottom(); + self.drop_ladder(fields, succ, unwind).0 } fn open_drop_for_box<'a>(&mut self, ty: Ty<'tcx>) -> BasicBlock @@ -370,106 +380,100 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> } } - fn open_drop_for_adt_contents<'a>(&mut self, adt: &'tcx ty::AdtDef, - substs: &'tcx Substs<'tcx>) - -> (BasicBlock, Unwind) { - match adt.variants.len() { - 1 => { - let fields = self.move_paths_for_fields( - self.lvalue, - self.path, - &adt.variants[0], - substs - ); - self.drop_ladder(fields) - } - _ => { - let succ = self.succ; - let unwind = self.unwind; // FIXME(#6393) + fn open_drop_for_adt_contents(&mut self, adt: &'tcx ty::AdtDef, + substs: &'tcx Substs<'tcx>) + -> (BasicBlock, Unwind) { + let (succ, unwind) = self.drop_ladder_bottom(); + if adt.variants.len() == 1 { + let fields = self.move_paths_for_fields( + self.lvalue, + self.path, + &adt.variants[0], + substs + ); + self.drop_ladder(fields, succ, unwind) + } else { + self.open_drop_for_multivariant(adt, substs, succ, unwind) + } + } - let mut values = Vec::with_capacity(adt.variants.len()); - let mut normal_blocks = Vec::with_capacity(adt.variants.len()); - let mut unwind_blocks = if unwind.is_cleanup() { - None - } else { - Some(Vec::with_capacity(adt.variants.len())) - }; - let mut otherwise = None; - let mut unwind_otherwise = None; - for (variant_index, discr) in adt.discriminants(self.tcx()).enumerate() { - let subpath = self.elaborator.downcast_subpath( - self.path, variant_index); - if let Some(variant_path) = subpath { - let base_lv = self.lvalue.clone().elem( - ProjectionElem::Downcast(adt, 
variant_index) + fn open_drop_for_multivariant(&mut self, adt: &'tcx ty::AdtDef, + substs: &'tcx Substs<'tcx>, + succ: BasicBlock, + unwind: Unwind) + -> (BasicBlock, Unwind) { + let mut values = Vec::with_capacity(adt.variants.len()); + let mut normal_blocks = Vec::with_capacity(adt.variants.len()); + let mut unwind_blocks = if unwind.is_cleanup() { + None + } else { + Some(Vec::with_capacity(adt.variants.len())) + }; + + let mut have_otherwise = false; + + for (variant_index, discr) in adt.discriminants(self.tcx()).enumerate() { + let subpath = self.elaborator.downcast_subpath( + self.path, variant_index); + if let Some(variant_path) = subpath { + let base_lv = self.lvalue.clone().elem( + ProjectionElem::Downcast(adt, variant_index) ); - let fields = self.move_paths_for_fields( - &base_lv, - variant_path, - &adt.variants[variant_index], - substs); - values.push(discr); - if let Unwind::To(unwind) = unwind { - // We can't use the half-ladder from the original - // drop ladder, because this breaks the - // "funclet can't have 2 successor funclets" - // requirement from MSVC: - // - // switch unwind-switch - // / \ / \ - // v1.0 v2.0 v2.0-unwind v1.0-unwind - // | | / | - // v1.1-unwind v2.1-unwind | - // ^ | - // \-------------------------------/ - // - // Create a duplicate half-ladder to avoid that. We - // could technically only do this on MSVC, but I - // I want to minimize the divergence between MSVC - // and non-MSVC. + let fields = self.move_paths_for_fields( + &base_lv, + variant_path, + &adt.variants[variant_index], + substs); + values.push(discr); + if let Unwind::To(unwind) = unwind { + // We can't use the half-ladder from the original + // drop ladder, because this breaks the + // "funclet can't have 2 successor funclets" + // requirement from MSVC: + // + // switch unwind-switch + // / \ / \ + // v1.0 v2.0 v2.0-unwind v1.0-unwind + // | | / | + // v1.1-unwind v2.1-unwind | + // ^ | + // \-------------------------------/ + // + // Create a duplicate half-ladder to avoid that. We + // could technically only do this on MSVC, but I + // I want to minimize the divergence between MSVC + // and non-MSVC. 
- let unwind_blocks = unwind_blocks.as_mut().unwrap(); - let unwind_ladder = vec![Unwind::InCleanup; fields.len() + 1]; - let halfladder = - self.drop_halfladder(&unwind_ladder, unwind, &fields); - unwind_blocks.push(halfladder.last().cloned().unwrap_or(unwind)); - } - let (normal, _) = self.drop_ladder(fields); - normal_blocks.push(normal); - } else { - // variant not found - drop the entire enum - if let None = otherwise { - otherwise = Some(self.complete_drop( - Some(DropFlagMode::Shallow), - succ, - unwind)); - if let Unwind::To(unwind) = unwind { - unwind_otherwise = Some(self.complete_drop( - Some(DropFlagMode::Shallow), - unwind, - Unwind::InCleanup - )); - } - } - } + let unwind_blocks = unwind_blocks.as_mut().unwrap(); + let unwind_ladder = vec![Unwind::InCleanup; fields.len() + 1]; + let halfladder = + self.drop_halfladder(&unwind_ladder, unwind, &fields); + unwind_blocks.push(halfladder.last().cloned().unwrap()); } - if let Some(block) = otherwise { - normal_blocks.push(block); - if let Some(ref mut unwind_blocks) = unwind_blocks { - unwind_blocks.push(unwind_otherwise.unwrap()); - } - } else { - values.pop(); - } - - (self.adt_switch_block(adt, normal_blocks, &values, succ, unwind), - unwind.map(|unwind| { - self.adt_switch_block( - adt, unwind_blocks.unwrap(), &values, unwind, Unwind::InCleanup - ) - })) + let (normal, _) = self.drop_ladder(fields, succ, unwind); + normal_blocks.push(normal); + } else { + have_otherwise = true; } } + + if have_otherwise { + normal_blocks.push(self.drop_block(succ, unwind)); + if let Unwind::To(unwind) = unwind { + unwind_blocks.as_mut().unwrap().push( + self.drop_block(unwind, Unwind::InCleanup) + ); + } + } else { + values.pop(); + } + + (self.adt_switch_block(adt, normal_blocks, &values, succ, unwind), + unwind.map(|unwind| { + self.adt_switch_block( + adt, unwind_blocks.unwrap(), &values, unwind, Unwind::InCleanup + ) + })) } fn adt_switch_block(&mut self, @@ -652,8 +656,8 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> }); // FIXME(#34708): handle partially-dropped array/slice elements. 
- self.drop_flag_test_and_reset_block( - Some(DropFlagMode::Deep), drop_block, succ, unwind) + let reset_block = self.drop_flag_reset_block(DropFlagMode::Deep, drop_block, unwind); + self.drop_flag_test_block(reset_block, succ, unwind) } /// The slow-path - create an "open", elaborated drop for a type @@ -707,25 +711,28 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> debug!("complete_drop({:?},{:?})", self, drop_mode); let drop_block = self.drop_block(succ, unwind); - self.drop_flag_test_and_reset_block(drop_mode, drop_block, succ, unwind) - } - - fn drop_flag_test_and_reset_block(&mut self, - drop_mode: Option, - drop_block: BasicBlock, - succ: BasicBlock, - unwind: Unwind) -> BasicBlock - { - debug!("drop_flag_test_and_reset_block({:?},{:?})", self, drop_mode); - - if let Some(mode) = drop_mode { - let block_start = Location { block: drop_block, statement_index: 0 }; - self.elaborator.clear_drop_flag(block_start, self.path, mode); - } + let drop_block = if let Some(mode) = drop_mode { + self.drop_flag_reset_block(mode, drop_block, unwind) + } else { + drop_block + }; self.drop_flag_test_block(drop_block, succ, unwind) } + fn drop_flag_reset_block(&mut self, + mode: DropFlagMode, + succ: BasicBlock, + unwind: Unwind) -> BasicBlock + { + debug!("drop_flag_reset_block({:?},{:?})", self, mode); + + let block = self.new_block(unwind, TerminatorKind::Goto { target: succ }); + let block_start = Location { block: block, statement_index: 0 }; + self.elaborator.clear_drop_flag(block_start, self.path, mode); + block + } + fn elaborated_drop_block<'a>(&mut self) -> BasicBlock { debug!("elaborated_drop_block({:?})", self); let unwind = self.unwind; // FIXME(#6393) diff --git a/src/test/mir-opt/issue-41888.rs b/src/test/mir-opt/issue-41888.rs new file mode 100644 index 00000000000..ea4d7d3165d --- /dev/null +++ b/src/test/mir-opt/issue-41888.rs @@ -0,0 +1,186 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// check that we clear the "ADT master drop flag" even when there are +// no fields to be dropped. + +fn main() { + let e; + if cond() { + e = E::F(K); + if let E::F(_k) = e { + // older versions of rustc used to not clear the + // drop flag for `e` in this path. 
+ } + } +} + +fn cond() -> bool { false } + +struct K; + +enum E { + F(K), + G(Box) +} + +// END RUST SOURCE +// fn main() -> () { +// let mut _0: (); +// scope 1 { +// let _1: E; // `e` +// scope 2 { +// let _6: K; +// } +// } +// let mut _2: bool; +// let mut _3: (); +// let mut _4: E; +// let mut _5: K; +// let mut _7: isize; +// let mut _8: bool; // drop flag for `e` +// let mut _9: bool; +// let mut _10: bool; +// let mut _11: isize; +// let mut _12: isize; +// +// bb0: { +// _8 = const false; +// _10 = const false; +// _9 = const false; +// StorageLive(_1); +// StorageLive(_2); +// _2 = const cond() -> [return: bb3, unwind: bb2]; +// } +// +// bb1: { +// resume; +// } +// +// bb2: { +// goto -> bb1; +// } +// +// bb3: { +// switchInt(_2) -> [0u8: bb5, otherwise: bb4]; +// } +// +// bb4: { +// StorageLive(_4); +// StorageLive(_5); +// _5 = K::{{constructor}}; +// _4 = E::F(_5,); +// StorageDead(_5); +// goto -> bb15; +// } +// +// bb5: { +// _0 = (); +// goto -> bb12; +// } +// +// bb6: { +// goto -> bb2; +// } +// +// bb7: { +// goto -> bb8; +// } +// +// bb8: { +// StorageDead(_4); +// _7 = discriminant(_1); +// switchInt(_7) -> [0isize: bb10, otherwise: bb9]; +// } +// +// bb9: { +// _0 = (); +// goto -> bb11; +// } +// +// bb10: { +// StorageLive(_6); +// _10 = const false; +// _6 = ((_1 as F).0: K); +// _0 = (); +// goto -> bb11; +// } +// +// bb11: { +// StorageDead(_6); +// goto -> bb12; +// } +// +// bb12: { +// StorageDead(_2); +// goto -> bb22; +// } +// +// bb13: { +// StorageDead(_1); +// return; +// } +// +// bb14: { +// _8 = const true; +// _9 = const true; +// _10 = const true; +// _1 = _4; +// goto -> bb6; +// } +// +// bb15: { +// _8 = const true; +// _9 = const true; +// _10 = const true; +// _1 = _4; +// goto -> bb7; +// } +// +// bb16: { +// _8 = const false; // clear the drop flag - must always be reached +// goto -> bb13; +// } +// +// bb17: { +// _8 = const false; +// goto -> bb1; +// } +// +// bb18: { +// goto -> bb17; +// } +// +// bb19: { +// drop(_1) -> [return: bb16, unwind: bb17]; +// } +// +// bb20: { +// drop(_1) -> bb17; +// } +// +// bb21: { +// _11 = discriminant(_1); +// switchInt(_11) -> [0isize: bb16, otherwise: bb19]; +// } +// +// bb22: { +// switchInt(_8) -> [0u8: bb16, otherwise: bb21]; +// } +// +// bb23: { +// _12 = discriminant(_1); +// switchInt(_12) -> [0isize: bb18, otherwise: bb20]; +// } +// +// bb24: { +// switchInt(_8) -> [0u8: bb17, otherwise: bb23]; +// } +// } diff --git a/src/test/run-pass/dynamic-drop.rs b/src/test/run-pass/dynamic-drop.rs index 26e5fe987ee..6725a0c547f 100644 --- a/src/test/run-pass/dynamic-drop.rs +++ b/src/test/run-pass/dynamic-drop.rs @@ -90,6 +90,22 @@ fn dynamic_drop(a: &Allocator, c: bool) { }; } +struct TwoPtrs<'a>(Ptr<'a>, Ptr<'a>); +fn struct_dynamic_drop(a: &Allocator, c0: bool, c1: bool, c: bool) { + for i in 0..2 { + let x; + let y; + if (c0 && i == 0) || (c1 && i == 1) { + x = (a.alloc(), a.alloc(), a.alloc()); + y = TwoPtrs(a.alloc(), a.alloc()); + if c { + drop(x.1); + drop(y.0); + } + } + } +} + fn assignment2(a: &Allocator, c0: bool, c1: bool) { let mut _v = a.alloc(); let mut _w = a.alloc(); @@ -182,5 +198,14 @@ fn main() { run_test(|a| array_simple(a)); run_test(|a| vec_simple(a)); + run_test(|a| struct_dynamic_drop(a, false, false, false)); + run_test(|a| struct_dynamic_drop(a, false, false, true)); + run_test(|a| struct_dynamic_drop(a, false, true, false)); + run_test(|a| struct_dynamic_drop(a, false, true, true)); + run_test(|a| struct_dynamic_drop(a, true, false, false)); + run_test(|a| 
struct_dynamic_drop(a, true, false, true)); + run_test(|a| struct_dynamic_drop(a, true, true, false)); + run_test(|a| struct_dynamic_drop(a, true, true, true)); + run_test_nopanic(|a| union1(a)); } From 3bcd6fa5712520061fcc2504e1f0aae62c09e514 Mon Sep 17 00:00:00 2001 From: Ariel Ben-Yehuda Date: Mon, 15 May 2017 17:13:12 +0300 Subject: [PATCH 05/11] use Eq instead of Lt in loop --- src/librustc_mir/util/elaborate_drops.rs | 8 ++--- src/test/run-pass/issue-41888.rs | 43 ++++++++++++++++++++++++ 2 files changed, 47 insertions(+), 4 deletions(-) create mode 100644 src/test/run-pass/issue-41888.rs diff --git a/src/librustc_mir/util/elaborate_drops.rs b/src/librustc_mir/util/elaborate_drops.rs index 3ec27db60c2..c1d8d087eac 100644 --- a/src/librustc_mir/util/elaborate_drops.rs +++ b/src/librustc_mir/util/elaborate_drops.rs @@ -556,8 +556,8 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> /// create a loop that drops an array: /// /// loop-block: - /// can_go = index < len - /// if can_go then drop-block else succ + /// can_go = index == len + /// if can_go then succ else drop-block /// drop-block: /// ptr = &mut LV[index] /// index = index + 1 @@ -604,13 +604,13 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> let loop_block = self.elaborator.patch().new_block(BasicBlockData { statements: vec![ Statement { source_info: self.source_info, kind: StatementKind::Assign( - can_go.clone(), Rvalue::BinaryOp(BinOp::Lt, use_(index), use_(length)) + can_go.clone(), Rvalue::BinaryOp(BinOp::Eq, use_(index), use_(length)) )}, ], is_cleanup: unwind.is_cleanup(), terminator: Some(Terminator { source_info: self.source_info, - kind: TerminatorKind::if_(tcx, use_(can_go), drop_block, succ) + kind: TerminatorKind::if_(tcx, use_(can_go), succ, drop_block) }) }); diff --git a/src/test/run-pass/issue-41888.rs b/src/test/run-pass/issue-41888.rs new file mode 100644 index 00000000000..e145cde039d --- /dev/null +++ b/src/test/run-pass/issue-41888.rs @@ -0,0 +1,43 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +fn main() { let _ = g(Some(E::F(K))); } + +type R = Result<(), ()>; +struct K; + +enum E { + F(K), // must not be built-in type + #[allow(dead_code)] + G(Box, Box), +} + +fn translate(x: R) -> R { x } + +fn g(mut status: Option) -> R { + loop { + match status { + Some(infix_or_postfix) => match infix_or_postfix { + E::F(_op) => { // <- must be captured by value + match Ok(()) { + Err(err) => return Err(err), + Ok(_) => {}, + }; + } + _ => (), + }, + _ => match translate(Err(())) { + Err(err) => return Err(err), + Ok(_) => {}, + } + } + status = None; + } +} From 7b295eea4296eedf7858a001297eadfaace253d3 Mon Sep 17 00:00:00 2001 From: Ariel Ben-Yehuda Date: Thu, 18 May 2017 18:43:52 +0300 Subject: [PATCH 06/11] add NullOp::SizeOf and BinOp::Offset --- src/librustc/ich/impls_mir.rs | 10 ++++++++-- src/librustc/mir/mod.rs | 20 +++++++++++++------ src/librustc/mir/tcx.rs | 13 ++++++------ src/librustc/mir/visit.rs | 2 +- .../borrowck/mir/gather_moves.rs | 3 ++- src/librustc_mir/build/expr/as_rvalue.rs | 3 ++- src/librustc_mir/transform/erase_regions.rs | 2 +- src/librustc_mir/transform/qualify_consts.rs | 9 ++++++--- src/librustc_passes/mir_stats.rs | 2 +- src/librustc_trans/collector.rs | 2 +- src/librustc_trans/glue.rs | 1 + src/librustc_trans/mir/constant.rs | 7 +++++++ src/librustc_trans/mir/operand.rs | 2 +- src/librustc_trans/mir/rvalue.rs | 15 ++++++++++++-- src/rustllvm/RustWrapper.cpp | 14 ++++++++----- 15 files changed, 74 insertions(+), 31 deletions(-) diff --git a/src/librustc/ich/impls_mir.rs b/src/librustc/ich/impls_mir.rs index 3ff8ffb3505..3563dbe5096 100644 --- a/src/librustc/ich/impls_mir.rs +++ b/src/librustc/ich/impls_mir.rs @@ -315,7 +315,8 @@ impl<'a, 'tcx> HashStable> for mir::Rvalue<'tcx> mir::Rvalue::Discriminant(ref lvalue) => { lvalue.hash_stable(hcx, hasher); } - mir::Rvalue::Box(ty) => { + mir::Rvalue::NullaryOp(op, ty) => { + op.hash_stable(hcx, hasher); ty.hash_stable(hcx, hasher); } mir::Rvalue::Aggregate(ref kind, ref operands) => { @@ -374,7 +375,8 @@ impl_stable_hash_for!(enum mir::BinOp { Le, Ne, Ge, - Gt + Gt, + Offset }); impl_stable_hash_for!(enum mir::UnOp { @@ -382,6 +384,10 @@ impl_stable_hash_for!(enum mir::UnOp { Neg }); +impl_stable_hash_for!(enum mir::NullOp { + Box, + SizeOf +}); impl_stable_hash_for!(struct mir::Constant<'tcx> { span, ty, literal }); diff --git a/src/librustc/mir/mod.rs b/src/librustc/mir/mod.rs index fe2ad498e99..80c42917196 100644 --- a/src/librustc/mir/mod.rs +++ b/src/librustc/mir/mod.rs @@ -1046,6 +1046,7 @@ pub enum Rvalue<'tcx> { BinaryOp(BinOp, Operand<'tcx>, Operand<'tcx>), CheckedBinaryOp(BinOp, Operand<'tcx>, Operand<'tcx>), + NullaryOp(NullOp, Ty<'tcx>), UnaryOp(UnOp, Operand<'tcx>), /// Read the discriminant of an ADT. @@ -1054,9 +1055,6 @@ pub enum Rvalue<'tcx> { /// be defined to return, say, a 0) if ADT is not an enum. Discriminant(Lvalue<'tcx>), - /// Creates an *uninitialized* Box - Box(Ty<'tcx>), - /// Create an aggregate value, like a tuple or struct. This is /// only needed because we want to distinguish `dest = Foo { x: /// ..., y: ... 
}` from `dest.x = ...; dest.y = ...;` in the case @@ -1132,6 +1130,8 @@ pub enum BinOp { Ge, /// The `>` operator (greater than) Gt, + /// The `ptr.offset` operator + Offset, } impl BinOp { @@ -1144,6 +1144,14 @@ impl BinOp { } } +#[derive(Copy, Clone, Debug, PartialEq, Eq, RustcEncodable, RustcDecodable)] +pub enum NullOp { + /// Return the size of a value of that type + SizeOf, + /// Create a new uninitialized box for a value of that type + Box, +} + #[derive(Copy, Clone, Debug, PartialEq, Eq, RustcEncodable, RustcDecodable)] pub enum UnOp { /// The `!` operator for logical inversion @@ -1167,7 +1175,7 @@ impl<'tcx> Debug for Rvalue<'tcx> { } UnaryOp(ref op, ref a) => write!(fmt, "{:?}({:?})", op, a), Discriminant(ref lval) => write!(fmt, "discriminant({:?})", lval), - Box(ref t) => write!(fmt, "Box({:?})", t), + NullaryOp(ref op, ref t) => write!(fmt, "{:?}({:?})", op, t), Ref(_, borrow_kind, ref lv) => { let kind_str = match borrow_kind { BorrowKind::Shared => "", @@ -1601,7 +1609,7 @@ impl<'tcx> TypeFoldable<'tcx> for Rvalue<'tcx> { CheckedBinaryOp(op, rhs.fold_with(folder), lhs.fold_with(folder)), UnaryOp(op, ref val) => UnaryOp(op, val.fold_with(folder)), Discriminant(ref lval) => Discriminant(lval.fold_with(folder)), - Box(ty) => Box(ty.fold_with(folder)), + NullaryOp(op, ty) => NullaryOp(op, ty.fold_with(folder)), Aggregate(ref kind, ref fields) => { let kind = box match **kind { AggregateKind::Array(ty) => AggregateKind::Array(ty.fold_with(folder)), @@ -1629,7 +1637,7 @@ impl<'tcx> TypeFoldable<'tcx> for Rvalue<'tcx> { rhs.visit_with(visitor) || lhs.visit_with(visitor), UnaryOp(_, ref val) => val.visit_with(visitor), Discriminant(ref lval) => lval.visit_with(visitor), - Box(ty) => ty.visit_with(visitor), + NullaryOp(_, ty) => ty.visit_with(visitor), Aggregate(ref kind, ref fields) => { (match **kind { AggregateKind::Array(ty) => ty.visit_with(visitor), diff --git a/src/librustc/mir/tcx.rs b/src/librustc/mir/tcx.rs index 7bc1dc58c29..6078778a61d 100644 --- a/src/librustc/mir/tcx.rs +++ b/src/librustc/mir/tcx.rs @@ -166,7 +166,8 @@ impl<'tcx> Rvalue<'tcx> { let ty = op.ty(tcx, lhs_ty, rhs_ty); tcx.intern_tup(&[ty, tcx.types.bool], false) } - Rvalue::UnaryOp(_, ref operand) => { + Rvalue::UnaryOp(UnOp::Not, ref operand) | + Rvalue::UnaryOp(UnOp::Neg, ref operand) => { operand.ty(mir, tcx) } Rvalue::Discriminant(ref lval) => { @@ -179,9 +180,8 @@ impl<'tcx> Rvalue<'tcx> { bug!("Rvalue::Discriminant on Lvalue of type {:?}", ty); } } - Rvalue::Box(t) => { - tcx.mk_box(t) - } + Rvalue::NullaryOp(NullOp::Box, t) => tcx.mk_box(t), + Rvalue::NullaryOp(NullOp::SizeOf, _) => tcx.types.usize, Rvalue::Aggregate(ref ak, ref ops) => { match **ak { AggregateKind::Array(ty) => { @@ -227,7 +227,7 @@ impl<'tcx> BinOp { assert_eq!(lhs_ty, rhs_ty); lhs_ty } - &BinOp::Shl | &BinOp::Shr => { + &BinOp::Shl | &BinOp::Shr | &BinOp::Offset => { lhs_ty // lhs_ty can be != rhs_ty } &BinOp::Eq | &BinOp::Lt | &BinOp::Le | @@ -270,7 +270,8 @@ impl BinOp { BinOp::Lt => hir::BinOp_::BiLt, BinOp::Gt => hir::BinOp_::BiGt, BinOp::Le => hir::BinOp_::BiLe, - BinOp::Ge => hir::BinOp_::BiGe + BinOp::Ge => hir::BinOp_::BiGe, + BinOp::Offset => unreachable!() } } } diff --git a/src/librustc/mir/visit.rs b/src/librustc/mir/visit.rs index 557fedadeba..780ce736bfd 100644 --- a/src/librustc/mir/visit.rs +++ b/src/librustc/mir/visit.rs @@ -509,7 +509,7 @@ macro_rules! 
make_mir_visitor { self.visit_lvalue(lvalue, LvalueContext::Inspect, location); } - Rvalue::Box(ref $($mutability)* ty) => { + Rvalue::NullaryOp(_op, ref $($mutability)* ty) => { self.visit_ty(ty); } diff --git a/src/librustc_borrowck/borrowck/mir/gather_moves.rs b/src/librustc_borrowck/borrowck/mir/gather_moves.rs index 931cdf4f686..b03d2a775df 100644 --- a/src/librustc_borrowck/borrowck/mir/gather_moves.rs +++ b/src/librustc_borrowck/borrowck/mir/gather_moves.rs @@ -438,7 +438,8 @@ impl<'a, 'tcx> MoveDataBuilder<'a, 'tcx> { Rvalue::Ref(..) | Rvalue::Discriminant(..) | Rvalue::Len(..) | - Rvalue::Box(..) => { + Rvalue::NullaryOp(NullOp::SizeOf, _) | + Rvalue::NullaryOp(NullOp::Box, _) => { // This returns an rvalue with uninitialized contents. We can't // move out of it here because it is an rvalue - assignments always // completely initialize their lvalue. diff --git a/src/librustc_mir/build/expr/as_rvalue.rs b/src/librustc_mir/build/expr/as_rvalue.rs index e1832e0a0af..2884b60fdd8 100644 --- a/src/librustc_mir/build/expr/as_rvalue.rs +++ b/src/librustc_mir/build/expr/as_rvalue.rs @@ -97,7 +97,8 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { let value = this.hir.mirror(value); let result = this.temp(expr.ty, expr_span); // to start, malloc some memory of suitable type (thus far, uninitialized): - this.cfg.push_assign(block, source_info, &result, Rvalue::Box(value.ty)); + let box_ = Rvalue::NullaryOp(NullOp::Box, value.ty); + this.cfg.push_assign(block, source_info, &result, box_); this.in_scope(value_extents, block, |this| { // schedule a shallow free of that memory, lest we unwind: this.schedule_box_free(expr_span, value_extents, &result, value.ty); diff --git a/src/librustc_mir/transform/erase_regions.rs b/src/librustc_mir/transform/erase_regions.rs index 19714849b09..fa88eca6ec3 100644 --- a/src/librustc_mir/transform/erase_regions.rs +++ b/src/librustc_mir/transform/erase_regions.rs @@ -53,7 +53,7 @@ impl<'a, 'tcx> MutVisitor<'tcx> for EraseRegionsVisitor<'a, 'tcx> { Rvalue::CheckedBinaryOp(..) | Rvalue::UnaryOp(..) | Rvalue::Discriminant(..) | - Rvalue::Box(..) | + Rvalue::NullaryOp(..) | Rvalue::Aggregate(..) => { // These variants don't contain regions. } diff --git a/src/librustc_mir/transform/qualify_consts.rs b/src/librustc_mir/transform/qualify_consts.rs index 4e84cbe6fec..a938e5e29cd 100644 --- a/src/librustc_mir/transform/qualify_consts.rs +++ b/src/librustc_mir/transform/qualify_consts.rs @@ -595,7 +595,9 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { match *rvalue { Rvalue::Use(_) | Rvalue::Repeat(..) | - Rvalue::UnaryOp(..) | + Rvalue::UnaryOp(UnOp::Neg, _) | + Rvalue::UnaryOp(UnOp::Not, _) | + Rvalue::NullaryOp(NullOp::SizeOf, _) | Rvalue::CheckedBinaryOp(..) | Rvalue::Cast(CastKind::ReifyFnPointer, ..) | Rvalue::Cast(CastKind::UnsafeFnPointer, ..) 
| @@ -703,7 +705,8 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { if let ty::TyRawPtr(_) = lhs.ty(self.mir, self.tcx).sty { assert!(op == BinOp::Eq || op == BinOp::Ne || op == BinOp::Le || op == BinOp::Lt || - op == BinOp::Ge || op == BinOp::Gt); + op == BinOp::Ge || op == BinOp::Gt || + op == BinOp::Offset); self.add(Qualif::NOT_CONST); if self.mode != Mode::Fn { @@ -719,7 +722,7 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { } } - Rvalue::Box(_) => { + Rvalue::NullaryOp(NullOp::Box, _) => { self.add(Qualif::NOT_CONST); if self.mode != Mode::Fn { struct_span_err!(self.tcx.sess, self.span, E0010, diff --git a/src/librustc_passes/mir_stats.rs b/src/librustc_passes/mir_stats.rs index 24218725186..e29da3a6496 100644 --- a/src/librustc_passes/mir_stats.rs +++ b/src/librustc_passes/mir_stats.rs @@ -186,7 +186,7 @@ impl<'a, 'tcx> mir_visit::Visitor<'tcx> for StatCollector<'a, 'tcx> { Rvalue::CheckedBinaryOp(..) => "Rvalue::CheckedBinaryOp", Rvalue::UnaryOp(..) => "Rvalue::UnaryOp", Rvalue::Discriminant(..) => "Rvalue::Discriminant", - Rvalue::Box(..) => "Rvalue::Box", + Rvalue::NullaryOp(..) => "Rvalue::NullaryOp", Rvalue::Aggregate(ref kind, ref _operands) => { // AggregateKind is not distinguished by visit API, so // record it. (`super_rvalue` handles `_operands`.) diff --git a/src/librustc_trans/collector.rs b/src/librustc_trans/collector.rs index 429e7b01610..dc4e947b0f6 100644 --- a/src/librustc_trans/collector.rs +++ b/src/librustc_trans/collector.rs @@ -502,7 +502,7 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> { _ => bug!(), } } - mir::Rvalue::Box(..) => { + mir::Rvalue::NullaryOp(mir::NullOp::Box, _) => { let tcx = self.scx.tcx(); let exchange_malloc_fn_def_id = tcx .lang_items diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index 59876a7f2a2..fa400b54d27 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -76,6 +76,7 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf let align = C_uint(bcx.ccx, align); return (size, align); } + assert!(!info.is_null()); match t.sty { ty::TyAdt(def, substs) => { let ccx = bcx.ccx; diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index cd27ddda1b1..4967ef2f790 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -796,6 +796,12 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { Const::new(llval, operand.ty) } + mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => { + assert!(self.ccx.shared().type_is_sized(ty)); + let llval = C_uint(self.ccx, self.ccx.size_of(ty)); + Const::new(llval, tcx.types.usize) + } + _ => span_bug!(span, "{:?} in constant", rvalue) }; @@ -870,6 +876,7 @@ pub fn const_scalar_binop(op: mir::BinOp, llvm::LLVMConstICmp(cmp, lhs, rhs) } } + mir::BinOp::Offset => unreachable!("BinOp::Offset in const-eval!") } } } diff --git a/src/librustc_trans/mir/operand.rs b/src/librustc_trans/mir/operand.rs index 8b7c7d9d372..a12d0fec1cd 100644 --- a/src/librustc_trans/mir/operand.rs +++ b/src/librustc_trans/mir/operand.rs @@ -114,7 +114,7 @@ impl<'a, 'tcx> OperandRef<'tcx> { pub fn deref(self) -> LvalueRef<'tcx> { let projected_ty = self.ty.builtin_deref(true, ty::NoPreference) - .unwrap().ty; + .unwrap_or_else(|| bug!("deref of non-pointer {:?}", self)).ty; let (llptr, llextra) = match self.val { OperandValue::Immediate(llptr) => (llptr, ptr::null_mut()), OperandValue::Pair(llptr, llextra) => (llptr, llextra), diff --git 
a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index 667075e6970..b2f44a5d89f 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -432,7 +432,17 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { }) } - mir::Rvalue::Box(content_ty) => { + mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => { + assert!(bcx.ccx.shared().type_is_sized(ty)); + let val = C_uint(bcx.ccx, bcx.ccx.size_of(ty)); + let tcx = bcx.tcx(); + (bcx, OperandRef { + val: OperandValue::Immediate(val), + ty: tcx.types.usize, + }) + } + + mir::Rvalue::NullaryOp(mir::NullOp::Box, content_ty) => { let content_ty: Ty<'tcx> = self.monomorphize(&content_ty); let llty = type_of::type_of(bcx.ccx, content_ty); let llsize = machine::llsize_of(bcx.ccx, llty); @@ -515,6 +525,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { mir::BinOp::BitOr => bcx.or(lhs, rhs), mir::BinOp::BitAnd => bcx.and(lhs, rhs), mir::BinOp::BitXor => bcx.xor(lhs, rhs), + mir::BinOp::Offset => bcx.inbounds_gep(lhs, &[rhs]), mir::BinOp::Shl => common::build_unchecked_lshift(bcx, lhs, rhs), mir::BinOp::Shr => common::build_unchecked_rshift(bcx, input_ty, lhs, rhs), mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt | @@ -660,7 +671,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { mir::Rvalue::CheckedBinaryOp(..) | mir::Rvalue::UnaryOp(..) | mir::Rvalue::Discriminant(..) | - mir::Rvalue::Box(..) | + mir::Rvalue::NullaryOp(..) | mir::Rvalue::Use(..) => // (*) true, mir::Rvalue::Repeat(..) | diff --git a/src/rustllvm/RustWrapper.cpp b/src/rustllvm/RustWrapper.cpp index c24867224ea..838c180c70b 100644 --- a/src/rustllvm/RustWrapper.cpp +++ b/src/rustllvm/RustWrapper.cpp @@ -781,11 +781,15 @@ extern "C" void LLVMRustWriteTypeToString(LLVMTypeRef Ty, RustStringRef Str) { extern "C" void LLVMRustWriteValueToString(LLVMValueRef V, RustStringRef Str) { RawRustStringOstream OS(Str); - OS << "("; - unwrap(V)->getType()->print(OS); - OS << ":"; - unwrap(V)->print(OS); - OS << ")"; + if (!V) { + OS << "(null)"; + } else { + OS << "("; + unwrap(V)->getType()->print(OS); + OS << ":"; + unwrap(V)->print(OS); + OS << ")"; + } } extern "C" bool LLVMRustLinkInExternalBitcode(LLVMModuleRef DstRef, char *BC, From 55767702ec0f4e710b711815381897b3304f3785 Mon Sep 17 00:00:00 2001 From: Ariel Ben-Yehuda Date: Thu, 18 May 2017 20:56:25 +0300 Subject: [PATCH 07/11] fix RUST_LOG ICE caused by printing a default impl's DefId --- src/librustc/middle/cstore.rs | 2 -- src/librustc/ty/item_path.rs | 4 ++-- src/librustc/ty/maps.rs | 3 +++ src/librustc_borrowck/borrowck/mir/mod.rs | 2 +- src/librustc_metadata/cstore_impl.rs | 10 +++------- src/librustc_mir/transform/qualify_consts.rs | 2 +- src/librustc_mir/transform/type_check.rs | 2 +- src/librustc_typeck/collect.rs | 12 ++++++++++++ src/librustdoc/clean/inline.rs | 2 +- 9 files changed, 24 insertions(+), 15 deletions(-) diff --git a/src/librustc/middle/cstore.rs b/src/librustc/middle/cstore.rs index 8ad1db78595..6597db9e19b 100644 --- a/src/librustc/middle/cstore.rs +++ b/src/librustc/middle/cstore.rs @@ -245,7 +245,6 @@ pub trait CrateStore { // flags fn is_const_fn(&self, did: DefId) -> bool; - fn is_default_impl(&self, impl_did: DefId) -> bool; fn is_dllimport_foreign_item(&self, def: DefId) -> bool; fn is_statically_included_foreign_item(&self, def_id: DefId) -> bool; @@ -364,7 +363,6 @@ impl CrateStore for DummyCrateStore { // flags fn is_const_fn(&self, did: DefId) -> bool { bug!("is_const_fn") } - fn is_default_impl(&self, impl_did: DefId) -> bool { bug!("is_default_impl") } fn 
is_dllimport_foreign_item(&self, id: DefId) -> bool { false } fn is_statically_included_foreign_item(&self, def_id: DefId) -> bool { false } diff --git a/src/librustc/ty/item_path.rs b/src/librustc/ty/item_path.rs index 16d5d1187fc..78536b53ba8 100644 --- a/src/librustc/ty/item_path.rs +++ b/src/librustc/ty/item_path.rs @@ -218,7 +218,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { // Always use types for non-local impls, where types are always // available, and filename/line-number is mostly uninteresting. - let use_types = !impl_def_id.is_local() || { + let use_types = !self.is_default_impl(impl_def_id) && (!impl_def_id.is_local() || { // Otherwise, use filename/line-number if forced. let force_no_types = FORCE_IMPL_FILENAME_LINE.with(|f| f.get()); !force_no_types && { @@ -226,7 +226,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { ty::queries::impl_trait_ref::try_get(self, DUMMY_SP, impl_def_id).is_ok() && ty::queries::type_of::try_get(self, DUMMY_SP, impl_def_id).is_ok() } - }; + }); if !use_types { return self.push_impl_path_fallback(buffer, impl_def_id); diff --git a/src/librustc/ty/maps.rs b/src/librustc/ty/maps.rs index fb352e5be89..757687f00a2 100644 --- a/src/librustc/ty/maps.rs +++ b/src/librustc/ty/maps.rs @@ -774,6 +774,9 @@ define_maps! { <'tcx> /// True if this is a foreign item (i.e., linked via `extern { ... }`). [] is_foreign_item: IsForeignItem(DefId) -> bool, + /// True if this is a default impl (aka impl Foo for ..) + [] is_default_impl: ItemSignature(DefId) -> bool, + /// Get a map with the variance of every item; use `item_variance` /// instead. [] crate_variances: crate_variances(CrateNum) -> Rc, diff --git a/src/librustc_borrowck/borrowck/mir/mod.rs b/src/librustc_borrowck/borrowck/mir/mod.rs index 2eb064305e8..2b39d2a256e 100644 --- a/src/librustc_borrowck/borrowck/mir/mod.rs +++ b/src/librustc_borrowck/borrowck/mir/mod.rs @@ -59,7 +59,7 @@ pub fn borrowck_mir(bcx: &mut BorrowckCtxt, attributes: &[ast::Attribute]) { let tcx = bcx.tcx; let def_id = tcx.hir.local_def_id(id); - debug!("borrowck_mir({}) UNIMPLEMENTED", tcx.item_path_str(def_id)); + debug!("borrowck_mir({:?}) UNIMPLEMENTED", def_id); // It is safe for us to borrow `mir_validated()`: `optimized_mir` // steals it, but it forces the `borrowck` query. diff --git a/src/librustc_metadata/cstore_impl.rs b/src/librustc_metadata/cstore_impl.rs index 9d098557367..b3503713c90 100644 --- a/src/librustc_metadata/cstore_impl.rs +++ b/src/librustc_metadata/cstore_impl.rs @@ -106,6 +106,7 @@ provide! 
{ <'tcx> tcx, def_id, cdata closure_type => { cdata.closure_ty(def_id.index, tcx) } inherent_impls => { Rc::new(cdata.get_inherent_implementations_for_type(def_id.index)) } is_foreign_item => { cdata.is_foreign_item(def_id.index) } + is_default_impl => { cdata.is_default_impl(def_id.index) } describe_def => { cdata.get_def(def_id.index) } def_span => { cdata.get_span(def_id.index, &tcx.sess) } stability => { cdata.get_stability(def_id.index) } @@ -176,11 +177,6 @@ impl CrateStore for cstore::CStore { self.get_crate_data(did.krate).is_const_fn(did.index) } - fn is_default_impl(&self, impl_did: DefId) -> bool { - self.dep_graph.read(DepNode::MetaData(impl_did)); - self.get_crate_data(impl_did.krate).is_default_impl(impl_did.index) - } - fn is_statically_included_foreign_item(&self, def_id: DefId) -> bool { self.do_is_statically_included_foreign_item(def_id) @@ -403,7 +399,7 @@ impl CrateStore for cstore::CStore { } self.dep_graph.read(DepNode::MetaData(def_id)); - debug!("item_body({}): inlining item", tcx.item_path_str(def_id)); + debug!("item_body({:?}): inlining item", def_id); self.get_crate_data(def_id.krate).item_body(tcx, def_id.index) } @@ -515,4 +511,4 @@ impl CrateStore for cstore::CStore { drop(visible_parent_map); self.visible_parent_map.borrow() } -} \ No newline at end of file +} diff --git a/src/librustc_mir/transform/qualify_consts.rs b/src/librustc_mir/transform/qualify_consts.rs index a938e5e29cd..3b1c54f68e4 100644 --- a/src/librustc_mir/transform/qualify_consts.rs +++ b/src/librustc_mir/transform/qualify_consts.rs @@ -361,7 +361,7 @@ impl<'a, 'tcx> Qualifier<'a, 'tcx, 'tcx> { /// Qualify a whole const, static initializer or const fn. fn qualify_const(&mut self) -> Qualif { - debug!("qualifying {} {}", self.mode, self.tcx.item_path_str(self.def_id)); + debug!("qualifying {} {:?}", self.mode, self.def_id); let mir = self.mir; diff --git a/src/librustc_mir/transform/type_check.rs b/src/librustc_mir/transform/type_check.rs index 6d9603ea459..8258627748f 100644 --- a/src/librustc_mir/transform/type_check.rs +++ b/src/librustc_mir/transform/type_check.rs @@ -744,7 +744,7 @@ impl MirPass for TypeckMir { mir: &mut Mir<'tcx>) { let item_id = src.item_id(); let def_id = tcx.hir.local_def_id(item_id); - debug!("run_pass: {}", tcx.item_path_str(def_id)); + debug!("run_pass: {:?}", def_id); if tcx.sess.err_count() > 0 { // compiling a broken program can obviously result in a diff --git a/src/librustc_typeck/collect.rs b/src/librustc_typeck/collect.rs index cb1bd3e099d..fb3bcd31e21 100644 --- a/src/librustc_typeck/collect.rs +++ b/src/librustc_typeck/collect.rs @@ -100,6 +100,7 @@ pub fn provide(providers: &mut Providers) { impl_trait_ref, impl_polarity, is_foreign_item, + is_default_impl, ..*providers }; } @@ -1545,3 +1546,14 @@ fn is_foreign_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, _ => bug!("is_foreign_item applied to non-local def-id {:?}", def_id) } } + +fn is_default_impl<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + def_id: DefId) + -> bool { + match tcx.hir.get_if_local(def_id) { + Some(hir_map::NodeItem(&hir::Item { node: hir::ItemDefaultImpl(..), .. 
})) + => true, + Some(_) => false, + _ => bug!("is_default_impl applied to non-local def-id {:?}", def_id) + } +} diff --git a/src/librustdoc/clean/inline.rs b/src/librustdoc/clean/inline.rs index 5ea3eaa88d7..8f7add14d0a 100644 --- a/src/librustdoc/clean/inline.rs +++ b/src/librustdoc/clean/inline.rs @@ -290,7 +290,7 @@ pub fn build_impl(cx: &DocContext, did: DefId, ret: &mut Vec) { } // If this is a defaulted impl, then bail out early here - if tcx.sess.cstore.is_default_impl(did) { + if tcx.is_default_impl(did) { return ret.push(clean::Item { inner: clean::DefaultImplItem(clean::DefaultImpl { // FIXME: this should be decoded From 6548aefdeb425170cb40f7160cceedf14c97a433 Mon Sep 17 00:00:00 2001 From: Ariel Ben-Yehuda Date: Thu, 18 May 2017 23:58:39 +0300 Subject: [PATCH 08/11] fix loops in unwind code in MSVC I'm not sure how well this works, but it's worth a try. --- src/librustc_trans/mir/analyze.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/librustc_trans/mir/analyze.rs b/src/librustc_trans/mir/analyze.rs index 96ef26d3f6f..0f95668302c 100644 --- a/src/librustc_trans/mir/analyze.rs +++ b/src/librustc_trans/mir/analyze.rs @@ -260,7 +260,9 @@ pub fn cleanup_kinds<'a, 'tcx>(mir: &mir::Mir<'tcx>) -> IndexVec { - set_successor(funclet, succ); + if funclet != succ { + set_successor(funclet, succ); + } } CleanupKind::Internal { funclet: succ_funclet } => { if funclet != succ_funclet { From 162bc513fbc15066d04f865da232702fefc7b923 Mon Sep 17 00:00:00 2001 From: Ariel Ben-Yehuda Date: Thu, 18 May 2017 23:59:39 +0300 Subject: [PATCH 09/11] use a pointer-based array drop loop for non-zero-sized types --- src/librustc_mir/util/elaborate_drops.rs | 189 ++++++++++++++++------- 1 file changed, 136 insertions(+), 53 deletions(-) diff --git a/src/librustc_mir/util/elaborate_drops.rs b/src/librustc_mir/util/elaborate_drops.rs index c1d8d087eac..50ebe366387 100644 --- a/src/librustc_mir/util/elaborate_drops.rs +++ b/src/librustc_mir/util/elaborate_drops.rs @@ -493,13 +493,8 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> let discr_ty = adt.repr.discr_type().to_ty(self.tcx()); let discr = Lvalue::Local(self.new_temp(discr_ty)); let discr_rv = Rvalue::Discriminant(self.lvalue.clone()); - let switch_block = self.elaborator.patch().new_block(BasicBlockData { - statements: vec![ - Statement { - source_info: self.source_info, - kind: StatementKind::Assign(discr.clone(), discr_rv), - } - ], + let switch_block = BasicBlockData { + statements: vec![self.assign(&discr, discr_rv)], terminator: Some(Terminator { source_info: self.source_info, kind: TerminatorKind::SwitchInt { @@ -510,7 +505,8 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> } }), is_cleanup: unwind.is_cleanup(), - }); + }; + let switch_block = self.elaborator.patch().new_block(switch_block); self.drop_flag_test_block(switch_block, succ, unwind) } @@ -531,14 +527,11 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> let ref_lvalue = self.new_temp(ref_ty); let unit_temp = Lvalue::Local(self.new_temp(tcx.mk_nil())); - self.elaborator.patch().new_block(BasicBlockData { - statements: vec![Statement { - source_info: self.source_info, - kind: StatementKind::Assign( - Lvalue::Local(ref_lvalue), - Rvalue::Ref(tcx.types.re_erased, BorrowKind::Mut, self.lvalue.clone()) - ) - }], + let result = BasicBlockData { + statements: vec![self.assign( + &Lvalue::Local(ref_lvalue), + Rvalue::Ref(tcx.types.re_erased, BorrowKind::Mut, self.lvalue.clone()) + )], terminator: Some(Terminator { kind: TerminatorKind::Call { func: 
Operand::function_handle(tcx, drop_fn.def_id, substs, @@ -550,24 +543,33 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> source_info: self.source_info }), is_cleanup: unwind.is_cleanup(), - }) + }; + self.elaborator.patch().new_block(result) } /// create a loop that drops an array: + /// + /// /// loop-block: - /// can_go = index == len + /// can_go = cur == length_or_end /// if can_go then succ else drop-block /// drop-block: - /// ptr = &mut LV[index] - /// index = index + 1 + /// if ptr_based { + /// ptr = cur + /// cur = cur.offset(1) + /// } else { + /// ptr = &mut LV[cur] + /// cur = cur + 1 + /// } /// drop(ptr) fn drop_loop(&mut self, succ: BasicBlock, - index: &Lvalue<'tcx>, - length: &Lvalue<'tcx>, + cur: &Lvalue<'tcx>, + length_or_end: &Lvalue<'tcx>, ety: Ty<'tcx>, - unwind: Unwind) + unwind: Unwind, + ptr_based: bool) -> BasicBlock { let use_ = |lv: &Lvalue<'tcx>| Operand::Consume(lv.clone()); @@ -581,17 +583,21 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> let can_go = &Lvalue::Local(self.new_temp(tcx.types.bool)); let one = self.constant_usize(1); - let drop_block = self.elaborator.patch().new_block(BasicBlockData { + let (ptr_next, cur_next) = if ptr_based { + (Rvalue::Use(use_(cur)), + Rvalue::BinaryOp(BinOp::Offset, use_(cur), one)) + } else { + (Rvalue::Ref( + tcx.types.re_erased, + BorrowKind::Mut, + self.lvalue.clone().index(use_(cur))), + Rvalue::BinaryOp(BinOp::Add, use_(cur), one)) + }; + + let drop_block = BasicBlockData { statements: vec![ - Statement { source_info: self.source_info, kind: StatementKind::Assign( - ptr.clone(), Rvalue::Ref( - tcx.types.re_erased, BorrowKind::Mut, - self.lvalue.clone().index(use_(index)) - ), - )}, - Statement { source_info: self.source_info, kind: StatementKind::Assign( - index.clone(), Rvalue::BinaryOp(BinOp::Add, use_(index), one) - )}, + self.assign(ptr, ptr_next), + self.assign(cur, cur_next) ], is_cleanup: unwind.is_cleanup(), terminator: Some(Terminator { @@ -599,20 +605,22 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> // this gets overwritten by drop elaboration. kind: TerminatorKind::Unreachable, }) - }); + }; + let drop_block = self.elaborator.patch().new_block(drop_block); - let loop_block = self.elaborator.patch().new_block(BasicBlockData { + let loop_block = BasicBlockData { statements: vec![ - Statement { source_info: self.source_info, kind: StatementKind::Assign( - can_go.clone(), Rvalue::BinaryOp(BinOp::Eq, use_(index), use_(length)) - )}, + self.assign(can_go, Rvalue::BinaryOp(BinOp::Eq, + use_(cur), + use_(length_or_end))) ], is_cleanup: unwind.is_cleanup(), terminator: Some(Terminator { source_info: self.source_info, kind: TerminatorKind::if_(tcx, use_(can_go), succ, drop_block) }) - }); + }; + let loop_block = self.elaborator.patch().new_block(loop_block); self.elaborator.patch().patch_terminator(drop_block, TerminatorKind::Drop { location: ptr.clone().deref(), @@ -625,29 +633,97 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> fn open_drop_for_array(&mut self, ety: Ty<'tcx>) -> BasicBlock { debug!("open_drop_for_array({:?})", ety); - // FIXME: using an index instead of a pointer to avoid - // special-casing ZSTs. 
+ + // if size_of::() == 0 { + // index_based_loop + // } else { + // ptr_based_loop + // } + let tcx = self.tcx(); - let index = &Lvalue::Local(self.new_temp(tcx.types.usize)); - let length = &Lvalue::Local(self.new_temp(tcx.types.usize)); + + let use_ = |lv: &Lvalue<'tcx>| Operand::Consume(lv.clone()); + let size = &Lvalue::Local(self.new_temp(tcx.types.usize)); + let size_is_zero = &Lvalue::Local(self.new_temp(tcx.types.bool)); + let base_block = BasicBlockData { + statements: vec![ + self.assign(size, Rvalue::NullaryOp(NullOp::SizeOf, ety)), + self.assign(size_is_zero, Rvalue::BinaryOp(BinOp::Eq, + use_(size), + self.constant_usize(0))) + ], + is_cleanup: self.unwind.is_cleanup(), + terminator: Some(Terminator { + source_info: self.source_info, + kind: TerminatorKind::if_( + tcx, + use_(size_is_zero), + self.drop_loop_pair(ety, false), + self.drop_loop_pair(ety, true) + ) + }) + }; + self.elaborator.patch().new_block(base_block) + } + + // create a pair of drop-loops of `lvalue`, which drops its contents + // even in the case of 1 panic. If `ptr_based`, create a pointer loop, + // otherwise create an index loop. + fn drop_loop_pair(&mut self, ety: Ty<'tcx>, ptr_based: bool) -> BasicBlock { + debug!("drop_loop_pair({:?}, {:?})", ety, ptr_based); + let tcx = self.tcx(); + let iter_ty = if ptr_based { + tcx.mk_ptr(ty::TypeAndMut { ty: ety, mutbl: hir::Mutability::MutMutable }) + } else { + tcx.types.usize + }; + + let cur = Lvalue::Local(self.new_temp(iter_ty)); + let length = Lvalue::Local(self.new_temp(tcx.types.usize)); + let length_or_end = if ptr_based { + Lvalue::Local(self.new_temp(iter_ty)) + } else { + length.clone() + }; let unwind = self.unwind.map(|unwind| { - self.drop_loop(unwind, index, length, ety, Unwind::InCleanup) + self.drop_loop(unwind, + &cur, + &length_or_end, + ety, + Unwind::InCleanup, + ptr_based) }); let succ = self.succ; // FIXME(#6393) - let loop_block = self.drop_loop(succ, index, length, ety, unwind); + let loop_block = self.drop_loop( + succ, + &cur, + &length_or_end, + ety, + unwind, + ptr_based); let zero = self.constant_usize(0); + let mut drop_block_stmts = vec![]; + drop_block_stmts.push(self.assign(&length, Rvalue::Len(self.lvalue.clone()))); + if ptr_based { + // cur = &LV[0]; + // end = &LV[len]; + drop_block_stmts.push(self.assign(&cur, Rvalue::Ref( + tcx.types.re_erased, BorrowKind::Mut, + self.lvalue.clone().index(zero.clone()) + ))); + drop_block_stmts.push(self.assign(&length_or_end, Rvalue::Ref( + tcx.types.re_erased, BorrowKind::Mut, + self.lvalue.clone().index(Operand::Consume(length.clone())) + ))); + } else { + // index = 0 (length already pushed) + drop_block_stmts.push(self.assign(&cur, Rvalue::Use(zero))); + } let drop_block = self.elaborator.patch().new_block(BasicBlockData { - statements: vec![ - Statement { source_info: self.source_info, kind: StatementKind::Assign( - length.clone(), Rvalue::Len(self.lvalue.clone()) - )}, - Statement { source_info: self.source_info, kind: StatementKind::Assign( - index.clone(), Rvalue::Use(zero), - )}, - ], + statements: drop_block_stmts, is_cleanup: unwind.is_cleanup(), terminator: Some(Terminator { source_info: self.source_info, @@ -836,4 +912,11 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> literal: Literal::Value { value: ConstVal::Integral(self.tcx().const_usize(val)) } }) } + + fn assign(&self, lhs: &Lvalue<'tcx>, rhs: Rvalue<'tcx>) -> Statement<'tcx> { + Statement { + source_info: self.source_info, + kind: StatementKind::Assign(lhs.clone(), rhs) + } + } } From 
6adfbaf2d38df2e39c94d07dd6563da92b732049 Mon Sep 17 00:00:00 2001 From: Ariel Ben-Yehuda Date: Tue, 23 May 2017 22:01:51 +0300 Subject: [PATCH 10/11] increase macro recursion limit --- src/librustc/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/librustc/lib.rs b/src/librustc/lib.rs index d1d9dd4853d..2a877aca53b 100644 --- a/src/librustc/lib.rs +++ b/src/librustc/lib.rs @@ -47,7 +47,7 @@ #![cfg_attr(stage0, feature(staged_api))] #![cfg_attr(stage0, feature(loop_break_value))] -#![recursion_limit="128"] +#![recursion_limit="192"] extern crate arena; extern crate core; From ee982d4355a10790993ab65f80730ba75395e140 Mon Sep 17 00:00:00 2001 From: Ariel Ben-Yehuda Date: Tue, 23 May 2017 23:47:15 +0300 Subject: [PATCH 11/11] fix translation of MSVC funclets that loop to their own start --- src/librustc_data_structures/indexed_vec.rs | 7 ++ src/librustc_trans/common.rs | 9 -- src/librustc_trans/mir/analyze.rs | 10 +++ src/librustc_trans/mir/block.rs | 97 ++++++++++----------- src/librustc_trans/mir/mod.rs | 50 +++++++---- 5 files changed, 93 insertions(+), 80 deletions(-) diff --git a/src/librustc_data_structures/indexed_vec.rs b/src/librustc_data_structures/indexed_vec.rs index 0642ddc7162..29ac650aa70 100644 --- a/src/librustc_data_structures/indexed_vec.rs +++ b/src/librustc_data_structures/indexed_vec.rs @@ -212,6 +212,13 @@ impl IndexMut for IndexVec { } } +impl Default for IndexVec { + #[inline] + fn default() -> Self { + Self::new() + } +} + impl Extend for IndexVec { #[inline] fn extend>(&mut self, iter: J) { diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index efd4f136785..903c74edd1c 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -191,15 +191,6 @@ impl Funclet { } } -impl Clone for Funclet { - fn clone(&self) -> Funclet { - Funclet { - cleanuppad: self.cleanuppad, - operand: OperandBundleDef::new("funclet", &[self.cleanuppad]), - } - } -} - pub fn val_ty(v: ValueRef) -> Type { unsafe { Type::from_ref(llvm::LLVMTypeOf(v)) diff --git a/src/librustc_trans/mir/analyze.rs b/src/librustc_trans/mir/analyze.rs index 0f95668302c..45afcf51b52 100644 --- a/src/librustc_trans/mir/analyze.rs +++ b/src/librustc_trans/mir/analyze.rs @@ -197,6 +197,16 @@ pub enum CleanupKind { Internal { funclet: mir::BasicBlock } } +impl CleanupKind { + pub fn funclet_bb(self, for_bb: mir::BasicBlock) -> Option { + match self { + CleanupKind::NotCleanup => None, + CleanupKind::Funclet => Some(for_bb), + CleanupKind::Internal { funclet } => Some(funclet), + } + } +} + pub fn cleanup_kinds<'a, 'tcx>(mir: &mir::Mir<'tcx>) -> IndexVec { fn discover_masters<'tcx>(result: &mut IndexVec, mir: &mir::Mir<'tcx>) { diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index f6c8ee0c825..4926485a121 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -19,8 +19,7 @@ use adt; use base::{self, Lifetime}; use callee; use builder::Builder; -use common::{self, Funclet}; -use common::{C_bool, C_str_slice, C_struct, C_u32, C_undef}; +use common::{self, C_bool, C_str_slice, C_struct, C_u32, C_undef}; use consts; use machine::llalign_of_min; use meth; @@ -28,95 +27,88 @@ use monomorphize; use type_of; use type_::Type; -use rustc_data_structures::indexed_vec::IndexVec; use syntax::symbol::Symbol; use std::cmp; use super::{MirContext, LocalRef}; -use super::analyze::CleanupKind; use super::constant::Const; use super::lvalue::{Alignment, LvalueRef}; use super::operand::OperandRef; use 
super::operand::OperandValue::{Pair, Ref, Immediate}; impl<'a, 'tcx> MirContext<'a, 'tcx> { - pub fn trans_block(&mut self, bb: mir::BasicBlock, - funclets: &IndexVec>) { + pub fn trans_block(&mut self, bb: mir::BasicBlock) { let mut bcx = self.get_builder(bb); let data = &self.mir[bb]; debug!("trans_block({:?}={:?})", bb, data); - let funclet = match self.cleanup_kinds[bb] { - CleanupKind::Internal { funclet } => funclets[funclet].as_ref(), - _ => funclets[bb].as_ref(), - }; - for statement in &data.statements { bcx = self.trans_statement(bcx, statement); } - self.trans_terminator(bcx, bb, data.terminator(), funclet); + self.trans_terminator(bcx, bb, data.terminator()); } fn trans_terminator(&mut self, mut bcx: Builder<'a, 'tcx>, bb: mir::BasicBlock, - terminator: &mir::Terminator<'tcx>, - funclet: Option<&Funclet>) + terminator: &mir::Terminator<'tcx>) { debug!("trans_terminator: {:?}", terminator); // Create the cleanup bundle, if needed. let tcx = bcx.tcx(); + let span = terminator.source_info.span; + let funclet_bb = self.cleanup_kinds[bb].funclet_bb(bb); + let funclet = funclet_bb.and_then(|funclet_bb| self.funclets[funclet_bb].as_ref()); + let cleanup_pad = funclet.map(|lp| lp.cleanuppad()); let cleanup_bundle = funclet.map(|l| l.bundle()); - let funclet_br = |this: &Self, bcx: Builder, bb: mir::BasicBlock| { - let lltarget = this.blocks[bb]; - if let Some(cp) = cleanup_pad { - match this.cleanup_kinds[bb] { - CleanupKind::Funclet => { - // micro-optimization: generate a `ret` rather than a jump - // to a return block - bcx.cleanup_ret(cp, Some(lltarget)); - } - CleanupKind::Internal { .. } => bcx.br(lltarget), - CleanupKind::NotCleanup => bug!("jump from cleanup bb to bb {:?}", bb) + let lltarget = |this: &mut Self, target: mir::BasicBlock| { + let lltarget = this.blocks[target]; + let target_funclet = this.cleanup_kinds[target].funclet_bb(target); + match (funclet_bb, target_funclet) { + (None, None) => (lltarget, false), + (Some(f), Some(t_f)) + if f == t_f || !base::wants_msvc_seh(tcx.sess) + => (lltarget, false), + (None, Some(_)) => { + // jump *into* cleanup - need a landing pad if GNU + (this.landing_pad_to(target), false) + } + (Some(_), None) => span_bug!(span, "{:?} - jump out of cleanup?", terminator), + (Some(_), Some(_)) => { + (this.landing_pad_to(target), true) } - } else { - bcx.br(lltarget); } }; let llblock = |this: &mut Self, target: mir::BasicBlock| { - let lltarget = this.blocks[target]; + let (lltarget, is_cleanupret) = lltarget(this, target); + if is_cleanupret { + // MSVC cross-funclet jump - need a trampoline - if let Some(cp) = cleanup_pad { - match this.cleanup_kinds[target] { - CleanupKind::Funclet => { - // MSVC cross-funclet jump - need a trampoline - - debug!("llblock: creating cleanup trampoline for {:?}", target); - let name = &format!("{:?}_cleanup_trampoline_{:?}", bb, target); - let trampoline = this.new_block(name); - trampoline.cleanup_ret(cp, Some(lltarget)); - trampoline.llbb() - } - CleanupKind::Internal { .. 
} => lltarget, - CleanupKind::NotCleanup => - bug!("jump from cleanup bb {:?} to bb {:?}", bb, target) - } + debug!("llblock: creating cleanup trampoline for {:?}", target); + let name = &format!("{:?}_cleanup_trampoline_{:?}", bb, target); + let trampoline = this.new_block(name); + trampoline.cleanup_ret(cleanup_pad.unwrap(), Some(lltarget)); + trampoline.llbb() } else { - if let (CleanupKind::NotCleanup, CleanupKind::Funclet) = - (this.cleanup_kinds[bb], this.cleanup_kinds[target]) - { - // jump *into* cleanup - need a landing pad if GNU - this.landing_pad_to(target) - } else { - lltarget - } + lltarget + } + }; + + let funclet_br = |this: &mut Self, bcx: Builder, target: mir::BasicBlock| { + let (lltarget, is_cleanupret) = lltarget(this, target); + if is_cleanupret { + // micro-optimization: generate a `ret` rather than a jump + // to a trampoline. + bcx.cleanup_ret(cleanup_pad.unwrap(), Some(lltarget)); + } else { + bcx.br(lltarget); } }; @@ -168,7 +160,6 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } }; - let span = terminator.source_info.span; self.set_debug_loc(&bcx, terminator.source_info); match terminator.kind { mir::TerminatorKind::Resume => { @@ -752,7 +743,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { fn landing_pad_uncached(&mut self, target_bb: BasicBlockRef) -> BasicBlockRef { if base::wants_msvc_seh(self.ccx.sess()) { - return target_bb; + span_bug!(self.mir.span, "landing pad was not inserted?") } let bcx = self.new_block("cleanup"); diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index 19a556bf3f0..c54dfb375c0 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -69,6 +69,10 @@ pub struct MirContext<'a, 'tcx:'a> { /// The funclet status of each basic block cleanup_kinds: IndexVec, + /// When targeting MSVC, this stores the cleanup info for each funclet + /// BB. This is initialized as we compute the funclets' head block in RPO. + funclets: &'a IndexVec>, + /// This stores the landing-pad block for a given BB, computed lazily on GNU /// and eagerly on MSVC. landing_pads: IndexVec>, @@ -202,8 +206,11 @@ pub fn trans_mir<'a, 'tcx: 'a>( debuginfo::create_function_debug_context(ccx, instance, sig, llfn, mir); let bcx = Builder::new_block(ccx, llfn, "start"); - let cleanup_kinds = analyze::cleanup_kinds(&mir); + if mir.basic_blocks().iter().any(|bb| bb.is_cleanup) { + bcx.set_personality_fn(ccx.eh_personality()); + } + let cleanup_kinds = analyze::cleanup_kinds(&mir); // Allocate a `Block` for every basic block, except // the start block, if nothing loops back to it. let reentrant_start_block = !mir.predecessors_for(mir::START_BLOCK).is_empty(); @@ -218,6 +225,7 @@ pub fn trans_mir<'a, 'tcx: 'a>( // Compute debuginfo scopes from MIR scopes. let scopes = debuginfo::create_mir_scopes(ccx, mir, &debug_context); + let (landing_pads, funclets) = create_funclets(&bcx, &cleanup_kinds, &block_bcxs); let mut mircx = MirContext { mir: mir, @@ -228,7 +236,8 @@ pub fn trans_mir<'a, 'tcx: 'a>( blocks: block_bcxs, unreachable_block: None, cleanup_kinds: cleanup_kinds, - landing_pads: IndexVec::from_elem(None, mir.basic_blocks()), + landing_pads: landing_pads, + funclets: &funclets, scopes: scopes, locals: IndexVec::new(), debug_context: debug_context, @@ -306,28 +315,13 @@ pub fn trans_mir<'a, 'tcx: 'a>( // emitting should be enabled. 
debuginfo::start_emitting_source_locations(&mircx.debug_context); - let funclets: IndexVec> = - mircx.cleanup_kinds.iter_enumerated().map(|(bb, cleanup_kind)| { - if let CleanupKind::Funclet = *cleanup_kind { - let bcx = mircx.get_builder(bb); - unsafe { - llvm::LLVMSetPersonalityFn(mircx.llfn, mircx.ccx.eh_personality()); - } - if base::wants_msvc_seh(ccx.sess()) { - return Some(Funclet::new(bcx.cleanup_pad(None, &[]))); - } - } - - None - }).collect(); - let rpo = traversal::reverse_postorder(&mir); let mut visited = BitVector::new(mir.basic_blocks().len()); // Translate the body of each block using reverse postorder for (bb, _) in rpo { visited.insert(bb.index()); - mircx.trans_block(bb, &funclets); + mircx.trans_block(bb); } // Remove blocks that haven't been visited, or have no @@ -343,6 +337,26 @@ pub fn trans_mir<'a, 'tcx: 'a>( } } +fn create_funclets<'a, 'tcx>( + bcx: &Builder<'a, 'tcx>, + cleanup_kinds: &IndexVec, + block_bcxs: &IndexVec) + -> (IndexVec>, + IndexVec>) +{ + block_bcxs.iter_enumerated().zip(cleanup_kinds).map(|((bb, &llbb), cleanup_kind)| { + match *cleanup_kind { + CleanupKind::Funclet if base::wants_msvc_seh(bcx.sess()) => { + let cleanup_bcx = bcx.build_sibling_block(&format!("funclet_{:?}", bb)); + let cleanup = cleanup_bcx.cleanup_pad(None, &[]); + cleanup_bcx.br(llbb); + (Some(cleanup_bcx.llbb()), Some(Funclet::new(cleanup))) + } + _ => (None, None) + } + }).unzip() +} + /// Produce, for each argument, a `ValueRef` pointing at the /// argument's value. As arguments are lvalues, these are always /// indirect.
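
For readers following the drop-glue changes above without stepping through the MIR builder: the net effect of the array-drop patches (the index-based loop, the `Eq` exit test, the `NullOp::SizeOf` guard, and the `BinOp::Offset`-based pointer loop) is that dropping an `[T; n]` or `[T]` now behaves like the source-level loop sketched below. This is an illustrative sketch only, written against stable `std` APIs; `drop_array_contents` and its `base`/`len` parameters are hypothetical and are not functions added by these patches.

use std::{mem, ptr};

// Sketch of the drop glue the elaborator now emits for arrays and slices:
// an index-based loop for zero-sized element types, a pointer-based loop
// (BinOp::Offset) otherwise, exiting when the cursor equals its end value
// (BinOp::Eq), mirroring drop_loop / drop_loop_pair above.
unsafe fn drop_array_contents<T>(base: *mut T, len: usize) {
    if mem::size_of::<T>() == 0 {
        // index loop: a raw-pointer walk over ZSTs would never advance
        let mut cur = 0;
        while cur != len {
            ptr::drop_in_place(base.add(cur));
            cur += 1;
        }
    } else {
        // pointer loop: cur = &mut LV[0], end = &mut LV[len]
        let mut cur = base;
        let end = base.add(len);
        while cur != end {
            ptr::drop_in_place(cur);
            cur = cur.add(1);
        }
    }
}

Because `drop_loop_pair` also builds a cleanup copy of the same loop (`Unwind::InCleanup`), a panic from one element's destructor unwinds into that copy and the remaining elements are still dropped rather than leaked.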