Mirror of https://github.com/rust-lang/rust.git (synced 2024-11-25 08:13:41 +00:00)
Auto merge of #38302 - Mark-Simulacrum:trans-cleanup, r=eddyb
Cleanup old trans

This is a cleanup of old trans, with the following main points:

- Remove the `build.rs` API (prefer using `Builder` directly, which is now passed where needed through `BlockAndBuilder`).
- Remove `Block` (inlining it into `BlockAndBuilder`).
- Remove `Callee::call`, primarily through inlining and simplification of code.
- Thin `FunctionContext`:
  - `mir`, `debug_scopes`, `scopes`, and `fn_ty` are moved to `MirContext`.
  - `param_env` is moved to `SharedCrateContext` and renamed to `empty_param_env`.
  - `llretslotptr` is removed, replaced with more careful management of the return values in calls.
  - `landingpad_alloca` is inlined into cleanup.
  - `param_substs` are moved to `MirContext`.
  - `span` is removed; it was never set to anything but `None`.
  - `block_arena` and `lpad_arena` are removed, since neither was necessary (landing pads and blocks are quite small, and neither needs arena allocation).
- Fix `drop_in_place` not running other destructors in the same function. Fixes #35566 (thanks to @est31 for confirming).
This commit is contained in: commit 1b38776c1f
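The most mechanical change in this diff shows up in almost every hunk below: the old `build.rs` free functions that took a block as their first argument (`Store(bcx, val, ptr)`, `StructGEP(bcx, val, 0)`, `Load(bcx, ptr)`) become methods on the builder carried by `BlockAndBuilder` (`bcx.store(val, ptr)`, `bcx.struct_gep(val, 0)`, `bcx.load(ptr)`), and the `bcx.ccx()` accessor becomes a plain `ccx` field. A minimal sketch of that shape, using stand-in types rather than rustc's real ones:

```rust
// Illustrative sketch only: `ValueRef` and `Builder` are stand-ins, not the
// real librustc_trans types; they just mirror the call-site shapes in the diff.
#[derive(Clone, Copy, Debug)]
struct ValueRef(usize);

struct Builder {
    insns: Vec<String>,
}

impl Builder {
    // New style (after this PR): operations are methods, so call sites read
    // `bcx.store(val, ptr)` instead of `Store(bcx, val, ptr)`.
    fn store(&mut self, val: ValueRef, ptr: ValueRef) {
        self.insns.push(format!("store {:?} -> {:?}", val, ptr));
    }

    fn struct_gep(&mut self, val: ValueRef, ix: usize) -> ValueRef {
        self.insns.push(format!("gep {:?}[{}]", val, ix));
        ValueRef(val.0 + ix)
    }
}

// Old style (the deleted build.rs API): a free function that threads the
// block through as its first argument and forwards to the builder.
#[allow(non_snake_case)]
fn Store(bcx: &mut Builder, val: ValueRef, ptr: ValueRef) {
    bcx.store(val, ptr)
}

fn main() {
    let mut bcx = Builder { insns: Vec::new() };
    let field = bcx.struct_gep(ValueRef(0), 1); // new style
    Store(&mut bcx, ValueRef(7), field);        // old style, now removed
    println!("{:#?}", bcx.insns);
}
```

The same motion applies to `bcx.ccx()` becoming `bcx.ccx`: once `Block` was folded into `BlockAndBuilder`, the crate context could be exposed as a plain field instead of an accessor method.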
@@ -127,6 +127,7 @@ pub fn usable_size(size: usize, align: usize) -> usize {
pub const EMPTY: *mut () = 0x1 as *mut ();

/// The allocator for unique pointers.
+ // This function must not unwind. If it does, MIR trans will fail.
#[cfg(not(test))]
#[lang = "exchange_malloc"]
#[inline]
@@ -710,6 +710,7 @@ extern "C" {

// Operations on instructions
pub fn LLVMGetInstructionParent(Inst: ValueRef) -> BasicBlockRef;
+ pub fn LLVMGetFirstBasicBlock(Fn: ValueRef) -> BasicBlockRef;
pub fn LLVMGetFirstInstruction(BB: BasicBlockRef) -> ValueRef;
pub fn LLVMInstructionEraseFromParent(Inst: ValueRef);

@@ -10,7 +10,6 @@

use llvm::{self, ValueRef, Integer, Pointer, Float, Double, Struct, Array, Vector, AttributePlace};
use base;
- use build::AllocaFcx;
use common::{type_is_fat_ptr, BlockAndBuilder, C_uint};
use context::CrateContext;
use cabi_x86;
@@ -99,21 +98,11 @@ impl ArgAttributes {
self
}

- pub fn unset(&mut self, attr: ArgAttribute) -> &mut Self {
- self.regular = self.regular - attr;
- self
- }
-
pub fn set_dereferenceable(&mut self, bytes: u64) -> &mut Self {
self.dereferenceable_bytes = bytes;
self
}

- pub fn unset_dereferenceable(&mut self) -> &mut Self {
- self.dereferenceable_bytes = 0;
- self
- }
-
pub fn apply_llfn(&self, idx: AttributePlace, llfn: ValueRef) {
unsafe {
self.regular.for_each_kind(|attr| attr.apply_llfn(idx, llfn));
@@ -246,7 +235,7 @@ impl ArgType {
if self.is_ignore() {
return;
}
- let ccx = bcx.ccx();
+ let ccx = bcx.ccx;
if self.is_indirect() {
let llsz = llsize_of(ccx, self.ty);
let llalign = llalign_of_min(ccx, self.ty);
@@ -278,7 +267,7 @@ impl ArgType {
// bitcasting to the struct type yields invalid cast errors.

// We instead thus allocate some scratch space...
- let llscratch = AllocaFcx(bcx.fcx(), ty, "abi_cast");
+ let llscratch = bcx.fcx().alloca(ty, "abi_cast");
base::Lifetime::Start.call(bcx, llscratch);

// ...where we first store the value...
@@ -431,7 +420,7 @@ impl FnType {
let ret_ty = sig.output();
let mut ret = arg_of(ret_ty, true);

- if !type_is_fat_ptr(ccx.tcx(), ret_ty) {
+ if !type_is_fat_ptr(ccx, ret_ty) {
// The `noalias` attribute on the return value is useful to a
// function ptr caller.
if let ty::TyBox(_) = ret_ty.sty {
@@ -496,7 +485,7 @@ impl FnType {
for ty in inputs.iter().chain(extra_args.iter()) {
let mut arg = arg_of(ty, false);

- if type_is_fat_ptr(ccx.tcx(), ty) {
+ if type_is_fat_ptr(ccx, ty) {
let original_tys = arg.original_ty.field_types();
let sizing_tys = arg.ty.field_types();
assert_eq!((original_tys.len(), sizing_tys.len()), (2, 2));
@@ -569,7 +558,7 @@ impl FnType {
};
// Fat pointers are returned by-value.
if !self.ret.is_ignore() {
- if !type_is_fat_ptr(ccx.tcx(), sig.output()) {
+ if !type_is_fat_ptr(ccx, sig.output()) {
fixup(&mut self.ret);
}
}

@@ -48,9 +48,7 @@ use std;
use llvm::{ValueRef, True, IntEQ, IntNE};
use rustc::ty::layout;
use rustc::ty::{self, Ty, AdtKind};
- use build::*;
use common::*;
- use debuginfo::DebugLoc;
use glue;
use base;
use machine;
@@ -295,7 +293,7 @@ fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, fields: &Vec<Ty<'tcx>>
sizing: bool, dst: bool) -> Vec<Type> {
let fields = variant.field_index_by_increasing_offset().map(|i| fields[i as usize]);
if sizing {
- fields.filter(|ty| !dst || type_is_sized(cx.tcx(), *ty))
+ fields.filter(|ty| !dst || cx.shared().type_is_sized(*ty))
.map(|ty| type_of::sizing_type_of(cx, ty)).collect()
} else {
fields.map(|ty| type_of::in_memory_type_of(cx, ty)).collect()
@@ -304,12 +302,13 @@ fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, fields: &Vec<Ty<'tcx>>

/// Obtain a representation of the discriminant sufficient to translate
/// destructuring; this may or may not involve the actual discriminant.
- pub fn trans_switch<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- t: Ty<'tcx>,
- scrutinee: ValueRef,
- range_assert: bool)
- -> (BranchKind, Option<ValueRef>) {
- let l = bcx.ccx().layout_of(t);
+ pub fn trans_switch<'a, 'tcx>(
+ bcx: &BlockAndBuilder<'a, 'tcx>,
+ t: Ty<'tcx>,
+ scrutinee: ValueRef,
+ range_assert: bool
+ ) -> (BranchKind, Option<ValueRef>) {
+ let l = bcx.ccx.layout_of(t);
match *l {
layout::CEnum { .. } | layout::General { .. } |
layout::RawNullablePointer { .. } | layout::StructWrappedNullablePointer { .. } => {
@@ -331,34 +330,37 @@ pub fn is_discr_signed<'tcx>(l: &layout::Layout) -> bool {
}

/// Obtain the actual discriminant of a value.
- pub fn trans_get_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>,
- scrutinee: ValueRef, cast_to: Option<Type>,
- range_assert: bool)
- -> ValueRef {
+ pub fn trans_get_discr<'a, 'tcx>(
+ bcx: &BlockAndBuilder<'a, 'tcx>,
+ t: Ty<'tcx>,
+ scrutinee: ValueRef,
+ cast_to: Option<Type>,
+ range_assert: bool
+ ) -> ValueRef {
let (def, substs) = match t.sty {
ty::TyAdt(ref def, substs) if def.adt_kind() == AdtKind::Enum => (def, substs),
_ => bug!("{} is not an enum", t)
};

debug!("trans_get_discr t: {:?}", t);
- let l = bcx.ccx().layout_of(t);
+ let l = bcx.ccx.layout_of(t);

let val = match *l {
layout::CEnum { discr, min, max, .. } => {
load_discr(bcx, discr, scrutinee, min, max, range_assert)
}
layout::General { discr, .. } => {
- let ptr = StructGEP(bcx, scrutinee, 0);
+ let ptr = bcx.struct_gep(scrutinee, 0);
load_discr(bcx, discr, ptr, 0, def.variants.len() as u64 - 1,
range_assert)
}
- layout::Univariant { .. } | layout::UntaggedUnion { .. } => C_u8(bcx.ccx(), 0),
+ layout::Univariant { .. } | layout::UntaggedUnion { .. } => C_u8(bcx.ccx, 0),
layout::RawNullablePointer { nndiscr, .. } => {
let cmp = if nndiscr == 0 { IntEQ } else { IntNE };
- let llptrty = type_of::sizing_type_of(bcx.ccx(),
- monomorphize::field_ty(bcx.ccx().tcx(), substs,
+ let llptrty = type_of::sizing_type_of(bcx.ccx,
+ monomorphize::field_ty(bcx.ccx.tcx(), substs,
&def.variants[nndiscr as usize].fields[0]));
- ICmp(bcx, cmp, Load(bcx, scrutinee), C_null(llptrty), DebugLoc::None)
+ bcx.icmp(cmp, bcx.load(scrutinee), C_null(llptrty))
}
layout::StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => {
struct_wrapped_nullable_bitdiscr(bcx, nndiscr, discrfield, scrutinee)
@@ -367,24 +369,28 @@ pub fn trans_get_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>,
};
match cast_to {
None => val,
- Some(llty) => if is_discr_signed(&l) { SExt(bcx, val, llty) } else { ZExt(bcx, val, llty) }
+ Some(llty) => if is_discr_signed(&l) { bcx.sext(val, llty) } else { bcx.zext(val, llty) }
}
}

- fn struct_wrapped_nullable_bitdiscr(bcx: Block, nndiscr: u64, discrfield: &layout::FieldPath,
- scrutinee: ValueRef) -> ValueRef {
- let llptrptr = GEPi(bcx, scrutinee,
+ fn struct_wrapped_nullable_bitdiscr(
+ bcx: &BlockAndBuilder,
+ nndiscr: u64,
+ discrfield: &layout::FieldPath,
+ scrutinee: ValueRef
+ ) -> ValueRef {
+ let llptrptr = bcx.gepi(scrutinee,
&discrfield.iter().map(|f| *f as usize).collect::<Vec<_>>()[..]);
- let llptr = Load(bcx, llptrptr);
+ let llptr = bcx.load(llptrptr);
let cmp = if nndiscr == 0 { IntEQ } else { IntNE };
- ICmp(bcx, cmp, llptr, C_null(val_ty(llptr)), DebugLoc::None)
+ bcx.icmp(cmp, llptr, C_null(val_ty(llptr)))
}

/// Helper for cases where the discriminant is simply loaded.
- fn load_discr(bcx: Block, ity: layout::Integer, ptr: ValueRef, min: u64, max: u64,
+ fn load_discr(bcx: &BlockAndBuilder, ity: layout::Integer, ptr: ValueRef, min: u64, max: u64,
range_assert: bool)
-> ValueRef {
- let llty = Type::from_integer(bcx.ccx(), ity);
+ let llty = Type::from_integer(bcx.ccx, ity);
assert_eq!(val_ty(ptr), llty.ptr_to());
let bits = ity.size().bits();
assert!(bits <= 64);
@@ -397,11 +403,11 @@ fn load_discr(bcx: Block, ity: layout::Integer, ptr: ValueRef, min: u64, max: u6
// rejected by the LLVM verifier (it would mean either an
// empty set, which is impossible, or the entire range of the
// type, which is pointless).
- Load(bcx, ptr)
+ bcx.load(ptr)
} else {
// llvm::ConstantRange can deal with ranges that wrap around,
// so an overflow on (max + 1) is fine.
- LoadRangeAssert(bcx, ptr, min, max.wrapping_add(1), /* signed: */ True)
+ bcx.load_range_assert(ptr, min, max.wrapping_add(1), /* signed: */ True)
}
}

@@ -409,18 +415,17 @@ fn load_discr(bcx: Block, ity: layout::Integer, ptr: ValueRef, min: u64, max: u6
/// discriminant-like value returned by `trans_switch`.
///
/// This should ideally be less tightly tied to `_match`.
- pub fn trans_case<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, value: Disr)
- -> ValueRef {
- let l = bcx.ccx().layout_of(t);
+ pub fn trans_case<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, t: Ty<'tcx>, value: Disr) -> ValueRef {
+ let l = bcx.ccx.layout_of(t);
match *l {
layout::CEnum { discr, .. }
| layout::General { discr, .. }=> {
- C_integral(Type::from_integer(bcx.ccx(), discr), value.0, true)
+ C_integral(Type::from_integer(bcx.ccx, discr), value.0, true)
}
layout::RawNullablePointer { .. } |
layout::StructWrappedNullablePointer { .. } => {
assert!(value == Disr(0) || value == Disr(1));
- C_bool(bcx.ccx(), value != Disr(0))
+ C_bool(bcx.ccx, value != Disr(0))
}
_ => {
bug!("{} does not have a discriminant. Represented as {:#?}", t, l);
@@ -430,18 +435,19 @@ pub fn trans_case<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, value: Disr)

/// Set the discriminant for a new value of the given case of the given
/// representation.
- pub fn trans_set_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>,
- val: ValueRef, to: Disr) {
- let l = bcx.ccx().layout_of(t);
+ pub fn trans_set_discr<'a, 'tcx>(
+ bcx: &BlockAndBuilder<'a, 'tcx>, t: Ty<'tcx>, val: ValueRef, to: Disr
+ ) {
+ let l = bcx.ccx.layout_of(t);
match *l {
layout::CEnum{ discr, min, max, .. } => {
assert_discr_in_range(Disr(min), Disr(max), to);
- Store(bcx, C_integral(Type::from_integer(bcx.ccx(), discr), to.0, true),
+ bcx.store(C_integral(Type::from_integer(bcx.ccx, discr), to.0, true),
val);
}
layout::General{ discr, .. } => {
- Store(bcx, C_integral(Type::from_integer(bcx.ccx(), discr), to.0, true),
- StructGEP(bcx, val, 0));
+ bcx.store(C_integral(Type::from_integer(bcx.ccx, discr), to.0, true),
+ bcx.struct_gep(val, 0));
}
layout::Univariant { .. }
| layout::UntaggedUnion { .. }
@@ -449,10 +455,10 @@ pub fn trans_set_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>,
assert_eq!(to, Disr(0));
}
layout::RawNullablePointer { nndiscr, .. } => {
- let nnty = compute_fields(bcx.ccx(), t, nndiscr as usize, false)[0];
+ let nnty = compute_fields(bcx.ccx, t, nndiscr as usize, false)[0];
if to.0 != nndiscr {
- let llptrty = type_of::sizing_type_of(bcx.ccx(), nnty);
- Store(bcx, C_null(llptrty), val);
+ let llptrty = type_of::sizing_type_of(bcx.ccx, nnty);
+ bcx.store(C_null(llptrty), val);
}
}
layout::StructWrappedNullablePointer { nndiscr, ref discrfield, ref nonnull, .. } => {
@@ -461,17 +467,16 @@ pub fn trans_set_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>,
// Issue #34427: As workaround for LLVM bug on
// ARM, use memset of 0 on whole struct rather
// than storing null to single target field.
- let b = B(bcx);
- let llptr = b.pointercast(val, Type::i8(b.ccx).ptr_to());
- let fill_byte = C_u8(b.ccx, 0);
- let size = C_uint(b.ccx, nonnull.stride().bytes());
- let align = C_i32(b.ccx, nonnull.align.abi() as i32);
- base::call_memset(&b, llptr, fill_byte, size, align, false);
+ let llptr = bcx.pointercast(val, Type::i8(bcx.ccx).ptr_to());
+ let fill_byte = C_u8(bcx.ccx, 0);
+ let size = C_uint(bcx.ccx, nonnull.stride().bytes());
+ let align = C_i32(bcx.ccx, nonnull.align.abi() as i32);
+ base::call_memset(bcx, llptr, fill_byte, size, align, false);
} else {
let path = discrfield.iter().map(|&i| i as usize).collect::<Vec<_>>();
- let llptrptr = GEPi(bcx, val, &path[..]);
+ let llptrptr = bcx.gepi(val, &path[..]);
let llptrty = val_ty(llptrptr).element_type();
- Store(bcx, C_null(llptrty), llptrptr);
+ bcx.store(C_null(llptrty), llptrptr);
}
}
}
@@ -479,7 +484,7 @@ pub fn trans_set_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>,
}
}

- fn target_sets_discr_via_memset<'blk, 'tcx>(bcx: Block<'blk, 'tcx>) -> bool {
+ fn target_sets_discr_via_memset<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>) -> bool {
bcx.sess().target.target.arch == "arm" || bcx.sess().target.target.arch == "aarch64"
}

@@ -492,19 +497,15 @@ fn assert_discr_in_range(min: Disr, max: Disr, discr: Disr) {
}

/// Access a field, at a point when the value's case is known.
- pub fn trans_field_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>,
- val: MaybeSizedValue, discr: Disr, ix: usize) -> ValueRef {
- trans_field_ptr_builder(&bcx.build(), t, val, discr, ix)
- }
-
- /// Access a field, at a point when the value's case is known.
- pub fn trans_field_ptr_builder<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
- t: Ty<'tcx>,
- val: MaybeSizedValue,
- discr: Disr, ix: usize)
- -> ValueRef {
- let l = bcx.ccx().layout_of(t);
- debug!("trans_field_ptr_builder on {} represented as {:#?}", t, l);
+ pub fn trans_field_ptr<'a, 'tcx>(
+ bcx: &BlockAndBuilder<'a, 'tcx>,
+ t: Ty<'tcx>,
+ val: MaybeSizedValue,
+ discr: Disr,
+ ix: usize
+ ) -> ValueRef {
+ let l = bcx.ccx.layout_of(t);
+ debug!("trans_field_ptr on {} represented as {:#?}", t, l);
// Note: if this ever needs to generate conditionals (e.g., if we
// decide to do some kind of cdr-coding-like non-unique repr
// someday), it will need to return a possibly-new bcx as well.
@@ -512,7 +513,7 @@ pub fn trans_field_ptr_builder<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
layout::Univariant { ref variant, .. } => {
assert_eq!(discr, Disr(0));
struct_field_ptr(bcx, &variant,
- &compute_fields(bcx.ccx(), t, 0, false),
+ &compute_fields(bcx.ccx, t, 0, false),
val, ix, false)
}
layout::Vector { count, .. } => {
@@ -521,57 +522,53 @@ pub fn trans_field_ptr_builder<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
bcx.struct_gep(val.value, ix)
}
layout::General { discr: d, ref variants, .. } => {
- let mut fields = compute_fields(bcx.ccx(), t, discr.0 as usize, false);
- fields.insert(0, d.to_ty(&bcx.ccx().tcx(), false));
+ let mut fields = compute_fields(bcx.ccx, t, discr.0 as usize, false);
+ fields.insert(0, d.to_ty(&bcx.ccx.tcx(), false));
struct_field_ptr(bcx, &variants[discr.0 as usize],
&fields,
val, ix + 1, true)
}
layout::UntaggedUnion { .. } => {
- let fields = compute_fields(bcx.ccx(), t, 0, false);
- let ty = type_of::in_memory_type_of(bcx.ccx(), fields[ix]);
- if bcx.is_unreachable() { return C_undef(ty.ptr_to()); }
+ let fields = compute_fields(bcx.ccx, t, 0, false);
+ let ty = type_of::in_memory_type_of(bcx.ccx, fields[ix]);
bcx.pointercast(val.value, ty.ptr_to())
}
layout::RawNullablePointer { nndiscr, .. } |
layout::StructWrappedNullablePointer { nndiscr, .. } if discr.0 != nndiscr => {
- let nullfields = compute_fields(bcx.ccx(), t, (1-nndiscr) as usize, false);
+ let nullfields = compute_fields(bcx.ccx, t, (1-nndiscr) as usize, false);
// The unit-like case might have a nonzero number of unit-like fields.
// (e.d., Result of Either with (), as one side.)
- let ty = type_of::type_of(bcx.ccx(), nullfields[ix]);
- assert_eq!(machine::llsize_of_alloc(bcx.ccx(), ty), 0);
// The contents of memory at this pointer can't matter, but use
// the value that's "reasonable" in case of pointer comparison.
- if bcx.is_unreachable() { return C_undef(ty.ptr_to()); }
+ let ty = type_of::type_of(bcx.ccx, nullfields[ix]);
+ assert_eq!(machine::llsize_of_alloc(bcx.ccx, ty), 0);
bcx.pointercast(val.value, ty.ptr_to())
}
layout::RawNullablePointer { nndiscr, .. } => {
- let nnty = compute_fields(bcx.ccx(), t, nndiscr as usize, false)[0];
+ let nnty = compute_fields(bcx.ccx, t, nndiscr as usize, false)[0];
assert_eq!(ix, 0);
assert_eq!(discr.0, nndiscr);
- let ty = type_of::type_of(bcx.ccx(), nnty);
- if bcx.is_unreachable() { return C_undef(ty.ptr_to()); }
+ let ty = type_of::type_of(bcx.ccx, nnty);
bcx.pointercast(val.value, ty.ptr_to())
}
layout::StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => {
assert_eq!(discr.0, nndiscr);
struct_field_ptr(bcx, &nonnull,
- &compute_fields(bcx.ccx(), t, discr.0 as usize, false),
+ &compute_fields(bcx.ccx, t, discr.0 as usize, false),
val, ix, false)
}
_ => bug!("element access in type without elements: {} represented as {:#?}", t, l)
}
}

- fn struct_field_ptr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
- st: &layout::Struct, fields: &Vec<Ty<'tcx>>, val: MaybeSizedValue,
- ix: usize, needs_cast: bool) -> ValueRef {
+ fn struct_field_ptr<'a, 'tcx>(
+ bcx: &BlockAndBuilder<'a, 'tcx>,
+ st: &layout::Struct,
+ fields: &Vec<Ty<'tcx>>,
+ val: MaybeSizedValue,
+ ix: usize,
+ needs_cast: bool
+ ) -> ValueRef {
let fty = fields[ix];
- let ccx = bcx.ccx();
- let ll_fty = type_of::in_memory_type_of(bcx.ccx(), fty);
- if bcx.is_unreachable() {
- return C_undef(ll_fty.ptr_to());
- }
+ let ccx = bcx.ccx;

let ptr_val = if needs_cast {
let fields = st.field_index_by_increasing_offset().map(|i| {
@@ -587,7 +584,8 @@ fn struct_field_ptr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
// * First field - Always aligned properly
// * Packed struct - There is no alignment padding
// * Field is sized - pointer is properly aligned already
- if st.offsets[ix] == layout::Size::from_bytes(0) || st.packed || type_is_sized(bcx.tcx(), fty) {
+ if st.offsets[ix] == layout::Size::from_bytes(0) || st.packed ||
+ bcx.ccx.shared().type_is_sized(fty) {
return bcx.struct_gep(ptr_val, st.memory_index[ix] as usize);
}

@@ -607,8 +605,6 @@ fn struct_field_ptr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
return bcx.struct_gep(ptr_val, ix);
}

- let dbloc = DebugLoc::None;
-
// We need to get the pointer manually now.
// We do this by casting to a *i8, then offsetting it by the appropriate amount.
// We do this instead of, say, simply adjusting the pointer from the result of a GEP
@@ -628,7 +624,7 @@ fn struct_field_ptr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,


let offset = st.offsets[ix].bytes();
- let unaligned_offset = C_uint(bcx.ccx(), offset);
+ let unaligned_offset = C_uint(bcx.ccx, offset);

// Get the alignment of the field
let (_, align) = glue::size_and_align_of_dst(bcx, fty, meta);
@@ -639,19 +635,18 @@ fn struct_field_ptr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
// (unaligned offset + (align - 1)) & -align

// Calculate offset
- dbloc.apply(bcx.fcx());
- let align_sub_1 = bcx.sub(align, C_uint(bcx.ccx(), 1u64));
+ let align_sub_1 = bcx.sub(align, C_uint(bcx.ccx, 1u64));
let offset = bcx.and(bcx.add(unaligned_offset, align_sub_1),
bcx.neg(align));

debug!("struct_field_ptr: DST field offset: {:?}", Value(offset));

// Cast and adjust pointer
- let byte_ptr = bcx.pointercast(ptr_val, Type::i8p(bcx.ccx()));
+ let byte_ptr = bcx.pointercast(ptr_val, Type::i8p(bcx.ccx));
let byte_ptr = bcx.gep(byte_ptr, &[offset]);

// Finally, cast back to the type expected
- let ll_fty = type_of::in_memory_type_of(bcx.ccx(), fty);
+ let ll_fty = type_of::in_memory_type_of(bcx.ccx, fty);
debug!("struct_field_ptr: Field type is {:?}", ll_fty);
bcx.pointercast(byte_ptr, ll_fty.ptr_to())
}

@@ -12,7 +12,6 @@

use llvm::{self, ValueRef};
use base;
- use build::*;
use common::*;
use type_of;
use type_::Type;
@@ -25,10 +24,12 @@ use syntax::ast::AsmDialect;
use libc::{c_uint, c_char};

// Take an inline assembly expression and splat it out via LLVM
- pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- ia: &hir::InlineAsm,
- outputs: Vec<(ValueRef, Ty<'tcx>)>,
- mut inputs: Vec<ValueRef>) {
+ pub fn trans_inline_asm<'a, 'tcx>(
+ bcx: &BlockAndBuilder<'a, 'tcx>,
+ ia: &hir::InlineAsm,
+ outputs: Vec<(ValueRef, Ty<'tcx>)>,
+ mut inputs: Vec<ValueRef>
+ ) {
let mut ext_constraints = vec![];
let mut output_types = vec![];

@@ -47,7 +48,7 @@ pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
if out.is_indirect {
indirect_outputs.push(val.unwrap());
} else {
- output_types.push(type_of::type_of(bcx.ccx(), ty));
+ output_types.push(type_of::type_of(bcx.ccx, ty));
}
}
if !indirect_outputs.is_empty() {
@@ -78,9 +79,9 @@ pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
// Depending on how many outputs we have, the return type is different
let num_outputs = output_types.len();
let output_type = match num_outputs {
- 0 => Type::void(bcx.ccx()),
+ 0 => Type::void(bcx.ccx),
1 => output_types[0],
- _ => Type::struct_(bcx.ccx(), &output_types[..], false)
+ _ => Type::struct_(bcx.ccx, &output_types[..], false)
};

let dialect = match ia.dialect {
@@ -90,32 +91,33 @@ pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,

let asm = CString::new(ia.asm.as_str().as_bytes()).unwrap();
let constraint_cstr = CString::new(all_constraints).unwrap();
- let r = InlineAsmCall(bcx,
- asm.as_ptr(),
- constraint_cstr.as_ptr(),
- &inputs,
- output_type,
- ia.volatile,
- ia.alignstack,
- dialect);
+ let r = bcx.inline_asm_call(
+ asm.as_ptr(),
+ constraint_cstr.as_ptr(),
+ &inputs,
+ output_type,
+ ia.volatile,
+ ia.alignstack,
+ dialect
+ );

// Again, based on how many outputs we have
let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect);
for (i, (_, &(val, _))) in outputs.enumerate() {
- let v = if num_outputs == 1 { r } else { ExtractValue(bcx, r, i) };
- Store(bcx, v, val);
+ let v = if num_outputs == 1 { r } else { bcx.extract_value(r, i) };
+ bcx.store(v, val);
}

// Store expn_id in a metadata node so we can map LLVM errors
// back to source locations. See #17552.
unsafe {
let key = "srcloc";
- let kind = llvm::LLVMGetMDKindIDInContext(bcx.ccx().llcx(),
+ let kind = llvm::LLVMGetMDKindIDInContext(bcx.ccx.llcx(),
key.as_ptr() as *const c_char, key.len() as c_uint);

- let val: llvm::ValueRef = C_i32(bcx.ccx(), ia.expn_id.into_u32() as i32);
+ let val: llvm::ValueRef = C_i32(bcx.ccx, ia.expn_id.into_u32() as i32);

llvm::LLVMSetMetadata(r, kind,
- llvm::LLVMMDNodeInContext(bcx.ccx().llcx(), &val, 1));
+ llvm::LLVMMDNodeInContext(bcx.ccx.llcx(), &val, 1));
}
}

[File diff suppressed because it is too large]
@@ -1,58 +0,0 @@
- // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
- // file at the top-level directory of this distribution and at
- // http://rust-lang.org/COPYRIGHT.
- //
- // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
- // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
- // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
- // option. This file may not be copied, modified, or distributed
- // except according to those terms.
-
- use llvm;
- use llvm::BasicBlockRef;
- use value::{Users, Value};
- use std::iter::{Filter, Map};
-
- #[derive(Copy, Clone)]
- pub struct BasicBlock(pub BasicBlockRef);
-
- pub type Preds = Map<Filter<Users, fn(&Value) -> bool>, fn(Value) -> BasicBlock>;
-
- /// Wrapper for LLVM BasicBlockRef
- impl BasicBlock {
- pub fn get(&self) -> BasicBlockRef {
- let BasicBlock(v) = *self; v
- }
-
- pub fn as_value(self) -> Value {
- unsafe {
- Value(llvm::LLVMBasicBlockAsValue(self.get()))
- }
- }
-
- pub fn pred_iter(self) -> Preds {
- fn is_a_terminator_inst(user: &Value) -> bool { user.is_a_terminator_inst() }
- let is_a_terminator_inst: fn(&Value) -> bool = is_a_terminator_inst;
-
- fn get_parent(user: Value) -> BasicBlock { user.get_parent().unwrap() }
- let get_parent: fn(Value) -> BasicBlock = get_parent;
-
- self.as_value().user_iter()
- .filter(is_a_terminator_inst)
- .map(get_parent)
- }
-
- pub fn get_single_predecessor(self) -> Option<BasicBlock> {
- let mut iter = self.pred_iter();
- match (iter.next(), iter.next()) {
- (Some(first), None) => Some(first),
- _ => None
- }
- }
-
- pub fn delete(self) {
- unsafe {
- llvm::LLVMDeleteBasicBlock(self.0);
- }
- }
- }
[File diff suppressed because it is too large]
@@ -14,12 +14,10 @@ use llvm;
use llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect};
use llvm::{Opcode, IntPredicate, RealPredicate, False, OperandBundleDef};
use llvm::{ValueRef, BasicBlockRef, BuilderRef, ModuleRef};
- use base;
use common::*;
use machine::llalign_of_pref;
use type_::Type;
use value::Value;
- use util::nodemap::FxHashMap;
use libc::{c_uint, c_char};

use std::borrow::Cow;
@@ -32,65 +30,40 @@ pub struct Builder<'a, 'tcx: 'a> {
pub ccx: &'a CrateContext<'a, 'tcx>,
}

+ impl<'a, 'tcx> Drop for Builder<'a, 'tcx> {
+ fn drop(&mut self) {
+ unsafe {
+ llvm::LLVMDisposeBuilder(self.llbuilder);
+ }
+ }
+ }
+
// This is a really awful way to get a zero-length c-string, but better (and a
// lot more efficient) than doing str::as_c_str("", ...) every time.
- pub fn noname() -> *const c_char {
+ fn noname() -> *const c_char {
static CNULL: c_char = 0;
&CNULL
}

impl<'a, 'tcx> Builder<'a, 'tcx> {
- pub fn new(ccx: &'a CrateContext<'a, 'tcx>) -> Builder<'a, 'tcx> {
+ pub fn with_ccx(ccx: &'a CrateContext<'a, 'tcx>) -> Self {
+ // Create a fresh builder from the crate context.
+ let llbuilder = unsafe {
+ llvm::LLVMCreateBuilderInContext(ccx.llcx())
+ };
Builder {
- llbuilder: ccx.raw_builder(),
+ llbuilder: llbuilder,
ccx: ccx,
}
}

- pub fn count_insn(&self, category: &str) {
+ fn count_insn(&self, category: &str) {
if self.ccx.sess().trans_stats() {
- self.ccx.stats().n_llvm_insns.set(self.ccx
- .stats()
- .n_llvm_insns
- .get() + 1);
+ self.ccx.stats().n_llvm_insns.set(self.ccx.stats().n_llvm_insns.get() + 1);
}
self.ccx.count_llvm_insn();
if self.ccx.sess().count_llvm_insns() {
- base::with_insn_ctxt(|v| {
- let mut h = self.ccx.stats().llvm_insns.borrow_mut();
-
- // Build version of path with cycles removed.
-
- // Pass 1: scan table mapping str -> rightmost pos.
- let mut mm = FxHashMap();
- let len = v.len();
- let mut i = 0;
- while i < len {
- mm.insert(v[i], i);
- i += 1;
- }
-
- // Pass 2: concat strings for each elt, skipping
- // forwards over any cycles by advancing to rightmost
- // occurrence of each element in path.
- let mut s = String::from(".");
- i = 0;
- while i < len {
- i = mm[v[i]];
- s.push('/');
- s.push_str(v[i]);
- i += 1;
- }
-
- s.push('/');
- s.push_str(category);
-
- let n = match h.get(&s) {
- Some(&n) => n,
- _ => 0
- };
- h.insert(s, n+1);
- })
+ let mut h = self.ccx.stats().llvm_insns.borrow_mut();
+ *h.entry(category.to_string()).or_insert(0) += 1;
}
}

@@ -462,7 +435,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
}
}

- pub fn alloca(&self, ty: Type, name: &str) -> ValueRef {
+ pub fn dynamic_alloca(&self, ty: Type, name: &str) -> ValueRef {
self.count_insn("alloca");
unsafe {
if name.is_empty() {
@@ -1103,6 +1076,20 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
}
}

+ pub fn add_case(&self, s: ValueRef, on_val: ValueRef, dest: BasicBlockRef) {
+ unsafe {
+ if llvm::LLVMIsUndef(s) == llvm::True { return; }
+ llvm::LLVMAddCase(s, on_val, dest)
+ }
+ }
+
+ pub fn add_incoming_to_phi(&self, phi: ValueRef, val: ValueRef, bb: BasicBlockRef) {
+ unsafe {
+ if llvm::LLVMIsUndef(phi) == llvm::True { return; }
+ llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint);
+ }
+ }
+
/// Returns the ptr value that should be used for storing `val`.
fn check_store<'b>(&self,
val: ValueRef,

@@ -8,8 +8,6 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.

- #![allow(non_upper_case_globals)]
-
use llvm::{Integer, Pointer, Float, Double, Struct, Array, Vector};
use abi::{self, align_up_to, FnType, ArgType};
use context::CrateContext;
@@ -16,7 +16,6 @@

pub use self::CalleeData::*;

- use arena::TypedArena;
use llvm::{self, ValueRef, get_params};
use rustc::hir::def_id::DefId;
use rustc::ty::subst::Substs;
@@ -25,10 +24,10 @@ use abi::{Abi, FnType};
use attributes;
use base;
use base::*;
- use build::*;
- use common::{self, Block, Result, CrateContext, FunctionContext, SharedCrateContext};
+ use common::{
+ self, CrateContext, FunctionContext, SharedCrateContext
+ };
use consts;
- use debuginfo::DebugLoc;
use declare;
use value::Value;
use meth;
@@ -71,25 +70,8 @@ impl<'tcx> Callee<'tcx> {
}
}

- /// Trait or impl method call.
- pub fn method_call<'blk>(bcx: Block<'blk, 'tcx>,
- method_call: ty::MethodCall)
- -> Callee<'tcx> {
- let method = bcx.tcx().tables().method_map[&method_call];
- Callee::method(bcx, method)
- }
-
- /// Trait or impl method.
- pub fn method<'blk>(bcx: Block<'blk, 'tcx>,
- method: ty::MethodCallee<'tcx>) -> Callee<'tcx> {
- let substs = bcx.fcx.monomorphize(&method.substs);
- Callee::def(bcx.ccx(), method.def_id, substs)
- }
-
/// Function or method definition.
- pub fn def<'a>(ccx: &CrateContext<'a, 'tcx>,
- def_id: DefId,
- substs: &'tcx Substs<'tcx>)
+ pub fn def<'a>(ccx: &CrateContext<'a, 'tcx>, def_id: DefId, substs: &'tcx Substs<'tcx>)
-> Callee<'tcx> {
let tcx = ccx.tcx();

@@ -196,25 +178,6 @@ impl<'tcx> Callee<'tcx> {
fn_ty
}

- /// This behemoth of a function translates function calls. Unfortunately, in
- /// order to generate more efficient LLVM output at -O0, it has quite a complex
- /// signature (refactoring this into two functions seems like a good idea).
- ///
- /// In particular, for lang items, it is invoked with a dest of None, and in
- /// that case the return value contains the result of the fn. The lang item must
- /// not return a structural type or else all heck breaks loose.
- ///
- /// For non-lang items, `dest` is always Some, and hence the result is written
- /// into memory somewhere. Nonetheless we return the actual return value of the
- /// function.
- pub fn call<'a, 'blk>(self, bcx: Block<'blk, 'tcx>,
- debug_loc: DebugLoc,
- args: &[ValueRef],
- dest: Option<ValueRef>)
- -> Result<'blk, 'tcx> {
- trans_call_inner(bcx, debug_loc, self, args, dest)
- }
-
/// Turn the callee into a function pointer.
pub fn reify<'a>(self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef {
match self.data {
@@ -267,8 +230,6 @@ fn trans_closure_method<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>,
// then adapt the self type
let llfn_closure_kind = ccx.tcx().closure_kind(def_id);

- let _icx = push_ctxt("trans_closure_adapter_shim");
-
debug!("trans_closure_adapter_shim(llfn_closure_kind={:?}, \
trait_closure_kind={:?}, llfn={:?})",
llfn_closure_kind, trait_closure_kind, Value(llfn));
@@ -367,23 +328,28 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>(
let lloncefn = declare::define_internal_fn(ccx, &function_name, llonce_fn_ty);
attributes::set_frame_pointer_elimination(ccx, lloncefn);

- let (block_arena, fcx): (TypedArena<_>, FunctionContext);
- block_arena = TypedArena::new();
- fcx = FunctionContext::new(ccx, lloncefn, fn_ty, None, &block_arena);
- let mut bcx = fcx.init(false);
+ let orig_fn_ty = fn_ty;
+ let fcx = FunctionContext::new(ccx, lloncefn);
+ let mut bcx = fcx.get_entry_block();
+
+ let callee = Callee {
+ data: Fn(llreffn),
+ ty: llref_fn_ty
+ };

// the first argument (`self`) will be the (by value) closure env.

let mut llargs = get_params(fcx.llfn);
- let mut self_idx = fcx.fn_ty.ret.is_indirect() as usize;
- let env_arg = &fcx.fn_ty.args[0];
+ let fn_ret = callee.ty.fn_ret();
+ let fn_ty = callee.direct_fn_type(bcx.ccx, &[]);
+ let self_idx = fn_ty.ret.is_indirect() as usize;
+ let env_arg = &orig_fn_ty.args[0];
let llenv = if env_arg.is_indirect() {
llargs[self_idx]
} else {
- let scratch = alloc_ty(bcx, closure_ty, "self");
+ let scratch = alloc_ty(&bcx, closure_ty, "self");
let mut llarg_idx = self_idx;
- env_arg.store_fn_arg(&bcx.build(), &mut llarg_idx, scratch);
+ env_arg.store_fn_arg(&bcx, &mut llarg_idx, scratch);
scratch
};

@@ -391,33 +357,37 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>(
// Adjust llargs such that llargs[self_idx..] has the call arguments.
// For zero-sized closures that means sneaking in a new argument.
if env_arg.is_ignore() {
- if self_idx > 0 {
- self_idx -= 1;
- llargs[self_idx] = llenv;
- } else {
- llargs.insert(0, llenv);
- }
+ llargs.insert(self_idx, llenv);
} else {
llargs[self_idx] = llenv;
}

- let dest = fcx.llretslotptr.get();
-
- let callee = Callee {
- data: Fn(llreffn),
- ty: llref_fn_ty
- };
-
// Call the by-ref closure body with `self` in a cleanup scope,
// to drop `self` when the body returns, or in case it unwinds.
- let self_scope = fcx.push_custom_cleanup_scope();
- fcx.schedule_drop_mem(self_scope, llenv, closure_ty);
+ let self_scope = fcx.schedule_drop_mem(llenv, closure_ty);

- bcx = callee.call(bcx, DebugLoc::None, &llargs[self_idx..], dest).bcx;
+ let llfn = callee.reify(bcx.ccx);
+ let llret;
+ if let Some(landing_pad) = self_scope.landing_pad {
+ let normal_bcx = bcx.fcx().build_new_block("normal-return");
+ llret = bcx.invoke(llfn, &llargs[..], normal_bcx.llbb(), landing_pad, None);
+ bcx = normal_bcx;
+ } else {
+ llret = bcx.call(llfn, &llargs[..], None);
+ }
+ fn_ty.apply_attrs_callsite(llret);

- fcx.pop_and_trans_custom_cleanup_scope(bcx, self_scope);
+ if fn_ret.0.is_never() {
+ bcx.unreachable();
+ } else {
+ self_scope.trans(&bcx);

- fcx.finish(bcx, DebugLoc::None);
+ if fn_ty.ret.is_indirect() || fn_ty.ret.is_ignore() {
+ bcx.ret_void();
+ } else {
+ bcx.ret(llret);
+ }
+ }

ccx.instances().borrow_mut().insert(method_instance, lloncefn);

@@ -443,7 +413,6 @@ fn trans_fn_pointer_shim<'a, 'tcx>(
bare_fn_ty: Ty<'tcx>)
-> ValueRef
{
- let _icx = push_ctxt("trans_fn_pointer_shim");
let tcx = ccx.tcx();

// Normalize the type for better caching.
@@ -519,32 +488,39 @@ fn trans_fn_pointer_shim<'a, 'tcx>(
let llfn = declare::define_internal_fn(ccx, &function_name, tuple_fn_ty);
attributes::set_frame_pointer_elimination(ccx, llfn);
//
- let (block_arena, fcx): (TypedArena<_>, FunctionContext);
- block_arena = TypedArena::new();
- fcx = FunctionContext::new(ccx, llfn, fn_ty, None, &block_arena);
- let mut bcx = fcx.init(false);
+ let fcx = FunctionContext::new(ccx, llfn);
+ let bcx = fcx.get_entry_block();

- let llargs = get_params(fcx.llfn);
+ let mut llargs = get_params(fcx.llfn);

- let self_idx = fcx.fn_ty.ret.is_indirect() as usize;
+ let self_arg = llargs.remove(fn_ty.ret.is_indirect() as usize);
let llfnpointer = llfnpointer.unwrap_or_else(|| {
// the first argument (`self`) will be ptr to the fn pointer
if is_by_ref {
- Load(bcx, llargs[self_idx])
+ bcx.load(self_arg)
} else {
- llargs[self_idx]
+ self_arg
}
});

- let dest = fcx.llretslotptr.get();
-
let callee = Callee {
data: Fn(llfnpointer),
ty: bare_fn_ty
};
- bcx = callee.call(bcx, DebugLoc::None, &llargs[(self_idx + 1)..], dest).bcx;
+ let fn_ret = callee.ty.fn_ret();
+ let fn_ty = callee.direct_fn_type(ccx, &[]);
+ let llret = bcx.call(llfnpointer, &llargs, None);
+ fn_ty.apply_attrs_callsite(llret);

- fcx.finish(bcx, DebugLoc::None);
+ if fn_ret.0.is_never() {
+ bcx.unreachable();
+ } else {
+ if fn_ty.ret.is_indirect() || fn_ty.ret.is_ignore() {
+ bcx.ret_void();
+ } else {
+ bcx.ret(llret);
+ }
+ }

ccx.fn_pointer_shims().borrow_mut().insert(bare_fn_ty_maybe_ref, llfn);

@@ -649,87 +625,3 @@ fn get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,

(llfn, fn_ty)
}
-
- // ______________________________________________________________________
- // Translating calls
-
- fn trans_call_inner<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- debug_loc: DebugLoc,
- callee: Callee<'tcx>,
- args: &[ValueRef],
- opt_llretslot: Option<ValueRef>)
- -> Result<'blk, 'tcx> {
- // Introduce a temporary cleanup scope that will contain cleanups
- // for the arguments while they are being evaluated. The purpose
- // this cleanup is to ensure that, should a panic occur while
- // evaluating argument N, the values for arguments 0...N-1 are all
- // cleaned up. If no panic occurs, the values are handed off to
- // the callee, and hence none of the cleanups in this temporary
- // scope will ever execute.
- let fcx = bcx.fcx;
- let ccx = fcx.ccx;
-
- let fn_ret = callee.ty.fn_ret();
- let fn_ty = callee.direct_fn_type(ccx, &[]);
-
- let mut callee = match callee.data {
- NamedTupleConstructor(_) | Intrinsic => {
- bug!("{:?} calls should not go through Callee::call", callee);
- }
- f => f
- };
-
- // If there no destination, return must be direct, with no cast.
- if opt_llretslot.is_none() {
- assert!(!fn_ty.ret.is_indirect() && fn_ty.ret.cast.is_none());
- }
-
- let mut llargs = Vec::new();
-
- if fn_ty.ret.is_indirect() {
- let mut llretslot = opt_llretslot.unwrap();
- if let Some(ty) = fn_ty.ret.cast {
- llretslot = PointerCast(bcx, llretslot, ty.ptr_to());
- }
- llargs.push(llretslot);
- }
-
- match callee {
- Virtual(idx) => {
- llargs.push(args[0]);
-
- let fn_ptr = meth::get_virtual_method(bcx, args[1], idx);
- let llty = fn_ty.llvm_type(bcx.ccx()).ptr_to();
- callee = Fn(PointerCast(bcx, fn_ptr, llty));
- llargs.extend_from_slice(&args[2..]);
- }
- _ => llargs.extend_from_slice(args)
- }
-
- let llfn = match callee {
- Fn(f) => f,
- _ => bug!("expected fn pointer callee, found {:?}", callee)
- };
-
- let (llret, bcx) = base::invoke(bcx, llfn, &llargs, debug_loc);
- if !bcx.unreachable.get() {
- fn_ty.apply_attrs_callsite(llret);
-
- // If the function we just called does not use an outpointer,
- // store the result into the rust outpointer. Cast the outpointer
- // type to match because some ABIs will use a different type than
- // the Rust type. e.g., a {u32,u32} struct could be returned as
- // u64.
- if !fn_ty.ret.is_indirect() {
- if let Some(llretslot) = opt_llretslot {
- fn_ty.ret.store(&bcx.build(), llret, llretslot);
- }
- }
- }
-
- if fn_ret.0.is_never() {
- Unreachable(bcx);
- }
-
- Result::new(bcx, llret)
- }

@ -11,216 +11,100 @@
|
||||
//! ## The Cleanup module
|
||||
//!
|
||||
//! The cleanup module tracks what values need to be cleaned up as scopes
|
||||
//! are exited, either via panic or just normal control flow. The basic
|
||||
//! idea is that the function context maintains a stack of cleanup scopes
|
||||
//! that are pushed/popped as we traverse the AST tree. There is typically
|
||||
//! at least one cleanup scope per AST node; some AST nodes may introduce
|
||||
//! additional temporary scopes.
|
||||
//! are exited, either via panic or just normal control flow.
|
||||
//!
|
||||
//! Cleanup items can be scheduled into any of the scopes on the stack.
|
||||
//! Typically, when a scope is popped, we will also generate the code for
|
||||
//! each of its cleanups at that time. This corresponds to a normal exit
|
||||
//! from a block (for example, an expression completing evaluation
|
||||
//! successfully without panic). However, it is also possible to pop a
|
||||
//! block *without* executing its cleanups; this is typically used to
|
||||
//! guard intermediate values that must be cleaned up on panic, but not
|
||||
//! if everything goes right. See the section on custom scopes below for
|
||||
//! more details.
|
||||
//!
|
||||
//! Cleanup scopes come in three kinds:
|
||||
//!
|
||||
//! - **AST scopes:** each AST node in a function body has a corresponding
|
||||
//! AST scope. We push the AST scope when we start generate code for an AST
|
||||
//! node and pop it once the AST node has been fully generated.
|
||||
//! - **Loop scopes:** loops have an additional cleanup scope. Cleanups are
|
||||
//! never scheduled into loop scopes; instead, they are used to record the
|
||||
//! basic blocks that we should branch to when a `continue` or `break` statement
|
||||
//! is encountered.
|
||||
//! - **Custom scopes:** custom scopes are typically used to ensure cleanup
|
||||
//! of intermediate values.
|
||||
//!
|
||||
//! ### When to schedule cleanup
|
||||
//!
|
||||
//! Although the cleanup system is intended to *feel* fairly declarative,
|
||||
//! it's still important to time calls to `schedule_clean()` correctly.
|
||||
//! Basically, you should not schedule cleanup for memory until it has
|
||||
//! been initialized, because if an unwind should occur before the memory
|
||||
//! is fully initialized, then the cleanup will run and try to free or
|
||||
//! drop uninitialized memory. If the initialization itself produces
|
||||
//! byproducts that need to be freed, then you should use temporary custom
|
||||
//! scopes to ensure that those byproducts will get freed on unwind. For
|
||||
//! example, an expression like `box foo()` will first allocate a box in the
|
||||
//! heap and then call `foo()` -- if `foo()` should panic, this box needs
|
||||
//! to be *shallowly* freed.
|
||||
//!
|
||||
//! ### Long-distance jumps
|
||||
//!
|
||||
//! In addition to popping a scope, which corresponds to normal control
|
||||
//! flow exiting the scope, we may also *jump out* of a scope into some
|
||||
//! earlier scope on the stack. This can occur in response to a `return`,
|
||||
//! `break`, or `continue` statement, but also in response to panic. In
|
||||
//! any of these cases, we will generate a series of cleanup blocks for
|
||||
//! each of the scopes that is exited. So, if the stack contains scopes A
|
||||
//! ... Z, and we break out of a loop whose corresponding cleanup scope is
|
||||
//! X, we would generate cleanup blocks for the cleanups in X, Y, and Z.
|
||||
//! After cleanup is done we would branch to the exit point for scope X.
|
||||
//! But if panic should occur, we would generate cleanups for all the
|
||||
//! scopes from A to Z and then resume the unwind process afterwards.
|
||||
//!
|
||||
//! To avoid generating tons of code, we cache the cleanup blocks that we
|
||||
//! create for breaks, returns, unwinds, and other jumps. Whenever a new
|
||||
//! cleanup is scheduled, though, we must clear these cached blocks. A
|
||||
//! possible improvement would be to keep the cached blocks but simply
|
||||
//! generate a new block which performs the additional cleanup and then
|
||||
//! branches to the existing cached blocks.
|
||||
//!
|
||||
//! ### AST and loop cleanup scopes
|
||||
//!
|
||||
//! AST cleanup scopes are pushed when we begin and end processing an AST
|
||||
//! node. They are used to house cleanups related to rvalue temporary that
|
||||
//! get referenced (e.g., due to an expression like `&Foo()`). Whenever an
|
||||
//! AST scope is popped, we always trans all the cleanups, adding the cleanup
|
||||
//! code after the postdominator of the AST node.
|
||||
//!
|
||||
//! AST nodes that represent breakable loops also push a loop scope; the
|
||||
//! loop scope never has any actual cleanups, it's just used to point to
|
||||
//! the basic blocks where control should flow after a "continue" or
|
||||
//! "break" statement. Popping a loop scope never generates code.
|
||||
//!
|
||||
//! ### Custom cleanup scopes
|
||||
//!
|
||||
//! Custom cleanup scopes are used for a variety of purposes. The most
|
||||
//! common though is to handle temporary byproducts, where cleanup only
|
||||
//! needs to occur on panic. The general strategy is to push a custom
|
||||
//! cleanup scope, schedule *shallow* cleanups into the custom scope, and
|
||||
//! then pop the custom scope (without transing the cleanups) when
|
||||
//! execution succeeds normally. This way the cleanups are only trans'd on
|
||||
//! unwind, and only up until the point where execution succeeded, at
|
||||
//! which time the complete value should be stored in an lvalue or some
|
||||
//! other place where normal cleanup applies.
|
||||
//!
|
||||
//! To spell it out, here is an example. Imagine an expression `box expr`.
|
||||
//! We would basically:
|
||||
//!
|
||||
//! 1. Push a custom cleanup scope C.
|
||||
//! 2. Allocate the box.
|
||||
//! 3. Schedule a shallow free in the scope C.
|
||||
//! 4. Trans `expr` into the box.
|
||||
//! 5. Pop the scope C.
|
||||
//! 6. Return the box as an rvalue.
|
||||
//!
|
||||
//! This way, if a panic occurs while transing `expr`, the custom
|
||||
//! cleanup scope C is pushed and hence the box will be freed. The trans
|
||||
//! code for `expr` itself is responsible for freeing any other byproducts
|
||||
//! that may be in play.
|
||||
|
||||
pub use self::EarlyExitLabel::*;
|
||||
//! Typically, when a scope is finished, we generate the cleanup code. This
|
||||
//! corresponds to a normal exit from a block (for example, an expression
|
||||
//! completing evaluation successfully without panic).
|
||||
|
||||
use llvm::{BasicBlockRef, ValueRef};
|
||||
use base;
|
||||
use build;
|
||||
use common;
|
||||
use common::{Block, FunctionContext, LandingPad};
|
||||
use debuginfo::{DebugLoc};
|
||||
use common::{BlockAndBuilder, FunctionContext, Funclet};
|
||||
use glue;
|
||||
use type_::Type;
|
||||
use value::Value;
|
||||
use rustc::ty::Ty;
|
||||
|
||||
pub struct CleanupScope<'tcx> {
|
||||
// Cleanups to run upon scope exit.
|
||||
cleanups: Vec<DropValue<'tcx>>,
|
||||
// Cleanup to run upon scope exit.
|
||||
cleanup: Option<DropValue<'tcx>>,
|
||||
|
||||
// The debug location any drop calls generated for this scope will be
|
||||
// associated with.
|
||||
debug_loc: DebugLoc,
|
||||
|
||||
cached_early_exits: Vec<CachedEarlyExit>,
|
||||
cached_landing_pad: Option<BasicBlockRef>,
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
pub struct CustomScopeIndex {
|
||||
index: usize
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, PartialEq, Debug)]
|
||||
pub enum EarlyExitLabel {
|
||||
UnwindExit(UnwindKind),
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
pub enum UnwindKind {
|
||||
LandingPad,
|
||||
CleanupPad(ValueRef),
|
||||
// Computed on creation if compiling with landing pads (!sess.no_landing_pads)
|
||||
pub landing_pad: Option<BasicBlockRef>,
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone)]
|
||||
pub struct CachedEarlyExit {
|
||||
label: EarlyExitLabel,
|
||||
cleanup_block: BasicBlockRef,
|
||||
last_cleanup: usize,
|
||||
pub struct DropValue<'tcx> {
|
||||
val: ValueRef,
|
||||
ty: Ty<'tcx>,
|
||||
skip_dtor: bool,
|
||||
}
|
||||
|
||||
impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
|
||||
pub fn push_custom_cleanup_scope(&self) -> CustomScopeIndex {
|
||||
let index = self.scopes_len();
|
||||
debug!("push_custom_cleanup_scope(): {}", index);
|
||||
|
||||
// Just copy the debuginfo source location from the enclosing scope
|
||||
let debug_loc = self.scopes
|
||||
.borrow()
|
||||
.last()
|
||||
.map(|opt_scope| opt_scope.debug_loc)
|
||||
.unwrap_or(DebugLoc::None);
|
||||
|
||||
self.push_scope(CleanupScope::new(debug_loc));
|
||||
CustomScopeIndex { index: index }
|
||||
impl<'tcx> DropValue<'tcx> {
|
||||
fn trans<'a>(&self, funclet: Option<&'a Funclet>, bcx: &BlockAndBuilder<'a, 'tcx>) {
|
||||
glue::call_drop_glue(bcx, self.val, self.ty, self.skip_dtor, funclet)
|
||||
}
|
||||
|
||||
/// Removes the top cleanup scope from the stack without executing its cleanups. The top
|
||||
/// cleanup scope must be the temporary scope `custom_scope`.
|
||||
pub fn pop_custom_cleanup_scope(&self,
|
||||
custom_scope: CustomScopeIndex) {
|
||||
debug!("pop_custom_cleanup_scope({})", custom_scope.index);
|
||||
assert!(self.is_valid_to_pop_custom_scope(custom_scope));
|
||||
let _ = self.pop_scope();
|
||||
/// Creates a landing pad for the top scope. The landing pad will perform all cleanups necessary
|
||||
/// for an unwind and then `resume` to continue error propagation:
|
||||
///
|
||||
/// landing_pad -> ... cleanups ... -> [resume]
|
||||
///
|
||||
/// This should only be called once per function, as it creates an alloca for the landingpad.
|
||||
fn get_landing_pad<'a>(&self, fcx: &FunctionContext<'a, 'tcx>) -> BasicBlockRef {
|
||||
debug!("get_landing_pad");
|
||||
let bcx = fcx.build_new_block("cleanup_unwind");
|
||||
let llpersonality = bcx.ccx.eh_personality();
|
||||
bcx.set_personality_fn(llpersonality);
|
||||
|
||||
if base::wants_msvc_seh(fcx.ccx.sess()) {
|
||||
let pad = bcx.cleanup_pad(None, &[]);
|
||||
let funclet = Some(Funclet::new(pad));
|
||||
self.trans(funclet.as_ref(), &bcx);
|
||||
|
||||
bcx.cleanup_ret(pad, None);
|
||||
} else {
|
||||
// The landing pad return type (the type being propagated). Not sure
|
||||
// what this represents but it's determined by the personality
|
||||
// function and this is what the EH proposal example uses.
|
||||
let llretty = Type::struct_(fcx.ccx, &[Type::i8p(fcx.ccx), Type::i32(fcx.ccx)], false);
|
||||
|
||||
// The only landing pad clause will be 'cleanup'
|
||||
let llretval = bcx.landing_pad(llretty, llpersonality, 1, bcx.fcx().llfn);
|
||||
|
||||
// The landing pad block is a cleanup
|
||||
bcx.set_cleanup(llretval);
|
||||
|
||||
// Insert cleanup instructions into the cleanup block
|
||||
self.trans(None, &bcx);
|
||||
|
||||
if !bcx.sess().target.target.options.custom_unwind_resume {
|
||||
bcx.resume(llretval);
|
||||
} else {
|
||||
let exc_ptr = bcx.extract_value(llretval, 0);
|
||||
bcx.call(bcx.ccx.eh_unwind_resume(), &[exc_ptr], None);
|
||||
bcx.unreachable();
|
||||
}
|
||||
}
|
||||
|
||||
bcx.llbb()
|
||||
}
|
||||
}

/// Removes the top cleanup scope from the stack, which must be a temporary scope, and
/// generates the code to do its cleanups for normal exit.
pub fn pop_and_trans_custom_cleanup_scope(&self,
bcx: Block<'blk, 'tcx>,
custom_scope: CustomScopeIndex)
-> Block<'blk, 'tcx> {
debug!("pop_and_trans_custom_cleanup_scope({:?})", custom_scope);
assert!(self.is_valid_to_pop_custom_scope(custom_scope));

let scope = self.pop_scope();
self.trans_scope_cleanups(bcx, &scope)
}

/// Schedules a (deep) drop of `val`, which is a pointer to an instance of
/// `ty`
pub fn schedule_drop_mem(&self,
cleanup_scope: CustomScopeIndex,
val: ValueRef,
ty: Ty<'tcx>) {
if !self.type_needs_drop(ty) { return; }
impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
/// Schedules a (deep) drop of `val`, which is a pointer to an instance of `ty`
pub fn schedule_drop_mem(&self, val: ValueRef, ty: Ty<'tcx>) -> CleanupScope<'tcx> {
if !self.ccx.shared().type_needs_drop(ty) { return CleanupScope::noop(); }
let drop = DropValue {
is_immediate: false,
val: val,
ty: ty,
skip_dtor: false,
};

debug!("schedule_drop_mem({:?}, val={:?}, ty={:?}) skip_dtor={}",
cleanup_scope,
Value(val),
ty,
drop.skip_dtor);
debug!("schedule_drop_mem(val={:?}, ty={:?}) skip_dtor={}", Value(val), ty, drop.skip_dtor);

self.schedule_clean(cleanup_scope, drop);
CleanupScope::new(self, drop)
}
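
A rough usage sketch of the new scheme (the call site below is hypothetical; only `schedule_drop_mem`, `CleanupScope::trans`, and `build_new_block` come from this patch): the caller now owns the returned `CleanupScope`, keeps it alive across the potentially-unwinding operation, and translates it explicitly on the normal path.

// Hedged sketch only -- `fcx`, `bcx`, `llval`, and `ty` are placeholders.
let scope = fcx.schedule_drop_mem(llval, ty); // may be a noop scope
// ... emit the call that might unwind, using scope's landing pad ...
scope.trans(&bcx); // run the drop on the normal-exit path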

/// Issue #23611: Schedules a (deep) drop of the contents of
@@ -228,477 +112,46 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
/// `ty`. The scheduled code handles extracting the discriminant
/// and dropping the contents associated with that variant
/// *without* executing any associated drop implementation.
pub fn schedule_drop_adt_contents(&self,
cleanup_scope: CustomScopeIndex,
val: ValueRef,
ty: Ty<'tcx>) {
pub fn schedule_drop_adt_contents(&self, val: ValueRef, ty: Ty<'tcx>) -> CleanupScope<'tcx> {
// `if` below could be "!contents_needs_drop"; skipping drop
// is just an optimization, so sound to be conservative.
if !self.type_needs_drop(ty) { return; }
if !self.ccx.shared().type_needs_drop(ty) { return CleanupScope::noop(); }

let drop = DropValue {
is_immediate: false,
val: val,
ty: ty,
skip_dtor: true,
};

debug!("schedule_drop_adt_contents({:?}, val={:?}, ty={:?}) skip_dtor={}",
cleanup_scope,
Value(val),
ty,
drop.skip_dtor);
debug!("schedule_drop_adt_contents(val={:?}, ty={:?}) skip_dtor={}",
Value(val), ty, drop.skip_dtor);

self.schedule_clean(cleanup_scope, drop);
}

/// Schedules a (deep) drop of `val`, which is an instance of `ty`
pub fn schedule_drop_immediate(&self,
cleanup_scope: CustomScopeIndex,
val: ValueRef,
ty: Ty<'tcx>) {

if !self.type_needs_drop(ty) { return; }
let drop = DropValue {
is_immediate: true,
val: val,
ty: ty,
skip_dtor: false,
};

debug!("schedule_drop_immediate({:?}, val={:?}, ty={:?}) skip_dtor={}",
cleanup_scope,
Value(val),
ty,
drop.skip_dtor);

self.schedule_clean(cleanup_scope, drop);
}

/// Schedules a cleanup to occur in the top-most scope, which must be a temporary scope.
fn schedule_clean(&self, custom_scope: CustomScopeIndex, cleanup: DropValue<'tcx>) {
debug!("schedule_clean_in_custom_scope(custom_scope={})",
custom_scope.index);

assert!(self.is_valid_custom_scope(custom_scope));

let mut scopes = self.scopes.borrow_mut();
let scope = &mut (*scopes)[custom_scope.index];
scope.cleanups.push(cleanup);
scope.cached_landing_pad = None;
}

/// Returns true if there are pending cleanups that should execute on panic.
pub fn needs_invoke(&self) -> bool {
self.scopes.borrow().iter().rev().any(|s| s.needs_invoke())
}

/// Returns a basic block to branch to in the event of a panic. This block
/// will run the panic cleanups and eventually resume the exception that
/// caused the landing pad to be run.
pub fn get_landing_pad(&'blk self) -> BasicBlockRef {
let _icx = base::push_ctxt("get_landing_pad");

debug!("get_landing_pad");

let orig_scopes_len = self.scopes_len();
assert!(orig_scopes_len > 0);

// Remove any scopes that do not have cleanups on panic:
let mut popped_scopes = vec![];
while !self.top_scope(|s| s.needs_invoke()) {
debug!("top scope does not need invoke");
popped_scopes.push(self.pop_scope());
}

// Check for an existing landing pad in the new topmost scope:
let llbb = self.get_or_create_landing_pad();

// Push the scopes we removed back on:
loop {
match popped_scopes.pop() {
Some(scope) => self.push_scope(scope),
None => break
}
}

assert_eq!(self.scopes_len(), orig_scopes_len);

return llbb;
}

fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool {
self.is_valid_custom_scope(custom_scope) &&
custom_scope.index == self.scopes.borrow().len() - 1
}

fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool {
let scopes = self.scopes.borrow();
custom_scope.index < scopes.len()
}

/// Generates the cleanups for `scope` into `bcx`
fn trans_scope_cleanups(&self, // cannot borrow self, will recurse
bcx: Block<'blk, 'tcx>,
scope: &CleanupScope<'tcx>) -> Block<'blk, 'tcx> {

let mut bcx = bcx;
if !bcx.unreachable.get() {
for cleanup in scope.cleanups.iter().rev() {
bcx = cleanup.trans(bcx, scope.debug_loc);
}
}
bcx
}

fn scopes_len(&self) -> usize {
self.scopes.borrow().len()
}

fn push_scope(&self, scope: CleanupScope<'tcx>) {
self.scopes.borrow_mut().push(scope)
}

fn pop_scope(&self) -> CleanupScope<'tcx> {
debug!("popping cleanup scope {}, {} scopes remaining",
self.top_scope(|s| s.block_name("")),
self.scopes_len() - 1);

self.scopes.borrow_mut().pop().unwrap()
}

fn top_scope<R, F>(&self, f: F) -> R where F: FnOnce(&CleanupScope<'tcx>) -> R {
f(self.scopes.borrow().last().unwrap())
}

/// Used when the caller wishes to jump to an early exit, such as a return,
/// break, continue, or unwind. This function will generate all cleanups
/// between the top of the stack and the exit `label` and return a basic
/// block that the caller can branch to.
///
/// For example, if the current stack of cleanups were as follows:
///
/// AST 22
/// Custom 1
/// AST 23
/// Loop 23
/// Custom 2
/// AST 24
///
/// and the `label` specifies a break from `Loop 23`, then this function
/// would generate a series of basic blocks as follows:
///
/// Cleanup(AST 24) -> Cleanup(Custom 2) -> break_blk
///
/// where `break_blk` is the block specified in `Loop 23` as the target for
/// breaks. The return value would be the first basic block in that sequence
/// (`Cleanup(AST 24)`). The caller could then branch to `Cleanup(AST 24)`
/// and it will perform all cleanups and finally branch to the `break_blk`.
fn trans_cleanups_to_exit_scope(&'blk self,
label: EarlyExitLabel)
-> BasicBlockRef {
debug!("trans_cleanups_to_exit_scope label={:?} scopes={}",
label, self.scopes_len());

let orig_scopes_len = self.scopes_len();
let mut prev_llbb;
let mut popped_scopes = vec![];
let mut skip = 0;

// First we pop off all the cleanup stacks that are
// traversed until the exit is reached, pushing them
// onto the side vector `popped_scopes`. No code is
// generated at this time.
//
// So, continuing the example from above, we would wind up
// with a `popped_scopes` vector of `[AST 24, Custom 2]`.
// (Presuming that there are no cached exits)
loop {
if self.scopes_len() == 0 {
match label {
UnwindExit(val) => {
// Generate a block that will resume unwinding to the
// calling function
let bcx = self.new_block("resume");
match val {
UnwindKind::LandingPad => {
let addr = self.landingpad_alloca.get()
.unwrap();
let lp = build::Load(bcx, addr);
base::call_lifetime_end(bcx, addr);
base::trans_unwind_resume(bcx, lp);
}
UnwindKind::CleanupPad(_) => {
let pad = build::CleanupPad(bcx, None, &[]);
build::CleanupRet(bcx, pad, None);
}
}
prev_llbb = bcx.llbb;
break;
}
}
}

// Pop off the scope, since we may be generating
// unwinding code for it.
let top_scope = self.pop_scope();
let cached_exit = top_scope.cached_early_exit(label);
popped_scopes.push(top_scope);

// Check if we have already cached the unwinding of this
// scope for this label. If so, we can stop popping scopes
// and branch to the cached label, since it contains the
// cleanups for any subsequent scopes.
if let Some((exit, last_cleanup)) = cached_exit {
prev_llbb = exit;
skip = last_cleanup;
break;
}
}

debug!("trans_cleanups_to_exit_scope: popped {} scopes",
popped_scopes.len());

// Now push the popped scopes back on. As we go,
// we track in `prev_llbb` the exit to which this scope
// should branch when it's done.
//
// So, continuing with our example, we will start out with
// `prev_llbb` being set to `break_blk` (or possibly a cached
// early exit). We will then pop the scopes from `popped_scopes`
// and generate a basic block for each one, prepending it in the
// series and updating `prev_llbb`. So we begin by popping `Custom 2`
// and generating `Cleanup(Custom 2)`. We make `Cleanup(Custom 2)`
// branch to `prev_llbb == break_blk`, giving us a sequence like:
//
// Cleanup(Custom 2) -> prev_llbb
//
// We then pop `AST 24` and repeat the process, giving us the sequence:
//
// Cleanup(AST 24) -> Cleanup(Custom 2) -> prev_llbb
//
// At this point, `popped_scopes` is empty, and so the final block
// that we return to the user is `Cleanup(AST 24)`.
while let Some(mut scope) = popped_scopes.pop() {
if !scope.cleanups.is_empty() {
let name = scope.block_name("clean");
debug!("generating cleanups for {}", name);

let bcx_in = self.new_block(&name[..]);
let exit_label = label.start(bcx_in);
let mut bcx_out = bcx_in;
let len = scope.cleanups.len();
for cleanup in scope.cleanups.iter().rev().take(len - skip) {
bcx_out = cleanup.trans(bcx_out, scope.debug_loc);
}
skip = 0;
exit_label.branch(bcx_out, prev_llbb);
prev_llbb = bcx_in.llbb;

scope.add_cached_early_exit(exit_label, prev_llbb, len);
}
self.push_scope(scope);
}

debug!("trans_cleanups_to_exit_scope: prev_llbb={:?}", prev_llbb);

assert_eq!(self.scopes_len(), orig_scopes_len);
prev_llbb
}

/// Creates a landing pad for the top scope, if one does not exist. The
/// landing pad will perform all cleanups necessary for an unwind and then
/// `resume` to continue error propagation:
///
/// landing_pad -> ... cleanups ... -> [resume]
///
/// (The cleanups and resume instruction are created by
/// `trans_cleanups_to_exit_scope()`, not in this function itself.)
fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef {
let pad_bcx;

debug!("get_or_create_landing_pad");

// Check if a landing pad block exists; if not, create one.
{
let mut scopes = self.scopes.borrow_mut();
let last_scope = scopes.last_mut().unwrap();
match last_scope.cached_landing_pad {
Some(llbb) => return llbb,
None => {
let name = last_scope.block_name("unwind");
pad_bcx = self.new_block(&name[..]);
last_scope.cached_landing_pad = Some(pad_bcx.llbb);
}
}
};

let llpersonality = pad_bcx.fcx.eh_personality();

let val = if base::wants_msvc_seh(self.ccx.sess()) {
// A cleanup pad requires a personality function to be specified, so
// we do that here explicitly (happens implicitly below through
// creation of the landingpad instruction). We then create a
// cleanuppad instruction which has no filters to run cleanup on all
// exceptions.
build::SetPersonalityFn(pad_bcx, llpersonality);
let llretval = build::CleanupPad(pad_bcx, None, &[]);
UnwindKind::CleanupPad(llretval)
} else {
// The landing pad return type (the type being propagated). Not sure
// what this represents but it's determined by the personality
// function and this is what the EH proposal example uses.
let llretty = Type::struct_(self.ccx,
&[Type::i8p(self.ccx), Type::i32(self.ccx)],
false);

// The only landing pad clause will be 'cleanup'
let llretval = build::LandingPad(pad_bcx, llretty, llpersonality, 1);

// The landing pad block is a cleanup
build::SetCleanup(pad_bcx, llretval);

let addr = match self.landingpad_alloca.get() {
Some(addr) => addr,
None => {
let addr = base::alloca(pad_bcx, common::val_ty(llretval),
"");
base::call_lifetime_start(pad_bcx, addr);
self.landingpad_alloca.set(Some(addr));
addr
}
};
build::Store(pad_bcx, llretval, addr);
UnwindKind::LandingPad
};

// Generate the cleanup block and branch to it.
let label = UnwindExit(val);
let cleanup_llbb = self.trans_cleanups_to_exit_scope(label);
label.branch(pad_bcx, cleanup_llbb);

return pad_bcx.llbb;
CleanupScope::new(self, drop)
}
}

impl<'tcx> CleanupScope<'tcx> {
fn new(debug_loc: DebugLoc) -> CleanupScope<'tcx> {
fn new<'a>(fcx: &FunctionContext<'a, 'tcx>, drop_val: DropValue<'tcx>) -> CleanupScope<'tcx> {
CleanupScope {
debug_loc: debug_loc,
cleanups: vec![],
cached_early_exits: vec![],
cached_landing_pad: None,
cleanup: Some(drop_val),
landing_pad: if !fcx.ccx.sess().no_landing_pads() {
Some(drop_val.get_landing_pad(fcx))
} else {
None
},
}
}

fn cached_early_exit(&self,
label: EarlyExitLabel)
-> Option<(BasicBlockRef, usize)> {
self.cached_early_exits.iter().rev().
find(|e| e.label == label).
map(|e| (e.cleanup_block, e.last_cleanup))
}

fn add_cached_early_exit(&mut self,
label: EarlyExitLabel,
blk: BasicBlockRef,
last_cleanup: usize) {
self.cached_early_exits.push(
CachedEarlyExit { label: label,
cleanup_block: blk,
last_cleanup: last_cleanup});
}

/// True if this scope has cleanups that need unwinding
fn needs_invoke(&self) -> bool {
self.cached_landing_pad.is_some() ||
!self.cleanups.is_empty()
}

/// Returns a suitable name to use for the basic block that handles this cleanup scope
fn block_name(&self, prefix: &str) -> String {
format!("{}_custom_", prefix)
}
}

impl EarlyExitLabel {
/// Generates a branch going from `from_bcx` to `to_llbb` where `self` is
/// the exit label attached to the start of `from_bcx`.
///
/// Transitions from an exit label to other exit labels depend on the type
/// of label. For example with MSVC exceptions unwind exit labels will use
/// the `cleanupret` instruction instead of the `br` instruction.
fn branch(&self, from_bcx: Block, to_llbb: BasicBlockRef) {
if let UnwindExit(UnwindKind::CleanupPad(pad)) = *self {
build::CleanupRet(from_bcx, pad, Some(to_llbb));
} else {
build::Br(from_bcx, to_llbb, DebugLoc::None);
pub fn noop() -> CleanupScope<'tcx> {
CleanupScope {
cleanup: None,
landing_pad: None,
}
}

/// Generates the necessary instructions at the start of `bcx` to prepare
/// for the same kind of early exit label that `self` is.
///
/// This function will appropriately configure `bcx` based on the kind of
/// label this is. For UnwindExit labels, the `lpad` field of the block will
/// be set to `Some`, and for MSVC exceptions this function will generate a
/// `cleanuppad` instruction at the start of the block so it may be jumped
/// to in the future (e.g. so this block can be cached as an early exit).
///
/// Returns a new label which can be used to cache `bcx` in the list of
/// early exits.
fn start(&self, bcx: Block) -> EarlyExitLabel {
match *self {
UnwindExit(UnwindKind::CleanupPad(..)) => {
let pad = build::CleanupPad(bcx, None, &[]);
bcx.lpad.set(Some(bcx.fcx.lpad_arena.alloc(LandingPad::msvc(pad))));
UnwindExit(UnwindKind::CleanupPad(pad))
}
UnwindExit(UnwindKind::LandingPad) => {
bcx.lpad.set(Some(bcx.fcx.lpad_arena.alloc(LandingPad::gnu())));
*self
}
pub fn trans<'a>(self, bcx: &'a BlockAndBuilder<'a, 'tcx>) {
if let Some(cleanup) = self.cleanup {
cleanup.trans(None, &bcx);
}
}
}

impl PartialEq for UnwindKind {
fn eq(&self, val: &UnwindKind) -> bool {
match (*self, *val) {
(UnwindKind::LandingPad, UnwindKind::LandingPad) |
(UnwindKind::CleanupPad(..), UnwindKind::CleanupPad(..)) => true,
_ => false,
}
}
}
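
The manual `PartialEq` above deliberately compares only the variant, ignoring the `CleanupPad` payload. A standalone, runnable illustration of that variant-only equality (with `u32` standing in for the LLVM-internal `ValueRef`):

use std::mem::discriminant;

enum Kind { LandingPad, CleanupPad(u32) } // u32 stands in for ValueRef

fn main() {
    // Only the variant matters, exactly like UnwindKind::eq above.
    assert_eq!(discriminant(&Kind::CleanupPad(1)), discriminant(&Kind::CleanupPad(2)));
    assert_ne!(discriminant(&Kind::LandingPad), discriminant(&Kind::CleanupPad(0)));
}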

///////////////////////////////////////////////////////////////////////////
// Cleanup types

#[derive(Copy, Clone)]
pub struct DropValue<'tcx> {
is_immediate: bool,
val: ValueRef,
ty: Ty<'tcx>,
skip_dtor: bool,
}

impl<'tcx> DropValue<'tcx> {
fn trans<'blk>(&self,
bcx: Block<'blk, 'tcx>,
debug_loc: DebugLoc)
-> Block<'blk, 'tcx> {
let skip_dtor = self.skip_dtor;
let _icx = if skip_dtor {
base::push_ctxt("<DropValue as Cleanup>::trans skip_dtor=true")
} else {
base::push_ctxt("<DropValue as Cleanup>::trans skip_dtor=false")
};
let bcx = if self.is_immediate {
glue::drop_ty_immediate(bcx, self.val, self.ty, debug_loc, self.skip_dtor)
} else {
glue::drop_ty_core(bcx, self.val, self.ty, debug_loc, self.skip_dtor)
};
bcx
}
}
@@ -208,7 +208,7 @@ use syntax::abi::Abi;
use syntax_pos::DUMMY_SP;
use base::custom_coerce_unsize_info;
use context::SharedCrateContext;
use common::{fulfill_obligation, type_is_sized};
use common::fulfill_obligation;
use glue::{self, DropGlueKind};
use monomorphize::{self, Instance};
use util::nodemap::{FxHashSet, FxHashMap, DefIdMap};
@@ -337,7 +337,7 @@ fn collect_items_rec<'a, 'tcx: 'a>(scx: &SharedCrateContext<'a, 'tcx>,
TransItem::Static(node_id) => {
let def_id = scx.tcx().map.local_def_id(node_id);
let ty = scx.tcx().item_type(def_id);
let ty = glue::get_drop_glue_type(scx.tcx(), ty);
let ty = glue::get_drop_glue_type(scx, ty);
neighbors.push(TransItem::DropGlue(DropGlueKind::Ty(ty)));

recursion_depth_reset = None;
@@ -542,7 +542,7 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> {
self.param_substs,
&ty);
assert!(ty.is_normalized_for_trans());
let ty = glue::get_drop_glue_type(self.scx.tcx(), ty);
let ty = glue::get_drop_glue_type(self.scx, ty);
self.output.push(TransItem::DropGlue(DropGlueKind::Ty(ty)));
}

@@ -678,7 +678,7 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> {
let operand_ty = monomorphize::apply_param_substs(self.scx,
self.param_substs,
&mt.ty);
let ty = glue::get_drop_glue_type(tcx, operand_ty);
let ty = glue::get_drop_glue_type(self.scx, operand_ty);
self.output.push(TransItem::DropGlue(DropGlueKind::Ty(ty)));
} else {
bug!("Has the drop_in_place() intrinsic's signature changed?")
@@ -804,17 +804,17 @@ fn find_drop_glue_neighbors<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>,
let field_type = monomorphize::apply_param_substs(scx,
substs,
&field_type);
let field_type = glue::get_drop_glue_type(scx.tcx(), field_type);
let field_type = glue::get_drop_glue_type(scx, field_type);

if glue::type_needs_drop(scx.tcx(), field_type) {
if scx.type_needs_drop(field_type) {
output.push(TransItem::DropGlue(DropGlueKind::Ty(field_type)));
}
}
}
ty::TyClosure(def_id, substs) => {
for upvar_ty in substs.upvar_tys(def_id, scx.tcx()) {
let upvar_ty = glue::get_drop_glue_type(scx.tcx(), upvar_ty);
if glue::type_needs_drop(scx.tcx(), upvar_ty) {
let upvar_ty = glue::get_drop_glue_type(scx, upvar_ty);
if scx.type_needs_drop(upvar_ty) {
output.push(TransItem::DropGlue(DropGlueKind::Ty(upvar_ty)));
}
}
@@ -822,15 +822,15 @@ fn find_drop_glue_neighbors<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>,
ty::TyBox(inner_type) |
ty::TySlice(inner_type) |
ty::TyArray(inner_type, _) => {
let inner_type = glue::get_drop_glue_type(scx.tcx(), inner_type);
if glue::type_needs_drop(scx.tcx(), inner_type) {
let inner_type = glue::get_drop_glue_type(scx, inner_type);
if scx.type_needs_drop(inner_type) {
output.push(TransItem::DropGlue(DropGlueKind::Ty(inner_type)));
}
}
ty::TyTuple(args) => {
for arg in args {
let arg = glue::get_drop_glue_type(scx.tcx(), arg);
if glue::type_needs_drop(scx.tcx(), arg) {
let arg = glue::get_drop_glue_type(scx, arg);
if scx.type_needs_drop(arg) {
output.push(TransItem::DropGlue(DropGlueKind::Ty(arg)));
}
}
@@ -969,7 +969,7 @@ fn find_vtable_types_for_unsizing<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>,
&ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) => {
let (inner_source, inner_target) = (a, b);

if !type_is_sized(scx.tcx(), inner_source) {
if !scx.type_is_sized(inner_source) {
(inner_source, inner_target)
} else {
scx.tcx().struct_lockstep_tails(inner_source, inner_target)
@@ -1051,7 +1051,7 @@ fn create_trans_items_for_vtable_methods<'a, 'tcx>(scx: &SharedCrateContext<'a,
output.extend(methods);
}
// Also add the destructor
let dg_type = glue::get_drop_glue_type(scx.tcx(), impl_ty);
let dg_type = glue::get_drop_glue_type(scx, impl_ty);
output.push(TransItem::DropGlue(DropGlueKind::Ty(dg_type)));
}
}
@@ -1097,7 +1097,7 @@ impl<'b, 'a, 'v> ItemLikeVisitor<'v> for RootCollector<'b, 'a, 'v> {
def_id_to_string(self.scx.tcx(), def_id));

let ty = self.scx.tcx().item_type(def_id);
let ty = glue::get_drop_glue_type(self.scx.tcx(), ty);
let ty = glue::get_drop_glue_type(self.scx, ty);
self.output.push(TransItem::DropGlue(DropGlueKind::Ty(ty)));
}
}
@@ -14,24 +14,16 @@

use session::Session;
use llvm;
use llvm::{ValueRef, BasicBlockRef, BuilderRef, ContextRef, TypeKind};
use llvm::{ValueRef, BasicBlockRef, ContextRef, TypeKind};
use llvm::{True, False, Bool, OperandBundleDef};
use rustc::hir::def::Def;
use rustc::hir::def_id::DefId;
use rustc::hir::map::DefPathData;
use rustc::infer::TransNormalize;
use rustc::mir::Mir;
use rustc::util::common::MemoizationMap;
use middle::lang_items::LangItem;
use rustc::ty::subst::Substs;
use abi::{Abi, FnType};
use base;
use build;
use builder::Builder;
use callee::Callee;
use cleanup;
use consts;
use debuginfo::{self, DebugLoc};
use declare;
use machine;
use monomorphize;
@@ -40,34 +32,26 @@ use value::Value;
use rustc::ty::{self, Ty, TyCtxt};
use rustc::ty::layout::Layout;
use rustc::traits::{self, SelectionContext, Reveal};
use rustc::ty::fold::TypeFoldable;
use rustc::hir;

use arena::TypedArena;
use libc::{c_uint, c_char};
use std::borrow::Cow;
use std::iter;
use std::ops::Deref;
use std::ffi::CString;
use std::cell::{Cell, RefCell, Ref};

use syntax::ast;
use syntax::symbol::{Symbol, InternedString};
use syntax_pos::{DUMMY_SP, Span};
use syntax_pos::Span;

pub use context::{CrateContext, SharedCrateContext};

/// Is the type's representation size known at compile time?
pub fn type_is_sized<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool {
ty.is_sized(tcx, &tcx.empty_parameter_environment(), DUMMY_SP)
}

pub fn type_is_fat_ptr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool {
pub fn type_is_fat_ptr<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool {
match ty.sty {
ty::TyRawPtr(ty::TypeAndMut{ty, ..}) |
ty::TyRef(_, ty::TypeAndMut{ty, ..}) |
ty::TyBox(ty) => {
!type_is_sized(tcx, ty)
!ccx.shared().type_is_sized(ty)
}
_ => {
false
@@ -79,14 +63,13 @@ pub fn type_is_immediate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -
use machine::llsize_of_alloc;
use type_of::sizing_type_of;

let tcx = ccx.tcx();
let simple = ty.is_scalar() ||
ty.is_unique() || ty.is_region_ptr() ||
ty.is_simd();
if simple && !type_is_fat_ptr(tcx, ty) {
if simple && !type_is_fat_ptr(ccx, ty) {
return true;
}
if !type_is_sized(tcx, ty) {
if !ccx.shared().type_is_sized(ty) {
return false;
}
match ty.sty {
@@ -236,416 +219,139 @@ impl<'a, 'tcx> VariantInfo<'tcx> {
}
}

pub struct BuilderRef_res {
pub b: BuilderRef,
}

impl Drop for BuilderRef_res {
fn drop(&mut self) {
unsafe {
llvm::LLVMDisposeBuilder(self.b);
}
}
}

pub fn BuilderRef_res(b: BuilderRef) -> BuilderRef_res {
BuilderRef_res {
b: b
}
}

pub fn validate_substs(substs: &Substs) {
assert!(!substs.needs_infer());
}

// Function context. Every LLVM function we create will have one of
// these.
// Function context. Every LLVM function we create will have one of these.
pub struct FunctionContext<'a, 'tcx: 'a> {
// The MIR for this function.
pub mir: Option<Ref<'tcx, Mir<'tcx>>>,

// The ValueRef returned from a call to llvm::LLVMAddFunction; the
// address of the first instruction in the sequence of
// instructions for this function that will go in the .text
// section of the executable we're generating.
pub llfn: ValueRef,

// always an empty parameter-environment NOTE: @jroesch another use of ParamEnv
pub param_env: ty::ParameterEnvironment<'tcx>,

// A pointer to where to store the return value. If the return type is
// immediate, this points to an alloca in the function. Otherwise, it's a
// pointer to the hidden first parameter of the function. After function
// construction, this should always be Some.
pub llretslotptr: Cell<Option<ValueRef>>,

// These pub elements: "hoisted basic blocks" containing
// administrative activities that have to happen in only one place in
// the function, due to LLVM's quirks.
// A marker for the place where we want to insert the function's static
// allocas, so that LLVM will coalesce them into a single alloca call.
pub alloca_insert_pt: Cell<Option<ValueRef>>,

// When working with landingpad-based exceptions this value is alloca'd and
// later loaded when using the resume instruction. This ends up being
// critical to chaining landing pads and reusing already-translated
// cleanups.
//
// Note that for cleanuppad-based exceptions this is not used.
pub landingpad_alloca: Cell<Option<ValueRef>>,

// Describes the return/argument LLVM types and their ABI handling.
pub fn_ty: FnType,

// If this function is being monomorphized, this contains the type
// substitutions used.
pub param_substs: &'tcx Substs<'tcx>,

// The source span and nesting context where this function comes from, for
// error reporting and symbol generation.
pub span: Option<Span>,

// The arena that blocks are allocated from.
pub block_arena: &'a TypedArena<BlockS<'a, 'tcx>>,

// The arena that landing pads are allocated from.
pub lpad_arena: TypedArena<LandingPad>,
alloca_insert_pt: Option<ValueRef>,

// This function's enclosing crate context.
pub ccx: &'a CrateContext<'a, 'tcx>,

// Used and maintained by the debuginfo module.
pub debug_context: debuginfo::FunctionDebugContext,

// Cleanup scopes.
pub scopes: RefCell<Vec<cleanup::CleanupScope<'tcx>>>,
alloca_builder: Builder<'a, 'tcx>,
}

impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
pub fn mir(&self) -> Ref<'tcx, Mir<'tcx>> {
self.mir.as_ref().map(Ref::clone).expect("fcx.mir was empty")
/// Create a function context for the given function.
/// Call FunctionContext::get_entry_block for the first entry block.
pub fn new(ccx: &'a CrateContext<'a, 'tcx>, llfndecl: ValueRef) -> FunctionContext<'a, 'tcx> {
let mut fcx = FunctionContext {
llfn: llfndecl,
alloca_insert_pt: None,
ccx: ccx,
alloca_builder: Builder::with_ccx(ccx),
};

let val = {
let entry_bcx = fcx.build_new_block("entry-block");
let val = entry_bcx.load(C_null(Type::i8p(ccx)));
fcx.alloca_builder.position_at_start(entry_bcx.llbb());
val
};

// Use a dummy instruction as the insertion point for all allocas.
// This is later removed in the drop of FunctionContext.
fcx.alloca_insert_pt = Some(val);

fcx
}
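
Sketch of the intended flow (the call site is hypothetical; only `FunctionContext::new`, `get_entry_block`, and `alloca` come from this patch): allocas are routed through the dedicated builder parked at the dummy entry-block instruction, so LLVM can coalesce them no matter where the main builder is positioned.

// Hypothetical call site; `ccx` and `llfn` are placeholders.
let fcx = FunctionContext::new(ccx, llfn);
let bcx = fcx.get_entry_block();
// Emitted at the entry block's insertion point, not at bcx's current position:
let scratch = fcx.alloca(Type::i8p(ccx), "scratch");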

pub fn cleanup(&self) {
unsafe {
llvm::LLVMInstructionEraseFromParent(self.alloca_insert_pt
.get()
.unwrap());
}
pub fn get_entry_block(&'a self) -> BlockAndBuilder<'a, 'tcx> {
BlockAndBuilder::new(unsafe {
llvm::LLVMGetFirstBasicBlock(self.llfn)
}, self)
}

pub fn new_block(&'a self,
name: &str)
-> Block<'a, 'tcx> {
pub fn new_block(&'a self, name: &str) -> BasicBlockRef {
unsafe {
let name = CString::new(name).unwrap();
let llbb = llvm::LLVMAppendBasicBlockInContext(self.ccx.llcx(),
self.llfn,
name.as_ptr());
BlockS::new(llbb, self)
llvm::LLVMAppendBasicBlockInContext(
self.ccx.llcx(),
self.llfn,
name.as_ptr()
)
}
}

pub fn monomorphize<T>(&self, value: &T) -> T
where T: TransNormalize<'tcx>
{
monomorphize::apply_param_substs(self.ccx.shared(),
self.param_substs,
value)
pub fn build_new_block(&'a self, name: &str) -> BlockAndBuilder<'a, 'tcx> {
BlockAndBuilder::new(self.new_block(name), self)
}

/// This is the same as `common::type_needs_drop`, except that it
/// may use or update caches within this `FunctionContext`.
pub fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool {
self.ccx.tcx().type_needs_drop_given_env(ty, &self.param_env)
}

pub fn eh_personality(&self) -> ValueRef {
// The exception handling personality function.
//
// If our compilation unit has the `eh_personality` lang item somewhere
// within it, then we just need to translate that. Otherwise, we're
// building an rlib which will depend on some upstream implementation of
// this function, so we just codegen a generic reference to it. We don't
// specify any of the types for the function, we just make it a symbol
// that LLVM can later use.
//
// Note that MSVC is a little special here in that we don't use the
// `eh_personality` lang item at all. Currently LLVM has support for
// both Dwarf and SEH unwind mechanisms for MSVC targets and uses the
// *name of the personality function* to decide what kind of unwind side
// tables/landing pads to emit. It looks like Dwarf is used by default,
// injecting a dependency on the `_Unwind_Resume` symbol for resuming
// an "exception", but for MSVC we want to force SEH. This means that we
// can't actually have the personality function be our standard
// `rust_eh_personality` function, but rather we wired it up to the
// CRT's custom personality function, which forces LLVM to consider
// landing pads as "landing pads for SEH".
let ccx = self.ccx;
let tcx = ccx.tcx();
match tcx.lang_items.eh_personality() {
Some(def_id) if !base::wants_msvc_seh(ccx.sess()) => {
Callee::def(ccx, def_id, tcx.intern_substs(&[])).reify(ccx)
}
_ => {
if let Some(llpersonality) = ccx.eh_personality().get() {
return llpersonality
}
let name = if base::wants_msvc_seh(ccx.sess()) {
"__CxxFrameHandler3"
} else {
"rust_eh_personality"
};
let fty = Type::variadic_func(&[], &Type::i32(ccx));
let f = declare::declare_cfn(ccx, name, fty);
ccx.eh_personality().set(Some(f));
f
}
}
}

// Returns a ValueRef of the "eh_unwind_resume" lang item if one is defined,
// otherwise declares it as an external function.
pub fn eh_unwind_resume(&self) -> Callee<'tcx> {
use attributes;
let ccx = self.ccx;
let tcx = ccx.tcx();
assert!(ccx.sess().target.target.options.custom_unwind_resume);
if let Some(def_id) = tcx.lang_items.eh_unwind_resume() {
return Callee::def(ccx, def_id, tcx.intern_substs(&[]));
}

let ty = tcx.mk_fn_ptr(tcx.mk_bare_fn(ty::BareFnTy {
unsafety: hir::Unsafety::Unsafe,
abi: Abi::C,
sig: ty::Binder(tcx.mk_fn_sig(
iter::once(tcx.mk_mut_ptr(tcx.types.u8)),
tcx.types.never,
false
)),
}));

let unwresume = ccx.eh_unwind_resume();
if let Some(llfn) = unwresume.get() {
return Callee::ptr(llfn, ty);
}
let llfn = declare::declare_fn(ccx, "rust_eh_unwind_resume", ty);
attributes::unwind(llfn, true);
unwresume.set(Some(llfn));
Callee::ptr(llfn, ty)
pub fn alloca(&self, ty: Type, name: &str) -> ValueRef {
self.alloca_builder.dynamic_alloca(ty, name)
}
}

// Basic block context. We create a block context for each basic block
// (single-entry, single-exit sequence of instructions) we generate from Rust
// code. Each basic block we generate is attached to a function, typically
// with many basic blocks per function. All the basic blocks attached to a
// function are organized as a directed graph.
pub struct BlockS<'blk, 'tcx: 'blk> {
impl<'a, 'tcx> Drop for FunctionContext<'a, 'tcx> {
fn drop(&mut self) {
unsafe {
llvm::LLVMInstructionEraseFromParent(self.alloca_insert_pt.unwrap());
}
}
}

#[must_use]
pub struct BlockAndBuilder<'a, 'tcx: 'a> {
// The BasicBlockRef returned from a call to
// llvm::LLVMAppendBasicBlock(llfn, name), which adds a basic
// block to the function pointed to by llfn. We insert
// instructions into that block by way of this block context.
// The block pointing to this one in the function's digraph.
pub llbb: BasicBlockRef,
pub terminated: Cell<bool>,
pub unreachable: Cell<bool>,

// If this block is part of a landing pad, then this is `Some`, indicating
// what kind of landing pad it's in; otherwise this is `None`.
pub lpad: Cell<Option<&'blk LandingPad>>,
llbb: BasicBlockRef,

// The function context for the function to which this block is
// attached.
pub fcx: &'blk FunctionContext<'blk, 'tcx>,
fcx: &'a FunctionContext<'a, 'tcx>,

builder: Builder<'a, 'tcx>,
}

pub type Block<'blk, 'tcx> = &'blk BlockS<'blk, 'tcx>;

impl<'blk, 'tcx> BlockS<'blk, 'tcx> {
pub fn new(llbb: BasicBlockRef,
fcx: &'blk FunctionContext<'blk, 'tcx>)
-> Block<'blk, 'tcx> {
fcx.block_arena.alloc(BlockS {
llbb: llbb,
terminated: Cell::new(false),
unreachable: Cell::new(false),
lpad: Cell::new(None),
fcx: fcx
})
}

pub fn ccx(&self) -> &'blk CrateContext<'blk, 'tcx> {
self.fcx.ccx
}
pub fn fcx(&self) -> &'blk FunctionContext<'blk, 'tcx> {
self.fcx
}
pub fn tcx(&self) -> TyCtxt<'blk, 'tcx, 'tcx> {
self.fcx.ccx.tcx()
}
pub fn sess(&self) -> &'blk Session { self.fcx.ccx.sess() }

pub fn lpad(&self) -> Option<&'blk LandingPad> {
self.lpad.get()
}

pub fn set_lpad_ref(&self, lpad: Option<&'blk LandingPad>) {
// FIXME: use an IVar?
self.lpad.set(lpad);
}

pub fn set_lpad(&self, lpad: Option<LandingPad>) {
self.set_lpad_ref(lpad.map(|p| &*self.fcx().lpad_arena.alloc(p)))
}

pub fn mir(&self) -> Ref<'tcx, Mir<'tcx>> {
self.fcx.mir()
}

pub fn name(&self, name: ast::Name) -> String {
name.to_string()
}

pub fn node_id_to_string(&self, id: ast::NodeId) -> String {
self.tcx().map.node_to_string(id).to_string()
}

pub fn to_str(&self) -> String {
format!("[block {:p}]", self)
}

pub fn monomorphize<T>(&self, value: &T) -> T
where T: TransNormalize<'tcx>
{
monomorphize::apply_param_substs(self.fcx.ccx.shared(),
self.fcx.param_substs,
value)
}

pub fn build(&'blk self) -> BlockAndBuilder<'blk, 'tcx> {
BlockAndBuilder::new(self, OwnedBuilder::new_with_ccx(self.ccx()))
}
}

pub struct OwnedBuilder<'blk, 'tcx: 'blk> {
builder: Builder<'blk, 'tcx>
}

impl<'blk, 'tcx> OwnedBuilder<'blk, 'tcx> {
pub fn new_with_ccx(ccx: &'blk CrateContext<'blk, 'tcx>) -> Self {
// Create a fresh builder from the crate context.
let llbuilder = unsafe {
llvm::LLVMCreateBuilderInContext(ccx.llcx())
};
OwnedBuilder {
builder: Builder {
llbuilder: llbuilder,
ccx: ccx,
}
}
}
}

impl<'blk, 'tcx> Drop for OwnedBuilder<'blk, 'tcx> {
fn drop(&mut self) {
unsafe {
llvm::LLVMDisposeBuilder(self.builder.llbuilder);
}
}
}

pub struct BlockAndBuilder<'blk, 'tcx: 'blk> {
bcx: Block<'blk, 'tcx>,
owned_builder: OwnedBuilder<'blk, 'tcx>,
}

impl<'blk, 'tcx> BlockAndBuilder<'blk, 'tcx> {
pub fn new(bcx: Block<'blk, 'tcx>, owned_builder: OwnedBuilder<'blk, 'tcx>) -> Self {
impl<'a, 'tcx> BlockAndBuilder<'a, 'tcx> {
pub fn new(llbb: BasicBlockRef, fcx: &'a FunctionContext<'a, 'tcx>) -> Self {
let builder = Builder::with_ccx(fcx.ccx);
// Set the builder's position to this block's end.
owned_builder.builder.position_at_end(bcx.llbb);
builder.position_at_end(llbb);
BlockAndBuilder {
bcx: bcx,
owned_builder: owned_builder,
llbb: llbb,
fcx: fcx,
builder: builder,
}
}

pub fn with_block<F, R>(&self, f: F) -> R
where F: FnOnce(Block<'blk, 'tcx>) -> R
{
let result = f(self.bcx);
self.position_at_end(self.bcx.llbb);
result
}

pub fn map_block<F>(self, f: F) -> Self
where F: FnOnce(Block<'blk, 'tcx>) -> Block<'blk, 'tcx>
{
let BlockAndBuilder { bcx, owned_builder } = self;
let bcx = f(bcx);
BlockAndBuilder::new(bcx, owned_builder)
}

pub fn at_start<F, R>(&self, f: F) -> R
where F: FnOnce(&BlockAndBuilder<'blk, 'tcx>) -> R
where F: FnOnce(&BlockAndBuilder<'a, 'tcx>) -> R
{
self.position_at_start(self.bcx.llbb);
self.position_at_start(self.llbb);
let r = f(self);
self.position_at_end(self.bcx.llbb);
self.position_at_end(self.llbb);
r
}

// Methods delegated to bcx

pub fn is_unreachable(&self) -> bool {
self.bcx.unreachable.get()
pub fn fcx(&self) -> &'a FunctionContext<'a, 'tcx> {
self.fcx
}

pub fn ccx(&self) -> &'blk CrateContext<'blk, 'tcx> {
self.bcx.ccx()
pub fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> {
self.ccx.tcx()
}
pub fn fcx(&self) -> &'blk FunctionContext<'blk, 'tcx> {
self.bcx.fcx()
}
pub fn tcx(&self) -> TyCtxt<'blk, 'tcx, 'tcx> {
self.bcx.tcx()
}
pub fn sess(&self) -> &'blk Session {
self.bcx.sess()
pub fn sess(&self) -> &'a Session {
self.ccx.sess()
}

pub fn llbb(&self) -> BasicBlockRef {
self.bcx.llbb
}

pub fn mir(&self) -> Ref<'tcx, Mir<'tcx>> {
self.bcx.mir()
}

pub fn monomorphize<T>(&self, value: &T) -> T
where T: TransNormalize<'tcx>
{
self.bcx.monomorphize(value)
}

pub fn set_lpad(&self, lpad: Option<LandingPad>) {
self.bcx.set_lpad(lpad)
}

pub fn set_lpad_ref(&self, lpad: Option<&'blk LandingPad>) {
// FIXME: use an IVar?
self.bcx.set_lpad_ref(lpad);
}

pub fn lpad(&self) -> Option<&'blk LandingPad> {
self.bcx.lpad()
self.llbb
}
}

impl<'blk, 'tcx> Deref for BlockAndBuilder<'blk, 'tcx> {
type Target = Builder<'blk, 'tcx>;
impl<'a, 'tcx> Deref for BlockAndBuilder<'a, 'tcx> {
type Target = Builder<'a, 'tcx>;
fn deref(&self) -> &Self::Target {
&self.owned_builder.builder
&self.builder
}
}
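
Because of this `Deref` impl, every `Builder` method is callable directly on a `BlockAndBuilder`; that is what lets call sites elsewhere in this patch write `bcx.and(..)`, `bcx.shl(..)`, `bcx.call(..)` instead of the old `build::*` free functions. A sketch with placeholder values:

// `fcx`, `lhs`, `rhs`, and `mask` are hypothetical.
let bcx = fcx.build_new_block("demo");
let masked = bcx.and(rhs, mask); // resolves to Builder::and via Deref
let shifted = bcx.shl(lhs, masked); // resolves to Builder::shl via Deref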

@@ -663,53 +369,33 @@ impl<'blk, 'tcx> Deref for BlockAndBuilder<'blk, 'tcx> {
/// When inside of a landing pad, each function call in LLVM IR needs to be
/// annotated with which landing pad it's a part of. This is accomplished via
/// the `OperandBundleDef` value created for MSVC landing pads.
pub struct LandingPad {
cleanuppad: Option<ValueRef>,
operand: Option<OperandBundleDef>,
pub struct Funclet {
cleanuppad: ValueRef,
operand: OperandBundleDef,
}

impl LandingPad {
pub fn gnu() -> LandingPad {
LandingPad { cleanuppad: None, operand: None }
}

pub fn msvc(cleanuppad: ValueRef) -> LandingPad {
LandingPad {
cleanuppad: Some(cleanuppad),
operand: Some(OperandBundleDef::new("funclet", &[cleanuppad])),
impl Funclet {
pub fn new(cleanuppad: ValueRef) -> Funclet {
Funclet {
cleanuppad: cleanuppad,
operand: OperandBundleDef::new("funclet", &[cleanuppad]),
}
}

pub fn bundle(&self) -> Option<&OperandBundleDef> {
self.operand.as_ref()
}

pub fn cleanuppad(&self) -> Option<ValueRef> {
pub fn cleanuppad(&self) -> ValueRef {
self.cleanuppad
}
}

impl Clone for LandingPad {
fn clone(&self) -> LandingPad {
LandingPad {
cleanuppad: self.cleanuppad,
operand: self.cleanuppad.map(|p| {
OperandBundleDef::new("funclet", &[p])
}),
}
pub fn bundle(&self) -> &OperandBundleDef {
&self.operand
}
}

pub struct Result<'blk, 'tcx: 'blk> {
pub bcx: Block<'blk, 'tcx>,
pub val: ValueRef
}

impl<'b, 'tcx> Result<'b, 'tcx> {
pub fn new(bcx: Block<'b, 'tcx>, val: ValueRef) -> Result<'b, 'tcx> {
Result {
bcx: bcx,
val: val,
impl Clone for Funclet {
fn clone(&self) -> Funclet {
Funclet {
cleanuppad: self.cleanuppad,
operand: OperandBundleDef::new("funclet", &[self.cleanuppad]),
}
}
}
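
Usage sketch for `Funclet`, pieced together from the calls shown in `get_landing_pad` and `DropValue::trans` earlier in this patch (`ptr` and `ty` are placeholders): on MSVC, every call emitted during cleanup carries the funclet's operand bundle so LLVM can attribute it to its `cleanuppad`.

// From the MSVC branch of get_landing_pad: create the pad, wrap it, thread it through.
let pad = bcx.cleanup_pad(None, &[]);
let funclet = Some(Funclet::new(pad));
// Drop-glue calls made while this funclet is active are tagged with its bundle.
glue::call_drop_glue(&bcx, ptr, ty, false, funclet.as_ref());
bcx.cleanup_ret(pad, None);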
@@ -1016,43 +702,42 @@ pub fn langcall(tcx: TyCtxt,
// all shifts). For 32- and 64-bit types, this matches the semantics
// of Java. (See related discussion on #1877 and #10183.)

pub fn build_unchecked_lshift<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
lhs: ValueRef,
rhs: ValueRef,
binop_debug_loc: DebugLoc) -> ValueRef {
pub fn build_unchecked_lshift<'a, 'tcx>(
bcx: &BlockAndBuilder<'a, 'tcx>,
lhs: ValueRef,
rhs: ValueRef
) -> ValueRef {
let rhs = base::cast_shift_expr_rhs(bcx, hir::BinOp_::BiShl, lhs, rhs);
// #1877, #10183: Ensure that input is always valid
let rhs = shift_mask_rhs(bcx, rhs, binop_debug_loc);
build::Shl(bcx, lhs, rhs, binop_debug_loc)
let rhs = shift_mask_rhs(bcx, rhs);
bcx.shl(lhs, rhs)
}

pub fn build_unchecked_rshift<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
lhs_t: Ty<'tcx>,
lhs: ValueRef,
rhs: ValueRef,
binop_debug_loc: DebugLoc) -> ValueRef {
pub fn build_unchecked_rshift<'a, 'tcx>(
bcx: &BlockAndBuilder<'a, 'tcx>, lhs_t: Ty<'tcx>, lhs: ValueRef, rhs: ValueRef
) -> ValueRef {
let rhs = base::cast_shift_expr_rhs(bcx, hir::BinOp_::BiShr, lhs, rhs);
// #1877, #10183: Ensure that input is always valid
let rhs = shift_mask_rhs(bcx, rhs, binop_debug_loc);
let rhs = shift_mask_rhs(bcx, rhs);
let is_signed = lhs_t.is_signed();
if is_signed {
build::AShr(bcx, lhs, rhs, binop_debug_loc)
bcx.ashr(lhs, rhs)
} else {
build::LShr(bcx, lhs, rhs, binop_debug_loc)
bcx.lshr(lhs, rhs)
}
}

fn shift_mask_rhs<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
rhs: ValueRef,
debug_loc: DebugLoc) -> ValueRef {
fn shift_mask_rhs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, rhs: ValueRef) -> ValueRef {
let rhs_llty = val_ty(rhs);
build::And(bcx, rhs, shift_mask_val(bcx, rhs_llty, rhs_llty, false), debug_loc)
bcx.and(rhs, shift_mask_val(bcx, rhs_llty, rhs_llty, false))
}

pub fn shift_mask_val<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
llty: Type,
mask_llty: Type,
invert: bool) -> ValueRef {
pub fn shift_mask_val<'a, 'tcx>(
bcx: &BlockAndBuilder<'a, 'tcx>,
llty: Type,
mask_llty: Type,
invert: bool
) -> ValueRef {
let kind = llty.kind();
match kind {
TypeKind::Integer => {
@@ -1066,7 +751,7 @@ pub fn shift_mask_val<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
},
TypeKind::Vector => {
let mask = shift_mask_val(bcx, llty.element_type(), mask_llty.element_type(), invert);
build::VectorSplat(bcx, mask_llty.vector_length(), mask)
bcx.vector_splat(mask_llty.vector_length(), mask)
},
_ => bug!("shift_mask_val: expected Integer or Vector, found {:?}", kind),
}
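
What the masking buys (standalone, runnable model): LLVM's shift instructions are undefined once the shift amount reaches the bit width, so the RHS is first ANDed with `bits - 1`, which is exactly the Java-style wrap-around the comment above describes.

fn masked_shl(lhs: u32, rhs: u32) -> u32 {
    // shift_mask_val for a 32-bit integer type is 31 (bits - 1)
    lhs << (rhs & 31)
}

fn main() {
    assert_eq!(masked_shl(1, 33), 2); // 33 & 31 == 1, matching Java's semantics
}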
@@ -16,7 +16,7 @@ use rustc_const_eval::ConstEvalErr;
use rustc::hir::def_id::DefId;
use rustc::hir::map as hir_map;
use {debuginfo, machine};
use base::{self, push_ctxt};
use base;
use trans_item::TransItem;
use common::{CrateContext, val_ty};
use declare;
@@ -221,7 +221,6 @@ pub fn trans_static(ccx: &CrateContext,
attrs: &[ast::Attribute])
-> Result<ValueRef, ConstEvalErr> {
unsafe {
let _icx = push_ctxt("trans_static");
let def_id = ccx.tcx().map.local_def_id(id);
let g = get_static(ccx, def_id);

@@ -9,17 +9,16 @@
// except according to those terms.

use llvm;
use llvm::{ContextRef, ModuleRef, ValueRef, BuilderRef};
use rustc::dep_graph::{DepGraph, DepNode, DepTrackingMap, DepTrackingMapConfig,
WorkProduct};
use llvm::{ContextRef, ModuleRef, ValueRef};
use rustc::dep_graph::{DepGraph, DepNode, DepTrackingMap, DepTrackingMapConfig, WorkProduct};
use middle::cstore::LinkMeta;
use rustc::hir;
use rustc::hir::def::ExportMap;
use rustc::hir::def_id::DefId;
use rustc::traits;
use base;
use builder::Builder;
use common::BuilderRef_res;
use debuginfo;
use callee::Callee;
use base;
use declare;
use glue::DropGlueKind;
use monomorphize::Instance;
@@ -40,11 +39,13 @@ use std::ffi::{CStr, CString};
use std::cell::{Cell, RefCell};
use std::marker::PhantomData;
use std::ptr;
use std::iter;
use std::rc::Rc;
use std::str;
use syntax::ast;
use syntax::symbol::InternedString;
use abi::FnType;
use syntax_pos::DUMMY_SP;
use abi::{Abi, FnType};

pub struct Stats {
pub n_glues_created: Cell<usize>,
@@ -71,6 +72,7 @@ pub struct SharedCrateContext<'a, 'tcx: 'a> {
exported_symbols: NodeSet,
link_meta: LinkMeta,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
empty_param_env: ty::ParameterEnvironment<'tcx>,
stats: Stats,
check_overflow: bool,

@@ -140,7 +142,6 @@ pub struct LocalCrateContext<'tcx> {
int_type: Type,
opaque_vec_type: Type,
str_slice_type: Type,
builder: BuilderRef_res,

/// Holds the LLVM values for closure IDs.
closure_vals: RefCell<FxHashMap<Instance<'tcx>, ValueRef>>,
@@ -153,11 +154,6 @@ pub struct LocalCrateContext<'tcx> {

intrinsics: RefCell<FxHashMap<&'static str, ValueRef>>,

/// Number of LLVM instructions translated into this `LocalCrateContext`.
/// This is used to perform some basic load-balancing to keep all LLVM
/// contexts around the same size.
n_llvm_insns: Cell<usize>,

/// Depth of the current type-of computation - used to bail out
type_of_depth: Cell<usize>,

@@ -316,38 +312,6 @@ impl<'a, 'tcx> Iterator for CrateContextIterator<'a,'tcx> {
}
}

/// The iterator produced by `CrateContext::maybe_iter`.
pub struct CrateContextMaybeIterator<'a, 'tcx: 'a> {
shared: &'a SharedCrateContext<'a, 'tcx>,
local_ccxs: &'a [LocalCrateContext<'tcx>],
index: usize,
single: bool,
origin: usize,
}

impl<'a, 'tcx> Iterator for CrateContextMaybeIterator<'a, 'tcx> {
type Item = (CrateContext<'a, 'tcx>, bool);

fn next(&mut self) -> Option<(CrateContext<'a, 'tcx>, bool)> {
if self.index >= self.local_ccxs.len() {
return None;
}

let index = self.index;
self.index += 1;
if self.single {
self.index = self.local_ccxs.len();
}

let ccx = CrateContext {
shared: self.shared,
index: index,
local_ccxs: self.local_ccxs
};
Some((ccx, index == self.origin))
}
}

pub fn get_reloc_model(sess: &Session) -> llvm::RelocMode {
let reloc_model_arg = match sess.opts.cg.relocation_model {
Some(ref s) => &s[..],
@@ -496,6 +460,7 @@ impl<'b, 'tcx> SharedCrateContext<'b, 'tcx> {
export_map: export_map,
exported_symbols: exported_symbols,
link_meta: link_meta,
empty_param_env: tcx.empty_parameter_environment(),
tcx: tcx,
stats: Stats {
n_glues_created: Cell::new(0),
@@ -516,6 +481,14 @@ impl<'b, 'tcx> SharedCrateContext<'b, 'tcx> {
}
}

pub fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool {
self.tcx.type_needs_drop_given_env(ty, &self.empty_param_env)
}

pub fn type_is_sized(&self, ty: Ty<'tcx>) -> bool {
ty.is_sized(self.tcx, &self.empty_param_env, DUMMY_SP)
}
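
These two helpers encode the invariant that trans only asks drop/sizedness questions of fully monomorphized types, so the one shared empty parameter environment suffices. The collector call sites earlier in this patch switch to these forms (the operands below are placeholders):

// New call forms, as used in collector.rs above.
if scx.type_needs_drop(field_type) { /* push a drop-glue trans item */ }
if !scx.type_is_sized(inner_source) { /* unsized: fat-pointer case */ }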
|
||||
|
||||
pub fn metadata_llmod(&self) -> ModuleRef {
|
||||
self.metadata_llmod
|
||||
}
|
||||
@ -638,14 +611,12 @@ impl<'tcx> LocalCrateContext<'tcx> {
|
||||
int_type: Type::from_ref(ptr::null_mut()),
|
||||
opaque_vec_type: Type::from_ref(ptr::null_mut()),
|
||||
str_slice_type: Type::from_ref(ptr::null_mut()),
|
||||
builder: BuilderRef_res(llvm::LLVMCreateBuilderInContext(llcx)),
|
||||
closure_vals: RefCell::new(FxHashMap()),
|
||||
dbg_cx: dbg_cx,
|
||||
eh_personality: Cell::new(None),
|
||||
eh_unwind_resume: Cell::new(None),
|
||||
rust_try_fn: Cell::new(None),
|
||||
intrinsics: RefCell::new(FxHashMap()),
|
||||
n_llvm_insns: Cell::new(0),
|
||||
type_of_depth: Cell::new(0),
|
||||
symbol_map: symbol_map,
|
||||
local_gen_sym_counter: Cell::new(0),
|
||||
@ -671,10 +642,6 @@ impl<'tcx> LocalCrateContext<'tcx> {
|
||||
local_ccx.opaque_vec_type = opaque_vec_type;
|
||||
local_ccx.str_slice_type = str_slice_ty;
|
||||
|
||||
if shared.tcx.sess.count_llvm_insns() {
|
||||
base::init_insn_ctxt()
|
||||
}
|
||||
|
||||
local_ccx
|
||||
}
|
||||
}
|
||||
@ -703,26 +670,10 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> {
|
||||
self.shared
|
||||
}
|
||||
|
||||
pub fn local(&self) -> &'b LocalCrateContext<'tcx> {
|
||||
fn local(&self) -> &'b LocalCrateContext<'tcx> {
|
||||
&self.local_ccxs[self.index]
|
||||
}
|
||||
|
||||
/// Either iterate over only `self`, or iterate over all `CrateContext`s in
|
||||
/// the `SharedCrateContext`. The iterator produces `(ccx, is_origin)`
|
||||
/// pairs, where `is_origin` is `true` if `ccx` is `self` and `false`
|
||||
/// otherwise. This method is useful for avoiding code duplication in
|
||||
/// cases where it may or may not be necessary to translate code into every
|
||||
/// context.
|
||||
pub fn maybe_iter(&self, iter_all: bool) -> CrateContextMaybeIterator<'b, 'tcx> {
|
||||
CrateContextMaybeIterator {
|
||||
shared: self.shared,
|
||||
index: if iter_all { 0 } else { self.index },
|
||||
single: !iter_all,
|
||||
origin: self.index,
|
||||
local_ccxs: self.local_ccxs,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx> {
self.shared.tcx
}
@ -731,14 +682,6 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> {
&self.shared.tcx.sess
}

pub fn builder<'a>(&'a self) -> Builder<'a, 'tcx> {
Builder::new(self)
}

pub fn raw_builder<'a>(&'a self) -> BuilderRef {
self.local().builder.b
}

pub fn get_intrinsic(&self, key: &str) -> ValueRef {
if let Some(v) = self.intrinsics().borrow().get(key).cloned() {
return v;
@ -886,14 +829,6 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> {
&self.local().dbg_cx
}

pub fn eh_personality<'a>(&'a self) -> &'a Cell<Option<ValueRef>> {
&self.local().eh_personality
}

pub fn eh_unwind_resume<'a>(&'a self) -> &'a Cell<Option<ValueRef>> {
&self.local().eh_unwind_resume
}

pub fn rust_try_fn<'a>(&'a self) -> &'a Cell<Option<ValueRef>> {
&self.local().rust_try_fn
}
@ -902,10 +837,6 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> {
&self.local().intrinsics
}

pub fn count_llvm_insn(&self) {
self.local().n_llvm_insns.set(self.local().n_llvm_insns.get() + 1);
}

pub fn obj_size_bound(&self) -> u64 {
self.tcx().data_layout.obj_size_bound()
}
@ -974,6 +905,82 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> {
base_n::push_str(idx as u64, base_n::ALPHANUMERIC_ONLY, &mut name);
name
}

pub fn eh_personality(&self) -> ValueRef {
// The exception handling personality function.
//
// If our compilation unit has the `eh_personality` lang item somewhere
// within it, then we just need to translate that. Otherwise, we're
// building an rlib which will depend on some upstream implementation of
// this function, so we just codegen a generic reference to it. We don't
// specify any of the types for the function, we just make it a symbol
// that LLVM can later use.
//
// Note that MSVC is a little special here in that we don't use the
// `eh_personality` lang item at all. Currently LLVM has support for
// both Dwarf and SEH unwind mechanisms for MSVC targets and uses the
// *name of the personality function* to decide what kind of unwind side
// tables/landing pads to emit. It looks like Dwarf is used by default,
// injecting a dependency on the `_Unwind_Resume` symbol for resuming
// an "exception", but for MSVC we want to force SEH. This means that we
// can't actually have the personality function be our standard
// `rust_eh_personality` function, but rather we wired it up to the
// CRT's custom personality function, which forces LLVM to consider
// landing pads as "landing pads for SEH".
if let Some(llpersonality) = self.local().eh_personality.get() {
return llpersonality
}
let tcx = self.tcx();
let llfn = match tcx.lang_items.eh_personality() {
Some(def_id) if !base::wants_msvc_seh(self.sess()) => {
Callee::def(self, def_id, tcx.intern_substs(&[])).reify(self)
}
_ => {
let name = if base::wants_msvc_seh(self.sess()) {
"__CxxFrameHandler3"
} else {
"rust_eh_personality"
};
let fty = Type::variadic_func(&[], &Type::i32(self));
declare::declare_cfn(self, name, fty)
}
};
self.local().eh_personality.set(Some(llfn));
llfn
}
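A hedged usage sketch of where the cached value above ends up (illustrative only; the surrounding landing-pad plumbing lives in cleanup code not shown in this hunk):

    // when a cleanup block needs a landing pad (sketch):
    let llpersonality = ccx.eh_personality();
    // `llpersonality` is attached to the function's landing pads; on MSVC
    // targets the declared *name* __CxxFrameHandler3 is what makes LLVM
    // emit SEH-style unwind tables instead of Dwarf ones, per the comment.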
// Returns a ValueRef of the "eh_unwind_resume" lang item if one is defined,
// otherwise declares it as an external function.
pub fn eh_unwind_resume(&self) -> ValueRef {
use attributes;
let unwresume = &self.local().eh_unwind_resume;
if let Some(llfn) = unwresume.get() {
return llfn;
}

let tcx = self.tcx();
assert!(self.sess().target.target.options.custom_unwind_resume);
if let Some(def_id) = tcx.lang_items.eh_unwind_resume() {
let llfn = Callee::def(self, def_id, tcx.intern_substs(&[])).reify(self);
unwresume.set(Some(llfn));
return llfn;
}

let ty = tcx.mk_fn_ptr(tcx.mk_bare_fn(ty::BareFnTy {
unsafety: hir::Unsafety::Unsafe,
abi: Abi::C,
sig: ty::Binder(tcx.mk_fn_sig(
iter::once(tcx.mk_mut_ptr(tcx.types.u8)),
tcx.types.never,
false
)),
}));

let llfn = declare::declare_fn(self, "rust_eh_unwind_resume", ty);
attributes::unwind(llfn, true);
unwresume.set(Some(llfn));
llfn
}
}

pub struct TypeOfDepthLock<'a, 'tcx: 'a>(&'a LocalCrateContext<'tcx>);

@ -44,8 +44,8 @@ impl MirDebugScope {

/// Produce DIScope DIEs for each MIR Scope which has variables defined in it.
/// If debuginfo is disabled, the returned vector is empty.
pub fn create_mir_scopes(fcx: &FunctionContext) -> IndexVec<VisibilityScope, MirDebugScope> {
let mir = fcx.mir();
pub fn create_mir_scopes(fcx: &FunctionContext, mir: &Mir, debug_context: &FunctionDebugContext)
-> IndexVec<VisibilityScope, MirDebugScope> {
let null_scope = MirDebugScope {
scope_metadata: ptr::null_mut(),
file_start_pos: BytePos(0),
@ -53,8 +53,8 @@ pub fn create_mir_scopes(fcx: &FunctionContext) -> IndexVec<VisibilityScope, Mir
};
let mut scopes = IndexVec::from_elem(null_scope, &mir.visibility_scopes);

let fn_metadata = match fcx.debug_context {
FunctionDebugContext::RegularContext(box ref data) => data.fn_metadata,
let fn_metadata = match *debug_context {
FunctionDebugContext::RegularContext(ref data) => data.fn_metadata,
FunctionDebugContext::DebugInfoDisabled |
FunctionDebugContext::FunctionWithoutDebugInfo => {
return scopes;

@ -13,37 +13,26 @@
use llvm;

use common::{C_bytes, CrateContext, C_i32};
use builder::Builder;
use declare;
use type_::Type;
use session::config::NoDebugInfo;

use std::ffi::CString;
use std::ptr;
use syntax::attr;


/// Inserts a side-effect free instruction sequence that makes sure that the
/// .debug_gdb_scripts global is referenced, so it isn't removed by the linker.
pub fn insert_reference_to_gdb_debug_scripts_section_global(ccx: &CrateContext) {
pub fn insert_reference_to_gdb_debug_scripts_section_global(ccx: &CrateContext, builder: &Builder) {
if needs_gdb_debug_scripts_section(ccx) {
let empty = CString::new("").unwrap();
let gdb_debug_scripts_section_global =
get_or_insert_gdb_debug_scripts_section_global(ccx);
let gdb_debug_scripts_section_global = get_or_insert_gdb_debug_scripts_section_global(ccx);
// Load just the first byte as that's all that's necessary to force
// LLVM to keep around the reference to the global.
let indices = [C_i32(ccx, 0), C_i32(ccx, 0)];
let element = builder.inbounds_gep(gdb_debug_scripts_section_global, &indices);
let volative_load_instruction = builder.volatile_load(element);
unsafe {
// Load just the first byte as that's all that's necessary to force
// LLVM to keep around the reference to the global.
let indices = [C_i32(ccx, 0), C_i32(ccx, 0)];
let element =
llvm::LLVMBuildInBoundsGEP(ccx.raw_builder(),
gdb_debug_scripts_section_global,
indices.as_ptr(),
indices.len() as ::libc::c_uint,
empty.as_ptr());
let volative_load_instruction =
llvm::LLVMBuildLoad(ccx.raw_builder(),
element,
empty.as_ptr());
llvm::LLVMSetVolatile(volative_load_instruction, llvm::True);
llvm::LLVMSetAlignment(volative_load_instruction, 1);
}
}
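Under the new signature the caller supplies the Builder explicitly; a minimal hedged sketch of a call site (assuming `Builder::new`, which this commit shows elsewhere, as the way to obtain one):

    let builder = Builder::new(ccx);
    insert_reference_to_gdb_debug_scripts_section_global(ccx, &builder);

The effect is unchanged from the removed raw-FFI version: a `getelementptr inbounds` to byte 0 of the global plus a volatile, align-1, one-byte load. Because the load is volatile the optimizer must keep it even though its result is unused, and the live reference keeps the linker from stripping the section.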
@ -27,7 +27,7 @@ use rustc::hir::def_id::DefId;
use rustc::ty::subst::Substs;

use abi::Abi;
use common::{CrateContext, FunctionContext, Block, BlockAndBuilder};
use common::{CrateContext, BlockAndBuilder};
use monomorphize::{self, Instance};
use rustc::ty::{self, Ty};
use rustc::mir;
@ -55,6 +55,7 @@ pub use self::create_scope_map::{create_mir_scopes, MirDebugScope};
pub use self::source_loc::start_emitting_source_locations;
pub use self::metadata::create_global_var_metadata;
pub use self::metadata::extend_scope_to_file;
pub use self::source_loc::set_source_location;

#[allow(non_upper_case_globals)]
const DW_TAG_auto_variable: c_uint = 0x100;
@ -65,7 +66,6 @@ const DW_TAG_arg_variable: c_uint = 0x101;
pub struct CrateDebugContext<'tcx> {
llcontext: ContextRef,
builder: DIBuilderRef,
current_debug_location: Cell<InternalDebugLocation>,
created_files: RefCell<FxHashMap<String, DIFile>>,
created_enum_disr_types: RefCell<FxHashMap<(DefId, layout::Integer), DIType>>,

@ -83,40 +83,33 @@ impl<'tcx> CrateDebugContext<'tcx> {
let builder = unsafe { llvm::LLVMRustDIBuilderCreate(llmod) };
// DIBuilder inherits context from the module, so we'd better use the same one
let llcontext = unsafe { llvm::LLVMGetModuleContext(llmod) };
return CrateDebugContext {
CrateDebugContext {
llcontext: llcontext,
builder: builder,
current_debug_location: Cell::new(InternalDebugLocation::UnknownLocation),
created_files: RefCell::new(FxHashMap()),
created_enum_disr_types: RefCell::new(FxHashMap()),
type_map: RefCell::new(TypeMap::new()),
namespace_map: RefCell::new(DefIdMap()),
composite_types_completed: RefCell::new(FxHashSet()),
};
}
}
}

pub enum FunctionDebugContext {
RegularContext(Box<FunctionDebugContextData>),
RegularContext(FunctionDebugContextData),
DebugInfoDisabled,
FunctionWithoutDebugInfo,
}

impl FunctionDebugContext {
fn get_ref<'a>(&'a self,
span: Span)
-> &'a FunctionDebugContextData {
fn get_ref<'a>(&'a self, span: Span) -> &'a FunctionDebugContextData {
match *self {
FunctionDebugContext::RegularContext(box ref data) => data,
FunctionDebugContext::RegularContext(ref data) => data,
FunctionDebugContext::DebugInfoDisabled => {
span_bug!(span,
"{}",
FunctionDebugContext::debuginfo_disabled_message());
span_bug!(span, "{}", FunctionDebugContext::debuginfo_disabled_message());
}
FunctionDebugContext::FunctionWithoutDebugInfo => {
span_bug!(span,
"{}",
FunctionDebugContext::should_be_ignored_message());
span_bug!(span, "{}", FunctionDebugContext::should_be_ignored_message());
}
}
}
@ -134,7 +127,6 @@ impl FunctionDebugContext {
pub struct FunctionDebugContextData {
fn_metadata: DISubprogram,
source_locations_enabled: Cell<bool>,
source_location_override: Cell<bool>,
}

pub enum VariableAccess<'a> {
@ -197,18 +189,6 @@ pub fn finalize(cx: &CrateContext) {
};
}

/// Creates a function-specific debug context for a function w/o debuginfo.
pub fn empty_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>)
-> FunctionDebugContext {
if cx.sess().opts.debuginfo == NoDebugInfo {
return FunctionDebugContext::DebugInfoDisabled;
}

// Clear the debug location so we don't assign them in the function prelude.
source_loc::set_debug_location(cx, None, UnknownLocation);
FunctionDebugContext::FunctionWithoutDebugInfo
}

/// Creates the function-specific debug context.
///
/// Returns the FunctionDebugContext for the function which holds state needed
@ -225,15 +205,18 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
return FunctionDebugContext::DebugInfoDisabled;
}

// Clear the debug location so we don't assign them in the function prelude.
// Do this here already, in case we do an early exit from this function.
source_loc::set_debug_location(cx, None, UnknownLocation);
for attr in cx.tcx().get_attrs(instance.def).iter() {
if attr.check_name("no_debug") {
return FunctionDebugContext::FunctionWithoutDebugInfo;
}
}

let containing_scope = get_containing_scope(cx, instance);
let span = mir.span;

// This can be the case for functions inlined from another crate
if span == syntax_pos::DUMMY_SP {
// FIXME(simulacrum): Probably can't happen; remove.
return FunctionDebugContext::FunctionWithoutDebugInfo;
}

@ -293,10 +276,9 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
};

// Initialize fn debug context (including scope map and namespace map)
let fn_debug_context = box FunctionDebugContextData {
let fn_debug_context = FunctionDebugContextData {
fn_metadata: fn_metadata,
source_locations_enabled: Cell::new(false),
source_location_override: Cell::new(false),
};

return FunctionDebugContext::RegularContext(fn_debug_context);
@ -441,14 +423,15 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
}
}

pub fn declare_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
variable_name: ast::Name,
variable_type: Ty<'tcx>,
scope_metadata: DIScope,
variable_access: VariableAccess,
variable_kind: VariableKind,
span: Span) {
let cx: &CrateContext = bcx.ccx();
pub fn declare_local<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
dbg_context: &FunctionDebugContext,
variable_name: ast::Name,
variable_type: Ty<'tcx>,
scope_metadata: DIScope,
variable_access: VariableAccess,
variable_kind: VariableKind,
span: Span) {
let cx = bcx.ccx;

let file = span_start(cx, span).file;
let filename = file.name.clone();
@ -483,10 +466,10 @@ pub fn declare_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
align as u64,
)
};
source_loc::set_debug_location(cx, None,
source_loc::set_debug_location(bcx,
InternalDebugLocation::new(scope_metadata, loc.line, loc.col.to_usize()));
unsafe {
let debug_loc = llvm::LLVMGetCurrentDebugLocation(cx.raw_builder());
let debug_loc = llvm::LLVMGetCurrentDebugLocation(bcx.llbuilder);
let instr = llvm::LLVMRustDIBuilderInsertDeclareAtEnd(
DIB(cx),
alloca,
@ -494,38 +477,18 @@ pub fn declare_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
address_operations.as_ptr(),
address_operations.len() as c_uint,
debug_loc,
bcx.llbb);
bcx.llbb());

llvm::LLVMSetInstDebugLocation(::build::B(bcx).llbuilder, instr);
llvm::LLVMSetInstDebugLocation(bcx.llbuilder, instr);
}
}
}

match variable_kind {
ArgumentVariable(_) | CapturedVariable => {
assert!(!bcx.fcx
.debug_context
.get_ref(span)
.source_locations_enabled
.get());
source_loc::set_debug_location(cx, None, UnknownLocation);
assert!(!dbg_context.get_ref(span).source_locations_enabled.get());
source_loc::set_debug_location(bcx, UnknownLocation);
}
_ => { /* nothing to do */ }
}
}

#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum DebugLoc {
ScopeAt(DIScope, Span),
None
}

impl DebugLoc {
pub fn apply(self, fcx: &FunctionContext) {
source_loc::set_source_location(fcx, None, self);
}

pub fn apply_to_bcx(self, bcx: &BlockAndBuilder) {
source_loc::set_source_location(bcx.fcx(), Some(bcx), self);
}
}

@ -11,57 +11,40 @@
use self::InternalDebugLocation::*;

use super::utils::{debug_context, span_start};
use super::metadata::{UNKNOWN_COLUMN_NUMBER};
use super::{FunctionDebugContext, DebugLoc};
use super::metadata::UNKNOWN_COLUMN_NUMBER;
use super::FunctionDebugContext;

use llvm;
use llvm::debuginfo::DIScope;
use builder::Builder;
use common::{CrateContext, FunctionContext};

use libc::c_uint;
use std::ptr;
use syntax_pos::Pos;
use syntax_pos::{Span, Pos};

/// Sets the current debug location at the beginning of the span.
///
/// Maps to a call to llvm::LLVMSetCurrentDebugLocation(...).
pub fn set_source_location(fcx: &FunctionContext,
builder: Option<&Builder>,
debug_loc: DebugLoc) {
let builder = builder.map(|b| b.llbuilder);
let function_debug_context = match fcx.debug_context {
pub fn set_source_location(
debug_context: &FunctionDebugContext, builder: &Builder, scope: DIScope, span: Span
) {
let function_debug_context = match *debug_context {
FunctionDebugContext::DebugInfoDisabled => return,
FunctionDebugContext::FunctionWithoutDebugInfo => {
set_debug_location(fcx.ccx, builder, UnknownLocation);
set_debug_location(builder, UnknownLocation);
return;
}
FunctionDebugContext::RegularContext(box ref data) => data
FunctionDebugContext::RegularContext(ref data) => data
};

if function_debug_context.source_location_override.get() {
// Just ignore any attempts to set a new debug location while
// the override is active.
return;
}

let dbg_loc = if function_debug_context.source_locations_enabled.get() {
let (scope, span) = match debug_loc {
DebugLoc::ScopeAt(scope, span) => (scope, span),
DebugLoc::None => {
set_debug_location(fcx.ccx, builder, UnknownLocation);
return;
}
};

debug!("set_source_location: {}",
fcx.ccx.sess().codemap().span_to_string(span));
let loc = span_start(fcx.ccx, span);
debug!("set_source_location: {}", builder.ccx.sess().codemap().span_to_string(span));
let loc = span_start(builder.ccx, span);
InternalDebugLocation::new(scope, loc.line, loc.col.to_usize())
} else {
UnknownLocation
};
set_debug_location(fcx.ccx, builder, dbg_loc);
set_debug_location(builder, dbg_loc);
}

/// Enables emitting source locations for the given functions.
@ -70,9 +53,9 @@ pub fn set_source_location(fcx: &FunctionContext,
/// they are disabled when beginning to translate a new function. This functions
/// switches source location emitting on and must therefore be called before the
/// first real statement/expression of the function is translated.
pub fn start_emitting_source_locations(fcx: &FunctionContext) {
match fcx.debug_context {
FunctionDebugContext::RegularContext(box ref data) => {
pub fn start_emitting_source_locations(dbg_context: &FunctionDebugContext) {
match *dbg_context {
FunctionDebugContext::RegularContext(ref data) => {
data.source_locations_enabled.set(true)
},
_ => { /* safe to ignore */ }
@ -96,15 +79,7 @@ impl InternalDebugLocation {
}
}

pub fn set_debug_location(cx: &CrateContext,
builder: Option<llvm::BuilderRef>,
debug_location: InternalDebugLocation) {
if builder.is_none() {
if debug_location == debug_context(cx).current_debug_location.get() {
return;
}
}

pub fn set_debug_location(builder: &Builder, debug_location: InternalDebugLocation) {
let metadata_node = match debug_location {
KnownLocation { scope, line, .. } => {
// Always set the column to zero like Clang and GCC
@ -113,7 +88,7 @@ pub fn set_debug_location(cx: &CrateContext,

unsafe {
llvm::LLVMRustDIBuilderCreateDebugLocation(
debug_context(cx).llcontext,
debug_context(builder.ccx).llcontext,
line as c_uint,
col as c_uint,
scope,
@ -126,12 +101,7 @@ pub fn set_debug_location(cx: &CrateContext,
}
};

if builder.is_none() {
debug_context(cx).current_debug_location.set(debug_location);
}

let builder = builder.unwrap_or_else(|| cx.raw_builder());
unsafe {
llvm::LLVMSetCurrentDebugLocation(builder, metadata_node);
llvm::LLVMSetCurrentDebugLocation(builder.llbuilder, metadata_node);
}
}

@ -19,13 +19,11 @@ use llvm::{ValueRef, get_param};
use middle::lang_items::ExchangeFreeFnLangItem;
use rustc::ty::subst::{Substs};
use rustc::traits;
use rustc::ty::{self, AdtKind, Ty, TyCtxt, TypeFoldable};
use rustc::ty::{self, AdtKind, Ty, TypeFoldable};
use adt;
use base::*;
use build::*;
use callee::{Callee};
use callee::Callee;
use common::*;
use debuginfo::DebugLoc;
use machine::*;
use monomorphize;
use trans_item::TransItem;
@ -34,69 +32,50 @@ use type_of::{type_of, sizing_type_of, align_of};
use type_::Type;
use value::Value;
use Disr;
use cleanup::CleanupScope;

use arena::TypedArena;
use syntax_pos::DUMMY_SP;

pub fn trans_exchange_free_dyn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
v: ValueRef,
size: ValueRef,
align: ValueRef,
debug_loc: DebugLoc)
-> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_exchange_free");

pub fn trans_exchange_free_dyn<'a, 'tcx>(
bcx: &BlockAndBuilder<'a, 'tcx>,
v: ValueRef,
size: ValueRef,
align: ValueRef
) {
let def_id = langcall(bcx.tcx(), None, "", ExchangeFreeFnLangItem);
let args = [PointerCast(bcx, v, Type::i8p(bcx.ccx())), size, align];
Callee::def(bcx.ccx(), def_id, bcx.tcx().intern_substs(&[]))
.call(bcx, debug_loc, &args, None).bcx
let args = [bcx.pointercast(v, Type::i8p(bcx.ccx)), size, align];
let callee = Callee::def(bcx.ccx, def_id, bcx.tcx().intern_substs(&[]));

let ccx = bcx.ccx;
let fn_ty = callee.direct_fn_type(ccx, &[]);

let llret = bcx.call(callee.reify(ccx), &args[..], None);
fn_ty.apply_attrs_callsite(llret);
}
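Note the calling pattern introduced here, which recurs throughout this commit as the replacement for the removed `Callee::call`: obtain the `FnType`, emit a plain builder call, then re-attach the ABI attributes at the call site. A condensed sketch of the shape (all names as in the surrounding code):

    let fn_ty = callee.direct_fn_type(ccx, &[]);
    let llret = bcx.call(callee.reify(ccx), &args, None);
    fn_ty.apply_attrs_callsite(llret);

Skipping `apply_attrs_callsite` would silently drop call-site attributes (for example `noalias` or `sret`) that the old wrapper used to apply.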
pub fn trans_exchange_free<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
v: ValueRef,
size: u64,
align: u32,
debug_loc: DebugLoc)
-> Block<'blk, 'tcx> {
trans_exchange_free_dyn(cx,
v,
C_uint(cx.ccx(), size),
C_uint(cx.ccx(), align),
debug_loc)
}

pub fn trans_exchange_free_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
ptr: ValueRef,
content_ty: Ty<'tcx>,
debug_loc: DebugLoc)
-> Block<'blk, 'tcx> {
assert!(type_is_sized(bcx.ccx().tcx(), content_ty));
let sizing_type = sizing_type_of(bcx.ccx(), content_ty);
let content_size = llsize_of_alloc(bcx.ccx(), sizing_type);
pub fn trans_exchange_free_ty<'a, 'tcx>(
bcx: &BlockAndBuilder<'a, 'tcx>, ptr: ValueRef, content_ty: Ty<'tcx>
) {
assert!(bcx.ccx.shared().type_is_sized(content_ty));
let sizing_type = sizing_type_of(bcx.ccx, content_ty);
let content_size = llsize_of_alloc(bcx.ccx, sizing_type);

// `Box<ZeroSizeType>` does not allocate.
if content_size != 0 {
let content_align = align_of(bcx.ccx(), content_ty);
trans_exchange_free(bcx, ptr, content_size, content_align, debug_loc)
} else {
bcx
let content_align = align_of(bcx.ccx, content_ty);
let ccx = bcx.ccx;
trans_exchange_free_dyn(bcx, ptr, C_uint(ccx, content_size), C_uint(ccx, content_align));
}
}

pub fn type_needs_drop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
ty: Ty<'tcx>) -> bool {
tcx.type_needs_drop_given_env(ty, &tcx.empty_parameter_environment())
}

pub fn get_drop_glue_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
t: Ty<'tcx>) -> Ty<'tcx> {
pub fn get_drop_glue_type<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Ty<'tcx> {
assert!(t.is_normalized_for_trans());

let t = tcx.erase_regions(&t);
let t = scx.tcx().erase_regions(&t);

// Even if there is no dtor for t, there might be one deeper down and we
// might need to pass in the vtable ptr.
if !type_is_sized(tcx, t) {
if !scx.type_is_sized(t) {
return t;
}

@ -109,17 +88,16 @@ pub fn get_drop_glue_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
// returned `tcx.types.i8` does not appear unsound. The impact on
// code quality is unknown at this time.)

if !type_needs_drop(tcx, t) {
return tcx.types.i8;
if !scx.type_needs_drop(t) {
return scx.tcx().types.i8;
}
match t.sty {
ty::TyBox(typ) if !type_needs_drop(tcx, typ)
&& type_is_sized(tcx, typ) => {
tcx.infer_ctxt(None, None, traits::Reveal::All).enter(|infcx| {
ty::TyBox(typ) if !scx.type_needs_drop(typ) && scx.type_is_sized(typ) => {
scx.tcx().infer_ctxt(None, None, traits::Reveal::All).enter(|infcx| {
let layout = t.layout(&infcx).unwrap();
if layout.size(&tcx.data_layout).bytes() == 0 {
if layout.size(&scx.tcx().data_layout).bytes() == 0 {
// `Box<ZeroSizeType>` does not allocate.
tcx.types.i8
scx.tcx().types.i8
} else {
t
}
@ -129,56 +107,37 @@ pub fn get_drop_glue_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
}
}

pub fn drop_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
v: ValueRef,
t: Ty<'tcx>,
debug_loc: DebugLoc) -> Block<'blk, 'tcx> {
drop_ty_core(bcx, v, t, debug_loc, false)
fn drop_ty<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, v: ValueRef, t: Ty<'tcx>) {
call_drop_glue(bcx, v, t, false, None)
}

pub fn drop_ty_core<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
v: ValueRef,
t: Ty<'tcx>,
debug_loc: DebugLoc,
skip_dtor: bool)
-> Block<'blk, 'tcx> {
pub fn call_drop_glue<'a, 'tcx>(
bcx: &BlockAndBuilder<'a, 'tcx>,
v: ValueRef,
t: Ty<'tcx>,
skip_dtor: bool,
funclet: Option<&'a Funclet>,
) {
// NB: v is an *alias* of type t here, not a direct value.
debug!("drop_ty_core(t={:?}, skip_dtor={})", t, skip_dtor);
let _icx = push_ctxt("drop_ty");
if bcx.fcx.type_needs_drop(t) {
let ccx = bcx.ccx();
debug!("call_drop_glue(t={:?}, skip_dtor={})", t, skip_dtor);
if bcx.ccx.shared().type_needs_drop(t) {
let ccx = bcx.ccx;
let g = if skip_dtor {
DropGlueKind::TyContents(t)
} else {
DropGlueKind::Ty(t)
};
let glue = get_drop_glue_core(ccx, g);
let glue_type = get_drop_glue_type(ccx.tcx(), t);
let glue_type = get_drop_glue_type(ccx.shared(), t);
let ptr = if glue_type != t {
PointerCast(bcx, v, type_of(ccx, glue_type).ptr_to())
bcx.pointercast(v, type_of(ccx, glue_type).ptr_to())
} else {
v
};

// No drop-hint ==> call standard drop glue
Call(bcx, glue, &[ptr], debug_loc);
bcx.call(glue, &[ptr], funclet.map(|b| b.bundle()));
}
bcx
}

pub fn drop_ty_immediate<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
v: ValueRef,
t: Ty<'tcx>,
debug_loc: DebugLoc,
skip_dtor: bool)
-> Block<'blk, 'tcx> {
let _icx = push_ctxt("drop_ty_immediate");
let vp = alloc_ty(bcx, t, "");
call_lifetime_start(bcx, vp);
store_ty(bcx, v, vp, t);
let bcx = drop_ty_core(bcx, vp, t, debug_loc, skip_dtor);
call_lifetime_end(bcx, vp);
bcx
}

pub fn get_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> ValueRef {
@ -212,9 +171,8 @@ impl<'tcx> DropGlueKind<'tcx> {
}
}

fn get_drop_glue_core<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
g: DropGlueKind<'tcx>) -> ValueRef {
let g = g.map_ty(|t| get_drop_glue_type(ccx.tcx(), t));
fn get_drop_glue_core<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKind<'tcx>) -> ValueRef {
let g = g.map_ty(|t| get_drop_glue_type(ccx.shared(), t));
match ccx.drop_glues().borrow().get(&g) {
Some(&(glue, _)) => glue,
None => {
@ -226,17 +184,12 @@ fn get_drop_glue_core<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
}
}

pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
g: DropGlueKind<'tcx>) {
let tcx = ccx.tcx();
assert_eq!(g.ty(), get_drop_glue_type(tcx, g.ty()));
let (llfn, fn_ty) = ccx.drop_glues().borrow().get(&g).unwrap().clone();
pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKind<'tcx>) {
assert_eq!(g.ty(), get_drop_glue_type(ccx.shared(), g.ty()));
let (llfn, _) = ccx.drop_glues().borrow().get(&g).unwrap().clone();

let (arena, fcx): (TypedArena<_>, FunctionContext);
arena = TypedArena::new();
fcx = FunctionContext::new(ccx, llfn, fn_ty, None, &arena);

let bcx = fcx.init(false);
let fcx = FunctionContext::new(ccx, llfn);
let bcx = fcx.get_entry_block();

ccx.stats().n_glues_created.set(ccx.stats().n_glues_created.get() + 1);
// All glue functions take values passed *by alias*; this is a
@ -247,19 +200,91 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
// llfn is expected be declared to take a parameter of the appropriate
// type, so we don't need to explicitly cast the function parameter.

let bcx = make_drop_glue(bcx, get_param(llfn, 0), g);
fcx.finish(bcx, DebugLoc::None);
// NB: v0 is an *alias* of type t here, not a direct value.
// Only drop the value when it ... well, we used to check for
// non-null, (and maybe we need to continue doing so), but we now
// must definitely check for special bit-patterns corresponding to
// the special dtor markings.
let v0 = get_param(llfn, 0);
let t = g.ty();

let skip_dtor = match g {
DropGlueKind::Ty(_) => false,
DropGlueKind::TyContents(_) => true
};

let bcx = match t.sty {
ty::TyBox(content_ty) => {
// Support for TyBox is built-in and its drop glue is
// special. It may move to library and have Drop impl. As
// a safe-guard, assert TyBox not used with TyContents.
assert!(!skip_dtor);
if !bcx.ccx.shared().type_is_sized(content_ty) {
let llval = get_dataptr(&bcx, v0);
let llbox = bcx.load(llval);
drop_ty(&bcx, v0, content_ty);
// FIXME(#36457) -- we should pass unsized values to drop glue as two arguments
let info = get_meta(&bcx, v0);
let info = bcx.load(info);
let (llsize, llalign) = size_and_align_of_dst(&bcx, content_ty, info);

// `Box<ZeroSizeType>` does not allocate.
let needs_free = bcx.icmp(llvm::IntNE, llsize, C_uint(bcx.ccx, 0u64));
if const_to_opt_uint(needs_free) == Some(0) {
bcx
} else {
let next_cx = bcx.fcx().build_new_block("next");
let cond_cx = bcx.fcx().build_new_block("cond");
bcx.cond_br(needs_free, cond_cx.llbb(), next_cx.llbb());
trans_exchange_free_dyn(&cond_cx, llbox, llsize, llalign);
cond_cx.br(next_cx.llbb());
next_cx
}
} else {
let llval = v0;
let llbox = bcx.load(llval);
drop_ty(&bcx, llbox, content_ty);
trans_exchange_free_ty(&bcx, llbox, content_ty);
bcx
}
}
ty::TyDynamic(..) => {
// No support in vtable for distinguishing destroying with
// versus without calling Drop::drop. Assert caller is
// okay with always calling the Drop impl, if any.
// FIXME(#36457) -- we should pass unsized values to drop glue as two arguments
assert!(!skip_dtor);
let data_ptr = get_dataptr(&bcx, v0);
let vtable_ptr = bcx.load(get_meta(&bcx, v0));
let dtor = bcx.load(vtable_ptr);
bcx.call(dtor, &[bcx.pointercast(bcx.load(data_ptr), Type::i8p(bcx.ccx))], None);
bcx
}
ty::TyAdt(def, ..) if def.dtor_kind().is_present() && !skip_dtor => {
trans_custom_dtor(bcx, t, v0, def.is_union())
}
ty::TyAdt(def, ..) if def.is_union() => {
bcx
}
_ => {
if bcx.ccx.shared().type_needs_drop(t) {
drop_structural_ty(bcx, v0, t)
} else {
bcx
}
}
};
bcx.ret_void();
}

fn trans_custom_dtor<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
t: Ty<'tcx>,
v0: ValueRef,
shallow_drop: bool)
-> Block<'blk, 'tcx>
fn trans_custom_dtor<'a, 'tcx>(mut bcx: BlockAndBuilder<'a, 'tcx>,
t: Ty<'tcx>,
v0: ValueRef,
shallow_drop: bool)
-> BlockAndBuilder<'a, 'tcx>
{
debug!("trans_custom_dtor t: {}", t);
let tcx = bcx.tcx();
let mut bcx = bcx;

let def = t.ty_adt_def().unwrap();

@ -269,23 +294,23 @@ fn trans_custom_dtor<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
//
// FIXME (#14875) panic-in-drop semantics might be unsupported; we
// might well consider changing below to more direct code.
let contents_scope = bcx.fcx.push_custom_cleanup_scope();

// Issue #23611: schedule cleanup of contents, re-inspecting the
// discriminant (if any) in case of variant swap in drop code.
if !shallow_drop {
bcx.fcx.schedule_drop_adt_contents(contents_scope, v0, t);
}
let contents_scope = if !shallow_drop {
bcx.fcx().schedule_drop_adt_contents(v0, t)
} else {
CleanupScope::noop()
};

let (sized_args, unsized_args);
let args: &[ValueRef] = if type_is_sized(tcx, t) {
let args: &[ValueRef] = if bcx.ccx.shared().type_is_sized(t) {
sized_args = [v0];
&sized_args
} else {
// FIXME(#36457) -- we should pass unsized values to drop glue as two arguments
unsized_args = [
Load(bcx, get_dataptr(bcx, v0)),
Load(bcx, get_meta(bcx, v0))
bcx.load(get_dataptr(&bcx, v0)),
bcx.load(get_meta(&bcx, v0))
];
&unsized_args
};
@ -294,39 +319,44 @@ fn trans_custom_dtor<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
def_id: tcx.lang_items.drop_trait().unwrap(),
substs: tcx.mk_substs_trait(t, &[])
});
let vtbl = match fulfill_obligation(bcx.ccx().shared(), DUMMY_SP, trait_ref) {
let vtbl = match fulfill_obligation(bcx.ccx.shared(), DUMMY_SP, trait_ref) {
traits::VtableImpl(data) => data,
_ => bug!("dtor for {:?} is not an impl???", t)
};
let dtor_did = def.destructor().unwrap();
bcx = Callee::def(bcx.ccx(), dtor_did, vtbl.substs)
.call(bcx, DebugLoc::None, args, None).bcx;

bcx.fcx.pop_and_trans_custom_cleanup_scope(bcx, contents_scope)
let callee = Callee::def(bcx.ccx, dtor_did, vtbl.substs);
let fn_ty = callee.direct_fn_type(bcx.ccx, &[]);
let llret;
if let Some(landing_pad) = contents_scope.landing_pad {
let normal_bcx = bcx.fcx().build_new_block("normal-return");
llret = bcx.invoke(callee.reify(bcx.ccx), args, normal_bcx.llbb(), landing_pad, None);
bcx = normal_bcx;
} else {
llret = bcx.call(callee.reify(bcx.ccx), args, None);
}
fn_ty.apply_attrs_callsite(llret);
contents_scope.trans(&bcx);
bcx
}

pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
t: Ty<'tcx>, info: ValueRef)
-> (ValueRef, ValueRef) {
pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
t: Ty<'tcx>, info: ValueRef)
-> (ValueRef, ValueRef) {
debug!("calculate size of DST: {}; with lost info: {:?}",
t, Value(info));
if type_is_sized(bcx.tcx(), t) {
let sizing_type = sizing_type_of(bcx.ccx(), t);
let size = llsize_of_alloc(bcx.ccx(), sizing_type);
let align = align_of(bcx.ccx(), t);
if bcx.ccx.shared().type_is_sized(t) {
let sizing_type = sizing_type_of(bcx.ccx, t);
let size = llsize_of_alloc(bcx.ccx, sizing_type);
let align = align_of(bcx.ccx, t);
debug!("size_and_align_of_dst t={} info={:?} size: {} align: {}",
t, Value(info), size, align);
let size = C_uint(bcx.ccx(), size);
let align = C_uint(bcx.ccx(), align);
let size = C_uint(bcx.ccx, size);
let align = C_uint(bcx.ccx, align);
return (size, align);
}
if bcx.is_unreachable() {
let llty = Type::int(bcx.ccx());
return (C_undef(llty), C_undef(llty));
}
match t.sty {
ty::TyAdt(def, substs) => {
let ccx = bcx.ccx();
let ccx = bcx.ccx;
// First get the size of all statically known fields.
// Don't use type_of::sizing_type_of because that expects t to be sized,
// and it also rounds up to alignment, which we want to avoid,
@ -389,7 +419,7 @@ pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
//
// `(size + (align-1)) & -align`

let addend = bcx.sub(align, C_uint(bcx.ccx(), 1_u64));
let addend = bcx.sub(align, C_uint(bcx.ccx, 1_u64));
let size = bcx.and(bcx.add(size, addend), bcx.neg(align));

(size, align)
@ -397,7 +427,7 @@ pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
ty::TyDynamic(..) => {
// info points to the vtable and the second entry in the vtable is the
// dynamic size of the object.
let info = bcx.pointercast(info, Type::int(bcx.ccx()).ptr_to());
let info = bcx.pointercast(info, Type::int(bcx.ccx).ptr_to());
let size_ptr = bcx.gepi(info, &[1]);
let align_ptr = bcx.gepi(info, &[2]);
(bcx.load(size_ptr), bcx.load(align_ptr))
@ -406,126 +436,40 @@ pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
let unit_ty = t.sequence_element_type(bcx.tcx());
// The info in this case is the length of the str, so the size is that
// times the unit size.
let llunit_ty = sizing_type_of(bcx.ccx(), unit_ty);
let unit_align = llalign_of_min(bcx.ccx(), llunit_ty);
let unit_size = llsize_of_alloc(bcx.ccx(), llunit_ty);
(bcx.mul(info, C_uint(bcx.ccx(), unit_size)),
C_uint(bcx.ccx(), unit_align))
let llunit_ty = sizing_type_of(bcx.ccx, unit_ty);
let unit_align = llalign_of_min(bcx.ccx, llunit_ty);
let unit_size = llsize_of_alloc(bcx.ccx, llunit_ty);
(bcx.mul(info, C_uint(bcx.ccx, unit_size)),
C_uint(bcx.ccx, unit_align))
}
_ => bug!("Unexpected unsized type, found {}", t)
}
}
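The `(size + (align-1)) & -align` idiom used above rounds `size` up to the next multiple of `align` (a power of two). A standalone worked check, not part of the commit:

    fn round_up(size: u64, align: u64) -> u64 {
        // `align` must be a power of two; in two's complement, -align == !(align - 1)
        (size + (align - 1)) & align.wrapping_neg()
    }

    fn main() {
        assert_eq!(round_up(5, 4), 8);  // (5 + 3) & !3 == 8
        assert_eq!(round_up(8, 4), 8);  // already-aligned sizes are unchanged
    }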
fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
v0: ValueRef,
g: DropGlueKind<'tcx>)
-> Block<'blk, 'tcx> {
let t = g.ty();

let skip_dtor = match g { DropGlueKind::Ty(_) => false, DropGlueKind::TyContents(_) => true };
// NB: v0 is an *alias* of type t here, not a direct value.
let _icx = push_ctxt("make_drop_glue");

// Only drop the value when it ... well, we used to check for
// non-null, (and maybe we need to continue doing so), but we now
// must definitely check for special bit-patterns corresponding to
// the special dtor markings.

match t.sty {
ty::TyBox(content_ty) => {
// Support for TyBox is built-in and its drop glue is
// special. It may move to library and have Drop impl. As
// a safe-guard, assert TyBox not used with TyContents.
assert!(!skip_dtor);
if !type_is_sized(bcx.tcx(), content_ty) {
let llval = get_dataptr(bcx, v0);
let llbox = Load(bcx, llval);
let bcx = drop_ty(bcx, v0, content_ty, DebugLoc::None);
// FIXME(#36457) -- we should pass unsized values to drop glue as two arguments
let info = get_meta(bcx, v0);
let info = Load(bcx, info);
let (llsize, llalign) =
size_and_align_of_dst(&bcx.build(), content_ty, info);

// `Box<ZeroSizeType>` does not allocate.
let needs_free = ICmp(bcx,
llvm::IntNE,
llsize,
C_uint(bcx.ccx(), 0u64),
DebugLoc::None);
with_cond(bcx, needs_free, |bcx| {
trans_exchange_free_dyn(bcx, llbox, llsize, llalign, DebugLoc::None)
})
} else {
let llval = v0;
let llbox = Load(bcx, llval);
let bcx = drop_ty(bcx, llbox, content_ty, DebugLoc::None);
trans_exchange_free_ty(bcx, llbox, content_ty, DebugLoc::None)
}
}
ty::TyDynamic(..) => {
// No support in vtable for distinguishing destroying with
// versus without calling Drop::drop. Assert caller is
// okay with always calling the Drop impl, if any.
// FIXME(#36457) -- we should pass unsized values to drop glue as two arguments
assert!(!skip_dtor);
let data_ptr = get_dataptr(bcx, v0);
let vtable_ptr = Load(bcx, get_meta(bcx, v0));
let dtor = Load(bcx, vtable_ptr);
Call(bcx,
dtor,
&[PointerCast(bcx, Load(bcx, data_ptr), Type::i8p(bcx.ccx()))],
DebugLoc::None);
bcx
}
ty::TyAdt(def, ..) if def.dtor_kind().is_present() && !skip_dtor => {
trans_custom_dtor(bcx, t, v0, def.is_union())
}
ty::TyAdt(def, ..) if def.is_union() => {
bcx
}
_ => {
if bcx.fcx.type_needs_drop(t) {
drop_structural_ty(bcx, v0, t)
} else {
bcx
}
}
}
}

// Iterates through the elements of a structural type, dropping them.
fn drop_structural_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
av: ValueRef,
t: Ty<'tcx>)
-> Block<'blk, 'tcx> {
let _icx = push_ctxt("drop_structural_ty");

fn iter_variant<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
t: Ty<'tcx>,
av: adt::MaybeSizedValue,
variant: &'tcx ty::VariantDef,
substs: &Substs<'tcx>)
-> Block<'blk, 'tcx> {
let _icx = push_ctxt("iter_variant");
fn drop_structural_ty<'a, 'tcx>(cx: BlockAndBuilder<'a, 'tcx>,
av: ValueRef,
t: Ty<'tcx>)
-> BlockAndBuilder<'a, 'tcx> {
fn iter_variant<'a, 'tcx>(cx: &BlockAndBuilder<'a, 'tcx>,
t: Ty<'tcx>,
av: adt::MaybeSizedValue,
variant: &'tcx ty::VariantDef,
substs: &Substs<'tcx>) {
let tcx = cx.tcx();
let mut cx = cx;

for (i, field) in variant.fields.iter().enumerate() {
let arg = monomorphize::field_ty(tcx, substs, field);
cx = drop_ty(cx,
adt::trans_field_ptr(cx, t, av, Disr::from(variant.disr_val), i),
arg, DebugLoc::None);
let field_ptr = adt::trans_field_ptr(&cx, t, av, Disr::from(variant.disr_val), i);
drop_ty(&cx, field_ptr, arg);
}
return cx;
}

let value = if type_is_sized(cx.tcx(), t) {
let value = if cx.ccx.shared().type_is_sized(t) {
adt::MaybeSizedValue::sized(av)
} else {
// FIXME(#36457) -- we should pass unsized values as two arguments
let data = Load(cx, get_dataptr(cx, av));
let info = Load(cx, get_meta(cx, av));
let data = cx.load(get_dataptr(&cx, av));
let info = cx.load(get_meta(&cx, av));
adt::MaybeSizedValue::unsized_(data, info)
};

@ -533,67 +477,65 @@ fn drop_structural_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
match t.sty {
ty::TyClosure(def_id, substs) => {
for (i, upvar_ty) in substs.upvar_tys(def_id, cx.tcx()).enumerate() {
let llupvar = adt::trans_field_ptr(cx, t, value, Disr(0), i);
cx = drop_ty(cx, llupvar, upvar_ty, DebugLoc::None);
let llupvar = adt::trans_field_ptr(&cx, t, value, Disr(0), i);
drop_ty(&cx, llupvar, upvar_ty);
}
}
ty::TyArray(_, n) => {
let base = get_dataptr(cx, value.value);
let len = C_uint(cx.ccx(), n);
let base = get_dataptr(&cx, value.value);
let len = C_uint(cx.ccx, n);
let unit_ty = t.sequence_element_type(cx.tcx());
cx = tvec::slice_for_each(cx, base, unit_ty, len,
|bb, vv| drop_ty(bb, vv, unit_ty, DebugLoc::None));
cx = tvec::slice_for_each(&cx, base, unit_ty, len, |bb, vv| drop_ty(bb, vv, unit_ty));
}
ty::TySlice(_) | ty::TyStr => {
let unit_ty = t.sequence_element_type(cx.tcx());
cx = tvec::slice_for_each(cx, value.value, unit_ty, value.meta,
|bb, vv| drop_ty(bb, vv, unit_ty, DebugLoc::None));
cx = tvec::slice_for_each(&cx, value.value, unit_ty, value.meta,
|bb, vv| drop_ty(bb, vv, unit_ty));
}
ty::TyTuple(ref args) => {
for (i, arg) in args.iter().enumerate() {
let llfld_a = adt::trans_field_ptr(cx, t, value, Disr(0), i);
cx = drop_ty(cx, llfld_a, *arg, DebugLoc::None);
let llfld_a = adt::trans_field_ptr(&cx, t, value, Disr(0), i);
drop_ty(&cx, llfld_a, *arg);
}
}
ty::TyAdt(adt, substs) => match adt.adt_kind() {
AdtKind::Struct => {
let VariantInfo { fields, discr } = VariantInfo::from_ty(cx.tcx(), t, None);
for (i, &Field(_, field_ty)) in fields.iter().enumerate() {
let llfld_a = adt::trans_field_ptr(cx, t, value, Disr::from(discr), i);
let llfld_a = adt::trans_field_ptr(&cx, t, value, Disr::from(discr), i);

let val = if type_is_sized(cx.tcx(), field_ty) {
let val = if cx.ccx.shared().type_is_sized(field_ty) {
llfld_a
} else {
// FIXME(#36457) -- we should pass unsized values as two arguments
let scratch = alloc_ty(cx, field_ty, "__fat_ptr_iter");
Store(cx, llfld_a, get_dataptr(cx, scratch));
Store(cx, value.meta, get_meta(cx, scratch));
let scratch = alloc_ty(&cx, field_ty, "__fat_ptr_iter");
cx.store(llfld_a, get_dataptr(&cx, scratch));
cx.store(value.meta, get_meta(&cx, scratch));
scratch
};
cx = drop_ty(cx, val, field_ty, DebugLoc::None);
drop_ty(&cx, val, field_ty);
}
}
AdtKind::Union => {
bug!("Union in `glue::drop_structural_ty`");
}
AdtKind::Enum => {
let fcx = cx.fcx;
let ccx = fcx.ccx;
let n_variants = adt.variants.len();

// NB: we must hit the discriminant first so that structural
// comparison know not to proceed when the discriminants differ.

match adt::trans_switch(cx, t, av, false) {
match adt::trans_switch(&cx, t, av, false) {
(adt::BranchKind::Single, None) => {
if n_variants != 0 {
assert!(n_variants == 1);
cx = iter_variant(cx, t, adt::MaybeSizedValue::sized(av),
iter_variant(&cx, t, adt::MaybeSizedValue::sized(av),
&adt.variants[0], substs);
}
}
(adt::BranchKind::Switch, Some(lldiscrim_a)) => {
cx = drop_ty(cx, lldiscrim_a, cx.tcx().types.isize, DebugLoc::None);
let tcx = cx.tcx();
drop_ty(&cx, lldiscrim_a, tcx.types.isize);

// Create a fall-through basic block for the "else" case of
// the switch instruction we're about to generate. Note that
@ -608,27 +550,23 @@ fn drop_structural_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
// from the outer function, and any other use case will only
// call this for an already-valid enum in which case the `ret
// void` will never be hit.
let ret_void_cx = fcx.new_block("enum-iter-ret-void");
RetVoid(ret_void_cx, DebugLoc::None);
let llswitch = Switch(cx, lldiscrim_a, ret_void_cx.llbb, n_variants);
let next_cx = fcx.new_block("enum-iter-next");
let ret_void_cx = cx.fcx().build_new_block("enum-iter-ret-void");
ret_void_cx.ret_void();
let llswitch = cx.switch(lldiscrim_a, ret_void_cx.llbb(), n_variants);
let next_cx = cx.fcx().build_new_block("enum-iter-next");

for variant in &adt.variants {
let variant_cx = fcx.new_block(&format!("enum-iter-variant-{}",
&variant.disr_val
.to_string()));
let case_val = adt::trans_case(cx, t, Disr::from(variant.disr_val));
AddCase(llswitch, case_val, variant_cx.llbb);
let variant_cx = iter_variant(variant_cx,
t,
value,
variant,
substs);
Br(variant_cx, next_cx.llbb, DebugLoc::None);
let variant_cx_name = format!("enum-iter-variant-{}",
&variant.disr_val.to_string());
let variant_cx = cx.fcx().build_new_block(&variant_cx_name);
let case_val = adt::trans_case(&cx, t, Disr::from(variant.disr_val));
variant_cx.add_case(llswitch, case_val, variant_cx.llbb());
iter_variant(&variant_cx, t, value, variant, substs);
variant_cx.br(next_cx.llbb());
}
cx = next_cx;
}
_ => ccx.sess().unimpl("value from adt::trans_switch in drop_structural_ty"),
_ => cx.ccx.sess().unimpl("value from adt::trans_switch in drop_structural_ty"),
}
}
},

File diff suppressed because it is too large
@ -36,6 +36,7 @@
#![feature(slice_patterns)]
#![feature(staged_api)]
#![feature(unicode)]
#![feature(conservative_impl_trait)]

use rustc::dep_graph::WorkProduct;

@ -95,8 +96,6 @@ mod asm;
mod assert_module_sources;
mod attributes;
mod base;
mod basic_block;
mod build;
mod builder;
mod cabi_aarch64;
mod cabi_arm;

@ -9,16 +9,11 @@
// except according to those terms.

use attributes;
use arena::TypedArena;
use llvm::{ValueRef, get_params};
use rustc::traits;
use abi::FnType;
use base::*;
use build::*;
use callee::Callee;
use callee::{Callee, CalleeData};
use common::*;
use consts;
use debuginfo::DebugLoc;
use declare;
use glue;
use machine;
@ -32,15 +27,15 @@ use rustc::ty;
const VTABLE_OFFSET: usize = 3;

/// Extracts a method from a trait object's vtable, at the specified index.
pub fn get_virtual_method<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
llvtable: ValueRef,
vtable_index: usize)
-> ValueRef {
pub fn get_virtual_method<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
llvtable: ValueRef,
vtable_index: usize)
-> ValueRef {
// Load the data pointer from the object.
debug!("get_virtual_method(vtable_index={}, llvtable={:?})",
vtable_index, Value(llvtable));

Load(bcx, GEPi(bcx, llvtable, &[vtable_index + VTABLE_OFFSET]))
bcx.load(bcx.gepi(llvtable, &[vtable_index + VTABLE_OFFSET]))
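For orientation: the vtable layout this code assumes places the destructor in slot 0, the size in slot 1, and the alignment in slot 2 (those are exactly the slots the `TyDynamic` arms of the drop glue and of `size_and_align_of_dst` read), with concrete methods starting at slot 3; hence `VTABLE_OFFSET = 3`. Loading method `i` is conceptually just:

    // sketch: fetch the i-th method out of a trait object's vtable
    let fn_ptr = bcx.load(bcx.gepi(llvtable, &[i + VTABLE_OFFSET]));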
/// Generate a shim function that allows an object type like `SomeTrait` to
@ -67,36 +62,47 @@ pub fn get_virtual_method<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
pub fn trans_object_shim<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>,
callee: Callee<'tcx>)
-> ValueRef {
let _icx = push_ctxt("trans_object_shim");
let tcx = ccx.tcx();

debug!("trans_object_shim({:?})", callee);

let (sig, abi, function_name) = match callee.ty.sty {
ty::TyFnDef(def_id, substs, f) => {
let function_name = match callee.ty.sty {
ty::TyFnDef(def_id, substs, _) => {
let instance = Instance::new(def_id, substs);
(&f.sig, f.abi, instance.symbol_name(ccx.shared()))
instance.symbol_name(ccx.shared())
}
_ => bug!()
};

let sig = tcx.erase_late_bound_regions_and_normalize(sig);
let fn_ty = FnType::new(ccx, abi, &sig, &[]);

let llfn = declare::define_internal_fn(ccx, &function_name, callee.ty);
attributes::set_frame_pointer_elimination(ccx, llfn);

let (block_arena, fcx): (TypedArena<_>, FunctionContext);
block_arena = TypedArena::new();
fcx = FunctionContext::new(ccx, llfn, fn_ty, None, &block_arena);
let mut bcx = fcx.init(false);
let fcx = FunctionContext::new(ccx, llfn);
let bcx = fcx.get_entry_block();

let dest = fcx.llretslotptr.get();
let llargs = get_params(fcx.llfn);
bcx = callee.call(bcx, DebugLoc::None,
&llargs[fcx.fn_ty.ret.is_indirect() as usize..], dest).bcx;
let mut llargs = get_params(fcx.llfn);
let fn_ret = callee.ty.fn_ret();
let fn_ty = callee.direct_fn_type(ccx, &[]);

fcx.finish(bcx, DebugLoc::None);
let fn_ptr = match callee.data {
CalleeData::Virtual(idx) => {
let fn_ptr = get_virtual_method(&bcx,
llargs.remove(fn_ty.ret.is_indirect() as usize + 1), idx);
let llty = fn_ty.llvm_type(bcx.ccx).ptr_to();
bcx.pointercast(fn_ptr, llty)
},
_ => bug!("trans_object_shim called with non-virtual callee"),
};
let llret = bcx.call(fn_ptr, &llargs, None);
fn_ty.apply_attrs_callsite(llret);

if fn_ret.0.is_never() {
bcx.unreachable();
} else {
if fn_ty.ret.is_indirect() || fn_ty.ret.is_ignore() {
bcx.ret_void();
} else {
bcx.ret(llret);
}
}

llfn
}
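Conceptually, the shim being emitted here behaves like the following hand-written analogue (a hedged sketch only; `Args` and `Ret` are placeholders, and the real shim manipulates raw LLVM values rather than Rust types):

    fn object_shim(recv_data: *mut (), recv_vtable: *mut (), args: Args) -> Ret {
        // fetch the concrete method out of the vtable (slot VTABLE_OFFSET + idx)
        let method: fn(*mut (), Args) -> Ret = /* load from recv_vtable */;
        method(recv_data, args)
    }

That is, it splits the fat pointer apart, pulls the real function pointer out of the vtable via get_virtual_method, forwards the remaining arguments, and applies the callee's ABI attributes at the forwarded call.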
@ -115,7 +121,6 @@ pub fn get_vtable<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
|
||||
-> ValueRef
|
||||
{
|
||||
let tcx = ccx.tcx();
|
||||
let _icx = push_ctxt("meth::get_vtable");
|
||||
|
||||
debug!("get_vtable(ty={:?}, trait_ref={:?})", ty, trait_ref);
|
||||
|
||||
|
@ -16,31 +16,30 @@ use rustc_data_structures::indexed_vec::{Idx, IndexVec};
use rustc::mir::{self, Location, TerminatorKind};
use rustc::mir::visit::{Visitor, LvalueContext};
use rustc::mir::traversal;
use common::{self, Block, BlockAndBuilder};
use glue;
use common;
use super::MirContext;
use super::rvalue;

pub fn lvalue_locals<'bcx, 'tcx>(bcx: Block<'bcx,'tcx>,
mir: &mir::Mir<'tcx>) -> BitVector {
let bcx = bcx.build();
let mut analyzer = LocalAnalyzer::new(mir, &bcx);
pub fn lvalue_locals<'a, 'tcx>(mircx: &MirContext<'a, 'tcx>) -> BitVector {
let mir = mircx.mir;
let mut analyzer = LocalAnalyzer::new(mircx);

analyzer.visit_mir(mir);

for (index, ty) in mir.local_decls.iter().map(|l| l.ty).enumerate() {
let ty = bcx.monomorphize(&ty);
let ty = mircx.monomorphize(&ty);
debug!("local {} has type {:?}", index, ty);
if ty.is_scalar() ||
ty.is_unique() ||
ty.is_region_ptr() ||
ty.is_simd() ||
common::type_is_zero_size(bcx.ccx(), ty)
common::type_is_zero_size(mircx.ccx, ty)
{
// These sorts of types are immediates that we can store
// in an ValueRef without an alloca.
assert!(common::type_is_immediate(bcx.ccx(), ty) ||
common::type_is_fat_ptr(bcx.tcx(), ty));
} else if common::type_is_imm_pair(bcx.ccx(), ty) {
assert!(common::type_is_immediate(mircx.ccx, ty) ||
common::type_is_fat_ptr(mircx.ccx, ty));
} else if common::type_is_imm_pair(mircx.ccx, ty) {
// We allow pairs and uses of any of their 2 fields.
} else {
// These sorts of types require an alloca. Note that
@ -56,22 +55,18 @@ pub fn lvalue_locals<'bcx, 'tcx>(bcx: Block<'bcx,'tcx>,
analyzer.lvalue_locals
}

struct LocalAnalyzer<'mir, 'bcx: 'mir, 'tcx: 'bcx> {
mir: &'mir mir::Mir<'tcx>,
bcx: &'mir BlockAndBuilder<'bcx, 'tcx>,
struct LocalAnalyzer<'mir, 'a: 'mir, 'tcx: 'a> {
cx: &'mir MirContext<'a, 'tcx>,
lvalue_locals: BitVector,
seen_assigned: BitVector
}

impl<'mir, 'bcx, 'tcx> LocalAnalyzer<'mir, 'bcx, 'tcx> {
fn new(mir: &'mir mir::Mir<'tcx>,
bcx: &'mir BlockAndBuilder<'bcx, 'tcx>)
-> LocalAnalyzer<'mir, 'bcx, 'tcx> {
impl<'mir, 'a, 'tcx> LocalAnalyzer<'mir, 'a, 'tcx> {
fn new(mircx: &'mir MirContext<'a, 'tcx>) -> LocalAnalyzer<'mir, 'a, 'tcx> {
LocalAnalyzer {
mir: mir,
bcx: bcx,
lvalue_locals: BitVector::new(mir.local_decls.len()),
seen_assigned: BitVector::new(mir.local_decls.len())
cx: mircx,
lvalue_locals: BitVector::new(mircx.mir.local_decls.len()),
seen_assigned: BitVector::new(mircx.mir.local_decls.len())
}
}

@ -87,7 +82,7 @@ impl<'mir, 'bcx, 'tcx> LocalAnalyzer<'mir, 'bcx, 'tcx> {
}
}

impl<'mir, 'bcx, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'bcx, 'tcx> {
impl<'mir, 'a, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'tcx> {
fn visit_assign(&mut self,
block: mir::BasicBlock,
lvalue: &mir::Lvalue<'tcx>,
@ -97,7 +92,7 @@ impl<'mir, 'bcx, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'bcx, 'tcx> {

if let mir::Lvalue::Local(index) = *lvalue {
self.mark_assigned(index);
if !rvalue::rvalue_creates_operand(self.mir, self.bcx, rvalue) {
if !rvalue::rvalue_creates_operand(rvalue) {
self.mark_as_lvalue(index);
}
} else {
@ -117,7 +112,7 @@ impl<'mir, 'bcx, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'bcx, 'tcx> {
literal: mir::Literal::Item { def_id, .. }, ..
}),
ref args, ..
} if Some(def_id) == self.bcx.tcx().lang_items.box_free_fn() => {
} if Some(def_id) == self.cx.ccx.tcx().lang_items.box_free_fn() => {
// box_free(x) shares with `drop x` the property that it
// is not guaranteed to be statically dominated by the
// definition of x, so x must always be in an alloca.
@ -140,10 +135,10 @@ impl<'mir, 'bcx, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'bcx, 'tcx> {
// Allow uses of projections of immediate pair fields.
if let mir::Lvalue::Projection(ref proj) = *lvalue {
if let mir::Lvalue::Local(_) = proj.base {
let ty = proj.base.ty(self.mir, self.bcx.tcx());
let ty = proj.base.ty(self.cx.mir, self.cx.ccx.tcx());

let ty = self.bcx.monomorphize(&ty.to_ty(self.bcx.tcx()));
if common::type_is_imm_pair(self.bcx.ccx(), ty) {
let ty = self.cx.monomorphize(&ty.to_ty(self.cx.ccx.tcx()));
if common::type_is_imm_pair(self.cx.ccx, ty) {
if let mir::ProjectionElem::Field(..) = proj.elem {
if let LvalueContext::Consume = context {
return;
@ -171,11 +166,11 @@ impl<'mir, 'bcx, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'bcx, 'tcx> {
}

LvalueContext::Drop => {
let ty = lvalue.ty(self.mir, self.bcx.tcx());
let ty = self.bcx.monomorphize(&ty.to_ty(self.bcx.tcx()));
let ty = lvalue.ty(self.cx.mir, self.cx.ccx.tcx());
let ty = self.cx.monomorphize(&ty.to_ty(self.cx.ccx.tcx()));

// Only need the lvalue if we're actually dropping it.
if glue::type_needs_drop(self.bcx.tcx(), ty) {
if self.cx.ccx.shared().type_needs_drop(ty) {
self.mark_as_lvalue(index);
}
}
@ -200,10 +195,7 @@ pub enum CleanupKind {
Internal { funclet: mir::BasicBlock }
}

pub fn cleanup_kinds<'bcx,'tcx>(_bcx: Block<'bcx,'tcx>,
mir: &mir::Mir<'tcx>)
-> IndexVec<mir::BasicBlock, CleanupKind>
{
pub fn cleanup_kinds<'a, 'tcx>(mir: &mir::Mir<'tcx>) -> IndexVec<mir::BasicBlock, CleanupKind> {
fn discover_masters<'tcx>(result: &mut IndexVec<mir::BasicBlock, CleanupKind>,
mir: &mir::Mir<'tcx>) {
for (bb, data) in mir.basic_blocks().iter_enumerated() {
|
@ -8,20 +8,18 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use llvm::{self, ValueRef};
use llvm::{self, ValueRef, BasicBlockRef};
use rustc_const_eval::{ErrKind, ConstEvalErr, note_const_eval_err};
use rustc::middle::lang_items;
use rustc::ty::{self, layout};
use rustc::mir;
use abi::{Abi, FnType, ArgType};
use adt;
use base;
use build;
use base::{self, Lifetime};
use callee::{Callee, CalleeData, Fn, Intrinsic, NamedTupleConstructor, Virtual};
use common::{self, Block, BlockAndBuilder, LandingPad};
use common::{self, BlockAndBuilder, Funclet};
use common::{C_bool, C_str_slice, C_struct, C_u32, C_undef};
use consts;
use debuginfo::DebugLoc;
use Disr;
use machine::{llalign_of_min, llbitsize_of_real};
use meth;
@ -29,6 +27,7 @@ use type_of;
use glue;
use type_::Type;

use rustc_data_structures::indexed_vec::IndexVec;
use rustc_data_structures::fx::FxHashMap;
use syntax::symbol::Symbol;

@ -39,21 +38,27 @@ use super::lvalue::{LvalueRef};
use super::operand::OperandRef;
use super::operand::OperandValue::{Pair, Ref, Immediate};

use std::cell::Ref as CellRef;
use std::ptr;

impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
pub fn trans_block(&mut self, bb: mir::BasicBlock) {
let mut bcx = self.bcx(bb);
let data = &CellRef::clone(&self.mir)[bb];
impl<'a, 'tcx> MirContext<'a, 'tcx> {
pub fn trans_block(&mut self, bb: mir::BasicBlock,
funclets: &IndexVec<mir::BasicBlock, Option<Funclet>>) {
let mut bcx = self.build_block(bb);
let data = &self.mir[bb];

debug!("trans_block({:?}={:?})", bb, data);

let funclet = match self.cleanup_kinds[bb] {
CleanupKind::Internal { funclet } => funclets[funclet].as_ref(),
_ => funclets[bb].as_ref(),
};

// Create the cleanup bundle, if needed.
let cleanup_pad = bcx.lpad().and_then(|lp| lp.cleanuppad());
let cleanup_bundle = bcx.lpad().and_then(|l| l.bundle());
let cleanup_pad = funclet.map(|lp| lp.cleanuppad());
let cleanup_bundle = funclet.map(|l| l.bundle());

let funclet_br = |this: &Self, bcx: BlockAndBuilder, bb: mir::BasicBlock| {
let lltarget = this.blocks[bb].llbb;
let lltarget = this.blocks[bb];
if let Some(cp) = cleanup_pad {
match this.cleanup_kinds[bb] {
CleanupKind::Funclet => {
@ -70,7 +75,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
};

let llblock = |this: &mut Self, target: mir::BasicBlock| {
let lltarget = this.blocks[target].llbb;
let lltarget = this.blocks[target];

if let Some(cp) = cleanup_pad {
match this.cleanup_kinds[target] {
@ -79,8 +84,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {

debug!("llblock: creating cleanup trampoline for {:?}", target);
let name = &format!("{:?}_cleanup_trampoline_{:?}", bb, target);
let trampoline = this.fcx.new_block(name).build();
trampoline.set_personality_fn(this.fcx.eh_personality());
let trampoline = this.fcx.build_new_block(name);
trampoline.cleanup_ret(cp, Some(lltarget));
trampoline.llbb()
}
@ -93,7 +97,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
(this.cleanup_kinds[bb], this.cleanup_kinds[target])
{
// jump *into* cleanup - need a landing pad if GNU
this.landing_pad_to(target).llbb
this.landing_pad_to(target)
} else {
lltarget
}
@ -108,23 +112,22 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
debug!("trans_block: terminator: {:?}", terminator);

let span = terminator.source_info.span;
let debug_loc = self.debug_loc(terminator.source_info);
debug_loc.apply_to_bcx(&bcx);
debug_loc.apply(bcx.fcx());
self.set_debug_loc(&bcx, terminator.source_info);
match terminator.kind {
mir::TerminatorKind::Resume => {
if let Some(cleanup_pad) = cleanup_pad {
bcx.cleanup_ret(cleanup_pad, None);
} else {
let llpersonality = bcx.fcx().eh_personality();
bcx.set_personality_fn(llpersonality);

let ps = self.get_personality_slot(&bcx);
let lp = bcx.load(ps);
bcx.with_block(|bcx| {
base::call_lifetime_end(bcx, ps);
base::trans_unwind_resume(bcx, lp);
});
Lifetime::End.call(&bcx, ps);
if !bcx.sess().target.target.options.custom_unwind_resume {
bcx.resume(lp);
} else {
let exc_ptr = bcx.extract_value(lp, 0);
bcx.call(bcx.ccx.eh_unwind_resume(), &[exc_ptr], cleanup_bundle);
bcx.unreachable();
}
}
}

@ -143,9 +146,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
mir::TerminatorKind::Switch { ref discr, ref adt_def, ref targets } => {
let discr_lvalue = self.trans_lvalue(&bcx, discr);
let ty = discr_lvalue.ty.to_ty(bcx.tcx());
let discr = bcx.with_block(|bcx|
adt::trans_get_discr(bcx, ty, discr_lvalue.llval, None, true)
);
let discr = adt::trans_get_discr(&bcx, ty, discr_lvalue.llval, None, true);

let mut bb_hist = FxHashMap();
for target in targets {
@ -162,16 +163,15 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
// We're generating an exhaustive switch, so the else branch
// can't be hit. Branching to an unreachable instruction
// lets LLVM know this
_ => (None, self.unreachable_block().llbb)
_ => (None, self.unreachable_block())
};
let switch = bcx.switch(discr, default_blk, targets.len());
assert_eq!(adt_def.variants.len(), targets.len());
for (adt_variant, &target) in adt_def.variants.iter().zip(targets) {
if default_bb != Some(target) {
let llbb = llblock(self, target);
let llval = bcx.with_block(|bcx| adt::trans_case(
bcx, ty, Disr::from(adt_variant.disr_val)));
build::AddCase(switch, llval, llbb)
let llval = adt::trans_case(&bcx, ty, Disr::from(adt_variant.disr_val));
bcx.add_case(switch, llval, llbb)
}
}
}
@ -179,17 +179,17 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
mir::TerminatorKind::SwitchInt { ref discr, switch_ty, ref values, ref targets } => {
let (otherwise, targets) = targets.split_last().unwrap();
let discr = bcx.load(self.trans_lvalue(&bcx, discr).llval);
let discr = bcx.with_block(|bcx| base::to_immediate(bcx, discr, switch_ty));
let discr = base::to_immediate(&bcx, discr, switch_ty);
let switch = bcx.switch(discr, llblock(self, *otherwise), values.len());
for (value, target) in values.iter().zip(targets) {
let val = Const::from_constval(bcx.ccx(), value.clone(), switch_ty);
let val = Const::from_constval(bcx.ccx, value.clone(), switch_ty);
let llbb = llblock(self, *target);
build::AddCase(switch, val.llval, llbb)
bcx.add_case(switch, val.llval, llbb)
}
}

mir::TerminatorKind::Return => {
let ret = bcx.fcx().fn_ty.ret;
let ret = self.fn_ty.ret;
if ret.is_ignore() || ret.is_indirect() {
bcx.ret_void();
return;
@ -208,14 +208,14 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
};
let llslot = match op.val {
Immediate(_) | Pair(..) => {
let llscratch = build::AllocaFcx(bcx.fcx(), ret.original_ty, "ret");
let llscratch = bcx.fcx().alloca(ret.original_ty, "ret");
self.store_operand(&bcx, llscratch, op);
llscratch
}
Ref(llval) => llval
};
let load = bcx.load(bcx.pointercast(llslot, cast_ty.ptr_to()));
let llalign = llalign_of_min(bcx.ccx(), ret.ty);
let llalign = llalign_of_min(bcx.ccx, ret.ty);
unsafe {
llvm::LLVMSetAlignment(load, llalign);
}
@ -233,21 +233,21 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {

mir::TerminatorKind::Drop { ref location, target, unwind } => {
let ty = location.ty(&self.mir, bcx.tcx()).to_ty(bcx.tcx());
let ty = bcx.monomorphize(&ty);
let ty = self.monomorphize(&ty);

// Double check for necessity to drop
if !glue::type_needs_drop(bcx.tcx(), ty) {
if !bcx.ccx.shared().type_needs_drop(ty) {
funclet_br(self, bcx, target);
return;
}

let lvalue = self.trans_lvalue(&bcx, location);
let drop_fn = glue::get_drop_glue(bcx.ccx(), ty);
let drop_ty = glue::get_drop_glue_type(bcx.tcx(), ty);
let is_sized = common::type_is_sized(bcx.tcx(), ty);
let drop_fn = glue::get_drop_glue(bcx.ccx, ty);
let drop_ty = glue::get_drop_glue_type(bcx.ccx.shared(), ty);
let is_sized = bcx.ccx.shared().type_is_sized(ty);
let llvalue = if is_sized {
if drop_ty != ty {
bcx.pointercast(lvalue.llval, type_of::type_of(bcx.ccx(), drop_ty).ptr_to())
bcx.pointercast(lvalue.llval, type_of::type_of(bcx.ccx, drop_ty).ptr_to())
} else {
lvalue.llval
}
@ -259,18 +259,16 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
// but I am shooting for a quick fix to #35546
// here that can be cleanly backported to beta, so
// I want to avoid touching all of trans.
bcx.with_block(|bcx| {
let scratch = base::alloc_ty(bcx, ty, "drop");
base::call_lifetime_start(bcx, scratch);
build::Store(bcx, lvalue.llval, base::get_dataptr(bcx, scratch));
build::Store(bcx, lvalue.llextra, base::get_meta(bcx, scratch));
scratch
})
let scratch = base::alloc_ty(&bcx, ty, "drop");
Lifetime::Start.call(&bcx, scratch);
bcx.store(lvalue.llval, base::get_dataptr(&bcx, scratch));
bcx.store(lvalue.llextra, base::get_meta(&bcx, scratch));
scratch
};
if let Some(unwind) = unwind {
bcx.invoke(drop_fn,
&[llvalue],
self.blocks[target].llbb,
self.blocks[target],
llblock(self, unwind),
cleanup_bundle);
} else {
@ -290,7 +288,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
// NOTE: Unlike binops, negation doesn't have its own
// checked operation, just a comparison with the minimum
// value, so we have to check for the assert message.
if !bcx.ccx().check_overflow() {
if !bcx.ccx.check_overflow() {
use rustc_const_math::ConstMathErr::Overflow;
use rustc_const_math::Op::Neg;

@ -306,27 +304,27 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
}

// Pass the condition through llvm.expect for branch hinting.
let expect = bcx.ccx().get_intrinsic(&"llvm.expect.i1");
let cond = bcx.call(expect, &[cond, C_bool(bcx.ccx(), expected)], None);
let expect = bcx.ccx.get_intrinsic(&"llvm.expect.i1");
let cond = bcx.call(expect, &[cond, C_bool(bcx.ccx, expected)], None);

// Create the failure block and the conditional branch to it.
let lltarget = llblock(self, target);
let panic_block = self.fcx.new_block("panic");
let panic_block = self.fcx.build_new_block("panic");
if expected {
bcx.cond_br(cond, lltarget, panic_block.llbb);
bcx.cond_br(cond, lltarget, panic_block.llbb());
} else {
bcx.cond_br(cond, panic_block.llbb, lltarget);
bcx.cond_br(cond, panic_block.llbb(), lltarget);
}

// After this point, bcx is the block for the call to panic.
bcx = panic_block.build();
debug_loc.apply_to_bcx(&bcx);
bcx = panic_block;
self.set_debug_loc(&bcx, terminator.source_info);

// Get the location information.
let loc = bcx.sess().codemap().lookup_char_pos(span.lo);
let filename = Symbol::intern(&loc.file.name).as_str();
let filename = C_str_slice(bcx.ccx(), filename);
let line = C_u32(bcx.ccx(), loc.line as u32);
let filename = C_str_slice(bcx.ccx, filename);
let line = C_u32(bcx.ccx, loc.line as u32);

// Put together the arguments to the panic entry point.
let (lang_item, args, const_err) = match *msg {
@ -343,9 +341,9 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
})
});

let file_line = C_struct(bcx.ccx(), &[filename, line], false);
let align = llalign_of_min(bcx.ccx(), common::val_ty(file_line));
let file_line = consts::addr_of(bcx.ccx(),
let file_line = C_struct(bcx.ccx, &[filename, line], false);
let align = llalign_of_min(bcx.ccx, common::val_ty(file_line));
let file_line = consts::addr_of(bcx.ccx,
file_line,
align,
"panic_bounds_check_loc");
@ -355,12 +353,12 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
}
mir::AssertMessage::Math(ref err) => {
let msg_str = Symbol::intern(err.description()).as_str();
let msg_str = C_str_slice(bcx.ccx(), msg_str);
let msg_file_line = C_struct(bcx.ccx(),
let msg_str = C_str_slice(bcx.ccx, msg_str);
let msg_file_line = C_struct(bcx.ccx,
&[msg_str, filename, line],
false);
let align = llalign_of_min(bcx.ccx(), common::val_ty(msg_file_line));
let msg_file_line = consts::addr_of(bcx.ccx(),
let align = llalign_of_min(bcx.ccx, common::val_ty(msg_file_line));
let msg_file_line = consts::addr_of(bcx.ccx,
msg_file_line,
align,
"panic_loc");
@ -384,15 +382,15 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {

// Obtain the panic entry point.
let def_id = common::langcall(bcx.tcx(), Some(span), "", lang_item);
let callee = Callee::def(bcx.ccx(), def_id,
bcx.ccx().empty_substs_for_def_id(def_id));
let llfn = callee.reify(bcx.ccx());
let callee = Callee::def(bcx.ccx, def_id,
bcx.ccx.empty_substs_for_def_id(def_id));
let llfn = callee.reify(bcx.ccx);

// Translate the actual panic invoke/call.
if let Some(unwind) = cleanup {
bcx.invoke(llfn,
&args,
self.unreachable_block().llbb,
self.unreachable_block(),
llblock(self, unwind),
cleanup_bundle);
} else {
@ -411,7 +409,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {

let (mut callee, abi, sig) = match callee.ty.sty {
ty::TyFnDef(def_id, substs, f) => {
(Callee::def(bcx.ccx(), def_id, substs), f.abi, &f.sig)
(Callee::def(bcx.ccx, def_id, substs), f.abi, &f.sig)
}
ty::TyFnPtr(f) => {
(Callee {
@ -443,6 +441,65 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
return;
}

// FIXME: This should proxy to the drop glue in the future when the ABI matches;
// most of the below code was copied from the match arm for TerminatorKind::Drop.
if intrinsic == Some("drop_in_place") {
let &(_, target) = destination.as_ref().unwrap();
let ty = if let ty::TyFnDef(_, substs, _) = callee.ty.sty {
substs.type_at(0)
} else {
bug!("Unexpected ty: {}", callee.ty);
};

// Double check for necessity to drop
if !bcx.ccx.shared().type_needs_drop(ty) {
funclet_br(self, bcx, target);
return;
}

let ptr = self.trans_operand(&bcx, &args[0]);
let (llval, llextra) = match ptr.val {
Immediate(llptr) => (llptr, ptr::null_mut()),
Pair(llptr, llextra) => (llptr, llextra),
Ref(_) => bug!("Deref of by-Ref type {:?}", ptr.ty)
};

let drop_fn = glue::get_drop_glue(bcx.ccx, ty);
let drop_ty = glue::get_drop_glue_type(bcx.ccx.shared(), ty);
let is_sized = bcx.ccx.shared().type_is_sized(ty);
let llvalue = if is_sized {
if drop_ty != ty {
bcx.pointercast(llval, type_of::type_of(bcx.ccx, drop_ty).ptr_to())
} else {
llval
}
} else {
// FIXME(#36457) Currently drop glue takes sized
// values as a `*(data, meta)`, but elsewhere in
// MIR we pass `(data, meta)` as two separate
// arguments. It would be better to fix drop glue,
// but I am shooting for a quick fix to #35546
// here that can be cleanly backported to beta, so
// I want to avoid touching all of trans.
let scratch = base::alloc_ty(&bcx, ty, "drop");
Lifetime::Start.call(&bcx, scratch);
bcx.store(llval, base::get_dataptr(&bcx, scratch));
bcx.store(llextra, base::get_meta(&bcx, scratch));
scratch
};
if let Some(unwind) = *cleanup {
bcx.invoke(drop_fn,
&[llvalue],
self.blocks[target],
llblock(self, unwind),
cleanup_bundle);
} else {
bcx.call(drop_fn, &[llvalue], cleanup_bundle);
funclet_br(self, bcx, target);
}
return;
}

if intrinsic == Some("transmute") {
let &(ref dest, target) = destination.as_ref().unwrap();
self.with_lvalue_ref(&bcx, dest, |this, dest| {
@ -456,9 +513,9 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
let extra_args = &args[sig.inputs().len()..];
let extra_args = extra_args.iter().map(|op_arg| {
let op_ty = op_arg.ty(&self.mir, bcx.tcx());
bcx.monomorphize(&op_ty)
self.monomorphize(&op_ty)
}).collect::<Vec<_>>();
let fn_ty = callee.direct_fn_type(bcx.ccx(), &extra_args);
let fn_ty = callee.direct_fn_type(bcx.ccx, &extra_args);

// The arguments we'll be passing. Plus one to account for outptr, if used.
let arg_count = fn_ty.args.len() + fn_ty.ret.is_indirect() as usize;
@ -519,7 +576,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
let fn_ptr = match callee.data {
NamedTupleConstructor(_) => {
// FIXME translate this like mir::Rvalue::Aggregate.
callee.reify(bcx.ccx())
callee.reify(bcx.ccx)
}
Intrinsic => {
use intrinsic::trans_intrinsic_call;
@ -537,10 +594,8 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
bug!("Cannot use direct operand with an intrinsic call")
};

bcx.with_block(|bcx| {
trans_intrinsic_call(bcx, callee.ty, &fn_ty,
&llargs, dest, debug_loc);
});
trans_intrinsic_call(&bcx, callee.ty, &fn_ty, &llargs, dest,
terminator.source_info.span);

if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
// Make a fake operand for store_return
@ -554,8 +609,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
if let Some((_, target)) = *destination {
funclet_br(self, bcx, target);
} else {
// trans_intrinsic_call already used Unreachable.
// bcx.unreachable();
bcx.unreachable();
}

return;
@ -573,15 +627,15 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
};
let invokeret = bcx.invoke(fn_ptr,
&llargs,
ret_bcx.llbb,
ret_bcx,
llblock(self, cleanup),
cleanup_bundle);
fn_ty.apply_attrs_callsite(invokeret);

if destination.is_some() {
let ret_bcx = ret_bcx.build();
if let Some((_, target)) = *destination {
let ret_bcx = self.build_block(target);
ret_bcx.at_start(|ret_bcx| {
debug_loc.apply_to_bcx(ret_bcx);
self.set_debug_loc(&ret_bcx, terminator.source_info);
let op = OperandRef {
val: Immediate(invokeret),
ty: sig.output(),
@ -608,7 +662,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
}

fn trans_argument(&mut self,
bcx: &BlockAndBuilder<'bcx, 'tcx>,
bcx: &BlockAndBuilder<'a, 'tcx>,
op: OperandRef<'tcx>,
llargs: &mut Vec<ValueRef>,
fn_ty: &FnType,
@ -616,14 +670,12 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
callee: &mut CalleeData) {
if let Pair(a, b) = op.val {
// Treat the values in a fat pointer separately.
if common::type_is_fat_ptr(bcx.tcx(), op.ty) {
if common::type_is_fat_ptr(bcx.ccx, op.ty) {
let (ptr, meta) = (a, b);
if *next_idx == 0 {
if let Virtual(idx) = *callee {
let llfn = bcx.with_block(|bcx| {
meth::get_virtual_method(bcx, meta, idx)
});
let llty = fn_ty.llvm_type(bcx.ccx()).ptr_to();
let llfn = meth::get_virtual_method(bcx, meta, idx);
let llty = fn_ty.llvm_type(bcx.ccx).ptr_to();
*callee = Fn(bcx.pointercast(llfn, llty));
}
}
@ -655,7 +707,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
let (mut llval, by_ref) = match op.val {
Immediate(_) | Pair(..) => {
if arg.is_indirect() || arg.cast.is_some() {
let llscratch = build::AllocaFcx(bcx.fcx(), arg.original_ty, "arg");
let llscratch = bcx.fcx().alloca(arg.original_ty, "arg");
self.store_operand(bcx, llscratch, op);
(llscratch, true)
} else {
@ -667,13 +719,13 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {

if by_ref && !arg.is_indirect() {
// Have to load the argument, maybe while casting it.
if arg.original_ty == Type::i1(bcx.ccx()) {
if arg.original_ty == Type::i1(bcx.ccx) {
// We store bools as i8 so we need to truncate to i1.
llval = bcx.load_range_assert(llval, 0, 2, llvm::False);
llval = bcx.trunc(llval, arg.original_ty);
} else if let Some(ty) = arg.cast {
llval = bcx.load(bcx.pointercast(llval, ty.ptr_to()));
let llalign = llalign_of_min(bcx.ccx(), arg.ty);
let llalign = llalign_of_min(bcx.ccx, arg.ty);
unsafe {
llvm::LLVMSetAlignment(llval, llalign);
}
@ -686,7 +738,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
}

fn trans_arguments_untupled(&mut self,
bcx: &BlockAndBuilder<'bcx, 'tcx>,
bcx: &BlockAndBuilder<'a, 'tcx>,
operand: &mir::Operand<'tcx>,
llargs: &mut Vec<ValueRef>,
fn_ty: &FnType,
@ -705,9 +757,9 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
Ref(llval) => {
let base = adt::MaybeSizedValue::sized(llval);
for (n, &ty) in arg_types.iter().enumerate() {
let ptr = adt::trans_field_ptr_builder(bcx, tuple.ty, base, Disr(0), n);
let val = if common::type_is_fat_ptr(bcx.tcx(), ty) {
let (lldata, llextra) = base::load_fat_ptr_builder(bcx, ptr, ty);
let ptr = adt::trans_field_ptr(bcx, tuple.ty, base, Disr(0), n);
let val = if common::type_is_fat_ptr(bcx.ccx, ty) {
let (lldata, llextra) = base::load_fat_ptr(bcx, ptr, ty);
Pair(lldata, llextra)
} else {
// trans_argument will load this if it needs to
@ -722,7 +774,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {

}
Immediate(llval) => {
let l = bcx.ccx().layout_of(tuple.ty);
let l = bcx.ccx.layout_of(tuple.ty);
let v = if let layout::Univariant { ref variant, .. } = *l {
variant
} else {
@ -731,8 +783,8 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
for (n, &ty) in arg_types.iter().enumerate() {
let mut elem = bcx.extract_value(llval, v.memory_index[n] as usize);
// Truncate bools to i1, if needed
if ty.is_bool() && common::val_ty(elem) != Type::i1(bcx.ccx()) {
elem = bcx.trunc(elem, Type::i1(bcx.ccx()));
if ty.is_bool() && common::val_ty(elem) != Type::i1(bcx.ccx) {
elem = bcx.trunc(elem, Type::i1(bcx.ccx));
}
// If the tuple is immediate, the elements are as well
let op = OperandRef {
@ -747,8 +799,8 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
for (n, &ty) in arg_types.iter().enumerate() {
let mut elem = elems[n];
// Truncate bools to i1, if needed
if ty.is_bool() && common::val_ty(elem) != Type::i1(bcx.ccx()) {
elem = bcx.trunc(elem, Type::i1(bcx.ccx()));
if ty.is_bool() && common::val_ty(elem) != Type::i1(bcx.ccx) {
elem = bcx.trunc(elem, Type::i1(bcx.ccx));
}
// Pair is always made up of immediates
let op = OperandRef {
@ -762,91 +814,61 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {

}

fn get_personality_slot(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>) -> ValueRef {
let ccx = bcx.ccx();
fn get_personality_slot(&mut self, bcx: &BlockAndBuilder<'a, 'tcx>) -> ValueRef {
let ccx = bcx.ccx;
if let Some(slot) = self.llpersonalityslot {
slot
} else {
let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false);
bcx.with_block(|bcx| {
let slot = base::alloca(bcx, llretty, "personalityslot");
self.llpersonalityslot = Some(slot);
base::call_lifetime_start(bcx, slot);
slot
})
let slot = bcx.fcx().alloca(llretty, "personalityslot");
self.llpersonalityslot = Some(slot);
Lifetime::Start.call(bcx, slot);
slot
}
}

/// Return the landingpad wrapper around the given basic block
///
/// No-op in MSVC SEH scheme.
fn landing_pad_to(&mut self, target_bb: mir::BasicBlock) -> Block<'bcx, 'tcx>
{
fn landing_pad_to(&mut self, target_bb: mir::BasicBlock) -> BasicBlockRef {
if let Some(block) = self.landing_pads[target_bb] {
return block;
}

if base::wants_msvc_seh(self.fcx.ccx.sess()) {
if base::wants_msvc_seh(self.ccx.sess()) {
return self.blocks[target_bb];
}

let target = self.bcx(target_bb);
let target = self.build_block(target_bb);

let block = self.fcx.new_block("cleanup");
self.landing_pads[target_bb] = Some(block);
let bcx = self.fcx.build_new_block("cleanup");
self.landing_pads[target_bb] = Some(bcx.llbb());

let bcx = block.build();
let ccx = bcx.ccx();
let llpersonality = self.fcx.eh_personality();
let ccx = bcx.ccx;
let llpersonality = self.ccx.eh_personality();
let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false);
let llretval = bcx.landing_pad(llretty, llpersonality, 1, self.fcx.llfn);
bcx.set_cleanup(llretval);
let slot = self.get_personality_slot(&bcx);
bcx.store(llretval, slot);
bcx.br(target.llbb());
block
bcx.llbb()
}

pub fn init_cpad(&mut self, bb: mir::BasicBlock) {
let bcx = self.bcx(bb);
let data = &self.mir[bb];
debug!("init_cpad({:?})", data);

match self.cleanup_kinds[bb] {
CleanupKind::NotCleanup => {
bcx.set_lpad(None)
}
_ if !base::wants_msvc_seh(bcx.sess()) => {
bcx.set_lpad(Some(LandingPad::gnu()))
}
CleanupKind::Internal { funclet } => {
// FIXME: is this needed?
bcx.set_personality_fn(self.fcx.eh_personality());
bcx.set_lpad_ref(self.bcx(funclet).lpad());
}
CleanupKind::Funclet => {
bcx.set_personality_fn(self.fcx.eh_personality());
DebugLoc::None.apply_to_bcx(&bcx);
let cleanup_pad = bcx.cleanup_pad(None, &[]);
bcx.set_lpad(Some(LandingPad::msvc(cleanup_pad)));
}
};
}

fn unreachable_block(&mut self) -> Block<'bcx, 'tcx> {
fn unreachable_block(&mut self) -> BasicBlockRef {
self.unreachable_block.unwrap_or_else(|| {
let bl = self.fcx.new_block("unreachable");
bl.build().unreachable();
self.unreachable_block = Some(bl);
bl
let bl = self.fcx.build_new_block("unreachable");
bl.unreachable();
self.unreachable_block = Some(bl.llbb());
bl.llbb()
})
}

fn bcx(&self, bb: mir::BasicBlock) -> BlockAndBuilder<'bcx, 'tcx> {
self.blocks[bb].build()
pub fn build_block(&self, bb: mir::BasicBlock) -> BlockAndBuilder<'a, 'tcx> {
BlockAndBuilder::new(self.blocks[bb], self.fcx)
}

fn make_return_dest(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>,
fn make_return_dest(&mut self, bcx: &BlockAndBuilder<'a, 'tcx>,
dest: &mir::Lvalue<'tcx>, fn_ret_ty: &ArgType,
llargs: &mut Vec<ValueRef>, is_intrinsic: bool) -> ReturnDest {
// If the return is ignored, we can just return a do-nothing ReturnDest
@ -863,18 +885,14 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
return if fn_ret_ty.is_indirect() {
// Odd, but possible, case, we have an operand temporary,
// but the calling convention has an indirect return.
let tmp = bcx.with_block(|bcx| {
base::alloc_ty(bcx, ret_ty, "tmp_ret")
});
let tmp = base::alloc_ty(bcx, ret_ty, "tmp_ret");
llargs.push(tmp);
ReturnDest::IndirectOperand(tmp, index)
} else if is_intrinsic {
// Currently, intrinsics always need a location to store
// the result. so we create a temporary alloca for the
// result
let tmp = bcx.with_block(|bcx| {
base::alloc_ty(bcx, ret_ty, "tmp_ret")
});
let tmp = base::alloc_ty(bcx, ret_ty, "tmp_ret");
ReturnDest::IndirectOperand(tmp, index)
} else {
ReturnDest::DirectOperand(index)
@ -895,27 +913,27 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
}
}

fn trans_transmute(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>,
fn trans_transmute(&mut self, bcx: &BlockAndBuilder<'a, 'tcx>,
src: &mir::Operand<'tcx>, dst: LvalueRef<'tcx>) {
let mut val = self.trans_operand(bcx, src);
if let ty::TyFnDef(def_id, substs, _) = val.ty.sty {
let llouttype = type_of::type_of(bcx.ccx(), dst.ty.to_ty(bcx.tcx()));
let out_type_size = llbitsize_of_real(bcx.ccx(), llouttype);
let llouttype = type_of::type_of(bcx.ccx, dst.ty.to_ty(bcx.tcx()));
let out_type_size = llbitsize_of_real(bcx.ccx, llouttype);
if out_type_size != 0 {
// FIXME #19925 Remove this hack after a release cycle.
let f = Callee::def(bcx.ccx(), def_id, substs);
let f = Callee::def(bcx.ccx, def_id, substs);
let ty = match f.ty.sty {
ty::TyFnDef(.., f) => bcx.tcx().mk_fn_ptr(f),
_ => f.ty
};
val = OperandRef {
val: Immediate(f.reify(bcx.ccx())),
val: Immediate(f.reify(bcx.ccx)),
ty: ty
};
}
}

let llty = type_of::type_of(bcx.ccx(), val.ty);
let llty = type_of::type_of(bcx.ccx, val.ty);
let cast_ptr = bcx.pointercast(dst.llval, llty.ptr_to());
self.store_operand(bcx, cast_ptr, val);
}
@ -923,7 +941,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {

// Stores the return value of a function call into it's final location.
fn store_return(&mut self,
bcx: &BlockAndBuilder<'bcx, 'tcx>,
bcx: &BlockAndBuilder<'a, 'tcx>,
dest: ReturnDest,
ret_ty: ArgType,
op: OperandRef<'tcx>) {
@ -939,9 +957,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
DirectOperand(index) => {
// If there is a cast, we have to store and reload.
let op = if ret_ty.cast.is_some() {
let tmp = bcx.with_block(|bcx| {
base::alloc_ty(bcx, op.ty, "tmp_ret")
});
let tmp = base::alloc_ty(bcx, op.ty, "tmp_ret");
ret_ty.store(bcx, op.immediate(), tmp);
self.trans_load(bcx, tmp, op.ty)
} else {
|
@ -25,7 +25,7 @@ use rustc::ty::subst::Substs;
use rustc_data_structures::indexed_vec::{Idx, IndexVec};
use {abi, adt, base, Disr, machine};
use callee::Callee;
use common::{self, BlockAndBuilder, CrateContext, const_get_elt, val_ty, type_is_sized};
use common::{self, BlockAndBuilder, CrateContext, const_get_elt, val_ty};
use common::{C_array, C_bool, C_bytes, C_floating_f64, C_integral};
use common::{C_null, C_struct, C_str_slice, C_undef, C_uint};
use common::{const_to_opt_int, const_to_opt_uint};
@ -401,7 +401,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
.projection_ty(tcx, &projection.elem);
let base = tr_base.to_const(span);
let projected_ty = self.monomorphize(&projected_ty).to_ty(tcx);
let is_sized = common::type_is_sized(tcx, projected_ty);
let is_sized = self.ccx.shared().type_is_sized(projected_ty);

let (projected, llextra) = match projection.elem {
mir::ProjectionElem::Deref => {
@ -598,11 +598,11 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
mir::CastKind::Unsize => {
// unsize targets other than to a fat pointer currently
// can't be in constants.
assert!(common::type_is_fat_ptr(tcx, cast_ty));
assert!(common::type_is_fat_ptr(self.ccx, cast_ty));

let pointee_ty = operand.ty.builtin_deref(true, ty::NoPreference)
.expect("consts: unsizing got non-pointer type").ty;
let (base, old_info) = if !common::type_is_sized(tcx, pointee_ty) {
let (base, old_info) = if !self.ccx.shared().type_is_sized(pointee_ty) {
// Normally, the source is a thin pointer and we are
// adding extra info to make a fat pointer. The exception
// is when we are upcasting an existing object fat pointer
@ -685,9 +685,9 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
mir::CastKind::Misc => { // Casts from a fat-ptr.
let ll_cast_ty = type_of::immediate_type_of(self.ccx, cast_ty);
let ll_from_ty = type_of::immediate_type_of(self.ccx, operand.ty);
if common::type_is_fat_ptr(tcx, operand.ty) {
if common::type_is_fat_ptr(self.ccx, operand.ty) {
let (data_ptr, meta_ptr) = operand.get_fat_ptr();
if common::type_is_fat_ptr(tcx, cast_ty) {
if common::type_is_fat_ptr(self.ccx, cast_ty) {
let ll_cft = ll_cast_ty.field_types();
let ll_fft = ll_from_ty.field_types();
let data_cast = consts::ptrcast(data_ptr, ll_cft[0]);
@ -716,7 +716,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
let base = match tr_lvalue.base {
Base::Value(llval) => {
// FIXME: may be wrong for &*(&simd_vec as &fmt::Debug)
let align = if type_is_sized(self.ccx.tcx(), ty) {
let align = if self.ccx.shared().type_is_sized(ty) {
type_of::align_of(self.ccx, ty)
} else {
self.ccx.tcx().data_layout.pointer_align.abi() as machine::llalign
@ -731,7 +731,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
Base::Static(llval) => llval
};

let ptr = if common::type_is_sized(tcx, ty) {
let ptr = if self.ccx.shared().type_is_sized(ty) {
base
} else {
C_struct(self.ccx, &[base, tr_lvalue.llextra], false)
@ -945,40 +945,39 @@ pub fn const_scalar_checked_binop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
}
}

impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
impl<'a, 'tcx> MirContext<'a, 'tcx> {
pub fn trans_constant(&mut self,
bcx: &BlockAndBuilder<'bcx, 'tcx>,
bcx: &BlockAndBuilder<'a, 'tcx>,
constant: &mir::Constant<'tcx>)
-> Const<'tcx>
{
debug!("trans_constant({:?})", constant);
let ty = bcx.monomorphize(&constant.ty);
let ty = self.monomorphize(&constant.ty);
let result = match constant.literal.clone() {
mir::Literal::Item { def_id, substs } => {
// Shortcut for zero-sized types, including function item
// types, which would not work with MirConstContext.
if common::type_is_zero_size(bcx.ccx(), ty) {
let llty = type_of::type_of(bcx.ccx(), ty);
if common::type_is_zero_size(bcx.ccx, ty) {
let llty = type_of::type_of(bcx.ccx, ty);
return Const::new(C_null(llty), ty);
}

let substs = bcx.monomorphize(&substs);
let substs = self.monomorphize(&substs);
let instance = Instance::new(def_id, substs);
MirConstContext::trans_def(bcx.ccx(), instance, IndexVec::new())
MirConstContext::trans_def(bcx.ccx, instance, IndexVec::new())
}
mir::Literal::Promoted { index } => {
let mir = &self.mir.promoted[index];
MirConstContext::new(bcx.ccx(), mir, bcx.fcx().param_substs,
IndexVec::new()).trans()
MirConstContext::new(bcx.ccx, mir, self.param_substs, IndexVec::new()).trans()
}
mir::Literal::Value { value } => {
Ok(Const::from_constval(bcx.ccx(), value, ty))
Ok(Const::from_constval(bcx.ccx, value, ty))
}
};

let result = result.unwrap_or_else(|_| {
// We've errored, so we don't have to produce working code.
let llty = type_of::type_of(bcx.ccx(), ty);
let llty = type_of::type_of(bcx.ccx, ty);
Const::new(C_undef(llty), ty)
});

|
@ -44,13 +44,13 @@ impl<'tcx> LvalueRef<'tcx> {
LvalueRef { llval: llval, llextra: ptr::null_mut(), ty: lvalue_ty }
}

pub fn alloca<'bcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
pub fn alloca<'a>(bcx: &BlockAndBuilder<'a, 'tcx>,
ty: Ty<'tcx>,
name: &str)
-> LvalueRef<'tcx>
{
assert!(!ty.has_erasable_regions());
let lltemp = bcx.with_block(|bcx| base::alloc_ty(bcx, ty, name));
let lltemp = base::alloc_ty(bcx, ty, name);
LvalueRef::new_sized(lltemp, LvalueTy::from_ty(ty))
}

@ -67,14 +67,14 @@ impl<'tcx> LvalueRef<'tcx> {
}
}

impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
impl<'a, 'tcx> MirContext<'a, 'tcx> {
pub fn trans_lvalue(&mut self,
bcx: &BlockAndBuilder<'bcx, 'tcx>,
bcx: &BlockAndBuilder<'a, 'tcx>,
lvalue: &mir::Lvalue<'tcx>)
-> LvalueRef<'tcx> {
debug!("trans_lvalue(lvalue={:?})", lvalue);

let ccx = bcx.ccx();
let ccx = bcx.ccx;
let tcx = bcx.tcx();

if let mir::Lvalue::Local(index) = *lvalue {
@ -103,7 +103,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
let ptr = self.trans_consume(bcx, base);
let projected_ty = LvalueTy::from_ty(ptr.ty)
.projection_ty(tcx, &mir::ProjectionElem::Deref);
let projected_ty = bcx.monomorphize(&projected_ty);
let projected_ty = self.monomorphize(&projected_ty);
let (llptr, llextra) = match ptr.val {
OperandValue::Immediate(llptr) => (llptr, ptr::null_mut()),
OperandValue::Pair(llptr, llextra) => (llptr, llextra),
@ -118,14 +118,14 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
mir::Lvalue::Projection(ref projection) => {
let tr_base = self.trans_lvalue(bcx, &projection.base);
let projected_ty = tr_base.ty.projection_ty(tcx, &projection.elem);
let projected_ty = bcx.monomorphize(&projected_ty);
let projected_ty = self.monomorphize(&projected_ty);

let project_index = |llindex| {
let element = if let ty::TySlice(_) = tr_base.ty.to_ty(tcx).sty {
// Slices already point to the array element type.
bcx.inbounds_gep(tr_base.llval, &[llindex])
} else {
let zero = common::C_uint(bcx.ccx(), 0u64);
let zero = common::C_uint(bcx.ccx, 0u64);
bcx.inbounds_gep(tr_base.llval, &[zero, llindex])
};
element
@ -140,14 +140,14 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
LvalueTy::Downcast { adt_def: _, substs: _, variant_index: v } => v,
};
let discr = discr as u64;
let is_sized = common::type_is_sized(tcx, projected_ty.to_ty(tcx));
let is_sized = self.ccx.shared().type_is_sized(projected_ty.to_ty(tcx));
let base = if is_sized {
adt::MaybeSizedValue::sized(tr_base.llval)
} else {
adt::MaybeSizedValue::unsized_(tr_base.llval, tr_base.llextra)
};
let llprojected = adt::trans_field_ptr_builder(bcx, base_ty, base,
Disr(discr), field.index());
let llprojected = adt::trans_field_ptr(bcx, base_ty, base, Disr(discr),
field.index());
let llextra = if is_sized {
ptr::null_mut()
} else {
@ -162,19 +162,19 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
mir::ProjectionElem::ConstantIndex { offset,
from_end: false,
min_length: _ } => {
let lloffset = C_uint(bcx.ccx(), offset);
let lloffset = C_uint(bcx.ccx, offset);
(project_index(lloffset), ptr::null_mut())
}
mir::ProjectionElem::ConstantIndex { offset,
from_end: true,
min_length: _ } => {
let lloffset = C_uint(bcx.ccx(), offset);
let lllen = tr_base.len(bcx.ccx());
let lloffset = C_uint(bcx.ccx, offset);
let lllen = tr_base.len(bcx.ccx);
let llindex = bcx.sub(lllen, lloffset);
(project_index(llindex), ptr::null_mut())
}
mir::ProjectionElem::Subslice { from, to } => {
let llindex = C_uint(bcx.ccx(), from);
let llindex = C_uint(bcx.ccx, from);
let llbase = project_index(llindex);

let base_ty = tr_base.ty.to_ty(bcx.tcx());
@ -183,14 +183,14 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
// must cast the lvalue pointer type to the new
// array type (*[%_; new_len]).
let base_ty = self.monomorphized_lvalue_ty(lvalue);
let llbasety = type_of::type_of(bcx.ccx(), base_ty).ptr_to();
let llbasety = type_of::type_of(bcx.ccx, base_ty).ptr_to();
let llbase = bcx.pointercast(llbase, llbasety);
(llbase, ptr::null_mut())
}
ty::TySlice(..) => {
assert!(tr_base.llextra != ptr::null_mut());
let lllen = bcx.sub(tr_base.llextra,
C_uint(bcx.ccx(), from+to));
C_uint(bcx.ccx, from+to));
(llbase, lllen)
}
_ => bug!("unexpected type {:?} in Subslice", base_ty)
@ -214,7 +214,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
// Perform an action using the given Lvalue.
// If the Lvalue is an empty LocalRef::Operand, then a temporary stack slot
// is created first, then used as an operand to update the Lvalue.
pub fn with_lvalue_ref<F, U>(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>,
pub fn with_lvalue_ref<F, U>(&mut self, bcx: &BlockAndBuilder<'a, 'tcx>,
lvalue: &mir::Lvalue<'tcx>, f: F) -> U
where F: FnOnce(&mut Self, LvalueRef<'tcx>) -> U
{
@ -235,9 +235,9 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
// See comments in LocalRef::new_operand as to why
// we always have Some in a ZST LocalRef::Operand.
let ty = self.monomorphized_lvalue_ty(lvalue);
if common::type_is_zero_size(bcx.ccx(), ty) {
if common::type_is_zero_size(bcx.ccx, ty) {
// Pass an undef pointer as no stores can actually occur.
let llptr = C_undef(type_of(bcx.ccx(), ty).ptr_to());
let llptr = C_undef(type_of(bcx.ccx, ty).ptr_to());
f(self, LvalueRef::new_sized(llptr, LvalueTy::from_ty(ty)))
} else {
bug!("Lvalue local already set");
@ -255,13 +255,13 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
///
/// nmatsakis: is this still necessary? Not sure.
fn prepare_index(&mut self,
bcx: &BlockAndBuilder<'bcx, 'tcx>,
bcx: &BlockAndBuilder<'a, 'tcx>,
llindex: ValueRef)
-> ValueRef
{
let ccx = bcx.ccx();
let index_size = machine::llbitsize_of_real(bcx.ccx(), common::val_ty(llindex));
let int_size = machine::llbitsize_of_real(bcx.ccx(), ccx.int_type());
let ccx = bcx.ccx;
let index_size = machine::llbitsize_of_real(bcx.ccx, common::val_ty(llindex));
let int_size = machine::llbitsize_of_real(bcx.ccx, ccx.int_type());
if index_size < int_size {
bcx.zext(llindex, ccx.int_type())
} else if index_size > int_size {
@ -272,8 +272,8 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
}

pub fn monomorphized_lvalue_ty(&self, lvalue: &mir::Lvalue<'tcx>) -> Ty<'tcx> {
let tcx = self.fcx.ccx.tcx();
let tcx = self.ccx.tcx();
let lvalue_ty = lvalue.ty(&self.mir, tcx);
self.fcx.monomorphize(&lvalue_ty.to_ty(tcx))
self.monomorphize(&lvalue_ty.to_ty(tcx))
}
}
|
@ -9,40 +9,50 @@
|
||||
// except according to those terms.
|
||||
|
||||
use libc::c_uint;
|
||||
use llvm::{self, ValueRef};
|
||||
use llvm::{self, ValueRef, BasicBlockRef};
|
||||
use llvm::debuginfo::DIScope;
|
||||
use rustc::ty::{self, layout};
|
||||
use rustc::mir;
|
||||
use rustc::mir::{self, Mir};
|
||||
use rustc::mir::tcx::LvalueTy;
|
||||
use rustc::ty::subst::Substs;
|
||||
use rustc::infer::TransNormalize;
|
||||
use rustc::ty::TypeFoldable;
|
||||
use session::config::FullDebugInfo;
|
||||
use base;
|
||||
use common::{self, Block, BlockAndBuilder, CrateContext, FunctionContext, C_null};
|
||||
use debuginfo::{self, declare_local, DebugLoc, VariableAccess, VariableKind, FunctionDebugContext};
|
||||
use common::{self, BlockAndBuilder, CrateContext, FunctionContext, C_null, Funclet};
|
||||
use debuginfo::{self, declare_local, VariableAccess, VariableKind, FunctionDebugContext};
|
||||
use monomorphize::{self, Instance};
|
||||
use abi::FnType;
|
||||
use type_of;
|
||||
|
||||
use syntax_pos::{DUMMY_SP, NO_EXPANSION, COMMAND_LINE_EXPN, BytePos};
|
||||
use syntax_pos::{DUMMY_SP, NO_EXPANSION, COMMAND_LINE_EXPN, BytePos, Span};
|
||||
use syntax::symbol::keywords;
|
||||
use syntax::abi::Abi;
|
||||
|
||||
use std::cell::Ref;
|
||||
use std::iter;
|
||||
|
||||
use basic_block::BasicBlock;
|
||||
|
||||
use rustc_data_structures::bitvec::BitVector;
|
||||
use rustc_data_structures::indexed_vec::{IndexVec, Idx};
|
||||
|
||||
pub use self::constant::trans_static_initializer;
|
||||
|
||||
use self::analyze::CleanupKind;
|
||||
use self::lvalue::{LvalueRef};
|
||||
use rustc::mir::traversal;
|
||||
|
||||
use self::operand::{OperandRef, OperandValue};
|
||||
|
||||
/// Master context for translating MIR.
|
||||
pub struct MirContext<'bcx, 'tcx:'bcx> {
|
||||
mir: Ref<'tcx, mir::Mir<'tcx>>,
|
||||
pub struct MirContext<'a, 'tcx:'a> {
|
||||
mir: &'a mir::Mir<'tcx>,
|
||||
|
||||
/// Function context
|
||||
fcx: &'bcx common::FunctionContext<'bcx, 'tcx>,
|
||||
debug_context: debuginfo::FunctionDebugContext,
|
||||
|
||||
fcx: &'a common::FunctionContext<'a, 'tcx>,
|
||||
|
||||
ccx: &'a CrateContext<'a, 'tcx>,
|
||||
|
||||
fn_ty: FnType,
|
||||
|
||||
/// When unwinding is initiated, we have to store this personality
|
||||
/// value somewhere so that we can load it and re-use it in the
|
||||
@ -54,17 +64,17 @@ pub struct MirContext<'bcx, 'tcx:'bcx> {
|
||||
llpersonalityslot: Option<ValueRef>,
|
||||
|
||||
/// A `Block` for each MIR `BasicBlock`
|
||||
blocks: IndexVec<mir::BasicBlock, Block<'bcx, 'tcx>>,
|
||||
blocks: IndexVec<mir::BasicBlock, BasicBlockRef>,
|
||||
|
||||
/// The funclet status of each basic block
|
||||
cleanup_kinds: IndexVec<mir::BasicBlock, analyze::CleanupKind>,
|
||||
|
||||
/// This stores the landing-pad block for a given BB, computed lazily on GNU
|
||||
/// and eagerly on MSVC.
|
||||
landing_pads: IndexVec<mir::BasicBlock, Option<Block<'bcx, 'tcx>>>,
|
||||
landing_pads: IndexVec<mir::BasicBlock, Option<BasicBlockRef>>,
|
||||
|
||||
/// Cached unreachable block
|
||||
unreachable_block: Option<Block<'bcx, 'tcx>>,
|
||||
unreachable_block: Option<BasicBlockRef>,
|
||||
|
||||
/// The location where each MIR arg/var/tmp/ret is stored. This is
|
||||
/// usually an `LvalueRef` representing an alloca, but not always:
|
||||
@ -85,18 +95,28 @@ pub struct MirContext<'bcx, 'tcx:'bcx> {
|
||||
|
||||
/// Debug information for MIR scopes.
|
||||
scopes: IndexVec<mir::VisibilityScope, debuginfo::MirDebugScope>,
|
||||
|
||||
/// If this function is being monomorphized, this contains the type substitutions used.
|
||||
param_substs: &'tcx Substs<'tcx>,
|
||||
}
|
||||
|
||||
impl<'blk, 'tcx> MirContext<'blk, 'tcx> {
|
||||
pub fn debug_loc(&mut self, source_info: mir::SourceInfo) -> DebugLoc {
|
||||
impl<'a, 'tcx> MirContext<'a, 'tcx> {
|
||||
pub fn monomorphize<T>(&self, value: &T) -> T
|
||||
where T: TransNormalize<'tcx> {
|
||||
monomorphize::apply_param_substs(self.ccx.shared(), self.param_substs, value)
|
||||
}
|
||||
|
||||
pub fn set_debug_loc(&mut self, bcx: &BlockAndBuilder, source_info: mir::SourceInfo) {
|
||||
let (scope, span) = self.debug_loc(source_info);
|
||||
debuginfo::set_source_location(&self.debug_context, bcx, scope, span);
|
||||
}
|
||||
|
||||
pub fn debug_loc(&mut self, source_info: mir::SourceInfo) -> (DIScope, Span) {
|
||||
// Bail out if debug info emission is not enabled.
|
||||
match self.fcx.debug_context {
|
||||
match self.debug_context {
|
||||
FunctionDebugContext::DebugInfoDisabled |
|
||||
FunctionDebugContext::FunctionWithoutDebugInfo => {
|
||||
// Can't return DebugLoc::None here because intrinsic::trans_intrinsic_call()
|
||||
// relies on debug location to obtain span of the call site.
|
||||
return DebugLoc::ScopeAt(self.scopes[source_info.scope].scope_metadata,
|
||||
source_info.span);
|
||||
return (self.scopes[source_info.scope].scope_metadata, source_info.span);
|
||||
}
|
||||
FunctionDebugContext::RegularContext(_) =>{}
|
||||
}
|
||||
@ -106,13 +126,12 @@ impl<'blk, 'tcx> MirContext<'blk, 'tcx> {
|
||||
// (unless the crate is being compiled with `-Z debug-macros`).
|
||||
if source_info.span.expn_id == NO_EXPANSION ||
|
||||
source_info.span.expn_id == COMMAND_LINE_EXPN ||
|
||||
self.fcx.ccx.sess().opts.debugging_opts.debug_macros {
|
||||
self.ccx.sess().opts.debugging_opts.debug_macros {
|
||||
|
||||
let scope_metadata = self.scope_metadata_for_loc(source_info.scope,
|
||||
source_info.span.lo);
|
||||
DebugLoc::ScopeAt(scope_metadata, source_info.span)
|
||||
let scope = self.scope_metadata_for_loc(source_info.scope, source_info.span.lo);
|
||||
(scope, source_info.span)
|
||||
} else {
|
||||
let cm = self.fcx.ccx.sess().codemap();
|
||||
let cm = self.ccx.sess().codemap();
|
||||
// Walk up the macro expansion chain until we reach a non-expanded span.
|
||||
let mut span = source_info.span;
|
||||
while span.expn_id != NO_EXPANSION && span.expn_id != COMMAND_LINE_EXPN {
|
||||
@ -123,9 +142,9 @@ impl<'blk, 'tcx> MirContext<'blk, 'tcx> {
break;
}
}
let scope_metadata = self.scope_metadata_for_loc(source_info.scope, span.lo);
let scope = self.scope_metadata_for_loc(source_info.scope, span.lo);
// Use span of the outermost call site, while keeping the original lexical scope
DebugLoc::ScopeAt(scope_metadata, span)
(scope, span)
}
}

@ -138,10 +157,8 @@ impl<'blk, 'tcx> MirContext<'blk, 'tcx> {
let scope_metadata = self.scopes[scope_id].scope_metadata;
if pos < self.scopes[scope_id].file_start_pos ||
pos >= self.scopes[scope_id].file_end_pos {
let cm = self.fcx.ccx.sess().codemap();
debuginfo::extend_scope_to_file(self.fcx.ccx,
scope_metadata,
&cm.lookup_char_pos(pos).file)
let cm = self.ccx.sess().codemap();
debuginfo::extend_scope_to_file(self.ccx, scope_metadata, &cm.lookup_char_pos(pos).file)
} else {
scope_metadata
}
@ -154,7 +171,7 @@ enum LocalRef<'tcx> {
}

impl<'tcx> LocalRef<'tcx> {
fn new_operand<'bcx>(ccx: &CrateContext<'bcx, 'tcx>,
fn new_operand<'a>(ccx: &CrateContext<'a, 'tcx>,
ty: ty::Ty<'tcx>) -> LocalRef<'tcx> {
if common::type_is_zero_size(ccx, ty) {
// Zero-size temporaries aren't always initialized, which
@ -180,19 +197,22 @@ impl<'tcx> LocalRef<'tcx> {

///////////////////////////////////////////////////////////////////////////

pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) {
let bcx = fcx.init(true).build();
let mir = bcx.mir();
pub fn trans_mir<'a, 'tcx: 'a>(
fcx: &'a FunctionContext<'a, 'tcx>,
fn_ty: FnType,
mir: &'a Mir<'tcx>,
instance: Instance<'tcx>,
sig: &ty::FnSig<'tcx>,
abi: Abi,
) {
let debug_context =
debuginfo::create_function_debug_context(fcx.ccx, instance, sig, abi, fcx.llfn, mir);
let bcx = fcx.get_entry_block();

// Analyze the temps to determine which must be lvalues
// FIXME
let (lvalue_locals, cleanup_kinds) = bcx.with_block(|bcx| {
(analyze::lvalue_locals(bcx, &mir),
analyze::cleanup_kinds(bcx, &mir))
});
let cleanup_kinds = analyze::cleanup_kinds(&mir);

// Allocate a `Block` for every basic block
let block_bcxs: IndexVec<mir::BasicBlock, Block<'blk,'tcx>> =
let block_bcxs: IndexVec<mir::BasicBlock, BasicBlockRef> =
mir.basic_blocks().indices().map(|bb| {
if bb == mir::START_BLOCK {
fcx.new_block("start")
@ -202,11 +222,13 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) {
}).collect();

// Compute debuginfo scopes from MIR scopes.
let scopes = debuginfo::create_mir_scopes(fcx);
let scopes = debuginfo::create_mir_scopes(fcx, mir, &debug_context);

let mut mircx = MirContext {
mir: Ref::clone(&mir),
mir: mir,
fcx: fcx,
fn_ty: fn_ty,
ccx: fcx.ccx,
llpersonalityslot: None,
blocks: block_bcxs,
unreachable_block: None,
@ -214,15 +236,22 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) {
landing_pads: IndexVec::from_elem(None, mir.basic_blocks()),
scopes: scopes,
locals: IndexVec::new(),
debug_context: debug_context,
param_substs: {
assert!(!instance.substs.needs_infer());
instance.substs
},
};

let lvalue_locals = analyze::lvalue_locals(&mircx);

// Allocate variable and temp allocas
mircx.locals = {
let args = arg_local_refs(&bcx, &mir, &mircx.scopes, &lvalue_locals);
let args = arg_local_refs(&bcx, &mircx, &mircx.scopes, &lvalue_locals);

let mut allocate_local = |local| {
let decl = &mir.local_decls[local];
let ty = bcx.monomorphize(&decl.ty);
let ty = mircx.monomorphize(&decl.ty);

if let Some(name) = decl.name {
// User variable
@ -232,27 +261,21 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) {

if !lvalue_locals.contains(local.index()) && !dbg {
debug!("alloc: {:?} ({}) -> operand", local, name);
return LocalRef::new_operand(bcx.ccx(), ty);
return LocalRef::new_operand(bcx.ccx, ty);
}

debug!("alloc: {:?} ({}) -> lvalue", local, name);
let lvalue = LvalueRef::alloca(&bcx, ty, &name.as_str());
if dbg {
let dbg_loc = mircx.debug_loc(source_info);
if let DebugLoc::ScopeAt(scope, span) = dbg_loc {
bcx.with_block(|bcx| {
declare_local(bcx, name, ty, scope,
VariableAccess::DirectVariable { alloca: lvalue.llval },
VariableKind::LocalVariable, span);
});
} else {
panic!("Unexpected");
}
let (scope, span) = mircx.debug_loc(source_info);
declare_local(&bcx, &mircx.debug_context, name, ty, scope,
VariableAccess::DirectVariable { alloca: lvalue.llval },
VariableKind::LocalVariable, span);
}
LocalRef::Lvalue(lvalue)
} else {
// Temporary or return pointer
if local == mir::RETURN_POINTER && fcx.fn_ty.ret.is_indirect() {
if local == mir::RETURN_POINTER && mircx.fn_ty.ret.is_indirect() {
debug!("alloc: {:?} (return pointer) -> lvalue", local);
let llretptr = llvm::get_param(fcx.llfn, 0);
LocalRef::Lvalue(LvalueRef::new_sized(llretptr, LvalueTy::from_ty(ty)))
@ -264,7 +287,7 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) {
// alloca in advance. Instead we wait until we see the
// definition and update the operand there.
debug!("alloc: {:?} -> operand", local);
LocalRef::new_operand(bcx.ccx(), ty)
LocalRef::new_operand(bcx.ccx, ty)
}
}
};
@ -278,57 +301,61 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) {

// Branch to the START block
let start_bcx = mircx.blocks[mir::START_BLOCK];
bcx.br(start_bcx.llbb);
bcx.br(start_bcx);

// Up until here, IR instructions for this function have explicitly not been annotated with
// source code location, so we don't step into call setup code. From here on, source location
// emitting should be enabled.
debuginfo::start_emitting_source_locations(fcx);
debuginfo::start_emitting_source_locations(&mircx.debug_context);

let funclets: IndexVec<mir::BasicBlock, Option<Funclet>> =
mircx.cleanup_kinds.iter_enumerated().map(|(bb, cleanup_kind)| {
if let CleanupKind::Funclet = *cleanup_kind {
let bcx = mircx.build_block(bb);
bcx.set_personality_fn(mircx.ccx.eh_personality());
if base::wants_msvc_seh(fcx.ccx.sess()) {
return Some(Funclet::new(bcx.cleanup_pad(None, &[])));
}
}

None
}).collect();

let rpo = traversal::reverse_postorder(&mir);
let mut visited = BitVector::new(mir.basic_blocks().len());

let mut rpo = traversal::reverse_postorder(&mir);

// Prepare each block for translation.
for (bb, _) in rpo.by_ref() {
mircx.init_cpad(bb);
}
rpo.reset();

// Translate the body of each block using reverse postorder
for (bb, _) in rpo {
visited.insert(bb.index());
mircx.trans_block(bb);
mircx.trans_block(bb, &funclets);
}

// Remove blocks that haven't been visited, or have no
// predecessors.
for bb in mir.basic_blocks().indices() {
let block = mircx.blocks[bb];
let block = BasicBlock(block.llbb);
// Unreachable block
if !visited.contains(bb.index()) {
debug!("trans_mir: block {:?} was not visited", bb);
block.delete();
unsafe {
llvm::LLVMDeleteBasicBlock(mircx.blocks[bb]);
}
}
}

DebugLoc::None.apply(fcx);
fcx.cleanup();
}

/// Produce, for each argument, a `ValueRef` pointing at the
/// argument's value. As arguments are lvalues, these are always
/// indirect.
fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
mir: &mir::Mir<'tcx>,
scopes: &IndexVec<mir::VisibilityScope, debuginfo::MirDebugScope>,
lvalue_locals: &BitVector)
-> Vec<LocalRef<'tcx>> {
fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
mircx: &MirContext<'a, 'tcx>,
scopes: &IndexVec<mir::VisibilityScope, debuginfo::MirDebugScope>,
lvalue_locals: &BitVector)
-> Vec<LocalRef<'tcx>> {
let mir = mircx.mir;
let fcx = bcx.fcx();
let tcx = bcx.tcx();
let mut idx = 0;
let mut llarg_idx = fcx.fn_ty.ret.is_indirect() as usize;
let mut llarg_idx = mircx.fn_ty.ret.is_indirect() as usize;

// Get the argument scope, if it exists and if we need it.
let arg_scope = scopes[mir::ARGUMENT_VISIBILITY_SCOPE];
@ -340,7 +367,7 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,

mir.args_iter().enumerate().map(|(arg_index, local)| {
let arg_decl = &mir.local_decls[local];
let arg_ty = bcx.monomorphize(&arg_decl.ty);
let arg_ty = mircx.monomorphize(&arg_decl.ty);

if Some(local) == mir.spread_arg {
// This argument (e.g. the last argument in the "rust-call" ABI)
@ -353,22 +380,18 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
_ => bug!("spread argument isn't a tuple?!")
};

let lltemp = bcx.with_block(|bcx| {
base::alloc_ty(bcx, arg_ty, &format!("arg{}", arg_index))
});
let lltemp = base::alloc_ty(&bcx, arg_ty, &format!("arg{}", arg_index));
for (i, &tupled_arg_ty) in tupled_arg_tys.iter().enumerate() {
let dst = bcx.struct_gep(lltemp, i);
let arg = &fcx.fn_ty.args[idx];
let arg = &mircx.fn_ty.args[idx];
idx += 1;
if common::type_is_fat_ptr(tcx, tupled_arg_ty) {
if common::type_is_fat_ptr(bcx.ccx, tupled_arg_ty) {
// We pass fat pointers as two words, but inside the tuple
// they are the two sub-fields of a single aggregate field.
let meta = &fcx.fn_ty.args[idx];
let meta = &mircx.fn_ty.args[idx];
idx += 1;
arg.store_fn_arg(bcx, &mut llarg_idx,
base::get_dataptr_builder(bcx, dst));
meta.store_fn_arg(bcx, &mut llarg_idx,
base::get_meta_builder(bcx, dst));
arg.store_fn_arg(bcx, &mut llarg_idx, base::get_dataptr(bcx, dst));
meta.store_fn_arg(bcx, &mut llarg_idx, base::get_meta(bcx, dst));
} else {
arg.store_fn_arg(bcx, &mut llarg_idx, dst);
}
@ -376,20 +399,25 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,

// Now that we have one alloca that contains the aggregate value,
// we can create one debuginfo entry for the argument.
bcx.with_block(|bcx| arg_scope.map(|scope| {
arg_scope.map(|scope| {
let variable_access = VariableAccess::DirectVariable {
alloca: lltemp
};
declare_local(bcx, arg_decl.name.unwrap_or(keywords::Invalid.name()),
arg_ty, scope, variable_access,
VariableKind::ArgumentVariable(arg_index + 1),
bcx.fcx().span.unwrap_or(DUMMY_SP));
}));
declare_local(
bcx,
&mircx.debug_context,
arg_decl.name.unwrap_or(keywords::Invalid.name()),
arg_ty, scope,
variable_access,
VariableKind::ArgumentVariable(arg_index + 1),
DUMMY_SP
);
});

return LocalRef::Lvalue(LvalueRef::new_sized(lltemp, LvalueTy::from_ty(arg_ty)));
}

let arg = &fcx.fn_ty.args[idx];
let arg = &mircx.fn_ty.args[idx];
idx += 1;
let llval = if arg.is_indirect() && bcx.sess().opts.debuginfo != FullDebugInfo {
// Don't copy an indirect argument to an alloca, the caller
@ -406,7 +434,7 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
!arg.is_indirect() && arg.cast.is_none() &&
arg_scope.is_none() {
if arg.is_ignore() {
return LocalRef::new_operand(bcx.ccx(), arg_ty);
return LocalRef::new_operand(bcx.ccx, arg_ty);
}

// We don't have to cast or keep the argument in the alloca.
@ -417,8 +445,8 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
}
let llarg = llvm::get_param(fcx.llfn, llarg_idx as c_uint);
llarg_idx += 1;
let val = if common::type_is_fat_ptr(tcx, arg_ty) {
let meta = &fcx.fn_ty.args[idx];
let val = if common::type_is_fat_ptr(bcx.ccx, arg_ty) {
let meta = &mircx.fn_ty.args[idx];
idx += 1;
assert_eq!((meta.cast, meta.pad), (None, None));
let llmeta = llvm::get_param(fcx.llfn, llarg_idx as c_uint);
@ -433,19 +461,15 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
};
return LocalRef::Operand(Some(operand.unpack_if_pair(bcx)));
} else {
let lltemp = bcx.with_block(|bcx| {
base::alloc_ty(bcx, arg_ty, &format!("arg{}", arg_index))
});
if common::type_is_fat_ptr(tcx, arg_ty) {
let lltemp = base::alloc_ty(&bcx, arg_ty, &format!("arg{}", arg_index));
if common::type_is_fat_ptr(bcx.ccx, arg_ty) {
// we pass fat pointers as two words, but we want to
// represent them internally as a pointer to two words,
// so make an alloca to store them in.
let meta = &fcx.fn_ty.args[idx];
let meta = &mircx.fn_ty.args[idx];
idx += 1;
arg.store_fn_arg(bcx, &mut llarg_idx,
base::get_dataptr_builder(bcx, lltemp));
meta.store_fn_arg(bcx, &mut llarg_idx,
base::get_meta_builder(bcx, lltemp));
arg.store_fn_arg(bcx, &mut llarg_idx, base::get_dataptr(bcx, lltemp));
meta.store_fn_arg(bcx, &mut llarg_idx, base::get_meta(bcx, lltemp));
} else {
// otherwise, arg is passed by value, so make a
// temporary and store it there
@ -453,13 +477,19 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
}
lltemp
};
bcx.with_block(|bcx| arg_scope.map(|scope| {
arg_scope.map(|scope| {
// Is this a regular argument?
if arg_index > 0 || mir.upvar_decls.is_empty() {
declare_local(bcx, arg_decl.name.unwrap_or(keywords::Invalid.name()), arg_ty,
scope, VariableAccess::DirectVariable { alloca: llval },
VariableKind::ArgumentVariable(arg_index + 1),
bcx.fcx().span.unwrap_or(DUMMY_SP));
declare_local(
bcx,
&mircx.debug_context,
arg_decl.name.unwrap_or(keywords::Invalid.name()),
arg_ty,
scope,
VariableAccess::DirectVariable { alloca: llval },
VariableKind::ArgumentVariable(arg_index + 1),
DUMMY_SP
);
return;
}

@ -483,17 +513,14 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
// doesn't actually strip the offset when splitting the closure
// environment into its components so it ends up out of bounds.
let env_ptr = if !env_ref {
use base::*;
use build::*;
use common::*;
let alloc = alloca(bcx, val_ty(llval), "__debuginfo_env_ptr");
Store(bcx, llval, alloc);
let alloc = bcx.fcx().alloca(common::val_ty(llval), "__debuginfo_env_ptr");
bcx.store(llval, alloc);
alloc
} else {
llval
};

let layout = bcx.ccx().layout_of(closure_ty);
let layout = bcx.ccx.layout_of(closure_ty);
let offsets = match *layout {
layout::Univariant { ref variant, .. } => &variant.offsets[..],
_ => bug!("Closures are only supposed to be Univariant")
@ -502,7 +529,6 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
for (i, (decl, ty)) in mir.upvar_decls.iter().zip(upvar_tys).enumerate() {
let byte_offset_of_var_in_env = offsets[i].bytes();


let ops = unsafe {
[llvm::LLVMRustDIBuilderCreateOpDeref(),
llvm::LLVMRustDIBuilderCreateOpPlus(),
@ -527,11 +553,18 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
alloca: env_ptr,
address_operations: &ops
};
declare_local(bcx, decl.debug_name, ty, scope, variable_access,
VariableKind::CapturedVariable,
bcx.fcx().span.unwrap_or(DUMMY_SP));
declare_local(
bcx,
&mircx.debug_context,
decl.debug_name,
ty,
scope,
variable_access,
VariableKind::CapturedVariable,
DUMMY_SP
);
}
}));
});
LocalRef::Lvalue(LvalueRef::new_sized(llval, LvalueTy::from_ty(arg_ty)))
}).collect()
}

@ -14,7 +14,7 @@ use rustc::mir;
use rustc_data_structures::indexed_vec::Idx;

use base;
use common::{self, Block, BlockAndBuilder};
use common::{self, BlockAndBuilder};
use value::Value;
use type_of;
use type_::Type;
@ -73,7 +73,7 @@ impl<'tcx> fmt::Debug for OperandRef<'tcx> {
}
}

impl<'bcx, 'tcx> OperandRef<'tcx> {
impl<'a, 'tcx> OperandRef<'tcx> {
/// Asserts that this operand refers to a scalar and returns
/// a reference to its value.
pub fn immediate(self) -> ValueRef {
@ -85,18 +85,18 @@ impl<'bcx, 'tcx> OperandRef<'tcx> {

/// If this operand is a Pair, we return an
/// Immediate aggregate with the two values.
pub fn pack_if_pair(mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>)
pub fn pack_if_pair(mut self, bcx: &BlockAndBuilder<'a, 'tcx>)
-> OperandRef<'tcx> {
if let OperandValue::Pair(a, b) = self.val {
// Reconstruct the immediate aggregate.
let llty = type_of::type_of(bcx.ccx(), self.ty);
let llty = type_of::type_of(bcx.ccx, self.ty);
let mut llpair = common::C_undef(llty);
let elems = [a, b];
for i in 0..2 {
let mut elem = elems[i];
// Extend boolean i1's to i8.
if common::val_ty(elem) == Type::i1(bcx.ccx()) {
elem = bcx.zext(elem, Type::i8(bcx.ccx()));
if common::val_ty(elem) == Type::i1(bcx.ccx) {
elem = bcx.zext(elem, Type::i8(bcx.ccx));
}
llpair = bcx.insert_value(llpair, elem, i);
}
@ -107,23 +107,23 @@ impl<'bcx, 'tcx> OperandRef<'tcx> {

/// If this operand is a pair in an Immediate,
/// we return a Pair with the two halves.
pub fn unpack_if_pair(mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>)
pub fn unpack_if_pair(mut self, bcx: &BlockAndBuilder<'a, 'tcx>)
-> OperandRef<'tcx> {
if let OperandValue::Immediate(llval) = self.val {
// Deconstruct the immediate aggregate.
if common::type_is_imm_pair(bcx.ccx(), self.ty) {
if common::type_is_imm_pair(bcx.ccx, self.ty) {
debug!("Operand::unpack_if_pair: unpacking {:?}", self);

let mut a = bcx.extract_value(llval, 0);
let mut b = bcx.extract_value(llval, 1);

let pair_fields = common::type_pair_fields(bcx.ccx(), self.ty);
let pair_fields = common::type_pair_fields(bcx.ccx, self.ty);
if let Some([a_ty, b_ty]) = pair_fields {
if a_ty.is_bool() {
a = bcx.trunc(a, Type::i1(bcx.ccx()));
a = bcx.trunc(a, Type::i1(bcx.ccx));
}
if b_ty.is_bool() {
b = bcx.trunc(b, Type::i1(bcx.ccx()));
b = bcx.trunc(b, Type::i1(bcx.ccx));
}
}

@ -134,29 +134,29 @@ impl<'bcx, 'tcx> OperandRef<'tcx> {
}
}

impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
impl<'a, 'tcx> MirContext<'a, 'tcx> {
pub fn trans_load(&mut self,
bcx: &BlockAndBuilder<'bcx, 'tcx>,
bcx: &BlockAndBuilder<'a, 'tcx>,
llval: ValueRef,
ty: Ty<'tcx>)
-> OperandRef<'tcx>
{
debug!("trans_load: {:?} @ {:?}", Value(llval), ty);

let val = if common::type_is_fat_ptr(bcx.tcx(), ty) {
let (lldata, llextra) = base::load_fat_ptr_builder(bcx, llval, ty);
let val = if common::type_is_fat_ptr(bcx.ccx, ty) {
let (lldata, llextra) = base::load_fat_ptr(bcx, llval, ty);
OperandValue::Pair(lldata, llextra)
} else if common::type_is_imm_pair(bcx.ccx(), ty) {
let [a_ty, b_ty] = common::type_pair_fields(bcx.ccx(), ty).unwrap();
} else if common::type_is_imm_pair(bcx.ccx, ty) {
let [a_ty, b_ty] = common::type_pair_fields(bcx.ccx, ty).unwrap();
let a_ptr = bcx.struct_gep(llval, 0);
let b_ptr = bcx.struct_gep(llval, 1);

OperandValue::Pair(
base::load_ty_builder(bcx, a_ptr, a_ty),
base::load_ty_builder(bcx, b_ptr, b_ty)
base::load_ty(bcx, a_ptr, a_ty),
base::load_ty(bcx, b_ptr, b_ty)
)
} else if common::type_is_immediate(bcx.ccx(), ty) {
OperandValue::Immediate(base::load_ty_builder(bcx, llval, ty))
} else if common::type_is_immediate(bcx.ccx, ty) {
OperandValue::Immediate(base::load_ty(bcx, llval, ty))
} else {
OperandValue::Ref(llval)
};
@ -165,7 +165,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
}

pub fn trans_consume(&mut self,
bcx: &BlockAndBuilder<'bcx, 'tcx>,
bcx: &BlockAndBuilder<'a, 'tcx>,
lvalue: &mir::Lvalue<'tcx>)
-> OperandRef<'tcx>
{
@ -197,7 +197,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
let llval = [a, b][f.index()];
let op = OperandRef {
val: OperandValue::Immediate(llval),
ty: bcx.monomorphize(&ty)
ty: self.monomorphize(&ty)
};

// Handle nested pairs.
@ -217,7 +217,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
}

pub fn trans_operand(&mut self,
bcx: &BlockAndBuilder<'bcx, 'tcx>,
bcx: &BlockAndBuilder<'a, 'tcx>,
operand: &mir::Operand<'tcx>)
-> OperandRef<'tcx>
{
@ -230,7 +230,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {

mir::Operand::Constant(ref constant) => {
let val = self.trans_constant(bcx, constant);
let operand = val.to_operand(bcx.ccx());
let operand = val.to_operand(bcx.ccx);
if let OperandValue::Ref(ptr) = operand.val {
// If this is an OperandValue::Ref to an immediate constant, load it.
self.trans_load(bcx, ptr, operand.ty)
@ -242,33 +242,23 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
}

pub fn store_operand(&mut self,
bcx: &BlockAndBuilder<'bcx, 'tcx>,
bcx: &BlockAndBuilder<'a, 'tcx>,
lldest: ValueRef,
operand: OperandRef<'tcx>)
{
debug!("store_operand: operand={:?} lldest={:?}", operand, lldest);
bcx.with_block(|bcx| self.store_operand_direct(bcx, lldest, operand))
}

pub fn store_operand_direct(&mut self,
bcx: Block<'bcx, 'tcx>,
lldest: ValueRef,
operand: OperandRef<'tcx>)
{
operand: OperandRef<'tcx>) {
debug!("store_operand: operand={:?}", operand);
// Avoid generating stores of zero-sized values, because the only way to have a zero-sized
// value is through `undef`, and store itself is useless.
if common::type_is_zero_size(bcx.ccx(), operand.ty) {
if common::type_is_zero_size(bcx.ccx, operand.ty) {
return;
}
match operand.val {
OperandValue::Ref(r) => base::memcpy_ty(bcx, lldest, r, operand.ty),
OperandValue::Immediate(s) => base::store_ty(bcx, s, lldest, operand.ty),
OperandValue::Pair(a, b) => {
use build::*;
let a = base::from_immediate(bcx, a);
let b = base::from_immediate(bcx, b);
Store(bcx, a, StructGEP(bcx, lldest, 0));
Store(bcx, b, StructGEP(bcx, lldest, 1));
bcx.store(a, bcx.struct_gep(lldest, 0));
bcx.store(b, bcx.struct_gep(lldest, 1));
}
}
}

@ -13,13 +13,13 @@ use rustc::ty::{self, Ty};
use rustc::ty::cast::{CastTy, IntTy};
use rustc::ty::layout::Layout;
use rustc::mir;
use middle::lang_items::ExchangeMallocFnLangItem;

use asm;
use base;
use callee::Callee;
use common::{self, val_ty, C_bool, C_null, C_uint, BlockAndBuilder, Result};
use common::{self, val_ty, C_bool, C_null, C_uint, BlockAndBuilder};
use common::{C_integral};
use debuginfo::DebugLoc;
use adt;
use machine;
use type_::Type;
@ -33,13 +33,12 @@ use super::constant::const_scalar_checked_binop;
use super::operand::{OperandRef, OperandValue};
use super::lvalue::{LvalueRef};

impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
impl<'a, 'tcx> MirContext<'a, 'tcx> {
pub fn trans_rvalue(&mut self,
bcx: BlockAndBuilder<'bcx, 'tcx>,
bcx: BlockAndBuilder<'a, 'tcx>,
dest: LvalueRef<'tcx>,
rvalue: &mir::Rvalue<'tcx>,
debug_loc: DebugLoc)
-> BlockAndBuilder<'bcx, 'tcx>
rvalue: &mir::Rvalue<'tcx>)
-> BlockAndBuilder<'a, 'tcx>
{
debug!("trans_rvalue(dest.llval={:?}, rvalue={:?})",
Value(dest.llval), rvalue);
@ -54,12 +53,12 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
}

mir::Rvalue::Cast(mir::CastKind::Unsize, ref source, cast_ty) => {
let cast_ty = bcx.monomorphize(&cast_ty);
let cast_ty = self.monomorphize(&cast_ty);

if common::type_is_fat_ptr(bcx.tcx(), cast_ty) {
if common::type_is_fat_ptr(bcx.ccx, cast_ty) {
// into-coerce of a thin pointer to a fat pointer - just
// use the operand path.
let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue, debug_loc);
let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
self.store_operand(&bcx, dest.llval, temp);
return bcx;
}
@ -70,71 +69,57 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
// so the (generic) MIR may not be able to expand it.
let operand = self.trans_operand(&bcx, source);
let operand = operand.pack_if_pair(&bcx);
bcx.with_block(|bcx| {
match operand.val {
OperandValue::Pair(..) => bug!(),
OperandValue::Immediate(llval) => {
// unsize from an immediate structure. We don't
// really need a temporary alloca here, but
// avoiding it would require us to have
// `coerce_unsized_into` use extractvalue to
// index into the struct, and this case isn't
// important enough for it.
debug!("trans_rvalue: creating ugly alloca");
let lltemp = base::alloc_ty(bcx, operand.ty, "__unsize_temp");
base::store_ty(bcx, llval, lltemp, operand.ty);
base::coerce_unsized_into(bcx,
lltemp, operand.ty,
dest.llval, cast_ty);
}
OperandValue::Ref(llref) => {
base::coerce_unsized_into(bcx,
llref, operand.ty,
dest.llval, cast_ty);
}
let llref = match operand.val {
OperandValue::Pair(..) => bug!(),
OperandValue::Immediate(llval) => {
// unsize from an immediate structure. We don't
// really need a temporary alloca here, but
// avoiding it would require us to have
// `coerce_unsized_into` use extractvalue to
// index into the struct, and this case isn't
// important enough for it.
debug!("trans_rvalue: creating ugly alloca");
let lltemp = base::alloc_ty(&bcx, operand.ty, "__unsize_temp");
base::store_ty(&bcx, llval, lltemp, operand.ty);
lltemp
}
});
OperandValue::Ref(llref) => llref
};
base::coerce_unsized_into(&bcx, llref, operand.ty, dest.llval, cast_ty);
bcx
}

mir::Rvalue::Repeat(ref elem, ref count) => {
let tr_elem = self.trans_operand(&bcx, elem);
let size = count.value.as_u64(bcx.tcx().sess.target.uint_type);
let size = C_uint(bcx.ccx(), size);
let base = base::get_dataptr_builder(&bcx, dest.llval);
let bcx = bcx.map_block(|block| {
tvec::slice_for_each(block, base, tr_elem.ty, size, |block, llslot| {
self.store_operand_direct(block, llslot, tr_elem);
block
})
});
bcx
let size = C_uint(bcx.ccx, size);
let base = base::get_dataptr(&bcx, dest.llval);
tvec::slice_for_each(&bcx, base, tr_elem.ty, size, |bcx, llslot| {
self.store_operand(bcx, llslot, tr_elem);
})
}

mir::Rvalue::Aggregate(ref kind, ref operands) => {
match *kind {
mir::AggregateKind::Adt(adt_def, variant_index, _, active_field_index) => {
let disr = Disr::from(adt_def.variants[variant_index].disr_val);
bcx.with_block(|bcx| {
adt::trans_set_discr(bcx,
dest.ty.to_ty(bcx.tcx()), dest.llval, Disr::from(disr));
});
let dest_ty = dest.ty.to_ty(bcx.tcx());
adt::trans_set_discr(&bcx, dest_ty, dest.llval, Disr::from(disr));
for (i, operand) in operands.iter().enumerate() {
let op = self.trans_operand(&bcx, operand);
// Do not generate stores and GEPis for zero-sized fields.
if !common::type_is_zero_size(bcx.ccx(), op.ty) {
if !common::type_is_zero_size(bcx.ccx, op.ty) {
let val = adt::MaybeSizedValue::sized(dest.llval);
let field_index = active_field_index.unwrap_or(i);
let lldest_i = adt::trans_field_ptr_builder(&bcx,
dest.ty.to_ty(bcx.tcx()),
val, disr, field_index);
let lldest_i = adt::trans_field_ptr(&bcx, dest_ty, val, disr,
field_index);
self.store_operand(&bcx, lldest_i, op);
}
}
},
_ => {
// If this is a tuple or closure, we need to translate GEP indices.
let layout = bcx.ccx().layout_of(dest.ty.to_ty(bcx.tcx()));
let layout = bcx.ccx.layout_of(dest.ty.to_ty(bcx.tcx()));
let translation = if let Layout::Univariant { ref variant, .. } = *layout {
Some(&variant.memory_index)
} else {
@ -143,7 +128,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
for (i, operand) in operands.iter().enumerate() {
let op = self.trans_operand(&bcx, operand);
// Do not generate stores and GEPis for zero-sized fields.
if !common::type_is_zero_size(bcx.ccx(), op.ty) {
if !common::type_is_zero_size(bcx.ccx, op.ty) {
// Note: perhaps this should be StructGep, but
// note that in some cases the values here will
// not be structs but arrays.
@ -171,16 +156,13 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
self.trans_operand(&bcx, input).immediate()
}).collect();

bcx.with_block(|bcx| {
asm::trans_inline_asm(bcx, asm, outputs, input_vals);
});

asm::trans_inline_asm(&bcx, asm, outputs, input_vals);
bcx
}

_ => {
assert!(rvalue_creates_operand(&self.mir, &bcx, rvalue));
let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue, debug_loc);
assert!(rvalue_creates_operand(rvalue));
let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
self.store_operand(&bcx, dest.llval, temp);
bcx
}
@ -188,27 +170,25 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
}

pub fn trans_rvalue_operand(&mut self,
bcx: BlockAndBuilder<'bcx, 'tcx>,
rvalue: &mir::Rvalue<'tcx>,
debug_loc: DebugLoc)
-> (BlockAndBuilder<'bcx, 'tcx>, OperandRef<'tcx>)
bcx: BlockAndBuilder<'a, 'tcx>,
rvalue: &mir::Rvalue<'tcx>)
-> (BlockAndBuilder<'a, 'tcx>, OperandRef<'tcx>)
{
assert!(rvalue_creates_operand(&self.mir, &bcx, rvalue),
"cannot trans {:?} to operand", rvalue);
assert!(rvalue_creates_operand(rvalue), "cannot trans {:?} to operand", rvalue);

match *rvalue {
mir::Rvalue::Cast(ref kind, ref source, cast_ty) => {
let operand = self.trans_operand(&bcx, source);
debug!("cast operand is {:?}", operand);
let cast_ty = bcx.monomorphize(&cast_ty);
let cast_ty = self.monomorphize(&cast_ty);

let val = match *kind {
mir::CastKind::ReifyFnPointer => {
match operand.ty.sty {
ty::TyFnDef(def_id, substs, _) => {
OperandValue::Immediate(
Callee::def(bcx.ccx(), def_id, substs)
.reify(bcx.ccx()))
Callee::def(bcx.ccx, def_id, substs)
.reify(bcx.ccx))
}
_ => {
bug!("{} cannot be reified to a fn ptr", operand.ty)
@ -222,7 +202,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
mir::CastKind::Unsize => {
// unsize targets other than to a fat pointer currently
// can't be operands.
assert!(common::type_is_fat_ptr(bcx.tcx(), cast_ty));
assert!(common::type_is_fat_ptr(bcx.ccx, cast_ty));

match operand.val {
OperandValue::Pair(lldata, llextra) => {
@ -232,16 +212,14 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
// &'a fmt::Debug+Send => &'a fmt::Debug,
// So we need to pointercast the base to ensure
// the types match up.
let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx(), cast_ty);
let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx, cast_ty);
let lldata = bcx.pointercast(lldata, llcast_ty);
OperandValue::Pair(lldata, llextra)
}
OperandValue::Immediate(lldata) => {
// "standard" unsize
let (lldata, llextra) = bcx.with_block(|bcx| {
base::unsize_thin_ptr(bcx, lldata,
operand.ty, cast_ty)
});
let (lldata, llextra) = base::unsize_thin_ptr(&bcx, lldata,
operand.ty, cast_ty);
OperandValue::Pair(lldata, llextra)
}
OperandValue::Ref(_) => {
@ -250,11 +228,11 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
}
}
}
mir::CastKind::Misc if common::type_is_fat_ptr(bcx.tcx(), operand.ty) => {
let ll_cast_ty = type_of::immediate_type_of(bcx.ccx(), cast_ty);
let ll_from_ty = type_of::immediate_type_of(bcx.ccx(), operand.ty);
mir::CastKind::Misc if common::type_is_fat_ptr(bcx.ccx, operand.ty) => {
let ll_cast_ty = type_of::immediate_type_of(bcx.ccx, cast_ty);
let ll_from_ty = type_of::immediate_type_of(bcx.ccx, operand.ty);
if let OperandValue::Pair(data_ptr, meta_ptr) = operand.val {
if common::type_is_fat_ptr(bcx.tcx(), cast_ty) {
if common::type_is_fat_ptr(bcx.ccx, cast_ty) {
let ll_cft = ll_cast_ty.field_types();
let ll_fft = ll_from_ty.field_types();
let data_cast = bcx.pointercast(data_ptr, ll_cft[0]);
@ -271,19 +249,17 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
}
}
mir::CastKind::Misc => {
debug_assert!(common::type_is_immediate(bcx.ccx(), cast_ty));
debug_assert!(common::type_is_immediate(bcx.ccx, cast_ty));
let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast");
let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast");
let ll_t_in = type_of::immediate_type_of(bcx.ccx(), operand.ty);
let ll_t_out = type_of::immediate_type_of(bcx.ccx(), cast_ty);
let ll_t_in = type_of::immediate_type_of(bcx.ccx, operand.ty);
let ll_t_out = type_of::immediate_type_of(bcx.ccx, cast_ty);
let (llval, signed) = if let CastTy::Int(IntTy::CEnum) = r_t_in {
let l = bcx.ccx().layout_of(operand.ty);
let l = bcx.ccx.layout_of(operand.ty);
let discr = match operand.val {
OperandValue::Immediate(llval) => llval,
OperandValue::Ref(llptr) => {
bcx.with_block(|bcx| {
adt::trans_get_discr(bcx, operand.ty, llptr, None, true)
})
adt::trans_get_discr(&bcx, operand.ty, llptr, None, true)
}
OperandValue::Pair(..) => bug!("Unexpected Pair operand")
};
@ -376,7 +352,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {

// Note: lvalues are indirect, so storing the `llval` into the
// destination effectively creates a reference.
let operand = if common::type_is_sized(bcx.tcx(), ty) {
let operand = if bcx.ccx.shared().type_is_sized(ty) {
OperandRef {
val: OperandValue::Immediate(tr_lvalue.llval),
ty: ref_ty,
@ -394,7 +370,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
mir::Rvalue::Len(ref lvalue) => {
let tr_lvalue = self.trans_lvalue(&bcx, lvalue);
let operand = OperandRef {
val: OperandValue::Immediate(tr_lvalue.len(bcx.ccx())),
val: OperandValue::Immediate(tr_lvalue.len(bcx.ccx)),
ty: bcx.tcx().types.usize,
};
(bcx, operand)
@ -403,7 +379,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
let lhs = self.trans_operand(&bcx, lhs);
let rhs = self.trans_operand(&bcx, rhs);
let llresult = if common::type_is_fat_ptr(bcx.tcx(), lhs.ty) {
let llresult = if common::type_is_fat_ptr(bcx.ccx, lhs.ty) {
match (lhs.val, rhs.val) {
(OperandValue::Pair(lhs_addr, lhs_extra),
OperandValue::Pair(rhs_addr, rhs_extra)) => {
@ -461,26 +437,27 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
}

mir::Rvalue::Box(content_ty) => {
let content_ty: Ty<'tcx> = bcx.monomorphize(&content_ty);
let llty = type_of::type_of(bcx.ccx(), content_ty);
let llsize = machine::llsize_of(bcx.ccx(), llty);
let align = type_of::align_of(bcx.ccx(), content_ty);
let llalign = C_uint(bcx.ccx(), align);
let content_ty: Ty<'tcx> = self.monomorphize(&content_ty);
let llty = type_of::type_of(bcx.ccx, content_ty);
let llsize = machine::llsize_of(bcx.ccx, llty);
let align = type_of::align_of(bcx.ccx, content_ty);
let llalign = C_uint(bcx.ccx, align);
let llty_ptr = llty.ptr_to();
let box_ty = bcx.tcx().mk_box(content_ty);
let mut llval = None;
let bcx = bcx.map_block(|bcx| {
let Result { bcx, val } = base::malloc_raw_dyn(bcx,
llty_ptr,
box_ty,
llsize,
llalign,
debug_loc);
llval = Some(val);
bcx
});

// Allocate space:
let def_id = match bcx.tcx().lang_items.require(ExchangeMallocFnLangItem) {
Ok(id) => id,
Err(s) => {
bcx.sess().fatal(&format!("allocation of `{}` {}", box_ty, s));
}
};
let r = Callee::def(bcx.ccx, def_id, bcx.tcx().intern_substs(&[]))
.reify(bcx.ccx);
let val = bcx.pointercast(bcx.call(r, &[llsize, llalign], None), llty_ptr);

let operand = OperandRef {
val: OperandValue::Immediate(llval.unwrap()),
val: OperandValue::Immediate(val),
ty: box_ty,
};
(bcx, operand)
@ -500,7 +477,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
}

pub fn trans_scalar_binop(&mut self,
bcx: &BlockAndBuilder<'bcx, 'tcx>,
bcx: &BlockAndBuilder<'a, 'tcx>,
op: mir::BinOp,
lhs: ValueRef,
rhs: ValueRef,
@ -542,26 +519,11 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
mir::BinOp::BitOr => bcx.or(lhs, rhs),
mir::BinOp::BitAnd => bcx.and(lhs, rhs),
mir::BinOp::BitXor => bcx.xor(lhs, rhs),
mir::BinOp::Shl => {
bcx.with_block(|bcx| {
common::build_unchecked_lshift(bcx,
lhs,
rhs,
DebugLoc::None)
})
}
mir::BinOp::Shr => {
bcx.with_block(|bcx| {
common::build_unchecked_rshift(bcx,
input_ty,
lhs,
rhs,
DebugLoc::None)
})
}
mir::BinOp::Shl => common::build_unchecked_lshift(bcx, lhs, rhs),
mir::BinOp::Shr => common::build_unchecked_rshift(bcx, input_ty, lhs, rhs),
mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt |
mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => if is_nil {
C_bool(bcx.ccx(), match op {
C_bool(bcx.ccx, match op {
mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt => false,
mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => true,
_ => unreachable!()
@ -575,8 +537,8 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
let (lhs, rhs) = if is_bool {
// FIXME(#36856) -- extend the bools into `i8` because
// LLVM's i1 comparisons are broken.
(bcx.zext(lhs, Type::i8(bcx.ccx())),
bcx.zext(rhs, Type::i8(bcx.ccx())))
(bcx.zext(lhs, Type::i8(bcx.ccx)),
bcx.zext(rhs, Type::i8(bcx.ccx)))
} else {
(lhs, rhs)
};
@ -590,7 +552,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
}

pub fn trans_fat_ptr_binop(&mut self,
bcx: &BlockAndBuilder<'bcx, 'tcx>,
bcx: &BlockAndBuilder<'a, 'tcx>,
op: mir::BinOp,
lhs_addr: ValueRef,
lhs_extra: ValueRef,
@ -637,7 +599,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
}

pub fn trans_scalar_checked_binop(&mut self,
bcx: &BlockAndBuilder<'bcx, 'tcx>,
bcx: &BlockAndBuilder<'a, 'tcx>,
op: mir::BinOp,
lhs: ValueRef,
rhs: ValueRef,
@ -646,9 +608,9 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
// with #[rustc_inherit_overflow_checks] and inlined from
// another crate (mostly core::num generic/#[inline] fns),
// while the current crate doesn't use overflow checks.
if !bcx.ccx().check_overflow() {
if !bcx.ccx.check_overflow() {
let val = self.trans_scalar_binop(bcx, op, lhs, rhs, input_ty);
return OperandValue::Pair(val, C_bool(bcx.ccx(), false));
return OperandValue::Pair(val, C_bool(bcx.ccx, false));
}

// First try performing the operation on constants, which
@ -656,7 +618,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
// This is necessary to determine when an overflow Assert
// will always panic at runtime, and produce a warning.
if let Some((val, of)) = const_scalar_checked_binop(bcx.tcx(), op, lhs, rhs, input_ty) {
return OperandValue::Pair(val, C_bool(bcx.ccx(), of));
return OperandValue::Pair(val, C_bool(bcx.ccx, of));
}

let (val, of) = match op {
@ -677,9 +639,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
mir::BinOp::Shl | mir::BinOp::Shr => {
let lhs_llty = val_ty(lhs);
let rhs_llty = val_ty(rhs);
let invert_mask = bcx.with_block(|bcx| {
common::shift_mask_val(bcx, lhs_llty, rhs_llty, true)
});
let invert_mask = common::shift_mask_val(&bcx, lhs_llty, rhs_llty, true);
let outer_bits = bcx.and(rhs, invert_mask);

let of = bcx.icmp(llvm::IntNE, outer_bits, C_null(rhs_llty));
@ -696,9 +656,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
}
}

pub fn rvalue_creates_operand<'bcx, 'tcx>(_mir: &mir::Mir<'tcx>,
_bcx: &BlockAndBuilder<'bcx, 'tcx>,
rvalue: &mir::Rvalue<'tcx>) -> bool {
pub fn rvalue_creates_operand(rvalue: &mir::Rvalue) -> bool {
match *rvalue {
mir::Rvalue::Ref(..) |
mir::Rvalue::Len(..) |
@ -789,5 +747,5 @@ fn get_overflow_intrinsic(oop: OverflowOp, bcx: &BlockAndBuilder, ty: Ty) -> Val
},
};

bcx.ccx().get_intrinsic(&name)
bcx.ccx.get_intrinsic(&name)
}

@ -18,57 +18,52 @@ use super::LocalRef;
use super::super::adt;
use super::super::disr::Disr;

impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
impl<'a, 'tcx> MirContext<'a, 'tcx> {
pub fn trans_statement(&mut self,
bcx: BlockAndBuilder<'bcx, 'tcx>,
bcx: BlockAndBuilder<'a, 'tcx>,
statement: &mir::Statement<'tcx>)
-> BlockAndBuilder<'bcx, 'tcx> {
-> BlockAndBuilder<'a, 'tcx> {
debug!("trans_statement(statement={:?})", statement);

let debug_loc = self.debug_loc(statement.source_info);
debug_loc.apply_to_bcx(&bcx);
debug_loc.apply(bcx.fcx());
self.set_debug_loc(&bcx, statement.source_info);
match statement.kind {
mir::StatementKind::Assign(ref lvalue, ref rvalue) => {
if let mir::Lvalue::Local(index) = *lvalue {
match self.locals[index] {
LocalRef::Lvalue(tr_dest) => {
self.trans_rvalue(bcx, tr_dest, rvalue, debug_loc)
self.trans_rvalue(bcx, tr_dest, rvalue)
}
LocalRef::Operand(None) => {
let (bcx, operand) = self.trans_rvalue_operand(bcx, rvalue,
debug_loc);
let (bcx, operand) = self.trans_rvalue_operand(bcx, rvalue);
self.locals[index] = LocalRef::Operand(Some(operand));
bcx
}
LocalRef::Operand(Some(_)) => {
let ty = self.monomorphized_lvalue_ty(lvalue);

if !common::type_is_zero_size(bcx.ccx(), ty) {
if !common::type_is_zero_size(bcx.ccx, ty) {
span_bug!(statement.source_info.span,
"operand {:?} already assigned",
rvalue);
} else {
// If the type is zero-sized, it's already been set here,
// but we still need to make sure we translate the operand
self.trans_rvalue_operand(bcx, rvalue, debug_loc).0
self.trans_rvalue_operand(bcx, rvalue).0
}
}
}
} else {
let tr_dest = self.trans_lvalue(&bcx, lvalue);
self.trans_rvalue(bcx, tr_dest, rvalue, debug_loc)
self.trans_rvalue(bcx, tr_dest, rvalue)
}
}
mir::StatementKind::SetDiscriminant{ref lvalue, variant_index} => {
let ty = self.monomorphized_lvalue_ty(lvalue);
let lvalue_transed = self.trans_lvalue(&bcx, lvalue);
bcx.with_block(|bcx|
adt::trans_set_discr(bcx,
ty,
lvalue_transed.llval,
Disr::from(variant_index))
);
adt::trans_set_discr(&bcx,
ty,
lvalue_transed.llval,
Disr::from(variant_index));
bcx
}
mir::StatementKind::StorageLive(ref lvalue) => {
@ -82,10 +77,10 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
}

fn trans_storage_liveness(&self,
bcx: BlockAndBuilder<'bcx, 'tcx>,
bcx: BlockAndBuilder<'a, 'tcx>,
lvalue: &mir::Lvalue<'tcx>,
intrinsic: base::Lifetime)
-> BlockAndBuilder<'bcx, 'tcx> {
-> BlockAndBuilder<'a, 'tcx> {
if let mir::Lvalue::Local(index) = *lvalue {
if let LocalRef::Lvalue(tr_lval) = self.locals[index] {
intrinsic.call(&bcx, tr_lval.llval);

@ -184,7 +184,7 @@ impl<'a, 'tcx> TransItem<'tcx> {
linkage: llvm::Linkage,
symbol_name: &str) {
let tcx = ccx.tcx();
assert_eq!(dg.ty(), glue::get_drop_glue_type(tcx, dg.ty()));
assert_eq!(dg.ty(), glue::get_drop_glue_type(ccx.shared(), dg.ty()));
let t = dg.ty();

let sig = tcx.mk_fn_sig(iter::once(tcx.mk_mut_ptr(tcx.types.i8)), tcx.mk_nil(), false);

@ -8,56 +8,46 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#![allow(non_camel_case_types)]

use llvm;
use llvm::ValueRef;
use base::*;
use build::*;
use common::*;
use debuginfo::DebugLoc;
use rustc::ty::Ty;

pub fn slice_for_each<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
data_ptr: ValueRef,
unit_ty: Ty<'tcx>,
len: ValueRef,
f: F)
-> Block<'blk, 'tcx> where
F: FnOnce(Block<'blk, 'tcx>, ValueRef) -> Block<'blk, 'tcx>,
{
let _icx = push_ctxt("tvec::slice_for_each");
let fcx = bcx.fcx;

pub fn slice_for_each<'a, 'tcx, F>(
bcx: &BlockAndBuilder<'a, 'tcx>,
data_ptr: ValueRef,
unit_ty: Ty<'tcx>,
len: ValueRef,
f: F
) -> BlockAndBuilder<'a, 'tcx> where F: FnOnce(&BlockAndBuilder<'a, 'tcx>, ValueRef) {
// Special-case vectors with elements of size 0 so they don't go out of bounds (#9890)
let zst = type_is_zero_size(bcx.ccx(), unit_ty);
let add = |bcx, a, b| if zst {
Add(bcx, a, b, DebugLoc::None)
let zst = type_is_zero_size(bcx.ccx, unit_ty);
let add = |bcx: &BlockAndBuilder, a, b| if zst {
bcx.add(a, b)
} else {
InBoundsGEP(bcx, a, &[b])
bcx.inbounds_gep(a, &[b])
};

let header_bcx = fcx.new_block("slice_loop_header");
let body_bcx = fcx.new_block("slice_loop_body");
let next_bcx = fcx.new_block("slice_loop_next");
let body_bcx = bcx.fcx().build_new_block("slice_loop_body");
let next_bcx = bcx.fcx().build_new_block("slice_loop_next");
let header_bcx = bcx.fcx().build_new_block("slice_loop_header");

let start = if zst {
C_uint(bcx.ccx(), 0 as usize)
C_uint(bcx.ccx, 0usize)
} else {
data_ptr
};
let end = add(bcx, start, len);
let end = add(&bcx, start, len);

Br(bcx, header_bcx.llbb, DebugLoc::None);
let current = Phi(header_bcx, val_ty(start), &[start], &[bcx.llbb]);
bcx.br(header_bcx.llbb());
let current = header_bcx.phi(val_ty(start), &[start], &[bcx.llbb()]);

let keep_going =
ICmp(header_bcx, llvm::IntNE, current, end, DebugLoc::None);
CondBr(header_bcx, keep_going, body_bcx.llbb, next_bcx.llbb, DebugLoc::None);
let keep_going = header_bcx.icmp(llvm::IntNE, current, end);
header_bcx.cond_br(keep_going, body_bcx.llbb(), next_bcx.llbb());

let body_bcx = f(body_bcx, if zst { data_ptr } else { current });
let next = add(body_bcx, current, C_uint(bcx.ccx(), 1usize));
AddIncomingToPhi(current, next, body_bcx.llbb);
Br(body_bcx, header_bcx.llbb, DebugLoc::None);
f(&body_bcx, if zst { data_ptr } else { current });
let next = add(&body_bcx, current, C_uint(bcx.ccx, 1usize));
header_bcx.add_incoming_to_phi(current, next, body_bcx.llbb());
body_bcx.br(header_bcx.llbb());
next_bcx
}

@ -8,8 +8,6 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#![allow(non_camel_case_types)]

use abi::FnType;
use adt;
use common::*;
@ -41,7 +39,7 @@ pub fn sizing_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Typ
let _recursion_lock = cx.enter_type_of(t);

let llsizingty = match t.sty {
_ if !type_is_sized(cx.tcx(), t) => {
_ if !cx.shared().type_is_sized(t) => {
Type::struct_(cx, &[Type::i8p(cx), unsized_info_ty(cx, t)], false)
}

@ -55,7 +53,7 @@ pub fn sizing_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Typ
ty::TyBox(ty) |
ty::TyRef(_, ty::TypeAndMut{ty, ..}) |
ty::TyRawPtr(ty::TypeAndMut{ty, ..}) => {
if type_is_sized(cx.tcx(), ty) {
if cx.shared().type_is_sized(ty) {
Type::i8p(cx)
} else {
Type::struct_(cx, &[Type::i8p(cx), unsized_info_ty(cx, ty)], false)
@ -104,7 +102,7 @@ pub fn sizing_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Typ

// FIXME(eddyb) Temporary sanity check for ty::layout.
let layout = cx.layout_of(t);
if !type_is_sized(cx.tcx(), t) {
if !cx.shared().type_is_sized(t) {
if !layout.is_unsized() {
bug!("layout should be unsized for type `{}` / {:#?}",
t, layout);
@ -135,7 +133,7 @@ pub fn fat_ptr_base_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) ->
match ty.sty {
ty::TyBox(t) |
ty::TyRef(_, ty::TypeAndMut { ty: t, .. }) |
ty::TyRawPtr(ty::TypeAndMut { ty: t, .. }) if !type_is_sized(ccx.tcx(), t) => {
ty::TyRawPtr(ty::TypeAndMut { ty: t, .. }) if !ccx.shared().type_is_sized(t) => {
in_memory_type_of(ccx, t).ptr_to()
}
_ => bug!("expected fat ptr ty but got {:?}", ty)
@ -172,7 +170,7 @@ pub fn immediate_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) ->
/// is too large for it to be placed in SSA value (by our rules).
/// For the raw type without far pointer indirection, see `in_memory_type_of`.
pub fn type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> Type {
let ty = if !type_is_sized(cx.tcx(), ty) {
let ty = if !cx.shared().type_is_sized(ty) {
cx.tcx().mk_imm_ptr(ty)
} else {
ty
@ -232,7 +230,7 @@ pub fn in_memory_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) ->
ty::TyBox(ty) |
ty::TyRef(_, ty::TypeAndMut{ty, ..}) |
ty::TyRawPtr(ty::TypeAndMut{ty, ..}) => {
if !type_is_sized(cx.tcx(), ty) {
if !cx.shared().type_is_sized(ty) {
if let ty::TyStr = ty.sty {
// This means we get a nicer name in the output (str is always
// unsized).

@ -9,16 +9,11 @@
// except according to those terms.

use llvm;
use llvm::{UseRef, ValueRef};
use basic_block::BasicBlock;
use common::Block;

use std::fmt;

use libc::c_uint;

#[derive(Copy, Clone, PartialEq)]
pub struct Value(pub ValueRef);
pub struct Value(pub llvm::ValueRef);

impl fmt::Debug for Value {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
@ -27,152 +22,3 @@ impl fmt::Debug for Value {
}).expect("non-UTF8 value description from LLVM"))
}
}

macro_rules! opt_val { ($e:expr) => (
unsafe {
match $e {
p if !p.is_null() => Some(Value(p)),
_ => None
}
}
) }

/// Wrapper for LLVM ValueRef
impl Value {
/// Returns the native ValueRef
pub fn get(&self) -> ValueRef {
let Value(v) = *self; v
}

/// Returns the BasicBlock that contains this value
pub fn get_parent(self) -> Option<BasicBlock> {
unsafe {
match llvm::LLVMGetInstructionParent(self.get()) {
p if !p.is_null() => Some(BasicBlock(p)),
_ => None
}
}
}

/// Removes this value from its containing BasicBlock
pub fn erase_from_parent(self) {
unsafe {
llvm::LLVMInstructionEraseFromParent(self.get());
}
}

/// Returns the single dominating store to this value, if any.
/// This only performs a search for a trivially dominating store. The store
/// must be the only user of this value, and there must not be any conditional
/// branches between the store and the given block.
pub fn get_dominating_store(self, bcx: Block) -> Option<Value> {
match self.get_single_user().and_then(|user| user.as_store_inst()) {
Some(store) => {
store.get_parent().and_then(|store_bb| {
let mut bb = BasicBlock(bcx.llbb);
let mut ret = Some(store);
while bb.get() != store_bb.get() {
match bb.get_single_predecessor() {
Some(pred) => bb = pred,
None => { ret = None; break }
}
}
ret
})
}
_ => None
}
}

/// Returns the first use of this value, if any
pub fn get_first_use(self) -> Option<Use> {
unsafe {
match llvm::LLVMGetFirstUse(self.get()) {
u if !u.is_null() => Some(Use(u)),
_ => None
}
}
}

/// Tests if there are no uses of this value
pub fn has_no_uses(self) -> bool {
self.get_first_use().is_none()
}

/// Returns the single user of this value.
/// If there are no users or multiple users, this returns None
pub fn get_single_user(self) -> Option<Value> {
let mut iter = self.user_iter();
match (iter.next(), iter.next()) {
(Some(first), None) => Some(first),
_ => None
}
}

/// Returns an iterator for the users of this value
pub fn user_iter(self) -> Users {
Users {
next: self.get_first_use()
}
}

/// Returns the requested operand of this instruction.
/// Returns None if there's no operand at the given index
pub fn get_operand(self, i: usize) -> Option<Value> {
opt_val!(llvm::LLVMGetOperand(self.get(), i as c_uint))
}

/// Returns the store represented by this value, if any
pub fn as_store_inst(self) -> Option<Value> {
opt_val!(llvm::LLVMIsAStoreInst(self.get()))
}

/// Tests if this value is a terminator instruction
pub fn is_a_terminator_inst(self) -> bool {
unsafe {
!llvm::LLVMIsATerminatorInst(self.get()).is_null()
}
}
}

/// Wrapper for LLVM UseRef
#[derive(Copy, Clone)]
pub struct Use(UseRef);

impl Use {
pub fn get(&self) -> UseRef {
let Use(v) = *self; v
}

pub fn get_user(self) -> Value {
unsafe {
Value(llvm::LLVMGetUser(self.get()))
}
}

pub fn get_next_use(self) -> Option<Use> {
unsafe {
match llvm::LLVMGetNextUse(self.get()) {
u if !u.is_null() => Some(Use(u)),
_ => None
}
}
}
}

/// Iterator for the users of a value
pub struct Users {
next: Option<Use>
}

impl Iterator for Users {
type Item = Value;

fn next(&mut self) -> Option<Value> {
let current = self.next;

self.next = current.and_then(|u| u.get_next_use());

current.map(|u| u.get_user())
}
}

14 src/test/run-pass/trans-object-shim.rs Normal file
@ -0,0 +1,14 @@
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

fn main() {
assert_eq!((ToString::to_string as fn(&(ToString+'static)) -> String)(&"foo"),
String::from("foo"));
}