Replace BlockAndBuilder with Builder.

Mark Simulacrum 2016-12-31 16:00:24 -07:00
parent d40d01bd0e
commit 1be170b01a
21 changed files with 344 additions and 357 deletions
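In effect, `Builder` absorbs the wrapper: `llbb()` is now derived from the builder's own insertion point (`LLVMGetInsertBlock`) and `llfn()` from that block's parent (`LLVMGetBasicBlockParent`), so call sites move from `bcx.fcx().llfn` to `bcx.llfn()`, and the convenience forwarders `bcx.tcx()` / `bcx.sess()` become explicit `bcx.ccx.tcx()` / `bcx.ccx.sess()`. A minimal stand-alone sketch of the pattern — toy types, not the real rustc-trans or LLVM ones:

// Toy model of the refactor: the wrapper carried a back-pointer to the
// function context; afterwards the builder derives the same facts from
// its insertion point. None of these types are the real ones.
#[derive(Clone, Copy, PartialEq, Debug)]
struct FunctionRef(u32);                  // stand-in for an LLVM function ValueRef

struct BlockRef { parent: FunctionRef }   // a basic block knows its parent function

struct Builder { insert_block: BlockRef } // stand-in for the LLVM IR builder

impl Builder {
    // mirrors the new Builder::llbb(): llvm::LLVMGetInsertBlock(self.llbuilder)
    fn llbb(&self) -> &BlockRef {
        &self.insert_block
    }

    // mirrors the new Builder::llfn(): llvm::LLVMGetBasicBlockParent(self.llbb())
    fn llfn(&self) -> FunctionRef {
        self.llbb().parent
    }
}

fn main() {
    let bcx = Builder { insert_block: BlockRef { parent: FunctionRef(7) } };
    // old: bcx.fcx().llfn        new: bcx.llfn()
    assert_eq!(bcx.llfn(), FunctionRef(7));
}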

View File

@@ -10,7 +10,8 @@
 use llvm::{self, ValueRef, Integer, Pointer, Float, Double, Struct, Array, Vector, AttributePlace};
 use base;
-use common::{type_is_fat_ptr, BlockAndBuilder, C_uint};
+use builder::Builder;
+use common::{type_is_fat_ptr, C_uint};
 use context::CrateContext;
 use cabi_x86;
 use cabi_x86_64;
@@ -236,7 +237,7 @@ impl ArgType {
     /// lvalue for the original Rust type of this argument/return.
     /// Can be used for both storing formal arguments into Rust variables
     /// or results of call/invoke instructions into their destinations.
-    pub fn store(&self, bcx: &BlockAndBuilder, mut val: ValueRef, dst: ValueRef) {
+    pub fn store(&self, bcx: &Builder, mut val: ValueRef, dst: ValueRef) {
         if self.is_ignore() {
             return;
         }
@@ -269,7 +270,7 @@ impl ArgType {
             // bitcasting to the struct type yields invalid cast errors.
             // We instead thus allocate some scratch space...
-            let llscratch = bcx.fcx().alloca(ty, "abi_cast");
+            let llscratch = bcx.alloca(ty, "abi_cast");
             base::Lifetime::Start.call(bcx, llscratch);

             // ...where we first store the value...
@@ -293,14 +294,16 @@ impl ArgType {
         }
     }

-    pub fn store_fn_arg(&self, bcx: &BlockAndBuilder, idx: &mut usize, dst: ValueRef) {
+    pub fn store_fn_arg(
+        &self, bcx: &Builder, idx: &mut usize, dst: ValueRef
+    ) {
         if self.pad.is_some() {
             *idx += 1;
         }
         if self.is_ignore() {
             return;
         }
-        let val = llvm::get_param(bcx.fcx().llfn, *idx as c_uint);
+        let val = llvm::get_param(bcx.llfn(), *idx as c_uint);
         *idx += 1;
         self.store(bcx, val, dst);
     }

View File

@@ -49,6 +49,7 @@ use llvm::{ValueRef, True, IntEQ, IntNE};
 use rustc::ty::layout;
 use rustc::ty::{self, Ty, AdtKind};
 use common::*;
+use builder::Builder;
 use glue;
 use base;
 use machine;
@@ -303,7 +304,7 @@ fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, fields: &Vec<Ty<'tcx>>
 /// Obtain a representation of the discriminant sufficient to translate
 /// destructuring; this may or may not involve the actual discriminant.
 pub fn trans_switch<'a, 'tcx>(
-    bcx: &BlockAndBuilder<'a, 'tcx>,
+    bcx: &Builder<'a, 'tcx>,
     t: Ty<'tcx>,
     scrutinee: ValueRef,
     range_assert: bool
@@ -331,7 +332,7 @@ pub fn is_discr_signed<'tcx>(l: &layout::Layout) -> bool {
 /// Obtain the actual discriminant of a value.
 pub fn trans_get_discr<'a, 'tcx>(
-    bcx: &BlockAndBuilder<'a, 'tcx>,
+    bcx: &Builder<'a, 'tcx>,
     t: Ty<'tcx>,
     scrutinee: ValueRef,
     cast_to: Option<Type>,
@@ -374,7 +375,7 @@ pub fn trans_get_discr<'a, 'tcx>(
 }

 fn struct_wrapped_nullable_bitdiscr(
-    bcx: &BlockAndBuilder,
+    bcx: &Builder,
     nndiscr: u64,
     discrfield: &layout::FieldPath,
     scrutinee: ValueRef
@@ -387,7 +388,7 @@ fn struct_wrapped_nullable_bitdiscr(
 }

 /// Helper for cases where the discriminant is simply loaded.
-fn load_discr(bcx: &BlockAndBuilder, ity: layout::Integer, ptr: ValueRef, min: u64, max: u64,
+fn load_discr(bcx: &Builder, ity: layout::Integer, ptr: ValueRef, min: u64, max: u64,
               range_assert: bool)
               -> ValueRef {
     let llty = Type::from_integer(bcx.ccx, ity);
@@ -415,7 +416,7 @@ fn load_discr(bcx: &BlockAndBuilder, ity: layout::Integer, ptr: ValueRef, min: u
 /// discriminant-like value returned by `trans_switch`.
 ///
 /// This should ideally be less tightly tied to `_match`.
-pub fn trans_case<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, t: Ty<'tcx>, value: Disr) -> ValueRef {
+pub fn trans_case<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, value: Disr) -> ValueRef {
     let l = bcx.ccx.layout_of(t);
     match *l {
         layout::CEnum { discr, .. }
@@ -436,7 +437,7 @@ pub fn trans_case<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, t: Ty<'tcx>, value:
 /// Set the discriminant for a new value of the given case of the given
 /// representation.
 pub fn trans_set_discr<'a, 'tcx>(
-    bcx: &BlockAndBuilder<'a, 'tcx>, t: Ty<'tcx>, val: ValueRef, to: Disr
+    bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, val: ValueRef, to: Disr
 ) {
     let l = bcx.ccx.layout_of(t);
     match *l {
@@ -484,8 +485,8 @@ pub fn trans_set_discr<'a, 'tcx>(
     }
 }

-fn target_sets_discr_via_memset<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>) -> bool {
-    bcx.sess().target.target.arch == "arm" || bcx.sess().target.target.arch == "aarch64"
+fn target_sets_discr_via_memset<'a, 'tcx>(bcx: &Builder<'a, 'tcx>) -> bool {
+    bcx.ccx.sess().target.target.arch == "arm" || bcx.ccx.sess().target.target.arch == "aarch64"
 }

 fn assert_discr_in_range(min: Disr, max: Disr, discr: Disr) {
@@ -498,7 +499,7 @@ fn assert_discr_in_range(min: Disr, max: Disr, discr: Disr) {
 /// Access a field, at a point when the value's case is known.
 pub fn trans_field_ptr<'a, 'tcx>(
-    bcx: &BlockAndBuilder<'a, 'tcx>,
+    bcx: &Builder<'a, 'tcx>,
     t: Ty<'tcx>,
     val: MaybeSizedValue,
     discr: Disr,
@@ -560,7 +561,7 @@ pub fn trans_field_ptr<'a, 'tcx>(
 }

 fn struct_field_ptr<'a, 'tcx>(
-    bcx: &BlockAndBuilder<'a, 'tcx>,
+    bcx: &Builder<'a, 'tcx>,
     st: &layout::Struct,
     fields: &Vec<Ty<'tcx>>,
     val: MaybeSizedValue,

View File

@@ -15,6 +15,7 @@ use base;
 use common::*;
 use type_of;
 use type_::Type;
+use builder::Builder;
 use rustc::hir;
 use rustc::ty::Ty;
@@ -25,7 +26,7 @@ use libc::{c_uint, c_char};
 // Take an inline assembly expression and splat it out via LLVM
 pub fn trans_inline_asm<'a, 'tcx>(
-    bcx: &BlockAndBuilder<'a, 'tcx>,
+    bcx: &Builder<'a, 'tcx>,
     ia: &hir::InlineAsm,
     outputs: Vec<(ValueRef, Ty<'tcx>)>,
     mut inputs: Vec<ValueRef>
@@ -61,7 +62,7 @@ pub fn trans_inline_asm<'a, 'tcx>(
     // Default per-arch clobbers
     // Basically what clang does
-    let arch_clobbers = match &bcx.sess().target.target.arch[..] {
+    let arch_clobbers = match &bcx.ccx.sess().target.target.arch[..] {
         "x86" | "x86_64" => vec!["~{dirflag}", "~{fpsr}", "~{flags}"],
         _ => Vec::new()
     };

View File

@@ -38,7 +38,7 @@ use rustc::hir::def_id::{DefId, LOCAL_CRATE};
 use middle::lang_items::StartFnLangItem;
 use rustc::ty::subst::Substs;
 use rustc::traits;
-use rustc::ty::{self, Ty, TyCtxt, TypeFoldable};
+use rustc::ty::{self, Ty, TyCtxt};
 use rustc::ty::adjustment::CustomCoerceUnsized;
 use rustc::dep_graph::{DepNode, WorkProduct};
 use rustc::hir::map as hir_map;
@@ -51,7 +51,7 @@ use adt;
 use attributes;
 use builder::Builder;
 use callee::{Callee};
-use common::{BlockAndBuilder, C_bool, C_bytes_in_context, C_i32, C_uint};
+use common::{C_bool, C_bytes_in_context, C_i32, C_uint};
 use collector::{self, TransItemCollectionMode};
 use common::{C_struct_in_context, C_u64, C_undef};
 use common::{CrateContext, FunctionContext};
@@ -161,7 +161,7 @@ pub fn bin_op_to_fcmp_predicate(op: hir::BinOp_) -> llvm::RealPredicate {
 }

 pub fn compare_simd_types<'a, 'tcx>(
-    bcx: &BlockAndBuilder<'a, 'tcx>,
+    bcx: &Builder<'a, 'tcx>,
     lhs: ValueRef,
     rhs: ValueRef,
     t: Ty<'tcx>,
@@ -218,7 +218,7 @@ pub fn unsized_info<'ccx, 'tcx>(ccx: &CrateContext<'ccx, 'tcx>,
 /// Coerce `src` to `dst_ty`. `src_ty` must be a thin pointer.
 pub fn unsize_thin_ptr<'a, 'tcx>(
-    bcx: &BlockAndBuilder<'a, 'tcx>,
+    bcx: &Builder<'a, 'tcx>,
     src: ValueRef,
     src_ty: Ty<'tcx>,
     dst_ty: Ty<'tcx>
@@ -242,7 +242,7 @@ pub fn unsize_thin_ptr<'a, 'tcx>(
 /// Coerce `src`, which is a reference to a value of type `src_ty`,
 /// to a value of type `dst_ty` and store the result in `dst`
-pub fn coerce_unsized_into<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
+pub fn coerce_unsized_into<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                                      src: ValueRef,
                                      src_ty: Ty<'tcx>,
                                      dst: ValueRef,
@@ -272,10 +272,10 @@ pub fn coerce_unsized_into<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
             assert_eq!(def_a, def_b);

             let src_fields = def_a.variants[0].fields.iter().map(|f| {
-                monomorphize::field_ty(bcx.tcx(), substs_a, f)
+                monomorphize::field_ty(bcx.ccx.tcx(), substs_a, f)
             });
             let dst_fields = def_b.variants[0].fields.iter().map(|f| {
-                monomorphize::field_ty(bcx.tcx(), substs_b, f)
+                monomorphize::field_ty(bcx.ccx.tcx(), substs_b, f)
             });

             let src = adt::MaybeSizedValue::sized(src);
@@ -322,7 +322,7 @@ pub fn custom_coerce_unsize_info<'scx, 'tcx>(scx: &SharedCrateContext<'scx, 'tcx
 }

 pub fn cast_shift_expr_rhs(
-    cx: &BlockAndBuilder, op: hir::BinOp_, lhs: ValueRef, rhs: ValueRef
+    cx: &Builder, op: hir::BinOp_, lhs: ValueRef, rhs: ValueRef
 ) -> ValueRef {
     cast_shift_rhs(op, lhs, rhs, |a, b| cx.trunc(a, b), |a, b| cx.zext(a, b))
 }
@@ -421,7 +421,7 @@ pub fn load_ty<'a, 'tcx>(b: &Builder<'a, 'tcx>, ptr: ValueRef, t: Ty<'tcx>) -> V
 /// Helper for storing values in memory. Does the necessary conversion if the in-memory type
 /// differs from the type used for SSA values.
-pub fn store_ty<'a, 'tcx>(cx: &BlockAndBuilder<'a, 'tcx>, v: ValueRef, dst: ValueRef, t: Ty<'tcx>) {
+pub fn store_ty<'a, 'tcx>(cx: &Builder<'a, 'tcx>, v: ValueRef, dst: ValueRef, t: Ty<'tcx>) {
     debug!("store_ty: {:?} : {:?} <- {:?}", Value(dst), t, Value(v));

     if common::type_is_fat_ptr(cx.ccx, t) {
@@ -433,7 +433,7 @@ pub fn store_ty<'a, 'tcx>(cx: &BlockAndBuilder<'a, 'tcx>, v: ValueRef, dst: Valu
     }
 }

-pub fn store_fat_ptr<'a, 'tcx>(cx: &BlockAndBuilder<'a, 'tcx>,
+pub fn store_fat_ptr<'a, 'tcx>(cx: &Builder<'a, 'tcx>,
                                data: ValueRef,
                                extra: ValueRef,
                                dst: ValueRef,
@@ -459,7 +459,7 @@ pub fn load_fat_ptr<'a, 'tcx>(
     (ptr, meta)
 }

-pub fn from_immediate(bcx: &BlockAndBuilder, val: ValueRef) -> ValueRef {
+pub fn from_immediate(bcx: &Builder, val: ValueRef) -> ValueRef {
     if val_ty(val) == Type::i1(bcx.ccx) {
         bcx.zext(val, Type::i8(bcx.ccx))
     } else {
@@ -467,7 +467,7 @@ pub fn from_immediate(bcx: &BlockAndBuilder, val: ValueRef) -> ValueRef {
     }
 }

-pub fn to_immediate(bcx: &BlockAndBuilder, val: ValueRef, ty: Ty) -> ValueRef {
+pub fn to_immediate(bcx: &Builder, val: ValueRef, ty: Ty) -> ValueRef {
     if ty.is_bool() {
         bcx.trunc(val, Type::i1(bcx.ccx))
     } else {
@@ -523,11 +523,13 @@ pub fn call_memcpy<'a, 'tcx>(b: &Builder<'a, 'tcx>,
     b.call(memcpy, &[dst_ptr, src_ptr, size, align, volatile], None);
 }

-pub fn memcpy_ty<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
-                           dst: ValueRef,
-                           src: ValueRef,
-                           t: Ty<'tcx>,
-                           align: Option<u32>) {
+pub fn memcpy_ty<'a, 'tcx>(
+    bcx: &Builder<'a, 'tcx>,
+    dst: ValueRef,
+    src: ValueRef,
+    t: Ty<'tcx>,
+    align: Option<u32>,
+) {
     let ccx = bcx.ccx;

     if type_is_zero_size(ccx, t) {
@@ -553,11 +555,6 @@ pub fn call_memset<'a, 'tcx>(b: &Builder<'a, 'tcx>,
     b.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None)
 }

-pub fn alloc_ty<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, ty: Ty<'tcx>, name: &str) -> ValueRef {
-    assert!(!ty.has_param_types());
-    bcx.fcx().alloca(type_of::type_of(bcx.ccx, ty), name)
-}
-
 pub fn trans_instance<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, instance: Instance<'tcx>) {
     let _s = if ccx.sess().trans_stats() {
         let mut instance_name = String::new();
@@ -623,7 +620,7 @@ pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
         // We create an alloca to hold a pointer of type `ret.original_ty`
         // which will hold the pointer to the right alloca which has the
         // final ret value
-        fcx.alloca(fn_ty.ret.memory_ty(ccx), "sret_slot")
+        bcx.alloca(fn_ty.ret.memory_ty(ccx), "sret_slot")
     };
     let dest_val = adt::MaybeSizedValue::sized(dest); // Can return unsized value
     let mut llarg_idx = fn_ty.ret.is_indirect() as usize;
@@ -756,12 +753,7 @@ pub fn maybe_create_entry_wrapper(ccx: &CrateContext) {
     // `main` should respect same config for frame pointer elimination as rest of code
     attributes::set_frame_pointer_elimination(ccx, llfn);

-    let llbb = unsafe {
-        let name = CString::new("top").unwrap();
-        llvm::LLVMAppendBasicBlockInContext(ccx.llcx(), llfn, name.as_ptr())
-    };
-    let bld = Builder::with_ccx(ccx);
-    bld.position_at_end(llbb);
+    let bld = Builder::new_block(ccx, llfn, "top");

     debuginfo::gdb::insert_reference_to_gdb_debug_scripts_section_global(ccx, &bld);

View File

@@ -19,12 +19,16 @@ use machine::llalign_of_pref;
 use type_::Type;
 use value::Value;
 use libc::{c_uint, c_char};
+use rustc::ty::{Ty, TypeFoldable};
+use type_of;

 use std::borrow::Cow;
 use std::ffi::CString;
 use std::ptr;
 use syntax_pos::Span;

+// All Builders must have an llfn associated with them
+#[must_use]
 pub struct Builder<'a, 'tcx: 'a> {
     pub llbuilder: BuilderRef,
     pub ccx: &'a CrateContext<'a, 'tcx>,
@@ -46,6 +50,20 @@ fn noname() -> *const c_char {
 }

 impl<'a, 'tcx> Builder<'a, 'tcx> {
+    pub fn new_block<'b>(ccx: &'a CrateContext<'a, 'tcx>, llfn: ValueRef, name: &'b str) -> Self {
+        let builder = Builder::with_ccx(ccx);
+        let llbb = unsafe {
+            let name = CString::new(name).unwrap();
+            llvm::LLVMAppendBasicBlockInContext(
+                ccx.llcx(),
+                llfn,
+                name.as_ptr()
+            )
+        };
+        builder.position_at_end(llbb);
+        builder
+    }
+
     pub fn with_ccx(ccx: &'a CrateContext<'a, 'tcx>) -> Self {
         // Create a fresh builder from the crate context.
         let llbuilder = unsafe {
@@ -57,6 +75,32 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         }
     }

+    pub fn build_new_block<'b>(&self, name: &'b str) -> Builder<'a, 'tcx> {
+        let builder = Builder::with_ccx(self.ccx);
+        let llbb = unsafe {
+            let name = CString::new(name).unwrap();
+            llvm::LLVMAppendBasicBlockInContext(
+                self.ccx.llcx(),
+                self.llfn(),
+                name.as_ptr()
+            )
+        };
+        builder.position_at_end(llbb);
+        builder
+    }
+
+    pub fn llfn(&self) -> ValueRef {
+        unsafe {
+            llvm::LLVMGetBasicBlockParent(self.llbb())
+        }
+    }
+
+    pub fn llbb(&self) -> BasicBlockRef {
+        unsafe {
+            llvm::LLVMGetInsertBlock(self.llbuilder)
+        }
+    }
+
     fn count_insn(&self, category: &str) {
         if self.ccx.sess().trans_stats() {
             self.ccx.stats().n_llvm_insns.set(self.ccx.stats().n_llvm_insns.get() + 1);
@@ -435,6 +479,19 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         }
     }

+    pub fn alloca(&self, ty: Type, name: &str) -> ValueRef {
+        let builder = Builder::with_ccx(self.ccx);
+        builder.position_at_start(unsafe {
+            llvm::LLVMGetFirstBasicBlock(self.llfn())
+        });
+        builder.dynamic_alloca(ty, name)
+    }
+
+    pub fn alloca_ty(&self, ty: Ty<'tcx>, name: &str) -> ValueRef {
+        assert!(!ty.has_param_types());
+        self.alloca(type_of::type_of(self.ccx, ty), name)
+    }
+
     pub fn dynamic_alloca(&self, ty: Type, name: &str) -> ValueRef {
         self.count_insn("alloca");
         unsafe {
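A note on the two alloca entry points added here: `dynamic_alloca` emits at the current insertion point, while `alloca` parks a second builder at the function's first basic block before emitting, so named stack slots all land in the entry block — the placement LLVM's mem2reg promotion expects. A toy model of that policy (stand-in types, not the real ones):

// Toy model of the alloca placement policy above: `alloca` always
// emits into the entry block, `dynamic_alloca` at the current
// insertion point. Stand-in types, not the rustc ones.
struct Function {
    blocks: Vec<Vec<String>>, // each basic block = list of instructions
}

struct Builder {
    block: usize, // index of the insertion block
}

impl Builder {
    fn dynamic_alloca(&self, func: &mut Function, name: &str) {
        func.blocks[self.block].push(format!("{} = alloca", name));
    }

    fn alloca(&self, func: &mut Function, name: &str) {
        // Mirrors the real method: position a fresh builder at the
        // function's first basic block, then emit there.
        Builder { block: 0 }.dynamic_alloca(func, name);
    }
}

fn main() {
    let mut f = Function { blocks: vec![vec![], vec![]] };
    let bcx = Builder { block: 1 }; // currently emitting in a later block
    bcx.alloca(&mut f, "%slot");
    assert_eq!(f.blocks[0], ["%slot = alloca"]);
    assert!(f.blocks[1].is_empty());
}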

View File

@@ -23,7 +23,6 @@ use rustc::traits;
 use abi::{Abi, FnType};
 use attributes;
 use base;
-use base::*;
 use common::{
     self, CrateContext, FunctionContext, SharedCrateContext
 };
@@ -348,7 +347,7 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>(
     let llenv = if env_arg.is_indirect() {
         llargs[self_idx]
     } else {
-        let scratch = alloc_ty(&bcx, closure_ty, "self");
+        let scratch = bcx.alloca_ty(closure_ty, "self");
         let mut llarg_idx = self_idx;
         env_arg.store_fn_arg(&bcx, &mut llarg_idx, scratch);
         scratch
@@ -365,12 +364,12 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>(
     // Call the by-ref closure body with `self` in a cleanup scope,
     // to drop `self` when the body returns, or in case it unwinds.
-    let self_scope = fcx.schedule_drop_mem(MaybeSizedValue::sized(llenv), closure_ty);
+    let self_scope = fcx.schedule_drop_mem(&bcx, MaybeSizedValue::sized(llenv), closure_ty);

     let llfn = callee.reify(bcx.ccx);
     let llret;
     if let Some(landing_pad) = self_scope.landing_pad {
-        let normal_bcx = bcx.fcx().build_new_block("normal-return");
+        let normal_bcx = bcx.build_new_block("normal-return");
         llret = bcx.invoke(llfn, &llargs[..], normal_bcx.llbb(), landing_pad, None);
         bcx = normal_bcx;
     } else {

View File

@@ -21,7 +21,8 @@
 use llvm::BasicBlockRef;
 use base;
 use adt::MaybeSizedValue;
-use common::{BlockAndBuilder, FunctionContext, Funclet};
+use builder::Builder;
+use common::{FunctionContext, Funclet};
 use glue;
 use type_::Type;
 use rustc::ty::Ty;
@@ -42,7 +43,7 @@ pub struct DropValue<'tcx> {
 }

 impl<'tcx> DropValue<'tcx> {
-    fn trans<'a>(&self, funclet: Option<&'a Funclet>, bcx: &BlockAndBuilder<'a, 'tcx>) {
+    fn trans<'a>(&self, funclet: Option<&'a Funclet>, bcx: &Builder<'a, 'tcx>) {
         glue::call_drop_glue(bcx, self.val, self.ty, self.skip_dtor, funclet)
     }

@@ -52,13 +53,13 @@ impl<'tcx> DropValue<'tcx> {
     /// landing_pad -> ... cleanups ... -> [resume]
     ///
     /// This should only be called once per function, as it creates an alloca for the landingpad.
-    fn get_landing_pad<'a>(&self, fcx: &FunctionContext<'a, 'tcx>) -> BasicBlockRef {
+    fn get_landing_pad<'a>(&self, bcx: &Builder<'a, 'tcx>) -> BasicBlockRef {
         debug!("get_landing_pad");

-        let bcx = fcx.build_new_block("cleanup_unwind");
+        let bcx = bcx.build_new_block("cleanup_unwind");
         let llpersonality = bcx.ccx.eh_personality();
         bcx.set_personality_fn(llpersonality);

-        if base::wants_msvc_seh(fcx.ccx.sess()) {
+        if base::wants_msvc_seh(bcx.ccx.sess()) {
             let pad = bcx.cleanup_pad(None, &[]);
             let funclet = Some(Funclet::new(pad));
             self.trans(funclet.as_ref(), &bcx);
@@ -68,10 +69,10 @@ impl<'tcx> DropValue<'tcx> {
             // The landing pad return type (the type being propagated). Not sure
             // what this represents but it's determined by the personality
             // function and this is what the EH proposal example uses.
-            let llretty = Type::struct_(fcx.ccx, &[Type::i8p(fcx.ccx), Type::i32(fcx.ccx)], false);
+            let llretty = Type::struct_(bcx.ccx, &[Type::i8p(bcx.ccx), Type::i32(bcx.ccx)], false);

             // The only landing pad clause will be 'cleanup'
-            let llretval = bcx.landing_pad(llretty, llpersonality, 1, bcx.fcx().llfn);
+            let llretval = bcx.landing_pad(llretty, llpersonality, 1, bcx.llfn());

             // The landing pad block is a cleanup
             bcx.set_cleanup(llretval);
@@ -79,7 +80,7 @@ impl<'tcx> DropValue<'tcx> {
             // Insert cleanup instructions into the cleanup block
             self.trans(None, &bcx);

-            if !bcx.sess().target.target.options.custom_unwind_resume {
+            if !bcx.ccx.sess().target.target.options.custom_unwind_resume {
                 bcx.resume(llretval);
             } else {
                 let exc_ptr = bcx.extract_value(llretval, 0);
@@ -94,7 +95,9 @@ impl<'tcx> DropValue<'tcx> {

 impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
     /// Schedules a (deep) drop of `val`, which is a pointer to an instance of `ty`
-    pub fn schedule_drop_mem(&self, val: MaybeSizedValue, ty: Ty<'tcx>) -> CleanupScope<'tcx> {
+    pub fn schedule_drop_mem(
+        &self, bcx: &Builder<'a, 'tcx>, val: MaybeSizedValue, ty: Ty<'tcx>
+    ) -> CleanupScope<'tcx> {
         if !self.ccx.shared().type_needs_drop(ty) { return CleanupScope::noop(); }
         let drop = DropValue {
             val: val,
@@ -102,7 +105,7 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
             skip_dtor: false,
         };

-        CleanupScope::new(self, drop)
+        CleanupScope::new(bcx, drop)
     }

     /// Issue #23611: Schedules a (deep) drop of the contents of
@@ -110,8 +113,9 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
     /// `ty`. The scheduled code handles extracting the discriminant
     /// and dropping the contents associated with that variant
     /// *without* executing any associated drop implementation.
-    pub fn schedule_drop_adt_contents(&self, val: MaybeSizedValue, ty: Ty<'tcx>)
-                                      -> CleanupScope<'tcx> {
+    pub fn schedule_drop_adt_contents(
+        &self, bcx: &Builder<'a, 'tcx>, val: MaybeSizedValue, ty: Ty<'tcx>
+    ) -> CleanupScope<'tcx> {
         // `if` below could be "!contents_needs_drop"; skipping drop
         // is just an optimization, so sound to be conservative.
         if !self.ccx.shared().type_needs_drop(ty) { return CleanupScope::noop(); }
@@ -122,16 +126,16 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
             skip_dtor: true,
         };

-        CleanupScope::new(self, drop)
+        CleanupScope::new(bcx, drop)
     }
 }

 impl<'tcx> CleanupScope<'tcx> {
-    fn new<'a>(fcx: &FunctionContext<'a, 'tcx>, drop_val: DropValue<'tcx>) -> CleanupScope<'tcx> {
+    fn new<'a>(bcx: &Builder<'a, 'tcx>, drop_val: DropValue<'tcx>) -> CleanupScope<'tcx> {
         CleanupScope {
             cleanup: Some(drop_val),
-            landing_pad: if !fcx.ccx.sess().no_landing_pads() {
-                Some(drop_val.get_landing_pad(fcx))
+            landing_pad: if !bcx.ccx.sess().no_landing_pads() {
+                Some(drop_val.get_landing_pad(bcx))
             } else {
                 None
             },
@@ -145,7 +149,7 @@ impl<'tcx> CleanupScope<'tcx> {
         }
     }

-    pub fn trans<'a>(self, bcx: &'a BlockAndBuilder<'a, 'tcx>) {
+    pub fn trans<'a>(self, bcx: &'a Builder<'a, 'tcx>) {
         if let Some(cleanup) = self.cleanup {
             cleanup.trans(None, &bcx);
         }
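The net effect in this file: the drop-scheduling helpers no longer reach the LLVM function through a stored context; the current `Builder` is threaded in explicitly, and a landing pad is only built from it when the session permits unwinding. Schematically, with stand-in types (not the real ones):

// Toy model of the CleanupScope change above: the scope is built from
// the caller's state, and only grows a landing pad when the session
// allows unwinding. Stand-in types, not the rustc ones.
struct Session { no_landing_pads: bool }

struct CleanupScope {
    cleanup: Option<&'static str>,     // stand-in for DropValue
    landing_pad: Option<&'static str>, // stand-in for BasicBlockRef
}

impl CleanupScope {
    fn new(sess: &Session, drop_val: &'static str) -> CleanupScope {
        CleanupScope {
            cleanup: Some(drop_val),
            landing_pad: if !sess.no_landing_pads {
                Some("cleanup_unwind") // real code: drop_val.get_landing_pad(bcx)
            } else {
                None
            },
        }
    }
}

fn main() {
    let scope = CleanupScope::new(&Session { no_landing_pads: false }, "drop self");
    assert!(scope.landing_pad.is_some());
    let scope = CleanupScope::new(&Session { no_landing_pads: true }, "drop self");
    assert!(scope.landing_pad.is_none() && scope.cleanup.is_some());
}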

View File

@@ -12,7 +12,6 @@
 //! Code that is useful in various trans modules.

-use session::Session;
 use llvm;
 use llvm::{ValueRef, BasicBlockRef, ContextRef, TypeKind};
 use llvm::{True, False, Bool, OperandBundleDef};
@@ -37,7 +36,6 @@ use rustc::hir;
 use libc::{c_uint, c_char};
 use std::borrow::Cow;
 use std::iter;
-use std::ops::Deref;
 use std::ffi::CString;
 use syntax::ast;
@@ -235,8 +233,6 @@ pub struct FunctionContext<'a, 'tcx: 'a> {
     // This function's enclosing crate context.
     pub ccx: &'a CrateContext<'a, 'tcx>,
-
-    alloca_builder: Builder<'a, 'tcx>,
 }

 impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
@@ -247,30 +243,18 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
             llfn: llfndecl,
             alloca_insert_pt: None,
             ccx: ccx,
-            alloca_builder: Builder::with_ccx(ccx),
-        };
-
-        let val = {
-            let entry_bcx = fcx.build_new_block("entry-block");
-            let val = entry_bcx.load(C_null(Type::i8p(ccx)));
-            fcx.alloca_builder.position_at_start(entry_bcx.llbb());
-            val
         };
+        let entry_bcx = Builder::new_block(fcx.ccx, fcx.llfn, "entry-block");
+        entry_bcx.position_at_start(entry_bcx.llbb());

         // Use a dummy instruction as the insertion point for all allocas.
         // This is later removed in the drop of FunctionContext.
-        fcx.alloca_insert_pt = Some(val);
+        fcx.alloca_insert_pt = Some(entry_bcx.load(C_null(Type::i8p(ccx))));

         fcx
     }

-    pub fn get_entry_block(&'a self) -> BlockAndBuilder<'a, 'tcx> {
-        BlockAndBuilder::new(unsafe {
-            llvm::LLVMGetFirstBasicBlock(self.llfn)
-        }, self)
-    }
-
-    pub fn new_block(&'a self, name: &str) -> BasicBlockRef {
+    pub fn new_block(&self, name: &str) -> BasicBlockRef {
         unsafe {
             let name = CString::new(name).unwrap();
             llvm::LLVMAppendBasicBlockInContext(
@@ -281,12 +265,14 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
         }
     }

-    pub fn build_new_block(&'a self, name: &str) -> BlockAndBuilder<'a, 'tcx> {
-        BlockAndBuilder::new(self.new_block(name), self)
+    pub fn build_new_block(&self, name: &str) -> Builder<'a, 'tcx> {
+        Builder::new_block(self.ccx, self.llfn, name)
     }

-    pub fn alloca(&self, ty: Type, name: &str) -> ValueRef {
-        self.alloca_builder.dynamic_alloca(ty, name)
+    pub fn get_entry_block(&'a self) -> Builder<'a, 'tcx> {
+        let builder = Builder::with_ccx(self.ccx);
+        builder.position_at_end(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn) });
+        builder
     }
 }

@@ -298,65 +284,6 @@ impl<'a, 'tcx> Drop for FunctionContext<'a, 'tcx> {
     }
 }

-#[must_use]
-pub struct BlockAndBuilder<'a, 'tcx: 'a> {
-    // The BasicBlockRef returned from a call to
-    // llvm::LLVMAppendBasicBlock(llfn, name), which adds a basic
-    // block to the function pointed to by llfn. We insert
-    // instructions into that block by way of this block context.
-    // The block pointing to this one in the function's digraph.
-    llbb: BasicBlockRef,
-
-    // The function context for the function to which this block is
-    // attached.
-    fcx: &'a FunctionContext<'a, 'tcx>,
-
-    builder: Builder<'a, 'tcx>,
-}
-
-impl<'a, 'tcx> BlockAndBuilder<'a, 'tcx> {
-    pub fn new(llbb: BasicBlockRef, fcx: &'a FunctionContext<'a, 'tcx>) -> Self {
-        let builder = Builder::with_ccx(fcx.ccx);
-        // Set the builder's position to this block's end.
-        builder.position_at_end(llbb);
-        BlockAndBuilder {
-            llbb: llbb,
-            fcx: fcx,
-            builder: builder,
-        }
-    }
-
-    pub fn at_start<F, R>(&self, f: F) -> R
-        where F: FnOnce(&BlockAndBuilder<'a, 'tcx>) -> R
-    {
-        self.position_at_start(self.llbb);
-        let r = f(self);
-        self.position_at_end(self.llbb);
-        r
-    }
-
-    pub fn fcx(&self) -> &'a FunctionContext<'a, 'tcx> {
-        self.fcx
-    }
-    pub fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> {
-        self.ccx.tcx()
-    }
-    pub fn sess(&self) -> &'a Session {
-        self.ccx.sess()
-    }
-
-    pub fn llbb(&self) -> BasicBlockRef {
-        self.llbb
-    }
-}
-
-impl<'a, 'tcx> Deref for BlockAndBuilder<'a, 'tcx> {
-    type Target = Builder<'a, 'tcx>;
-    fn deref(&self) -> &Self::Target {
-        &self.builder
-    }
-}
-
 /// A structure representing an active landing pad for the duration of a basic
 /// block.
 ///
@@ -725,7 +652,7 @@ pub fn langcall(tcx: TyCtxt,
 // of Java. (See related discussion on #1877 and #10183.)
 pub fn build_unchecked_lshift<'a, 'tcx>(
-    bcx: &BlockAndBuilder<'a, 'tcx>,
+    bcx: &Builder<'a, 'tcx>,
     lhs: ValueRef,
     rhs: ValueRef
 ) -> ValueRef {
@@ -736,7 +663,7 @@ pub fn build_unchecked_lshift<'a, 'tcx>(
 }

 pub fn build_unchecked_rshift<'a, 'tcx>(
-    bcx: &BlockAndBuilder<'a, 'tcx>, lhs_t: Ty<'tcx>, lhs: ValueRef, rhs: ValueRef
+    bcx: &Builder<'a, 'tcx>, lhs_t: Ty<'tcx>, lhs: ValueRef, rhs: ValueRef
 ) -> ValueRef {
     let rhs = base::cast_shift_expr_rhs(bcx, hir::BinOp_::BiShr, lhs, rhs);
     // #1877, #10183: Ensure that input is always valid
@@ -749,13 +676,13 @@ pub fn build_unchecked_rshift<'a, 'tcx>(
     }
 }

-fn shift_mask_rhs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, rhs: ValueRef) -> ValueRef {
+fn shift_mask_rhs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, rhs: ValueRef) -> ValueRef {
     let rhs_llty = val_ty(rhs);
     bcx.and(rhs, shift_mask_val(bcx, rhs_llty, rhs_llty, false))
 }

 pub fn shift_mask_val<'a, 'tcx>(
-    bcx: &BlockAndBuilder<'a, 'tcx>,
+    bcx: &Builder<'a, 'tcx>,
     llty: Type,
     mask_llty: Type,
     invert: bool
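One detail worth noting: the `#[must_use]` that sat on the removed `BlockAndBuilder` reappears on `Builder` itself in the builder.rs hunk above, which preserves a useful lint — `build_new_block` appends a basic block as a side effect, so a builder that is dropped without ever being used likely means an orphaned empty block. A two-function illustration with a toy type:

// What `#[must_use]` buys: dropping the builder for a freshly
// appended block on the floor now warns at compile time.
#[must_use]
struct Builder;

fn build_new_block(_name: &str) -> Builder {
    Builder // stand-in: would append a block and position at its end
}

fn main() {
    build_new_block("normal-return"); // warning: unused `Builder` that must be used
    let _bcx = build_new_block("catch"); // explicit binding silences it
}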

View File

@@ -27,7 +27,8 @@ use rustc::hir::def_id::DefId;
 use rustc::ty::subst::Substs;
 use abi::Abi;
-use common::{CrateContext, BlockAndBuilder};
+use common::CrateContext;
+use builder::Builder;
 use monomorphize::{self, Instance};
 use rustc::ty::{self, Ty};
 use rustc::mir;
@@ -423,7 +424,7 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
     }
 }

-pub fn declare_local<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
+pub fn declare_local<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                                dbg_context: &FunctionDebugContext,
                                variable_name: ast::Name,
                                variable_type: Ty<'tcx>,

View File

@@ -35,16 +35,17 @@ use type_::Type;
 use value::Value;
 use Disr;
 use cleanup::CleanupScope;
+use builder::Builder;

 use syntax_pos::DUMMY_SP;

 pub fn trans_exchange_free_ty<'a, 'tcx>(
-    bcx: &BlockAndBuilder<'a, 'tcx>,
+    bcx: &Builder<'a, 'tcx>,
     ptr: MaybeSizedValue,
     content_ty: Ty<'tcx>
 ) {
-    let def_id = langcall(bcx.tcx(), None, "", BoxFreeFnLangItem);
-    let substs = bcx.tcx().mk_substs(iter::once(Kind::from(content_ty)));
+    let def_id = langcall(bcx.ccx.tcx(), None, "", BoxFreeFnLangItem);
+    let substs = bcx.ccx.tcx().mk_substs(iter::once(Kind::from(content_ty)));
     let callee = Callee::def(bcx.ccx, def_id, substs);

     let fn_ty = callee.direct_fn_type(bcx.ccx, &[]);
@@ -93,12 +94,12 @@ pub fn get_drop_glue_type<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, t: Ty<'t
     }
 }

-fn drop_ty<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, args: MaybeSizedValue, t: Ty<'tcx>) {
+fn drop_ty<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, args: MaybeSizedValue, t: Ty<'tcx>) {
     call_drop_glue(bcx, args, t, false, None)
 }

 pub fn call_drop_glue<'a, 'tcx>(
-    bcx: &BlockAndBuilder<'a, 'tcx>,
+    bcx: &Builder<'a, 'tcx>,
     mut args: MaybeSizedValue,
     t: Ty<'tcx>,
     skip_dtor: bool,
@@ -232,7 +233,7 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi
         }
         ty::TyAdt(def, ..) if def.dtor_kind().is_present() && !skip_dtor => {
             let shallow_drop = def.is_union();
-            let tcx = bcx.tcx();
+            let tcx = bcx.ccx.tcx();

             let def = t.ty_adt_def().unwrap();
@@ -245,7 +246,7 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi
             // Issue #23611: schedule cleanup of contents, re-inspecting the
             // discriminant (if any) in case of variant swap in drop code.
             let contents_scope = if !shallow_drop {
-                bcx.fcx().schedule_drop_adt_contents(ptr, t)
+                fcx.schedule_drop_adt_contents(&bcx, ptr, t)
             } else {
                 CleanupScope::noop()
             };
@@ -264,7 +265,7 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi
             let llret;
             let args = &[ptr.value, ptr.meta][..1 + ptr.has_meta() as usize];
             if let Some(landing_pad) = contents_scope.landing_pad {
-                let normal_bcx = bcx.fcx().build_new_block("normal-return");
+                let normal_bcx = bcx.build_new_block("normal-return");
                 llret = bcx.invoke(callee.reify(ccx), args, normal_bcx.llbb(), landing_pad, None);
                 bcx = normal_bcx;
             } else {
@@ -288,8 +289,7 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi
     bcx.ret_void();
 }

-pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
-                                       t: Ty<'tcx>, info: ValueRef)
+pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, info: ValueRef)
                                        -> (ValueRef, ValueRef) {
     debug!("calculate size of DST: {}; with lost info: {:?}",
            t, Value(info));
@@ -331,7 +331,7 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
             // Recurse to get the size of the dynamically sized field (must be
             // the last field).
             let last_field = def.struct_variant().fields.last().unwrap();
-            let field_ty = monomorphize::field_ty(bcx.tcx(), substs, last_field);
+            let field_ty = monomorphize::field_ty(bcx.ccx.tcx(), substs, last_field);
             let (unsized_size, unsized_align) = size_and_align_of_dst(bcx, field_ty, info);

             // FIXME (#26403, #27023): We should be adding padding
@@ -383,7 +383,7 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
             (bcx.load(size_ptr), bcx.load(align_ptr))
         }
         ty::TySlice(_) | ty::TyStr => {
-            let unit_ty = t.sequence_element_type(bcx.tcx());
+            let unit_ty = t.sequence_element_type(bcx.ccx.tcx());
             // The info in this case is the length of the str, so the size is that
             // times the unit size.
             let llunit_ty = sizing_type_of(bcx.ccx, unit_ty);
@@ -397,16 +397,16 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
 }

 // Iterates through the elements of a structural type, dropping them.
-fn drop_structural_ty<'a, 'tcx>(cx: BlockAndBuilder<'a, 'tcx>,
+fn drop_structural_ty<'a, 'tcx>(cx: Builder<'a, 'tcx>,
                                 ptr: MaybeSizedValue,
                                 t: Ty<'tcx>)
-                                -> BlockAndBuilder<'a, 'tcx> {
-    fn iter_variant<'a, 'tcx>(cx: &BlockAndBuilder<'a, 'tcx>,
+                                -> Builder<'a, 'tcx> {
+    fn iter_variant<'a, 'tcx>(cx: &Builder<'a, 'tcx>,
                               t: Ty<'tcx>,
                               av: adt::MaybeSizedValue,
                               variant: &'tcx ty::VariantDef,
                               substs: &Substs<'tcx>) {
-        let tcx = cx.tcx();
+        let tcx = cx.ccx.tcx();
         for (i, field) in variant.fields.iter().enumerate() {
             let arg = monomorphize::field_ty(tcx, substs, field);
             let field_ptr = adt::trans_field_ptr(&cx, t, av, Disr::from(variant.disr_val), i);
@@ -417,7 +417,7 @@ fn drop_structural_ty<'a, 'tcx>(cx: BlockAndBuilder<'a, 'tcx>,
     let mut cx = cx;
     match t.sty {
         ty::TyClosure(def_id, substs) => {
-            for (i, upvar_ty) in substs.upvar_tys(def_id, cx.tcx()).enumerate() {
+            for (i, upvar_ty) in substs.upvar_tys(def_id, cx.ccx.tcx()).enumerate() {
                 let llupvar = adt::trans_field_ptr(&cx, t, ptr, Disr(0), i);
                 drop_ty(&cx, MaybeSizedValue::sized(llupvar), upvar_ty);
             }
@@ -425,12 +425,12 @@ fn drop_structural_ty<'a, 'tcx>(cx: BlockAndBuilder<'a, 'tcx>,
         ty::TyArray(_, n) => {
             let base = get_dataptr(&cx, ptr.value);
             let len = C_uint(cx.ccx, n);
-            let unit_ty = t.sequence_element_type(cx.tcx());
+            let unit_ty = t.sequence_element_type(cx.ccx.tcx());
             cx = tvec::slice_for_each(&cx, base, unit_ty, len,
                 |bb, vv| drop_ty(bb, MaybeSizedValue::sized(vv), unit_ty));
         }
         ty::TySlice(_) | ty::TyStr => {
-            let unit_ty = t.sequence_element_type(cx.tcx());
+            let unit_ty = t.sequence_element_type(cx.ccx.tcx());
             cx = tvec::slice_for_each(&cx, ptr.value, unit_ty, ptr.meta,
                 |bb, vv| drop_ty(bb, MaybeSizedValue::sized(vv), unit_ty));
         }
@@ -442,7 +442,7 @@ fn drop_structural_ty<'a, 'tcx>(cx: BlockAndBuilder<'a, 'tcx>,
         }
         ty::TyAdt(adt, substs) => match adt.adt_kind() {
             AdtKind::Struct => {
-                let VariantInfo { fields, discr } = VariantInfo::from_ty(cx.tcx(), t, None);
+                let VariantInfo { fields, discr } = VariantInfo::from_ty(cx.ccx.tcx(), t, None);
                 for (i, &Field(_, field_ty)) in fields.iter().enumerate() {
                     let llfld_a = adt::trans_field_ptr(&cx, t, ptr, Disr::from(discr), i);
                     let ptr = if cx.ccx.shared().type_is_sized(field_ty) {
@@ -470,7 +470,7 @@ fn drop_structural_ty<'a, 'tcx>(cx: BlockAndBuilder<'a, 'tcx>,
                     }
                 }
                 (adt::BranchKind::Switch, Some(lldiscrim_a)) => {
-                    let tcx = cx.tcx();
+                    let tcx = cx.ccx.tcx();
                     drop_ty(&cx, MaybeSizedValue::sized(lldiscrim_a), tcx.types.isize);

                     // Create a fall-through basic block for the "else" case of
@@ -486,15 +486,15 @@ fn drop_structural_ty<'a, 'tcx>(cx: BlockAndBuilder<'a, 'tcx>,
                     // from the outer function, and any other use case will only
                     // call this for an already-valid enum in which case the `ret
                     // void` will never be hit.
-                    let ret_void_cx = cx.fcx().build_new_block("enum-iter-ret-void");
+                    let ret_void_cx = cx.build_new_block("enum-iter-ret-void");
                     ret_void_cx.ret_void();
                     let llswitch = cx.switch(lldiscrim_a, ret_void_cx.llbb(), n_variants);
-                    let next_cx = cx.fcx().build_new_block("enum-iter-next");
+                    let next_cx = cx.build_new_block("enum-iter-next");

                     for variant in &adt.variants {
                         let variant_cx_name = format!("enum-iter-variant-{}",
                                                       &variant.disr_val.to_string());
-                        let variant_cx = cx.fcx().build_new_block(&variant_cx_name);
+                        let variant_cx = cx.build_new_block(&variant_cx_name);
                         let case_val = adt::trans_case(&cx, t, Disr::from(variant.disr_val));
                         variant_cx.add_case(llswitch, case_val, variant_cx.llbb());
                         iter_variant(&variant_cx, t, ptr, variant, substs);
@@ -508,7 +508,7 @@ fn drop_structural_ty<'a, 'tcx>(cx: BlockAndBuilder<'a, 'tcx>,
         },
         _ => {
-            cx.sess().unimpl(&format!("type in drop_structural_ty: {}", t))
+            cx.ccx.sess().unimpl(&format!("type in drop_structural_ty: {}", t))
         }
     }
     return cx;

View File

@@ -28,6 +28,7 @@ use Disr;
 use rustc::hir;
 use syntax::ast;
 use syntax::symbol::Symbol;
+use builder::Builder;
 use rustc::session::Session;
 use syntax_pos::Span;
@@ -87,14 +88,15 @@ fn get_simple_intrinsic(ccx: &CrateContext, name: &str) -> Option<ValueRef> {
 /// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs,
 /// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics,
 /// add them to librustc_trans/trans/context.rs
-pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
+pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
+                                      fcx: &FunctionContext,
                                       callee_ty: Ty<'tcx>,
                                       fn_ty: &FnType,
                                       llargs: &[ValueRef],
                                       llresult: ValueRef,
                                       span: Span) {
     let ccx = bcx.ccx;
-    let tcx = bcx.tcx();
+    let tcx = ccx.tcx();

     let (def_id, substs, fty) = match callee_ty.sty {
         ty::TyFnDef(def_id, substs, ref fty) => (def_id, substs, fty),
@@ -125,7 +127,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
             bcx.call(expect, &[llargs[0], C_bool(ccx, false)], None)
         }
         "try" => {
-            try_intrinsic(bcx, llargs[0], llargs[1], llargs[2], llresult);
+            try_intrinsic(bcx, fcx, llargs[0], llargs[1], llargs[2], llresult);
             C_nil(ccx)
         }
         "breakpoint" => {
@@ -533,7 +535,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
         // qux` to be converted into `foo, bar, baz, qux`, integer
         // arguments to be truncated as needed and pointers to be
        // cast.
-        fn modify_as_needed<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
+        fn modify_as_needed<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                                       t: &intrinsics::Type,
                                       arg_type: Ty<'tcx>,
                                       llarg: ValueRef)
@@ -634,7 +636,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
     }
 }

-fn copy_intrinsic<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
+fn copy_intrinsic<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                             allow_overlap: bool,
                             volatile: bool,
                             tp_ty: Ty<'tcx>,
@@ -670,7 +672,7 @@ fn copy_intrinsic<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
 }

 fn memset_intrinsic<'a, 'tcx>(
-    bcx: &BlockAndBuilder<'a, 'tcx>,
+    bcx: &Builder<'a, 'tcx>,
     volatile: bool,
     ty: Ty<'tcx>,
     dst: ValueRef,
@@ -686,19 +688,20 @@ fn memset_intrinsic<'a, 'tcx>(
 }

 fn try_intrinsic<'a, 'tcx>(
-    bcx: &BlockAndBuilder<'a, 'tcx>,
+    bcx: &Builder<'a, 'tcx>,
+    fcx: &FunctionContext,
     func: ValueRef,
     data: ValueRef,
     local_ptr: ValueRef,
     dest: ValueRef,
 ) {
-    if bcx.sess().no_landing_pads() {
+    if bcx.ccx.sess().no_landing_pads() {
         bcx.call(func, &[data], None);
         bcx.store(C_null(Type::i8p(&bcx.ccx)), dest, None);
-    } else if wants_msvc_seh(bcx.sess()) {
-        trans_msvc_try(bcx, func, data, local_ptr, dest);
+    } else if wants_msvc_seh(bcx.ccx.sess()) {
+        trans_msvc_try(bcx, fcx, func, data, local_ptr, dest);
     } else {
-        trans_gnu_try(bcx, func, data, local_ptr, dest);
+        trans_gnu_try(bcx, fcx, func, data, local_ptr, dest);
     }
 }
@@ -709,24 +712,25 @@ fn try_intrinsic<'a, 'tcx>(
 // instructions are meant to work for all targets, as of the time of this
 // writing, however, LLVM does not recommend the usage of these new instructions
 // as the old ones are still more optimized.
-fn trans_msvc_try<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
+fn trans_msvc_try<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
+                            fcx: &FunctionContext,
                             func: ValueRef,
                             data: ValueRef,
                             local_ptr: ValueRef,
                             dest: ValueRef) {
-    let llfn = get_rust_try_fn(bcx.fcx(), &mut |bcx| {
+    let llfn = get_rust_try_fn(fcx, &mut |bcx| {
         let ccx = bcx.ccx;

         bcx.set_personality_fn(bcx.ccx.eh_personality());

-        let normal = bcx.fcx().build_new_block("normal");
-        let catchswitch = bcx.fcx().build_new_block("catchswitch");
-        let catchpad = bcx.fcx().build_new_block("catchpad");
-        let caught = bcx.fcx().build_new_block("caught");
+        let normal = bcx.build_new_block("normal");
+        let catchswitch = bcx.build_new_block("catchswitch");
+        let catchpad = bcx.build_new_block("catchpad");
+        let caught = bcx.build_new_block("caught");

-        let func = llvm::get_param(bcx.fcx().llfn, 0);
-        let data = llvm::get_param(bcx.fcx().llfn, 1);
-        let local_ptr = llvm::get_param(bcx.fcx().llfn, 2);
+        let func = llvm::get_param(bcx.llfn(), 0);
+        let data = llvm::get_param(bcx.llfn(), 1);
+        let local_ptr = llvm::get_param(bcx.llfn(), 2);

         // We're generating an IR snippet that looks like:
         //
@@ -768,7 +772,7 @@ fn trans_msvc_try<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
         //
         // More information can be found in libstd's seh.rs implementation.
         let i64p = Type::i64(ccx).ptr_to();
-        let slot = bcx.fcx().alloca(i64p, "slot");
+        let slot = bcx.alloca(i64p, "slot");
         bcx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(),
                    None);
@@ -812,12 +816,13 @@ fn trans_msvc_try<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
 // function calling it, and that function may already have other personality
 // functions in play. By calling a shim we're guaranteed that our shim will have
 // the right personality function.
-fn trans_gnu_try<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
+fn trans_gnu_try<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
+                           fcx: &FunctionContext,
                            func: ValueRef,
                            data: ValueRef,
                            local_ptr: ValueRef,
                            dest: ValueRef) {
-    let llfn = get_rust_try_fn(bcx.fcx(), &mut |bcx| {
+    let llfn = get_rust_try_fn(fcx, &mut |bcx| {
         let ccx = bcx.ccx;

         // Translates the shims described above:
@@ -837,12 +842,12 @@ fn trans_gnu_try<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
         // expected to be `*mut *mut u8` for this to actually work, but that's
         // managed by the standard library.

-        let then = bcx.fcx().build_new_block("then");
-        let catch = bcx.fcx().build_new_block("catch");
+        let then = bcx.build_new_block("then");
+        let catch = bcx.build_new_block("catch");

-        let func = llvm::get_param(bcx.fcx().llfn, 0);
-        let data = llvm::get_param(bcx.fcx().llfn, 1);
-        let local_ptr = llvm::get_param(bcx.fcx().llfn, 2);
+        let func = llvm::get_param(bcx.llfn(), 0);
+        let data = llvm::get_param(bcx.llfn(), 1);
+        let local_ptr = llvm::get_param(bcx.llfn(), 2);

         bcx.invoke(func, &[data], then.llbb(), catch.llbb(), None);
         then.ret(C_i32(ccx, 0));
@@ -854,7 +859,7 @@ fn trans_gnu_try<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
         // rust_try ignores the selector.
         let lpad_ty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)],
                                     false);
-        let vals = catch.landing_pad(lpad_ty, bcx.ccx.eh_personality(), 1, catch.fcx().llfn);
+        let vals = catch.landing_pad(lpad_ty, bcx.ccx.eh_personality(), 1, catch.llfn());
         catch.add_clause(vals, C_null(Type::i8p(ccx)));
         let ptr = catch.extract_value(vals, 0);
         catch.store(ptr, catch.bitcast(local_ptr, Type::i8p(ccx).ptr_to()), None);
@@ -873,7 +878,7 @@ fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
                     name: &str,
                     inputs: Vec<Ty<'tcx>>,
                     output: Ty<'tcx>,
-                    trans: &mut for<'b> FnMut(BlockAndBuilder<'b, 'tcx>))
+                    trans: &mut for<'b> FnMut(Builder<'b, 'tcx>))
                     -> ValueRef {
     let ccx = fcx.ccx;
     let sig = ccx.tcx().mk_fn_sig(inputs.into_iter(), output, false);
@@ -894,7 +899,7 @@ fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
 //
 // This function is only generated once and is then cached.
 fn get_rust_try_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
-                             trans: &mut for<'b> FnMut(BlockAndBuilder<'b, 'tcx>))
+                             trans: &mut for<'b> FnMut(Builder<'b, 'tcx>))
                              -> ValueRef {
     let ccx = fcx.ccx;
     if let Some(llfn) = ccx.rust_try_fn().get() {
@@ -920,7 +925,7 @@ fn span_invalid_monomorphization_error(a: &Session, b: Span, c: &str) {
 }

 fn generic_simd_intrinsic<'a, 'tcx>(
-    bcx: &BlockAndBuilder<'a, 'tcx>,
+    bcx: &Builder<'a, 'tcx>,
     name: &str,
     callee_ty: Ty<'tcx>,
     llargs: &[ValueRef],
@@ -935,7 +940,7 @@ fn generic_simd_intrinsic<'a, 'tcx>(
         };
         ($msg: tt, $($fmt: tt)*) => {
             span_invalid_monomorphization_error(
-                bcx.sess(), span,
+                bcx.ccx.sess(), span,
                 &format!(concat!("invalid monomorphization of `{}` intrinsic: ",
                          $msg),
                 name, $($fmt)*));
@@ -957,7 +962,7 @@ fn generic_simd_intrinsic<'a, 'tcx>(
-    let tcx = bcx.tcx();
+    let tcx = bcx.ccx.tcx();
     let sig = tcx.erase_late_bound_regions_and_normalize(callee_ty.fn_sig());
     let arg_tys = sig.inputs();

View File

@ -13,6 +13,7 @@ use llvm::{ValueRef, get_params};
use rustc::traits; use rustc::traits;
use callee::{Callee, CalleeData}; use callee::{Callee, CalleeData};
use common::*; use common::*;
use builder::Builder;
use consts; use consts;
use declare; use declare;
use glue; use glue;
@ -27,7 +28,7 @@ use rustc::ty;
const VTABLE_OFFSET: usize = 3; const VTABLE_OFFSET: usize = 3;
/// Extracts a method from a trait object's vtable, at the specified index. /// Extracts a method from a trait object's vtable, at the specified index.
pub fn get_virtual_method<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, pub fn get_virtual_method<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
llvtable: ValueRef, llvtable: ValueRef,
vtable_index: usize) vtable_index: usize)
-> ValueRef { -> ValueRef {
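For context on `get_virtual_method`: the `VTABLE_OFFSET` constant above reflects that the first vtable slots are reserved (conventionally drop glue, size, and alignment), so method `i` lives at slot `VTABLE_OFFSET + i`. A runnable analogy with a plain slice standing in for the vtable (the real code emits a GEP plus a load with nonnull metadata):

    const VTABLE_OFFSET: usize = 3;

    /// Extracts "method" `vtable_index` by skipping the reserved slots.
    fn get_virtual_method(llvtable: &[usize], vtable_index: usize) -> usize {
        llvtable[VTABLE_OFFSET + vtable_index]
    }

    fn main() {
        // slots: drop glue, size, align, then the trait's methods
        let vtable = [0xd00d, 16, 8, 0xaa, 0xbb];
        assert_eq!(get_virtual_method(&vtable, 1), 0xbb);
    }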

View File

@ -17,7 +17,8 @@ use abi::{Abi, FnType, ArgType};
use adt::{self, MaybeSizedValue}; use adt::{self, MaybeSizedValue};
use base::{self, Lifetime}; use base::{self, Lifetime};
use callee::{Callee, CalleeData, Fn, Intrinsic, NamedTupleConstructor, Virtual}; use callee::{Callee, CalleeData, Fn, Intrinsic, NamedTupleConstructor, Virtual};
use common::{self, BlockAndBuilder, Funclet}; use builder::Builder;
use common::{self, Funclet};
use common::{C_bool, C_str_slice, C_struct, C_u32, C_undef}; use common::{C_bool, C_str_slice, C_struct, C_u32, C_undef};
use consts; use consts;
use Disr; use Disr;
@ -57,7 +58,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let cleanup_pad = funclet.map(|lp| lp.cleanuppad()); let cleanup_pad = funclet.map(|lp| lp.cleanuppad());
let cleanup_bundle = funclet.map(|l| l.bundle()); let cleanup_bundle = funclet.map(|l| l.bundle());
let funclet_br = |this: &Self, bcx: BlockAndBuilder, bb: mir::BasicBlock| { let funclet_br = |this: &Self, bcx: Builder, bb: mir::BasicBlock| {
let lltarget = this.blocks[bb]; let lltarget = this.blocks[bb];
if let Some(cp) = cleanup_pad { if let Some(cp) = cleanup_pad {
match this.cleanup_kinds[bb] { match this.cleanup_kinds[bb] {
@ -74,7 +75,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
} }
}; };
let llblock = |this: &mut Self, target: mir::BasicBlock| { let llblock = |this: &mut Self, bcx: &Builder, target: mir::BasicBlock| {
let lltarget = this.blocks[target]; let lltarget = this.blocks[target];
if let Some(cp) = cleanup_pad { if let Some(cp) = cleanup_pad {
@ -84,7 +85,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
debug!("llblock: creating cleanup trampoline for {:?}", target); debug!("llblock: creating cleanup trampoline for {:?}", target);
let name = &format!("{:?}_cleanup_trampoline_{:?}", bb, target); let name = &format!("{:?}_cleanup_trampoline_{:?}", bb, target);
let trampoline = this.fcx.build_new_block(name); let trampoline = bcx.build_new_block(name);
trampoline.cleanup_ret(cp, Some(lltarget)); trampoline.cleanup_ret(cp, Some(lltarget));
trampoline.llbb() trampoline.llbb()
} }
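`llblock` now builds its cleanup trampolines from the builder (`bcx.build_new_block`) rather than from the `FunctionContext`. The trampoline exists because, under MSVC-style funclets, a normal branch may not jump straight into a cleanup block; a one-instruction block performing `cleanupret` is interposed instead. A sketch of just the control-flow decision, with stand-in types and the actual IR emission elided:

    #[derive(Copy, Clone, PartialEq, Debug)]
    struct Block(u32);

    struct Builder { next_block: u32 }
    impl Builder {
        // The real build_new_block takes &self; &mut is a stand-in shortcut.
        fn build_new_block(&mut self, _name: &str) -> Block {
            self.next_block += 1;
            Block(self.next_block)
        }
    }

    /// The block to branch to: the target itself, or a fresh trampoline
    /// that would `cleanupret` into it.
    fn llblock(bcx: &mut Builder, target: Block, in_cleanup_funclet: bool) -> Block {
        if in_cleanup_funclet {
            let trampoline = bcx.build_new_block("cleanup_trampoline");
            // here: trampoline.cleanup_ret(cp, Some(target))
            trampoline
        } else {
            target
        }
    }

    fn main() {
        let mut bcx = Builder { next_block: 0 };
        let target = Block(0);
        assert_ne!(llblock(&mut bcx, target, true), target);
        assert_eq!(llblock(&mut bcx, target, false), target);
    }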
@ -121,7 +122,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let ps = self.get_personality_slot(&bcx); let ps = self.get_personality_slot(&bcx);
let lp = bcx.load(ps); let lp = bcx.load(ps);
Lifetime::End.call(&bcx, ps); Lifetime::End.call(&bcx, ps);
if !bcx.sess().target.target.options.custom_unwind_resume { if !bcx.ccx.sess().target.target.options.custom_unwind_resume {
bcx.resume(lp); bcx.resume(lp);
} else { } else {
let exc_ptr = bcx.extract_value(lp, 0); let exc_ptr = bcx.extract_value(lp, 0);
@ -138,14 +139,14 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
mir::TerminatorKind::If { ref cond, targets: (true_bb, false_bb) } => { mir::TerminatorKind::If { ref cond, targets: (true_bb, false_bb) } => {
let cond = self.trans_operand(&bcx, cond); let cond = self.trans_operand(&bcx, cond);
let lltrue = llblock(self, true_bb); let lltrue = llblock(self, &bcx, true_bb);
let llfalse = llblock(self, false_bb); let llfalse = llblock(self, &bcx, false_bb);
bcx.cond_br(cond.immediate(), lltrue, llfalse); bcx.cond_br(cond.immediate(), lltrue, llfalse);
} }
mir::TerminatorKind::Switch { ref discr, ref adt_def, ref targets } => { mir::TerminatorKind::Switch { ref discr, ref adt_def, ref targets } => {
let discr_lvalue = self.trans_lvalue(&bcx, discr); let discr_lvalue = self.trans_lvalue(&bcx, discr);
let ty = discr_lvalue.ty.to_ty(bcx.tcx()); let ty = discr_lvalue.ty.to_ty(bcx.ccx.tcx());
let discr = adt::trans_get_discr(&bcx, ty, discr_lvalue.llval, None, true); let discr = adt::trans_get_discr(&bcx, ty, discr_lvalue.llval, None, true);
let mut bb_hist = FxHashMap(); let mut bb_hist = FxHashMap();
@ -158,7 +159,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
// code. This is especially helpful in cases like an if-let on a huge enum. // code. This is especially helpful in cases like an if-let on a huge enum.
// Note: This optimization is only valid for exhaustive matches. // Note: This optimization is only valid for exhaustive matches.
Some((&&bb, &c)) if c > targets.len() / 2 => { Some((&&bb, &c)) if c > targets.len() / 2 => {
(Some(bb), llblock(self, bb)) (Some(bb), llblock(self, &bcx, bb))
} }
// We're generating an exhaustive switch, so the else branch // We're generating an exhaustive switch, so the else branch
// can't be hit. Branching to an unreachable instruction // can't be hit. Branching to an unreachable instruction
@ -169,7 +170,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
assert_eq!(adt_def.variants.len(), targets.len()); assert_eq!(adt_def.variants.len(), targets.len());
for (adt_variant, &target) in adt_def.variants.iter().zip(targets) { for (adt_variant, &target) in adt_def.variants.iter().zip(targets) {
if default_bb != Some(target) { if default_bb != Some(target) {
let llbb = llblock(self, target); let llbb = llblock(self, &bcx, target);
let llval = adt::trans_case(&bcx, ty, Disr::from(adt_variant.disr_val)); let llval = adt::trans_case(&bcx, ty, Disr::from(adt_variant.disr_val));
bcx.add_case(switch, llval, llbb) bcx.add_case(switch, llval, llbb)
} }
@ -180,10 +181,10 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let (otherwise, targets) = targets.split_last().unwrap(); let (otherwise, targets) = targets.split_last().unwrap();
let discr = bcx.load(self.trans_lvalue(&bcx, discr).llval); let discr = bcx.load(self.trans_lvalue(&bcx, discr).llval);
let discr = base::to_immediate(&bcx, discr, switch_ty); let discr = base::to_immediate(&bcx, discr, switch_ty);
let switch = bcx.switch(discr, llblock(self, *otherwise), values.len()); let switch = bcx.switch(discr, llblock(self, &bcx, *otherwise), values.len());
for (value, target) in values.iter().zip(targets) { for (value, target) in values.iter().zip(targets) {
let val = Const::from_constval(bcx.ccx, value.clone(), switch_ty); let val = Const::from_constval(bcx.ccx, value.clone(), switch_ty);
let llbb = llblock(self, *target); let llbb = llblock(self, &bcx, *target);
bcx.add_case(switch, val.llval, llbb) bcx.add_case(switch, val.llval, llbb)
} }
} }
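The `bb_hist` logic above deserves a gloss: for an exhaustive `Switch`, if a single target block absorbs more than half of the variants, it is promoted to the switch's `otherwise` branch so that only the minority cases get explicit arms (the if-let-on-a-huge-enum case the comment mentions). A runnable analogy, with `usize` standing in for `mir::BasicBlock` and `HashMap` for `FxHashMap`:

    use std::collections::HashMap;

    fn pick_default(targets: &[usize]) -> Option<usize> {
        let mut bb_hist: HashMap<usize, usize> = HashMap::new();
        for &t in targets {
            *bb_hist.entry(t).or_insert(0) += 1;
        }
        // Only valid for exhaustive matches: promote a majority target.
        bb_hist.iter()
            .max_by_key(|&(_, &c)| c)
            .and_then(|(&bb, &c)| if c > targets.len() / 2 { Some(bb) } else { None })
    }

    fn main() {
        // Most variants fall through to one block, as in an if-let.
        assert_eq!(pick_default(&[7, 7, 7, 7, 2]), Some(7));
        assert_eq!(pick_default(&[1, 2, 3, 4]), None);
    }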
@ -202,7 +203,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
LocalRef::Lvalue(tr_lvalue) => { LocalRef::Lvalue(tr_lvalue) => {
OperandRef { OperandRef {
val: Ref(tr_lvalue.llval), val: Ref(tr_lvalue.llval),
ty: tr_lvalue.ty.to_ty(bcx.tcx()) ty: tr_lvalue.ty.to_ty(bcx.ccx.tcx())
} }
} }
}; };
@ -232,7 +233,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
} }
mir::TerminatorKind::Drop { ref location, target, unwind } => { mir::TerminatorKind::Drop { ref location, target, unwind } => {
let ty = location.ty(&self.mir, bcx.tcx()).to_ty(bcx.tcx()); let ty = location.ty(&self.mir, bcx.ccx.tcx()).to_ty(bcx.ccx.tcx());
let ty = self.monomorphize(&ty); let ty = self.monomorphize(&ty);
// Double-check whether a drop is actually necessary // Double-check whether a drop is actually necessary
@ -260,7 +261,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
drop_fn, drop_fn,
args, args,
self.blocks[target], self.blocks[target],
llblock(self, unwind), llblock(self, &bcx, unwind),
cleanup_bundle cleanup_bundle
); );
} else { } else {
@ -300,7 +301,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let cond = bcx.call(expect, &[cond, C_bool(bcx.ccx, expected)], None); let cond = bcx.call(expect, &[cond, C_bool(bcx.ccx, expected)], None);
// Create the failure block and the conditional branch to it. // Create the failure block and the conditional branch to it.
let lltarget = llblock(self, target); let lltarget = llblock(self, &bcx, target);
let panic_block = self.fcx.build_new_block("panic"); let panic_block = self.fcx.build_new_block("panic");
if expected { if expected {
bcx.cond_br(cond, lltarget, panic_block.llbb()); bcx.cond_br(cond, lltarget, panic_block.llbb());
@ -313,7 +314,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
self.set_debug_loc(&bcx, terminator.source_info); self.set_debug_loc(&bcx, terminator.source_info);
// Get the location information. // Get the location information.
let loc = bcx.sess().codemap().lookup_char_pos(span.lo); let loc = bcx.ccx.sess().codemap().lookup_char_pos(span.lo);
let filename = Symbol::intern(&loc.file.name).as_str(); let filename = Symbol::intern(&loc.file.name).as_str();
let filename = C_str_slice(bcx.ccx, filename); let filename = C_str_slice(bcx.ccx, filename);
let line = C_u32(bcx.ccx, loc.line as u32); let line = C_u32(bcx.ccx, loc.line as u32);
@ -363,15 +364,15 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
if const_cond == Some(!expected) { if const_cond == Some(!expected) {
if let Some(err) = const_err { if let Some(err) = const_err {
let err = ConstEvalErr{ span: span, kind: err }; let err = ConstEvalErr{ span: span, kind: err };
let mut diag = bcx.tcx().sess.struct_span_warn( let mut diag = bcx.ccx.tcx().sess.struct_span_warn(
span, "this expression will panic at run-time"); span, "this expression will panic at run-time");
note_const_eval_err(bcx.tcx(), &err, span, "expression", &mut diag); note_const_eval_err(bcx.ccx.tcx(), &err, span, "expression", &mut diag);
diag.emit(); diag.emit();
} }
} }
// Obtain the panic entry point. // Obtain the panic entry point.
let def_id = common::langcall(bcx.tcx(), Some(span), "", lang_item); let def_id = common::langcall(bcx.ccx.tcx(), Some(span), "", lang_item);
let callee = Callee::def(bcx.ccx, def_id, let callee = Callee::def(bcx.ccx, def_id,
bcx.ccx.empty_substs_for_def_id(def_id)); bcx.ccx.empty_substs_for_def_id(def_id));
let llfn = callee.reify(bcx.ccx); let llfn = callee.reify(bcx.ccx);
@ -381,7 +382,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
bcx.invoke(llfn, bcx.invoke(llfn,
&args, &args,
self.unreachable_block(), self.unreachable_block(),
llblock(self, unwind), llblock(self, &bcx, unwind),
cleanup_bundle); cleanup_bundle);
} else { } else {
bcx.call(llfn, &args, cleanup_bundle); bcx.call(llfn, &args, cleanup_bundle);
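The invoke-versus-call split above is the usual unwinding rule: when the panic must run cleanups, the call site has to be an `invoke` with an unwind edge to the cleanup block, and since the panic entry point never returns, its normal edge targets the function's shared `unreachable` block; with no cleanup in scope, a plain `call` suffices. A minimal sketch of that decision (stand-in types, illustrative block names):

    enum CallSite {
        Invoke { normal: &'static str, unwind: &'static str },
        Call,
    }

    fn lower_panic_call(has_cleanup: bool) -> CallSite {
        if has_cleanup {
            // bcx.invoke(llfn, args, unreachable_block, llblock(unwind), ..)
            CallSite::Invoke { normal: "unreachable", unwind: "cleanup" }
        } else {
            // bcx.call(llfn, args, cleanup_bundle)
            CallSite::Call
        }
    }

    fn main() {
        assert!(matches!(lower_panic_call(true), CallSite::Invoke { .. }));
        assert!(matches!(lower_panic_call(false), CallSite::Call));
    }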
@ -410,12 +411,12 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
_ => bug!("{} is not callable", callee.ty) _ => bug!("{} is not callable", callee.ty)
}; };
let sig = bcx.tcx().erase_late_bound_regions_and_normalize(sig); let sig = bcx.ccx.tcx().erase_late_bound_regions_and_normalize(sig);
// Handle intrinsics old trans wants Expr's for, ourselves. // Handle intrinsics old trans wants Expr's for, ourselves.
let intrinsic = match (&callee.ty.sty, &callee.data) { let intrinsic = match (&callee.ty.sty, &callee.data) {
(&ty::TyFnDef(def_id, ..), &Intrinsic) => { (&ty::TyFnDef(def_id, ..), &Intrinsic) => {
Some(bcx.tcx().item_name(def_id).as_str()) Some(bcx.ccx.tcx().item_name(def_id).as_str())
} }
_ => None _ => None
}; };
@ -443,7 +444,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let extra_args = &args[sig.inputs().len()..]; let extra_args = &args[sig.inputs().len()..];
let extra_args = extra_args.iter().map(|op_arg| { let extra_args = extra_args.iter().map(|op_arg| {
let op_ty = op_arg.ty(&self.mir, bcx.tcx()); let op_ty = op_arg.ty(&self.mir, bcx.ccx.tcx());
self.monomorphize(&op_ty) self.monomorphize(&op_ty)
}).collect::<Vec<_>>(); }).collect::<Vec<_>>();
let fn_ty = callee.direct_fn_type(bcx.ccx, &extra_args); let fn_ty = callee.direct_fn_type(bcx.ccx, &extra_args);
@ -545,7 +546,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
bug!("Cannot use direct operand with an intrinsic call") bug!("Cannot use direct operand with an intrinsic call")
}; };
trans_intrinsic_call(&bcx, callee.ty, &fn_ty, &llargs, dest, trans_intrinsic_call(&bcx, self.fcx, callee.ty, &fn_ty, &llargs, dest,
terminator.source_info.span); terminator.source_info.span);
if let ReturnDest::IndirectOperand(dst, _) = ret_dest { if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
@ -579,20 +580,20 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let invokeret = bcx.invoke(fn_ptr, let invokeret = bcx.invoke(fn_ptr,
&llargs, &llargs,
ret_bcx, ret_bcx,
llblock(self, cleanup), llblock(self, &bcx, cleanup),
cleanup_bundle); cleanup_bundle);
fn_ty.apply_attrs_callsite(invokeret); fn_ty.apply_attrs_callsite(invokeret);
if let Some((_, target)) = *destination { if let Some((_, target)) = *destination {
let ret_bcx = self.build_block(target); let ret_bcx = self.build_block(target);
ret_bcx.at_start(|ret_bcx| { ret_bcx.position_at_start(ret_bcx.llbb());
self.set_debug_loc(&ret_bcx, terminator.source_info); self.set_debug_loc(&ret_bcx, terminator.source_info);
let op = OperandRef { let op = OperandRef {
val: Immediate(invokeret), val: Immediate(invokeret),
ty: sig.output(), ty: sig.output(),
}; };
self.store_return(&ret_bcx, ret_dest, fn_ty.ret, op); self.store_return(&ret_bcx, ret_dest, fn_ty.ret, op);
}); ret_bcx.position_at_end(ret_bcx.llbb());
} }
} else { } else {
let llret = bcx.call(fn_ptr, &llargs, cleanup_bundle); let llret = bcx.call(fn_ptr, &llargs, cleanup_bundle);
@ -613,7 +614,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
} }
fn trans_argument(&mut self, fn trans_argument(&mut self,
bcx: &BlockAndBuilder<'a, 'tcx>, bcx: &Builder<'a, 'tcx>,
op: OperandRef<'tcx>, op: OperandRef<'tcx>,
llargs: &mut Vec<ValueRef>, llargs: &mut Vec<ValueRef>,
fn_ty: &FnType, fn_ty: &FnType,
@ -634,7 +635,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let imm_op = |x| OperandRef { let imm_op = |x| OperandRef {
val: Immediate(x), val: Immediate(x),
// We won't be checking the type again. // We won't be checking the type again.
ty: bcx.tcx().types.err ty: bcx.ccx.tcx().types.err
}; };
self.trans_argument(bcx, imm_op(ptr), llargs, fn_ty, next_idx, callee); self.trans_argument(bcx, imm_op(ptr), llargs, fn_ty, next_idx, callee);
self.trans_argument(bcx, imm_op(meta), llargs, fn_ty, next_idx, callee); self.trans_argument(bcx, imm_op(meta), llargs, fn_ty, next_idx, callee);
@ -689,7 +690,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
} }
fn trans_arguments_untupled(&mut self, fn trans_arguments_untupled(&mut self,
bcx: &BlockAndBuilder<'a, 'tcx>, bcx: &Builder<'a, 'tcx>,
operand: &mir::Operand<'tcx>, operand: &mir::Operand<'tcx>,
llargs: &mut Vec<ValueRef>, llargs: &mut Vec<ValueRef>,
fn_ty: &FnType, fn_ty: &FnType,
@ -765,13 +766,13 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
} }
fn get_personality_slot(&mut self, bcx: &BlockAndBuilder<'a, 'tcx>) -> ValueRef { fn get_personality_slot(&mut self, bcx: &Builder<'a, 'tcx>) -> ValueRef {
let ccx = bcx.ccx; let ccx = bcx.ccx;
if let Some(slot) = self.llpersonalityslot { if let Some(slot) = self.llpersonalityslot {
slot slot
} else { } else {
let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false); let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false);
let slot = bcx.fcx().alloca(llretty, "personalityslot"); let slot = bcx.alloca(llretty, "personalityslot");
self.llpersonalityslot = Some(slot); self.llpersonalityslot = Some(slot);
Lifetime::Start.call(bcx, slot); Lifetime::Start.call(bcx, slot);
slot slot
@ -815,11 +816,13 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
}) })
} }
pub fn build_block(&self, bb: mir::BasicBlock) -> BlockAndBuilder<'a, 'tcx> { pub fn build_block(&self, bb: mir::BasicBlock) -> Builder<'a, 'tcx> {
BlockAndBuilder::new(self.blocks[bb], self.fcx) let builder = Builder::with_ccx(self.fcx.ccx);
builder.position_at_end(self.blocks[bb]);
builder
} }
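`build_block` shows the new construction idiom: instead of pairing a basic block with an `fcx` via `BlockAndBuilder::new`, a `Builder` is created from the crate context alone and positioned at the end of the requested block. A stand-in sketch (the real `position_at_end` takes `&self` and mutates through LLVM's FFI; plain Rust needs `&mut self`):

    #[derive(Copy, Clone)]
    struct BasicBlockRef(u32);
    struct CrateContext;
    struct Builder<'a> { ccx: &'a CrateContext, pos: Option<BasicBlockRef> }

    impl<'a> Builder<'a> {
        fn with_ccx(ccx: &'a CrateContext) -> Builder<'a> {
            Builder { ccx, pos: None } // not yet positioned anywhere
        }
        fn position_at_end(&mut self, llbb: BasicBlockRef) {
            self.pos = Some(llbb); // LLVMPositionBuilderAtEnd in the real code
        }
    }

    fn build_block<'a>(ccx: &'a CrateContext, llbb: BasicBlockRef) -> Builder<'a> {
        let mut builder = Builder::with_ccx(ccx);
        builder.position_at_end(llbb);
        builder
    }

    fn main() {
        let ccx = CrateContext;
        assert!(build_block(&ccx, BasicBlockRef(0)).pos.is_some());
    }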
fn make_return_dest(&mut self, bcx: &BlockAndBuilder<'a, 'tcx>, fn make_return_dest(&mut self, bcx: &Builder<'a, 'tcx>,
dest: &mir::Lvalue<'tcx>, fn_ret_ty: &ArgType, dest: &mir::Lvalue<'tcx>, fn_ret_ty: &ArgType,
llargs: &mut Vec<ValueRef>, is_intrinsic: bool) -> ReturnDest { llargs: &mut Vec<ValueRef>, is_intrinsic: bool) -> ReturnDest {
// If the return is ignored, we can just return a do-nothing ReturnDest // If the return is ignored, we can just return a do-nothing ReturnDest
@ -836,14 +839,14 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
return if fn_ret_ty.is_indirect() { return if fn_ret_ty.is_indirect() {
// Odd, but possible, case: we have an operand temporary, // Odd, but possible, case: we have an operand temporary,
// but the calling convention has an indirect return. // but the calling convention has an indirect return.
let tmp = base::alloc_ty(bcx, ret_ty, "tmp_ret"); let tmp = bcx.alloca_ty(ret_ty, "tmp_ret");
llargs.push(tmp); llargs.push(tmp);
ReturnDest::IndirectOperand(tmp, index) ReturnDest::IndirectOperand(tmp, index)
} else if is_intrinsic { } else if is_intrinsic {
// Currently, intrinsics always need a location to store // Currently, intrinsics always need a location to store
// the result, so we create a temporary alloca for the // the result, so we create a temporary alloca for the
// result // result
let tmp = base::alloc_ty(bcx, ret_ty, "tmp_ret"); let tmp = bcx.alloca_ty(ret_ty, "tmp_ret");
ReturnDest::IndirectOperand(tmp, index) ReturnDest::IndirectOperand(tmp, index)
} else { } else {
ReturnDest::DirectOperand(index) ReturnDest::DirectOperand(index)
@ -864,17 +867,17 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
} }
} }
fn trans_transmute(&mut self, bcx: &BlockAndBuilder<'a, 'tcx>, fn trans_transmute(&mut self, bcx: &Builder<'a, 'tcx>,
src: &mir::Operand<'tcx>, dst: LvalueRef<'tcx>) { src: &mir::Operand<'tcx>, dst: LvalueRef<'tcx>) {
let mut val = self.trans_operand(bcx, src); let mut val = self.trans_operand(bcx, src);
if let ty::TyFnDef(def_id, substs, _) = val.ty.sty { if let ty::TyFnDef(def_id, substs, _) = val.ty.sty {
let llouttype = type_of::type_of(bcx.ccx, dst.ty.to_ty(bcx.tcx())); let llouttype = type_of::type_of(bcx.ccx, dst.ty.to_ty(bcx.ccx.tcx()));
let out_type_size = llbitsize_of_real(bcx.ccx, llouttype); let out_type_size = llbitsize_of_real(bcx.ccx, llouttype);
if out_type_size != 0 { if out_type_size != 0 {
// FIXME #19925 Remove this hack after a release cycle. // FIXME #19925 Remove this hack after a release cycle.
let f = Callee::def(bcx.ccx, def_id, substs); let f = Callee::def(bcx.ccx, def_id, substs);
let ty = match f.ty.sty { let ty = match f.ty.sty {
ty::TyFnDef(.., f) => bcx.tcx().mk_fn_ptr(f), ty::TyFnDef(.., f) => bcx.ccx.tcx().mk_fn_ptr(f),
_ => f.ty _ => f.ty
}; };
val = OperandRef { val = OperandRef {
@ -895,7 +898,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
// Stores the return value of a function call into its final location. // Stores the return value of a function call into its final location.
fn store_return(&mut self, fn store_return(&mut self,
bcx: &BlockAndBuilder<'a, 'tcx>, bcx: &Builder<'a, 'tcx>,
dest: ReturnDest, dest: ReturnDest,
ret_ty: ArgType, ret_ty: ArgType,
op: OperandRef<'tcx>) { op: OperandRef<'tcx>) {
@ -911,7 +914,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
DirectOperand(index) => { DirectOperand(index) => {
// If there is a cast, we have to store and reload. // If there is a cast, we have to store and reload.
let op = if ret_ty.cast.is_some() { let op = if ret_ty.cast.is_some() {
let tmp = base::alloc_ty(bcx, op.ty, "tmp_ret"); let tmp = bcx.alloca_ty(op.ty, "tmp_ret");
ret_ty.store(bcx, op.immediate(), tmp); ret_ty.store(bcx, op.immediate(), tmp);
self.trans_load(bcx, tmp, op.ty) self.trans_load(bcx, tmp, op.ty)
} else { } else {

View File

@ -24,10 +24,11 @@ use rustc::ty::subst::Substs;
use rustc_data_structures::indexed_vec::{Idx, IndexVec}; use rustc_data_structures::indexed_vec::{Idx, IndexVec};
use {abi, adt, base, Disr, machine}; use {abi, adt, base, Disr, machine};
use callee::Callee; use callee::Callee;
use common::{self, BlockAndBuilder, CrateContext, const_get_elt, val_ty}; use builder::Builder;
use common::{self, CrateContext, const_get_elt, val_ty};
use common::{C_array, C_bool, C_bytes, C_floating_f64, C_integral, C_big_integral}; use common::{C_array, C_bool, C_bytes, C_floating_f64, C_integral, C_big_integral};
use common::{C_null, C_struct, C_str_slice, C_undef, C_uint}; use common::{C_null, C_struct, C_str_slice, C_undef, C_uint};
use common::{const_to_opt_u128}; use common::const_to_opt_u128;
use consts; use consts;
use monomorphize::{self, Instance}; use monomorphize::{self, Instance};
use type_of; use type_of;
@ -900,7 +901,7 @@ pub fn const_scalar_checked_binop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
impl<'a, 'tcx> MirContext<'a, 'tcx> { impl<'a, 'tcx> MirContext<'a, 'tcx> {
pub fn trans_constant(&mut self, pub fn trans_constant(&mut self,
bcx: &BlockAndBuilder<'a, 'tcx>, bcx: &Builder<'a, 'tcx>,
constant: &mir::Constant<'tcx>) constant: &mir::Constant<'tcx>)
-> Const<'tcx> -> Const<'tcx>
{ {

View File

@ -14,8 +14,8 @@ use rustc::mir;
use rustc::mir::tcx::LvalueTy; use rustc::mir::tcx::LvalueTy;
use rustc_data_structures::indexed_vec::Idx; use rustc_data_structures::indexed_vec::Idx;
use adt; use adt;
use base; use builder::Builder;
use common::{self, BlockAndBuilder, CrateContext, C_uint, C_undef}; use common::{self, CrateContext, C_uint, C_undef};
use consts; use consts;
use machine; use machine;
use type_of::type_of; use type_of::type_of;
@ -44,16 +44,6 @@ impl<'tcx> LvalueRef<'tcx> {
LvalueRef { llval: llval, llextra: ptr::null_mut(), ty: lvalue_ty } LvalueRef { llval: llval, llextra: ptr::null_mut(), ty: lvalue_ty }
} }
pub fn alloca<'a>(bcx: &BlockAndBuilder<'a, 'tcx>,
ty: Ty<'tcx>,
name: &str)
-> LvalueRef<'tcx>
{
assert!(!ty.has_erasable_regions());
let lltemp = base::alloc_ty(bcx, ty, name);
LvalueRef::new_sized(lltemp, LvalueTy::from_ty(ty))
}
pub fn len<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef { pub fn len<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef {
let ty = self.ty.to_ty(ccx.tcx()); let ty = self.ty.to_ty(ccx.tcx());
match ty.sty { match ty.sty {
@ -69,13 +59,13 @@ impl<'tcx> LvalueRef<'tcx> {
impl<'a, 'tcx> MirContext<'a, 'tcx> { impl<'a, 'tcx> MirContext<'a, 'tcx> {
pub fn trans_lvalue(&mut self, pub fn trans_lvalue(&mut self,
bcx: &BlockAndBuilder<'a, 'tcx>, bcx: &Builder<'a, 'tcx>,
lvalue: &mir::Lvalue<'tcx>) lvalue: &mir::Lvalue<'tcx>)
-> LvalueRef<'tcx> { -> LvalueRef<'tcx> {
debug!("trans_lvalue(lvalue={:?})", lvalue); debug!("trans_lvalue(lvalue={:?})", lvalue);
let ccx = bcx.ccx; let ccx = bcx.ccx;
let tcx = bcx.tcx(); let tcx = ccx.tcx();
if let mir::Lvalue::Local(index) = *lvalue { if let mir::Lvalue::Local(index) = *lvalue {
match self.locals[index] { match self.locals[index] {
@ -177,7 +167,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let llindex = C_uint(bcx.ccx, from); let llindex = C_uint(bcx.ccx, from);
let llbase = project_index(llindex); let llbase = project_index(llindex);
let base_ty = tr_base.ty.to_ty(bcx.tcx()); let base_ty = tr_base.ty.to_ty(bcx.ccx.tcx());
match base_ty.sty { match base_ty.sty {
ty::TyArray(..) => { ty::TyArray(..) => {
// must cast the lvalue pointer type to the new // must cast the lvalue pointer type to the new
@ -214,7 +204,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
// Perform an action using the given Lvalue. // Perform an action using the given Lvalue.
// If the Lvalue is an empty LocalRef::Operand, then a temporary stack slot // If the Lvalue is an empty LocalRef::Operand, then a temporary stack slot
// is created first, then used as an operand to update the Lvalue. // is created first, then used as an operand to update the Lvalue.
pub fn with_lvalue_ref<F, U>(&mut self, bcx: &BlockAndBuilder<'a, 'tcx>, pub fn with_lvalue_ref<F, U>(&mut self, bcx: &Builder<'a, 'tcx>,
lvalue: &mir::Lvalue<'tcx>, f: F) -> U lvalue: &mir::Lvalue<'tcx>, f: F) -> U
where F: FnOnce(&mut Self, LvalueRef<'tcx>) -> U where F: FnOnce(&mut Self, LvalueRef<'tcx>) -> U
{ {
@ -223,9 +213,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
LocalRef::Lvalue(lvalue) => f(self, lvalue), LocalRef::Lvalue(lvalue) => f(self, lvalue),
LocalRef::Operand(None) => { LocalRef::Operand(None) => {
let lvalue_ty = self.monomorphized_lvalue_ty(lvalue); let lvalue_ty = self.monomorphized_lvalue_ty(lvalue);
let lvalue = LvalueRef::alloca(bcx, assert!(!lvalue_ty.has_erasable_regions());
lvalue_ty, let lltemp = bcx.alloca_ty(lvalue_ty, "lvalue_temp");
"lvalue_temp"); let lvalue = LvalueRef::new_sized(lltemp, LvalueTy::from_ty(lvalue_ty));
let ret = f(self, lvalue); let ret = f(self, lvalue);
let op = self.trans_load(bcx, lvalue.llval, lvalue_ty); let op = self.trans_load(bcx, lvalue.llval, lvalue_ty);
self.locals[index] = LocalRef::Operand(Some(op)); self.locals[index] = LocalRef::Operand(Some(op));
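This hunk inlines the `LvalueRef::alloca` helper that the lvalue.rs diff deletes a few hunks up: the erasable-regions assert, the alloca, and the `new_sized` wrapping now happen at the call site, with the allocation going through the `Builder::alloca_ty` helper this commit uses throughout. The pattern, with stand-in types:

    #[derive(Copy, Clone)] struct Ty;
    impl Ty { fn has_erasable_regions(&self) -> bool { false } }
    #[derive(Copy, Clone)] struct ValueRef;
    struct LvalueRef { llval: ValueRef, ty: Ty }
    impl LvalueRef {
        fn new_sized(llval: ValueRef, ty: Ty) -> LvalueRef {
            LvalueRef { llval, ty }
        }
    }
    struct Builder;
    impl Builder {
        fn alloca_ty(&self, _ty: Ty, _name: &str) -> ValueRef { ValueRef }
    }

    fn temp_lvalue(bcx: &Builder, ty: Ty) -> LvalueRef {
        assert!(!ty.has_erasable_regions());           // was inside LvalueRef::alloca
        let lltemp = bcx.alloca_ty(ty, "lvalue_temp"); // was base::alloc_ty
        LvalueRef::new_sized(lltemp, ty)
    }

    fn main() { let _ = temp_lvalue(&Builder, Ty); }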
@ -254,18 +244,13 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
/// than we are. /// than we are.
/// ///
/// nmatsakis: is this still necessary? Not sure. /// nmatsakis: is this still necessary? Not sure.
fn prepare_index(&mut self, fn prepare_index(&mut self, bcx: &Builder<'a, 'tcx>, llindex: ValueRef) -> ValueRef {
bcx: &BlockAndBuilder<'a, 'tcx>,
llindex: ValueRef)
-> ValueRef
{
let ccx = bcx.ccx;
let index_size = machine::llbitsize_of_real(bcx.ccx, common::val_ty(llindex)); let index_size = machine::llbitsize_of_real(bcx.ccx, common::val_ty(llindex));
let int_size = machine::llbitsize_of_real(bcx.ccx, ccx.int_type()); let int_size = machine::llbitsize_of_real(bcx.ccx, bcx.ccx.int_type());
if index_size < int_size { if index_size < int_size {
bcx.zext(llindex, ccx.int_type()) bcx.zext(llindex, bcx.ccx.int_type())
} else if index_size > int_size { } else if index_size > int_size {
bcx.trunc(llindex, ccx.int_type()) bcx.trunc(llindex, bcx.ccx.int_type())
} else { } else {
llindex llindex
} }
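`prepare_index` is behaviorally unchanged here: indices must match the machine's int width before they feed a GEP, so narrower values are zero-extended (indices are unsigned, hence zext rather than sext) and wider values truncated. A runnable reduction of the decision, with bit widths standing in for the LLVM types:

    enum IndexOp { Zext, Trunc, AsIs }

    fn prepare_index(index_size: u32, int_size: u32) -> IndexOp {
        if index_size < int_size {
            IndexOp::Zext  // bcx.zext(llindex, int_type)
        } else if index_size > int_size {
            IndexOp::Trunc // bcx.trunc(llindex, int_type)
        } else {
            IndexOp::AsIs  // llindex used as-is
        }
    }

    fn main() {
        assert!(matches!(prepare_index(8, 64), IndexOp::Zext));
        assert!(matches!(prepare_index(128, 64), IndexOp::Trunc));
        assert!(matches!(prepare_index(64, 64), IndexOp::AsIs));
    }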

View File

@ -19,7 +19,8 @@ use rustc::infer::TransNormalize;
use rustc::ty::TypeFoldable; use rustc::ty::TypeFoldable;
use session::config::FullDebugInfo; use session::config::FullDebugInfo;
use base; use base;
use common::{self, BlockAndBuilder, CrateContext, FunctionContext, C_null, Funclet}; use builder::Builder;
use common::{self, CrateContext, FunctionContext, C_null, Funclet};
use debuginfo::{self, declare_local, VariableAccess, VariableKind, FunctionDebugContext}; use debuginfo::{self, declare_local, VariableAccess, VariableKind, FunctionDebugContext};
use monomorphize::{self, Instance}; use monomorphize::{self, Instance};
use abi::FnType; use abi::FnType;
@ -106,7 +107,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
monomorphize::apply_param_substs(self.ccx.shared(), self.param_substs, value) monomorphize::apply_param_substs(self.ccx.shared(), self.param_substs, value)
} }
pub fn set_debug_loc(&mut self, bcx: &BlockAndBuilder, source_info: mir::SourceInfo) { pub fn set_debug_loc(&mut self, bcx: &Builder, source_info: mir::SourceInfo) {
let (scope, span) = self.debug_loc(source_info); let (scope, span) = self.debug_loc(source_info);
debuginfo::set_source_location(&self.debug_context, bcx, scope, span); debuginfo::set_source_location(&self.debug_context, bcx, scope, span);
} }
@ -258,7 +259,7 @@ pub fn trans_mir<'a, 'tcx: 'a>(
// User variable // User variable
let source_info = decl.source_info.unwrap(); let source_info = decl.source_info.unwrap();
let debug_scope = mircx.scopes[source_info.scope]; let debug_scope = mircx.scopes[source_info.scope];
let dbg = debug_scope.is_valid() && bcx.sess().opts.debuginfo == FullDebugInfo; let dbg = debug_scope.is_valid() && bcx.ccx.sess().opts.debuginfo == FullDebugInfo;
if !lvalue_locals.contains(local.index()) && !dbg { if !lvalue_locals.contains(local.index()) && !dbg {
debug!("alloc: {:?} ({}) -> operand", local, name); debug!("alloc: {:?} ({}) -> operand", local, name);
@ -266,7 +267,9 @@ pub fn trans_mir<'a, 'tcx: 'a>(
} }
debug!("alloc: {:?} ({}) -> lvalue", local, name); debug!("alloc: {:?} ({}) -> lvalue", local, name);
let lvalue = LvalueRef::alloca(&bcx, ty, &name.as_str()); assert!(!ty.has_erasable_regions());
let lltemp = bcx.alloca_ty(ty, &name.as_str());
let lvalue = LvalueRef::new_sized(lltemp, LvalueTy::from_ty(ty));
if dbg { if dbg {
let (scope, span) = mircx.debug_loc(source_info); let (scope, span) = mircx.debug_loc(source_info);
declare_local(&bcx, &mircx.debug_context, name, ty, scope, declare_local(&bcx, &mircx.debug_context, name, ty, scope,
@ -282,7 +285,9 @@ pub fn trans_mir<'a, 'tcx: 'a>(
LocalRef::Lvalue(LvalueRef::new_sized(llretptr, LvalueTy::from_ty(ty))) LocalRef::Lvalue(LvalueRef::new_sized(llretptr, LvalueTy::from_ty(ty)))
} else if lvalue_locals.contains(local.index()) { } else if lvalue_locals.contains(local.index()) {
debug!("alloc: {:?} -> lvalue", local); debug!("alloc: {:?} -> lvalue", local);
LocalRef::Lvalue(LvalueRef::alloca(&bcx, ty, &format!("{:?}", local))) assert!(!ty.has_erasable_regions());
let lltemp = bcx.alloca_ty(ty, &format!("{:?}", local));
LocalRef::Lvalue(LvalueRef::new_sized(lltemp, LvalueTy::from_ty(ty)))
} else { } else {
// If this is an immediate local, we do not create an // If this is an immediate local, we do not create an
// alloca in advance. Instead we wait until we see the // alloca in advance. Instead we wait until we see the
@ -347,20 +352,20 @@ pub fn trans_mir<'a, 'tcx: 'a>(
/// Produce, for each argument, a `ValueRef` pointing at the /// Produce, for each argument, a `ValueRef` pointing at the
/// argument's value. As arguments are lvalues, these are always /// argument's value. As arguments are lvalues, these are always
/// indirect. /// indirect.
fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
mircx: &MirContext<'a, 'tcx>, mircx: &MirContext<'a, 'tcx>,
scopes: &IndexVec<mir::VisibilityScope, debuginfo::MirDebugScope>, scopes: &IndexVec<mir::VisibilityScope, debuginfo::MirDebugScope>,
lvalue_locals: &BitVector) lvalue_locals: &BitVector)
-> Vec<LocalRef<'tcx>> { -> Vec<LocalRef<'tcx>> {
let mir = mircx.mir; let mir = mircx.mir;
let fcx = bcx.fcx(); let fcx = mircx.fcx;
let tcx = bcx.tcx(); let tcx = bcx.ccx.tcx();
let mut idx = 0; let mut idx = 0;
let mut llarg_idx = mircx.fn_ty.ret.is_indirect() as usize; let mut llarg_idx = mircx.fn_ty.ret.is_indirect() as usize;
// Get the argument scope, if it exists and if we need it. // Get the argument scope, if it exists and if we need it.
let arg_scope = scopes[mir::ARGUMENT_VISIBILITY_SCOPE]; let arg_scope = scopes[mir::ARGUMENT_VISIBILITY_SCOPE];
let arg_scope = if arg_scope.is_valid() && bcx.sess().opts.debuginfo == FullDebugInfo { let arg_scope = if arg_scope.is_valid() && bcx.ccx.sess().opts.debuginfo == FullDebugInfo {
Some(arg_scope.scope_metadata) Some(arg_scope.scope_metadata)
} else { } else {
None None
@ -381,7 +386,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
_ => bug!("spread argument isn't a tuple?!") _ => bug!("spread argument isn't a tuple?!")
}; };
let lltemp = base::alloc_ty(&bcx, arg_ty, &format!("arg{}", arg_index)); let lltemp = bcx.alloca_ty(arg_ty, &format!("arg{}", arg_index));
for (i, &tupled_arg_ty) in tupled_arg_tys.iter().enumerate() { for (i, &tupled_arg_ty) in tupled_arg_tys.iter().enumerate() {
let dst = bcx.struct_gep(lltemp, i); let dst = bcx.struct_gep(lltemp, i);
let arg = &mircx.fn_ty.args[idx]; let arg = &mircx.fn_ty.args[idx];
@ -420,7 +425,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
let arg = &mircx.fn_ty.args[idx]; let arg = &mircx.fn_ty.args[idx];
idx += 1; idx += 1;
let llval = if arg.is_indirect() && bcx.sess().opts.debuginfo != FullDebugInfo { let llval = if arg.is_indirect() && bcx.ccx.sess().opts.debuginfo != FullDebugInfo {
// Don't copy an indirect argument to an alloca; the caller // Don't copy an indirect argument to an alloca; the caller
// already put it in a temporary alloca and gave it up, unless // already put it in a temporary alloca and gave it up, unless
// we emit extra-debug-info, which requires local allocas :(. // we emit extra-debug-info, which requires local allocas :(.
@ -462,7 +467,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
}; };
return LocalRef::Operand(Some(operand.unpack_if_pair(bcx))); return LocalRef::Operand(Some(operand.unpack_if_pair(bcx)));
} else { } else {
let lltemp = base::alloc_ty(&bcx, arg_ty, &format!("arg{}", arg_index)); let lltemp = bcx.alloca_ty(arg_ty, &format!("arg{}", arg_index));
if common::type_is_fat_ptr(bcx.ccx, arg_ty) { if common::type_is_fat_ptr(bcx.ccx, arg_ty) {
// we pass fat pointers as two words, but we want to // we pass fat pointers as two words, but we want to
// represent them internally as a pointer to two words, // represent them internally as a pointer to two words,

View File

@ -14,7 +14,8 @@ use rustc::mir;
use rustc_data_structures::indexed_vec::Idx; use rustc_data_structures::indexed_vec::Idx;
use base; use base;
use common::{self, BlockAndBuilder}; use common;
use builder::Builder;
use value::Value; use value::Value;
use type_of; use type_of;
use type_::Type; use type_::Type;
@ -85,8 +86,7 @@ impl<'a, 'tcx> OperandRef<'tcx> {
/// If this operand is a Pair, we return an /// If this operand is a Pair, we return an
/// Immediate aggregate with the two values. /// Immediate aggregate with the two values.
pub fn pack_if_pair(mut self, bcx: &BlockAndBuilder<'a, 'tcx>) pub fn pack_if_pair(mut self, bcx: &Builder<'a, 'tcx>) -> OperandRef<'tcx> {
-> OperandRef<'tcx> {
if let OperandValue::Pair(a, b) = self.val { if let OperandValue::Pair(a, b) = self.val {
// Reconstruct the immediate aggregate. // Reconstruct the immediate aggregate.
let llty = type_of::type_of(bcx.ccx, self.ty); let llty = type_of::type_of(bcx.ccx, self.ty);
@ -107,8 +107,7 @@ impl<'a, 'tcx> OperandRef<'tcx> {
/// If this operand is a pair in an Immediate, /// If this operand is a pair in an Immediate,
/// we return a Pair with the two halves. /// we return a Pair with the two halves.
pub fn unpack_if_pair(mut self, bcx: &BlockAndBuilder<'a, 'tcx>) pub fn unpack_if_pair(mut self, bcx: &Builder<'a, 'tcx>) -> OperandRef<'tcx> {
-> OperandRef<'tcx> {
if let OperandValue::Immediate(llval) = self.val { if let OperandValue::Immediate(llval) = self.val {
// Deconstruct the immediate aggregate. // Deconstruct the immediate aggregate.
if common::type_is_imm_pair(bcx.ccx, self.ty) { if common::type_is_imm_pair(bcx.ccx, self.ty) {
@ -136,7 +135,7 @@ impl<'a, 'tcx> OperandRef<'tcx> {
impl<'a, 'tcx> MirContext<'a, 'tcx> { impl<'a, 'tcx> MirContext<'a, 'tcx> {
pub fn trans_load(&mut self, pub fn trans_load(&mut self,
bcx: &BlockAndBuilder<'a, 'tcx>, bcx: &Builder<'a, 'tcx>,
llval: ValueRef, llval: ValueRef,
ty: Ty<'tcx>) ty: Ty<'tcx>)
-> OperandRef<'tcx> -> OperandRef<'tcx>
@ -165,7 +164,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
} }
pub fn trans_consume(&mut self, pub fn trans_consume(&mut self,
bcx: &BlockAndBuilder<'a, 'tcx>, bcx: &Builder<'a, 'tcx>,
lvalue: &mir::Lvalue<'tcx>) lvalue: &mir::Lvalue<'tcx>)
-> OperandRef<'tcx> -> OperandRef<'tcx>
{ {
@ -212,12 +211,12 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
// for most lvalues, to consume them we just load them // for most lvalues, to consume them we just load them
// out from their home // out from their home
let tr_lvalue = self.trans_lvalue(bcx, lvalue); let tr_lvalue = self.trans_lvalue(bcx, lvalue);
let ty = tr_lvalue.ty.to_ty(bcx.tcx()); let ty = tr_lvalue.ty.to_ty(bcx.ccx.tcx());
self.trans_load(bcx, tr_lvalue.llval, ty) self.trans_load(bcx, tr_lvalue.llval, ty)
} }
pub fn trans_operand(&mut self, pub fn trans_operand(&mut self,
bcx: &BlockAndBuilder<'a, 'tcx>, bcx: &Builder<'a, 'tcx>,
operand: &mir::Operand<'tcx>) operand: &mir::Operand<'tcx>)
-> OperandRef<'tcx> -> OperandRef<'tcx>
{ {
@ -242,7 +241,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
} }
pub fn store_operand(&mut self, pub fn store_operand(&mut self,
bcx: &BlockAndBuilder<'a, 'tcx>, bcx: &Builder<'a, 'tcx>,
lldest: ValueRef, lldest: ValueRef,
operand: OperandRef<'tcx>, operand: OperandRef<'tcx>,
align: Option<u32>) { align: Option<u32>) {

View File

@ -17,8 +17,9 @@ use middle::lang_items::ExchangeMallocFnLangItem;
use asm; use asm;
use base; use base;
use builder::Builder;
use callee::Callee; use callee::Callee;
use common::{self, val_ty, C_bool, C_null, C_uint, BlockAndBuilder}; use common::{self, val_ty, C_bool, C_null, C_uint};
use common::{C_integral}; use common::{C_integral};
use adt; use adt;
use machine; use machine;
@ -35,10 +36,10 @@ use super::lvalue::{LvalueRef};
impl<'a, 'tcx> MirContext<'a, 'tcx> { impl<'a, 'tcx> MirContext<'a, 'tcx> {
pub fn trans_rvalue(&mut self, pub fn trans_rvalue(&mut self,
bcx: BlockAndBuilder<'a, 'tcx>, bcx: Builder<'a, 'tcx>,
dest: LvalueRef<'tcx>, dest: LvalueRef<'tcx>,
rvalue: &mir::Rvalue<'tcx>) rvalue: &mir::Rvalue<'tcx>)
-> BlockAndBuilder<'a, 'tcx> -> Builder<'a, 'tcx>
{ {
debug!("trans_rvalue(dest.llval={:?}, rvalue={:?})", debug!("trans_rvalue(dest.llval={:?}, rvalue={:?})",
Value(dest.llval), rvalue); Value(dest.llval), rvalue);
@ -79,7 +80,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
// index into the struct, and this case isn't // index into the struct, and this case isn't
// important enough for it. // important enough for it.
debug!("trans_rvalue: creating ugly alloca"); debug!("trans_rvalue: creating ugly alloca");
let lltemp = base::alloc_ty(&bcx, operand.ty, "__unsize_temp"); let lltemp = bcx.alloca_ty(operand.ty, "__unsize_temp");
base::store_ty(&bcx, llval, lltemp, operand.ty); base::store_ty(&bcx, llval, lltemp, operand.ty);
lltemp lltemp
} }
@ -91,7 +92,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
mir::Rvalue::Repeat(ref elem, ref count) => { mir::Rvalue::Repeat(ref elem, ref count) => {
let tr_elem = self.trans_operand(&bcx, elem); let tr_elem = self.trans_operand(&bcx, elem);
let size = count.value.as_u64(bcx.tcx().sess.target.uint_type); let size = count.value.as_u64(bcx.ccx.tcx().sess.target.uint_type);
let size = C_uint(bcx.ccx, size); let size = C_uint(bcx.ccx, size);
let base = base::get_dataptr(&bcx, dest.llval); let base = base::get_dataptr(&bcx, dest.llval);
tvec::slice_for_each(&bcx, base, tr_elem.ty, size, |bcx, llslot| { tvec::slice_for_each(&bcx, base, tr_elem.ty, size, |bcx, llslot| {
@ -103,7 +104,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
match *kind { match *kind {
mir::AggregateKind::Adt(adt_def, variant_index, _, active_field_index) => { mir::AggregateKind::Adt(adt_def, variant_index, _, active_field_index) => {
let disr = Disr::from(adt_def.variants[variant_index].disr_val); let disr = Disr::from(adt_def.variants[variant_index].disr_val);
let dest_ty = dest.ty.to_ty(bcx.tcx()); let dest_ty = dest.ty.to_ty(bcx.ccx.tcx());
adt::trans_set_discr(&bcx, dest_ty, dest.llval, Disr::from(disr)); adt::trans_set_discr(&bcx, dest_ty, dest.llval, Disr::from(disr));
for (i, operand) in operands.iter().enumerate() { for (i, operand) in operands.iter().enumerate() {
let op = self.trans_operand(&bcx, operand); let op = self.trans_operand(&bcx, operand);
@ -119,7 +120,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
}, },
_ => { _ => {
// If this is a tuple or closure, we need to translate GEP indices. // If this is a tuple or closure, we need to translate GEP indices.
let layout = bcx.ccx.layout_of(dest.ty.to_ty(bcx.tcx())); let layout = bcx.ccx.layout_of(dest.ty.to_ty(bcx.ccx.tcx()));
let translation = if let Layout::Univariant { ref variant, .. } = *layout { let translation = if let Layout::Univariant { ref variant, .. } = *layout {
Some(&variant.memory_index) Some(&variant.memory_index)
} else { } else {
@ -149,7 +150,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
mir::Rvalue::InlineAsm { ref asm, ref outputs, ref inputs } => { mir::Rvalue::InlineAsm { ref asm, ref outputs, ref inputs } => {
let outputs = outputs.iter().map(|output| { let outputs = outputs.iter().map(|output| {
let lvalue = self.trans_lvalue(&bcx, output); let lvalue = self.trans_lvalue(&bcx, output);
(lvalue.llval, lvalue.ty.to_ty(bcx.tcx())) (lvalue.llval, lvalue.ty.to_ty(bcx.ccx.tcx()))
}).collect(); }).collect();
let input_vals = inputs.iter().map(|input| { let input_vals = inputs.iter().map(|input| {
@ -170,9 +171,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
} }
pub fn trans_rvalue_operand(&mut self, pub fn trans_rvalue_operand(&mut self,
bcx: BlockAndBuilder<'a, 'tcx>, bcx: Builder<'a, 'tcx>,
rvalue: &mir::Rvalue<'tcx>) rvalue: &mir::Rvalue<'tcx>)
-> (BlockAndBuilder<'a, 'tcx>, OperandRef<'tcx>) -> (Builder<'a, 'tcx>, OperandRef<'tcx>)
{ {
assert!(rvalue_creates_operand(rvalue), "cannot trans {:?} to operand", rvalue); assert!(rvalue_creates_operand(rvalue), "cannot trans {:?} to operand", rvalue);
@ -344,9 +345,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
mir::Rvalue::Ref(_, bk, ref lvalue) => { mir::Rvalue::Ref(_, bk, ref lvalue) => {
let tr_lvalue = self.trans_lvalue(&bcx, lvalue); let tr_lvalue = self.trans_lvalue(&bcx, lvalue);
let ty = tr_lvalue.ty.to_ty(bcx.tcx()); let ty = tr_lvalue.ty.to_ty(bcx.ccx.tcx());
let ref_ty = bcx.tcx().mk_ref( let ref_ty = bcx.ccx.tcx().mk_ref(
bcx.tcx().mk_region(ty::ReErased), bcx.ccx.tcx().mk_region(ty::ReErased),
ty::TypeAndMut { ty: ty, mutbl: bk.to_mutbl_lossy() } ty::TypeAndMut { ty: ty, mutbl: bk.to_mutbl_lossy() }
); );
@ -371,7 +372,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let tr_lvalue = self.trans_lvalue(&bcx, lvalue); let tr_lvalue = self.trans_lvalue(&bcx, lvalue);
let operand = OperandRef { let operand = OperandRef {
val: OperandValue::Immediate(tr_lvalue.len(bcx.ccx)), val: OperandValue::Immediate(tr_lvalue.len(bcx.ccx)),
ty: bcx.tcx().types.usize, ty: bcx.ccx.tcx().types.usize,
}; };
(bcx, operand) (bcx, operand)
} }
@ -398,7 +399,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
}; };
let operand = OperandRef { let operand = OperandRef {
val: OperandValue::Immediate(llresult), val: OperandValue::Immediate(llresult),
ty: op.ty(bcx.tcx(), lhs.ty, rhs.ty), ty: op.ty(bcx.ccx.tcx(), lhs.ty, rhs.ty),
}; };
(bcx, operand) (bcx, operand)
} }
@ -408,8 +409,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let result = self.trans_scalar_checked_binop(&bcx, op, let result = self.trans_scalar_checked_binop(&bcx, op,
lhs.immediate(), rhs.immediate(), lhs.immediate(), rhs.immediate(),
lhs.ty); lhs.ty);
let val_ty = op.ty(bcx.tcx(), lhs.ty, rhs.ty); let val_ty = op.ty(bcx.ccx.tcx(), lhs.ty, rhs.ty);
let operand_ty = bcx.tcx().intern_tup(&[val_ty, bcx.tcx().types.bool]); let operand_ty = bcx.ccx.tcx().intern_tup(&[val_ty, bcx.ccx.tcx().types.bool]);
let operand = OperandRef { let operand = OperandRef {
val: result, val: result,
ty: operand_ty ty: operand_ty
@ -443,16 +444,16 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let align = type_of::align_of(bcx.ccx, content_ty); let align = type_of::align_of(bcx.ccx, content_ty);
let llalign = C_uint(bcx.ccx, align); let llalign = C_uint(bcx.ccx, align);
let llty_ptr = llty.ptr_to(); let llty_ptr = llty.ptr_to();
let box_ty = bcx.tcx().mk_box(content_ty); let box_ty = bcx.ccx.tcx().mk_box(content_ty);
// Allocate space: // Allocate space:
let def_id = match bcx.tcx().lang_items.require(ExchangeMallocFnLangItem) { let def_id = match bcx.ccx.tcx().lang_items.require(ExchangeMallocFnLangItem) {
Ok(id) => id, Ok(id) => id,
Err(s) => { Err(s) => {
bcx.sess().fatal(&format!("allocation of `{}` {}", box_ty, s)); bcx.ccx.sess().fatal(&format!("allocation of `{}` {}", box_ty, s));
} }
}; };
let r = Callee::def(bcx.ccx, def_id, bcx.tcx().intern_substs(&[])) let r = Callee::def(bcx.ccx, def_id, bcx.ccx.tcx().intern_substs(&[]))
.reify(bcx.ccx); .reify(bcx.ccx);
let val = bcx.pointercast(bcx.call(r, &[llsize, llalign], None), llty_ptr); let val = bcx.pointercast(bcx.call(r, &[llsize, llalign], None), llty_ptr);
@ -477,7 +478,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
} }
pub fn trans_scalar_binop(&mut self, pub fn trans_scalar_binop(&mut self,
bcx: &BlockAndBuilder<'a, 'tcx>, bcx: &Builder<'a, 'tcx>,
op: mir::BinOp, op: mir::BinOp,
lhs: ValueRef, lhs: ValueRef,
rhs: ValueRef, rhs: ValueRef,
@ -552,7 +553,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
} }
pub fn trans_fat_ptr_binop(&mut self, pub fn trans_fat_ptr_binop(&mut self,
bcx: &BlockAndBuilder<'a, 'tcx>, bcx: &Builder<'a, 'tcx>,
op: mir::BinOp, op: mir::BinOp,
lhs_addr: ValueRef, lhs_addr: ValueRef,
lhs_extra: ValueRef, lhs_extra: ValueRef,
@ -599,7 +600,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
} }
pub fn trans_scalar_checked_binop(&mut self, pub fn trans_scalar_checked_binop(&mut self,
bcx: &BlockAndBuilder<'a, 'tcx>, bcx: &Builder<'a, 'tcx>,
op: mir::BinOp, op: mir::BinOp,
lhs: ValueRef, lhs: ValueRef,
rhs: ValueRef, rhs: ValueRef,
@ -617,7 +618,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
// will only succeed if both operands are constant. // will only succeed if both operands are constant.
// This is necessary to determine when an overflow Assert // This is necessary to determine when an overflow Assert
// will always panic at runtime, and produce a warning. // will always panic at runtime, and produce a warning.
if let Some((val, of)) = const_scalar_checked_binop(bcx.tcx(), op, lhs, rhs, input_ty) { if let Some((val, of)) = const_scalar_checked_binop(bcx.ccx.tcx(), op, lhs, rhs, input_ty) {
return OperandValue::Pair(val, C_bool(bcx.ccx, of)); return OperandValue::Pair(val, C_bool(bcx.ccx, of));
} }
@ -681,12 +682,12 @@ enum OverflowOp {
Add, Sub, Mul Add, Sub, Mul
} }
fn get_overflow_intrinsic(oop: OverflowOp, bcx: &BlockAndBuilder, ty: Ty) -> ValueRef { fn get_overflow_intrinsic(oop: OverflowOp, bcx: &Builder, ty: Ty) -> ValueRef {
use syntax::ast::IntTy::*; use syntax::ast::IntTy::*;
use syntax::ast::UintTy::*; use syntax::ast::UintTy::*;
use rustc::ty::{TyInt, TyUint}; use rustc::ty::{TyInt, TyUint};
let tcx = bcx.tcx(); let tcx = bcx.ccx.tcx();
let new_sty = match ty.sty { let new_sty = match ty.sty {
TyInt(Is) => match &tcx.sess.target.target.target_pointer_width[..] { TyInt(Is) => match &tcx.sess.target.target.target_pointer_width[..] {
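`get_overflow_intrinsic` (only its builder parameter and `tcx` access change here) maps the operation, signedness, and bit width onto LLVM's checked-arithmetic intrinsics, with `isize`/`usize` first resolved through the target's pointer width as in the `TyInt(Is)` arm above. A runnable sketch of the name computation:

    enum OverflowOp { Add, Sub, Mul }

    fn overflow_intrinsic_name(oop: OverflowOp, signed: bool, bits: u32) -> String {
        let op = match (oop, signed) {
            (OverflowOp::Add, true)  => "sadd",
            (OverflowOp::Add, false) => "uadd",
            (OverflowOp::Sub, true)  => "ssub",
            (OverflowOp::Sub, false) => "usub",
            (OverflowOp::Mul, true)  => "smul",
            (OverflowOp::Mul, false) => "umul",
        };
        format!("llvm.{}.with.overflow.i{}", op, bits)
    }

    fn main() {
        // isize on a 64-bit target resolves to i64 before the lookup.
        assert_eq!(overflow_intrinsic_name(OverflowOp::Add, true, 64),
                   "llvm.sadd.with.overflow.i64");
    }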

View File

@ -11,7 +11,8 @@
use rustc::mir; use rustc::mir;
use base; use base;
use common::{self, BlockAndBuilder}; use common;
use builder::Builder;
use super::MirContext; use super::MirContext;
use super::LocalRef; use super::LocalRef;
@ -20,9 +21,9 @@ use super::super::disr::Disr;
impl<'a, 'tcx> MirContext<'a, 'tcx> { impl<'a, 'tcx> MirContext<'a, 'tcx> {
pub fn trans_statement(&mut self, pub fn trans_statement(&mut self,
bcx: BlockAndBuilder<'a, 'tcx>, bcx: Builder<'a, 'tcx>,
statement: &mir::Statement<'tcx>) statement: &mir::Statement<'tcx>)
-> BlockAndBuilder<'a, 'tcx> { -> Builder<'a, 'tcx> {
debug!("trans_statement(statement={:?})", statement); debug!("trans_statement(statement={:?})", statement);
self.set_debug_loc(&bcx, statement.source_info); self.set_debug_loc(&bcx, statement.source_info);
@ -77,10 +78,10 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
} }
fn trans_storage_liveness(&self, fn trans_storage_liveness(&self,
bcx: BlockAndBuilder<'a, 'tcx>, bcx: Builder<'a, 'tcx>,
lvalue: &mir::Lvalue<'tcx>, lvalue: &mir::Lvalue<'tcx>,
intrinsic: base::Lifetime) intrinsic: base::Lifetime)
-> BlockAndBuilder<'a, 'tcx> { -> Builder<'a, 'tcx> {
if let mir::Lvalue::Local(index) = *lvalue { if let mir::Lvalue::Local(index) = *lvalue {
if let LocalRef::Lvalue(tr_lval) = self.locals[index] { if let LocalRef::Lvalue(tr_lval) = self.locals[index] {
intrinsic.call(&bcx, tr_lval.llval); intrinsic.call(&bcx, tr_lval.llval);

View File

@ -9,28 +9,29 @@
// except according to those terms. // except according to those terms.
use llvm; use llvm;
use builder::Builder;
use llvm::ValueRef; use llvm::ValueRef;
use common::*; use common::*;
use rustc::ty::Ty; use rustc::ty::Ty;
pub fn slice_for_each<'a, 'tcx, F>( pub fn slice_for_each<'a, 'tcx, F>(
bcx: &BlockAndBuilder<'a, 'tcx>, bcx: &Builder<'a, 'tcx>,
data_ptr: ValueRef, data_ptr: ValueRef,
unit_ty: Ty<'tcx>, unit_ty: Ty<'tcx>,
len: ValueRef, len: ValueRef,
f: F f: F
) -> BlockAndBuilder<'a, 'tcx> where F: FnOnce(&BlockAndBuilder<'a, 'tcx>, ValueRef) { ) -> Builder<'a, 'tcx> where F: FnOnce(&Builder<'a, 'tcx>, ValueRef) {
// Special-case vectors with elements of size 0 so they don't go out of bounds (#9890) // Special-case vectors with elements of size 0 so they don't go out of bounds (#9890)
let zst = type_is_zero_size(bcx.ccx, unit_ty); let zst = type_is_zero_size(bcx.ccx, unit_ty);
let add = |bcx: &BlockAndBuilder, a, b| if zst { let add = |bcx: &Builder, a, b| if zst {
bcx.add(a, b) bcx.add(a, b)
} else { } else {
bcx.inbounds_gep(a, &[b]) bcx.inbounds_gep(a, &[b])
}; };
let body_bcx = bcx.fcx().build_new_block("slice_loop_body"); let body_bcx = bcx.build_new_block("slice_loop_body");
let next_bcx = bcx.fcx().build_new_block("slice_loop_next"); let next_bcx = bcx.build_new_block("slice_loop_next");
let header_bcx = bcx.fcx().build_new_block("slice_loop_header"); let header_bcx = bcx.build_new_block("slice_loop_header");
let start = if zst { let start = if zst {
C_uint(bcx.ccx, 0usize) C_uint(bcx.ccx, 0usize)
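`slice_for_each` now spawns its `slice_loop_header`, `slice_loop_body`, and `slice_loop_next` blocks from the builder itself. The `zst` special case it guards (#9890): for zero-sized element types the loop cursor is a plain counter, since an in-bounds GEP would never advance, while for sized elements the cursor is a pointer bumped by `inbounds_gep`. A runnable analogy of the lowered loop:

    fn slice_for_each<T>(data: &[T], mut f: impl FnMut(*const T)) {
        let zst = std::mem::size_of::<T>() == 0;
        if zst {
            // counter-based loop: the header compares i != len
            let mut i = 0usize;
            while i != data.len() {
                f(data.as_ptr()); // every ZST element has the same address
                i += 1;           // bcx.add(a, b)
            }
        } else {
            // pointer-based loop: the header compares cursor != end
            let mut p = data.as_ptr();
            let end = unsafe { p.add(data.len()) }; // bcx.inbounds_gep
            while p != end {
                f(p);
                p = unsafe { p.add(1) };
            }
        }
    }

    fn main() {
        let xs = [1, 2, 3];
        let mut sum = 0;
        slice_for_each(&xs, |p| sum += unsafe { *p });
        assert_eq!(sum, 6);
    }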

View File

@ -24,8 +24,8 @@ pub struct Bytes {
// dependent alignment // dependent alignment
#[no_mangle] #[no_mangle]
pub fn small_array_alignment(x: &mut [i8; 4], y: [i8; 4]) { pub fn small_array_alignment(x: &mut [i8; 4], y: [i8; 4]) {
// CHECK: %arg1 = alloca [4 x i8]
// CHECK: [[TMP:%.+]] = alloca i32 // CHECK: [[TMP:%.+]] = alloca i32
// CHECK: %arg1 = alloca [4 x i8]
// CHECK: store i32 %1, i32* [[TMP]] // CHECK: store i32 %1, i32* [[TMP]]
// CHECK: [[Y8:%[0-9]+]] = bitcast [4 x i8]* %arg1 to i8* // CHECK: [[Y8:%[0-9]+]] = bitcast [4 x i8]* %arg1 to i8*
// CHECK: [[TMP8:%[0-9]+]] = bitcast i32* [[TMP]] to i8* // CHECK: [[TMP8:%[0-9]+]] = bitcast i32* [[TMP]] to i8*
@ -38,8 +38,8 @@ pub fn small_array_alignment(x: &mut [i8; 4], y: [i8; 4]) {
// dependent alignment // dependent alignment
#[no_mangle] #[no_mangle]
pub fn small_struct_alignment(x: &mut Bytes, y: Bytes) { pub fn small_struct_alignment(x: &mut Bytes, y: Bytes) {
// CHECK: %arg1 = alloca %Bytes
// CHECK: [[TMP:%.+]] = alloca i32 // CHECK: [[TMP:%.+]] = alloca i32
// CHECK: %arg1 = alloca %Bytes
// CHECK: store i32 %1, i32* [[TMP]] // CHECK: store i32 %1, i32* [[TMP]]
// CHECK: [[Y8:%[0-9]+]] = bitcast %Bytes* %arg1 to i8* // CHECK: [[Y8:%[0-9]+]] = bitcast %Bytes* %arg1 to i8*
// CHECK: [[TMP8:%[0-9]+]] = bitcast i32* [[TMP]] to i8* // CHECK: [[TMP8:%[0-9]+]] = bitcast i32* [[TMP]] to i8*