Mirror of https://github.com/rust-lang/rust.git (synced 2025-02-20 10:55:14 +00:00)
Replace build.rs with calling functions on builder directly
This commit is contained in:
parent 3f17ab9618
commit 59ef51c12a
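The change is mechanical: the free functions in build.rs were thin wrappers that forwarded to methods on the block's builder, so every call site now invokes the builder method directly. A minimal illustrative sketch of the before/after shape, using names that appear in the hunks below:

    // Before: free-function wrappers from build.rs.
    let ptr = StructGEP(bcx, val, 0);
    Store(bcx, C_null(llty), ptr);

    // After: the same LLVM instructions, emitted via builder methods.
    let ptr = bcx.struct_gep(val, 0);
    bcx.store(C_null(llty), ptr);

Because the wrappers were also responsible for applying a DebugLoc before emitting, many helper signatures below lose their DebugLoc parameter, and call/invoke sites now pass the landing-pad bundle explicitly as bcx.lpad().and_then(|b| b.bundle()).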
--- a/src/librustc_trans/abi.rs
+++ b/src/librustc_trans/abi.rs
@@ -10,7 +10,6 @@

 use llvm::{self, ValueRef, Integer, Pointer, Float, Double, Struct, Array, Vector, AttributePlace};
 use base;
-use build::AllocaFcx;
 use common::{type_is_fat_ptr, BlockAndBuilder, C_uint};
 use context::CrateContext;
 use cabi_x86;
@@ -278,7 +277,7 @@ impl ArgType {
 // bitcasting to the struct type yields invalid cast errors.

 // We instead thus allocate some scratch space...
-let llscratch = AllocaFcx(bcx.fcx(), ty, "abi_cast");
+let llscratch = bcx.fcx().alloca(ty, "abi_cast");
 base::Lifetime::Start.call(bcx, llscratch);

 // ...where we first store the value...
--- a/src/librustc_trans/adt.rs
+++ b/src/librustc_trans/adt.rs
@@ -48,7 +48,6 @@ use std;
 use llvm::{ValueRef, True, IntEQ, IntNE};
 use rustc::ty::layout;
 use rustc::ty::{self, Ty, AdtKind};
-use build::*;
 use common::*;
 use debuginfo::DebugLoc;
 use glue;
@@ -348,7 +347,7 @@ pub fn trans_get_discr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, t: Ty<'tcx
 load_discr(bcx, discr, scrutinee, min, max, range_assert)
 }
 layout::General { discr, .. } => {
-let ptr = StructGEP(bcx, scrutinee, 0);
+let ptr = bcx.struct_gep(scrutinee, 0);
 load_discr(bcx, discr, ptr, 0, def.variants.len() as u64 - 1,
 range_assert)
 }
@@ -358,7 +357,7 @@ pub fn trans_get_discr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, t: Ty<'tcx
 let llptrty = type_of::sizing_type_of(bcx.ccx(),
 monomorphize::field_ty(bcx.ccx().tcx(), substs,
 &def.variants[nndiscr as usize].fields[0]));
-ICmp(bcx, cmp, Load(bcx, scrutinee), C_null(llptrty), DebugLoc::None)
+bcx.icmp(cmp, bcx.load(scrutinee), C_null(llptrty))
 }
 layout::StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => {
 struct_wrapped_nullable_bitdiscr(bcx, nndiscr, discrfield, scrutinee)
@@ -367,7 +366,7 @@ pub fn trans_get_discr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, t: Ty<'tcx
 };
 match cast_to {
 None => val,
-Some(llty) => if is_discr_signed(&l) { SExt(bcx, val, llty) } else { ZExt(bcx, val, llty) }
+Some(llty) => if is_discr_signed(&l) { bcx.sext(val, llty) } else { bcx.zext(val, llty) }
 }
 }

@@ -377,11 +376,11 @@ fn struct_wrapped_nullable_bitdiscr(
 discrfield: &layout::FieldPath,
 scrutinee: ValueRef
 ) -> ValueRef {
-let llptrptr = GEPi(bcx, scrutinee,
+let llptrptr = bcx.gepi(scrutinee,
 &discrfield.iter().map(|f| *f as usize).collect::<Vec<_>>()[..]);
-let llptr = Load(bcx, llptrptr);
+let llptr = bcx.load(llptrptr);
 let cmp = if nndiscr == 0 { IntEQ } else { IntNE };
-ICmp(bcx, cmp, llptr, C_null(val_ty(llptr)), DebugLoc::None)
+bcx.icmp(cmp, llptr, C_null(val_ty(llptr)))
 }

 /// Helper for cases where the discriminant is simply loaded.
@@ -401,11 +400,11 @@ fn load_discr(bcx: &BlockAndBuilder, ity: layout::Integer, ptr: ValueRef, min: u
 // rejected by the LLVM verifier (it would mean either an
 // empty set, which is impossible, or the entire range of the
 // type, which is pointless).
-Load(bcx, ptr)
+bcx.load(ptr)
 } else {
 // llvm::ConstantRange can deal with ranges that wrap around,
 // so an overflow on (max + 1) is fine.
-LoadRangeAssert(bcx, ptr, min, max.wrapping_add(1), /* signed: */ True)
+bcx.load_range_assert(ptr, min, max.wrapping_add(1), /* signed: */ True)
 }
 }

@@ -440,12 +439,12 @@ pub fn trans_set_discr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, t: Ty<'tcx
 match *l {
 layout::CEnum{ discr, min, max, .. } => {
 assert_discr_in_range(Disr(min), Disr(max), to);
-Store(bcx, C_integral(Type::from_integer(bcx.ccx(), discr), to.0, true),
+bcx.store(C_integral(Type::from_integer(bcx.ccx(), discr), to.0, true),
 val);
 }
 layout::General{ discr, .. } => {
-Store(bcx, C_integral(Type::from_integer(bcx.ccx(), discr), to.0, true),
-StructGEP(bcx, val, 0));
+bcx.store(C_integral(Type::from_integer(bcx.ccx(), discr), to.0, true),
+bcx.struct_gep(val, 0));
 }
 layout::Univariant { .. }
 | layout::UntaggedUnion { .. }
@@ -456,7 +455,7 @@ pub fn trans_set_discr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, t: Ty<'tcx
 let nnty = compute_fields(bcx.ccx(), t, nndiscr as usize, false)[0];
 if to.0 != nndiscr {
 let llptrty = type_of::sizing_type_of(bcx.ccx(), nnty);
-Store(bcx, C_null(llptrty), val);
+bcx.store(C_null(llptrty), val);
 }
 }
 layout::StructWrappedNullablePointer { nndiscr, ref discrfield, ref nonnull, .. } => {
@@ -472,9 +471,9 @@ pub fn trans_set_discr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, t: Ty<'tcx
 base::call_memset(bcx, llptr, fill_byte, size, align, false);
 } else {
 let path = discrfield.iter().map(|&i| i as usize).collect::<Vec<_>>();
-let llptrptr = GEPi(bcx, val, &path[..]);
+let llptrptr = bcx.gepi(val, &path[..]);
 let llptrty = val_ty(llptrptr).element_type();
-Store(bcx, C_null(llptrty), llptrptr);
+bcx.store(C_null(llptrty), llptrptr);
 }
 }
 }
--- a/src/librustc_trans/asm.rs
+++ b/src/librustc_trans/asm.rs
@@ -12,7 +12,6 @@

 use llvm::{self, ValueRef};
 use base;
-use build::*;
 use common::*;
 use type_of;
 use type_::Type;
@@ -90,20 +89,21 @@ pub fn trans_inline_asm<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,

 let asm = CString::new(ia.asm.as_str().as_bytes()).unwrap();
 let constraint_cstr = CString::new(all_constraints).unwrap();
-let r = InlineAsmCall(bcx,
-asm.as_ptr(),
-constraint_cstr.as_ptr(),
-&inputs,
-output_type,
-ia.volatile,
-ia.alignstack,
-dialect);
+let r = bcx.inline_asm_call(
+asm.as_ptr(),
+constraint_cstr.as_ptr(),
+&inputs,
+output_type,
+ia.volatile,
+ia.alignstack,
+dialect
+);

 // Again, based on how many outputs we have
 let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect);
 for (i, (_, &(val, _))) in outputs.enumerate() {
-let v = if num_outputs == 1 { r } else { ExtractValue(bcx, r, i) };
-Store(bcx, v, val);
+let v = if num_outputs == 1 { r } else { bcx.extract_value(r, i) };
+bcx.store(v, val);
 }

 // Store expn_id in a metadata node so we can map LLVM errors
--- a/src/librustc_trans/base.rs
+++ b/src/librustc_trans/base.rs
@@ -51,7 +51,6 @@ use session::{self, DataTypeKind, Session};
 use abi::{self, Abi, FnType};
 use adt;
 use attributes;
-use build::*;
 use builder::{Builder, noname};
 use callee::{Callee};
 use common::{BlockAndBuilder, C_bool, C_bytes_in_context, C_i32, C_uint};
@@ -174,11 +173,11 @@ impl<'a, 'tcx> Drop for StatRecorder<'a, 'tcx> {
 }

 pub fn get_meta(bcx: &BlockAndBuilder, fat_ptr: ValueRef) -> ValueRef {
-StructGEP(bcx, fat_ptr, abi::FAT_PTR_EXTRA)
+bcx.struct_gep(fat_ptr, abi::FAT_PTR_EXTRA)
 }

 pub fn get_dataptr(bcx: &BlockAndBuilder, fat_ptr: ValueRef) -> ValueRef {
-StructGEP(bcx, fat_ptr, abi::FAT_PTR_ADDR)
+bcx.struct_gep(fat_ptr, abi::FAT_PTR_ADDR)
 }

 pub fn get_meta_builder(b: &Builder, fat_ptr: ValueRef) -> ValueRef {
@@ -207,15 +206,14 @@ pub fn malloc_raw_dyn<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
 llty_ptr: Type,
 info_ty: Ty<'tcx>,
 size: ValueRef,
-align: ValueRef,
-debug_loc: DebugLoc)
+align: ValueRef)
 -> ValueRef {
 let _icx = push_ctxt("malloc_raw_exchange");

 // Allocate space:
 let def_id = require_alloc_fn(bcx, info_ty, ExchangeMallocFnLangItem);
 let r = Callee::def(bcx.ccx(), def_id, bcx.tcx().intern_substs(&[])).reify(bcx.ccx());
-PointerCast(bcx, Call(bcx, r, &[size, align], debug_loc), llty_ptr)
+bcx.pointercast(bcx.call(r, &[size, align], bcx.lpad().and_then(|b| b.bundle())), llty_ptr)
 }


@@ -258,13 +256,12 @@ pub fn compare_simd_types<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
 rhs: ValueRef,
 t: Ty<'tcx>,
 ret_ty: Type,
-op: hir::BinOp_,
-debug_loc: DebugLoc)
+op: hir::BinOp_)
 -> ValueRef {
 let signed = match t.sty {
 ty::TyFloat(_) => {
 let cmp = bin_op_to_fcmp_predicate(op);
-return SExt(bcx, FCmp(bcx, cmp, lhs, rhs, debug_loc), ret_ty);
+return bcx.sext(bcx.fcmp(cmp, lhs, rhs), ret_ty);
 },
 ty::TyUint(_) => false,
 ty::TyInt(_) => true,
@@ -276,7 +273,7 @@ pub fn compare_simd_types<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
 // to get the correctly sized type. This will compile to a single instruction
 // once the IR is converted to assembly if the SIMD instruction is supported
 // by the target architecture.
-SExt(bcx, ICmp(bcx, cmp, lhs, rhs, debug_loc), ret_ty)
+bcx.sext(bcx.icmp(cmp, lhs, rhs), ret_ty)
 }

 /// Retrieve the information we are losing (making dynamic) in an unsizing
@@ -326,8 +323,7 @@ pub fn unsize_thin_ptr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
 &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) => {
 assert!(common::type_is_sized(bcx.tcx(), a));
 let ptr_ty = type_of::in_memory_type_of(bcx.ccx(), b).ptr_to();
-(PointerCast(bcx, src, ptr_ty),
-unsized_info(bcx.ccx(), a, b, None))
+(bcx.pointercast(src, ptr_ty), unsized_info(bcx.ccx(), a, b, None))
 }
 _ => bug!("unsize_thin_ptr: called on bad types"),
 }
@@ -352,7 +348,7 @@ pub fn coerce_unsized_into<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
 // the types match up.
 let (base, info) = load_fat_ptr(bcx, src, src_ty);
 let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx(), dst_ty);
-let base = PointerCast(bcx, base, llcast_ty);
+let base = bcx.pointercast(base, llcast_ty);
 (base, info)
 } else {
 let base = load_ty(bcx, src, src_ty);
@@ -414,8 +410,10 @@ pub fn custom_coerce_unsize_info<'scx, 'tcx>(scx: &SharedCrateContext<'scx, 'tcx
 }
 }

-pub fn cast_shift_expr_rhs(cx: &BlockAndBuilder, op: hir::BinOp_, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
-cast_shift_rhs(op, lhs, rhs, |a, b| Trunc(cx, a, b), |a, b| ZExt(cx, a, b))
+pub fn cast_shift_expr_rhs(
+cx: &BlockAndBuilder, op: hir::BinOp_, lhs: ValueRef, rhs: ValueRef
+) -> ValueRef {
+cast_shift_rhs(op, lhs, rhs, |a, b| cx.trunc(a, b), |a, b| cx.zext(a, b))
 }

 pub fn cast_shift_const_rhs(op: hir::BinOp_, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
@@ -463,8 +461,7 @@ fn cast_shift_rhs<F, G>(op: hir::BinOp_,

 pub fn invoke<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
 llfn: ValueRef,
-llargs: &[ValueRef],
-debug_loc: DebugLoc)
+llargs: &[ValueRef])
 -> (ValueRef, BlockAndBuilder<'blk, 'tcx>) {
 let _icx = push_ctxt("invoke_");
 if need_invoke(&bcx) {
@@ -475,12 +472,13 @@ pub fn invoke<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
 let normal_bcx = bcx.fcx().new_block("normal-return");
 let landing_pad = bcx.fcx().get_landing_pad();

-let llresult = Invoke(&bcx,
-llfn,
-&llargs[..],
-normal_bcx.llbb,
-landing_pad,
-debug_loc);
+let llresult = bcx.invoke(
+llfn,
+&llargs[..],
+normal_bcx.llbb,
+landing_pad,
+bcx.lpad().and_then(|b| b.bundle())
+);
 return (llresult, normal_bcx.build());
 } else {
 debug!("calling {:?} at {:?}", Value(llfn), bcx.llbb());
@@ -488,7 +486,7 @@ pub fn invoke<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
 debug!("arg: {:?}", Value(llarg));
 }

-let llresult = Call(&bcx, llfn, &llargs[..], debug_loc);
+let llresult = bcx.call(llfn, &llargs[..], bcx.lpad().and_then(|b| b.bundle()));
 return (llresult, bcx);
 }
 }
@@ -518,7 +516,9 @@ pub fn call_assume<'a, 'tcx>(b: &Builder<'a, 'tcx>, val: ValueRef) {
 /// Helper for loading values from memory. Does the necessary conversion if the in-memory type
 /// differs from the type used for SSA values. Also handles various special cases where the type
 /// gives us better information about what we are loading.
-pub fn load_ty<'blk, 'tcx>(cx: &BlockAndBuilder<'blk, 'tcx>, ptr: ValueRef, t: Ty<'tcx>) -> ValueRef {
+pub fn load_ty<'blk, 'tcx>(
+cx: &BlockAndBuilder<'blk, 'tcx>, ptr: ValueRef, t: Ty<'tcx>
+) -> ValueRef {
 load_ty_builder(cx, ptr, t)
 }

@@ -557,15 +557,17 @@ pub fn load_ty_builder<'a, 'tcx>(b: &Builder<'a, 'tcx>, ptr: ValueRef, t: Ty<'tc

 /// Helper for storing values in memory. Does the necessary conversion if the in-memory type
 /// differs from the type used for SSA values.
-pub fn store_ty<'blk, 'tcx>(cx: &BlockAndBuilder<'blk, 'tcx>, v: ValueRef, dst: ValueRef, t: Ty<'tcx>) {
+pub fn store_ty<'blk, 'tcx>(
+cx: &BlockAndBuilder<'blk, 'tcx>, v: ValueRef, dst: ValueRef, t: Ty<'tcx>
+) {
 debug!("store_ty: {:?} : {:?} <- {:?}", Value(dst), t, Value(v));

 if common::type_is_fat_ptr(cx.tcx(), t) {
-let lladdr = ExtractValue(cx, v, abi::FAT_PTR_ADDR);
-let llextra = ExtractValue(cx, v, abi::FAT_PTR_EXTRA);
+let lladdr = cx.extract_value(v, abi::FAT_PTR_ADDR);
+let llextra = cx.extract_value(v, abi::FAT_PTR_EXTRA);
 store_fat_ptr(cx, lladdr, llextra, dst, t);
 } else {
-Store(cx, from_immediate(cx, v), dst);
+cx.store(from_immediate(cx, v), dst);
 }
 }

@@ -575,8 +577,8 @@ pub fn store_fat_ptr<'blk, 'tcx>(cx: &BlockAndBuilder<'blk, 'tcx>,
 dst: ValueRef,
 _ty: Ty<'tcx>) {
 // FIXME: emit metadata
-Store(cx, data, get_dataptr(cx, dst));
-Store(cx, extra, get_meta(cx, dst));
+cx.store(data, get_dataptr(cx, dst));
+cx.store(extra, get_meta(cx, dst));
 }

 pub fn load_fat_ptr<'blk, 'tcx>(cx: &BlockAndBuilder<'blk, 'tcx>,
@@ -609,7 +611,7 @@ pub fn load_fat_ptr_builder<'a, 'tcx>(

 pub fn from_immediate(bcx: &BlockAndBuilder, val: ValueRef) -> ValueRef {
 if val_ty(val) == Type::i1(bcx.ccx()) {
-ZExt(bcx, val, Type::i8(bcx.ccx()))
+bcx.zext(val, Type::i8(bcx.ccx()))
 } else {
 val
 }
@@ -617,7 +619,7 @@ pub fn from_immediate(bcx: &BlockAndBuilder, val: ValueRef) -> ValueRef {

 pub fn to_immediate(bcx: &BlockAndBuilder, val: ValueRef, ty: Ty) -> ValueRef {
 if ty.is_bool() {
-Trunc(bcx, val, Type::i1(bcx.ccx()))
+bcx.trunc(val, Type::i1(bcx.ccx()))
 } else {
 val
 }
@@ -637,9 +639,9 @@ pub fn with_cond<'blk, 'tcx, F>(
 let fcx = bcx.fcx();
 let next_cx = fcx.new_block("next").build();
 let cond_cx = fcx.new_block("cond").build();
-CondBr(&bcx, val, cond_cx.llbb(), next_cx.llbb(), DebugLoc::None);
+bcx.cond_br(val, cond_cx.llbb(), next_cx.llbb());
 let after_cx = f(cond_cx);
-Br(&after_cx, next_cx.llbb(), DebugLoc::None);
+after_cx.br(next_cx.llbb());
 next_cx
 }

@@ -702,8 +704,9 @@ pub fn trans_unwind_resume(bcx: &BlockAndBuilder, lpval: ValueRef) {
 if !bcx.sess().target.target.options.custom_unwind_resume {
 bcx.resume(lpval);
 } else {
-let exc_ptr = ExtractValue(bcx, lpval, 0);
-Call(bcx, bcx.fcx().eh_unwind_resume().reify(bcx.ccx()), &[exc_ptr], DebugLoc::None);
+let exc_ptr = bcx.extract_value(lpval, 0);
+bcx.call(bcx.fcx().eh_unwind_resume().reify(bcx.ccx()), &[exc_ptr],
+bcx.lpad().and_then(|b| b.bundle()));
 }
 }

@@ -725,7 +728,9 @@ pub fn call_memcpy<'bcx, 'tcx>(b: &Builder<'bcx, 'tcx>,
 b.call(memcpy, &[dst_ptr, src_ptr, size, align, volatile], None);
 }

-pub fn memcpy_ty<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, dst: ValueRef, src: ValueRef, t: Ty<'tcx>) {
+pub fn memcpy_ty<'blk, 'tcx>(
+bcx: &BlockAndBuilder<'blk, 'tcx>, dst: ValueRef, src: ValueRef, t: Ty<'tcx>
+) {
 let _icx = push_ctxt("memcpy_ty");
 let ccx = bcx.ccx();

@@ -792,7 +797,7 @@ pub fn alloc_ty<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
 pub fn alloca(cx: &BlockAndBuilder, ty: Type, name: &str) -> ValueRef {
 let _icx = push_ctxt("alloca");
 DebugLoc::None.apply(cx.fcx());
-Alloca(cx, ty, name)
+cx.fcx().alloca(ty, name)
 }

 impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
@@ -863,7 +868,7 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
 // Use a dummy instruction as the insertion point for all allocas.
 // This is later removed in FunctionContext::cleanup.
 self.alloca_insert_pt.set(Some(unsafe {
-Load(&entry_bcx, C_null(Type::i8p(self.ccx)));
+entry_bcx.load(C_null(Type::i8p(self.ccx)));
 llvm::LLVMGetFirstInstruction(entry_bcx.llbb())
 }));

@@ -881,7 +886,7 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
 let slot = if self.fn_ty.ret.is_indirect() {
 get_param(self.llfn, 0)
 } else {
-AllocaFcx(self, llty, "sret_slot")
+self.alloca(llty, "sret_slot")
 };

 self.llretslotptr.set(Some(slot));
@@ -892,21 +897,19 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {

 /// Ties up the llstaticallocas -> llloadenv -> lltop edges,
 /// and builds the return block.
-pub fn finish(&'blk self, ret_cx: &BlockAndBuilder<'blk, 'tcx>,
-ret_debug_loc: DebugLoc) {
+pub fn finish(&'blk self, ret_cx: &BlockAndBuilder<'blk, 'tcx>) {
 let _icx = push_ctxt("FunctionContext::finish");

-self.build_return_block(ret_cx, ret_debug_loc);
+self.build_return_block(ret_cx);

 DebugLoc::None.apply(self);
 self.cleanup();
 }

 // Builds the return block for a function.
-pub fn build_return_block(&self, ret_cx: &BlockAndBuilder<'blk, 'tcx>,
-ret_debug_location: DebugLoc) {
+pub fn build_return_block(&self, ret_cx: &BlockAndBuilder<'blk, 'tcx>) {
 if self.llretslotptr.get().is_none() || self.fn_ty.ret.is_indirect() {
-return RetVoid(ret_cx, ret_debug_location);
+return ret_cx.ret_void();
 }

 let retslot = self.llretslotptr.get().unwrap();
@@ -925,13 +928,13 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
 }

 if self.fn_ty.ret.is_indirect() {
-Store(ret_cx, retval, get_param(self.llfn, 0));
-RetVoid(ret_cx, ret_debug_location)
+ret_cx.store(retval, get_param(self.llfn, 0));
+ret_cx.ret_void()
 } else {
 if llty == Type::i1(self.ccx) {
-retval = Trunc(ret_cx, retval, llty);
+retval = ret_cx.trunc(retval, llty);
 }
-Ret(ret_cx, retval, ret_debug_location)
+ret_cx.ret(retval)
 }
 }
 (_, cast_ty) if self.fn_ty.ret.is_indirect() => {
@@ -941,24 +944,24 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
 let llalign = llalign_of_min(self.ccx, self.fn_ty.ret.ty);
 call_memcpy(&ret_cx, get_param(self.llfn, 0),
 retslot, llsz, llalign as u32);
-RetVoid(ret_cx, ret_debug_location)
+ret_cx.ret_void()
 }
 (_, Some(cast_ty)) => {
-let load = Load(ret_cx, PointerCast(ret_cx, retslot, cast_ty.ptr_to()));
+let load = ret_cx.load(ret_cx.pointercast(retslot, cast_ty.ptr_to()));
 let llalign = llalign_of_min(self.ccx, self.fn_ty.ret.ty);
 unsafe {
 llvm::LLVMSetAlignment(load, llalign);
 }
-Ret(ret_cx, load, ret_debug_location)
+ret_cx.ret(load)
 }
 (_, None) => {
 let retval = if llty == Type::i1(self.ccx) {
-let val = LoadRangeAssert(ret_cx, retslot, 0, 2, llvm::False);
-Trunc(ret_cx, val, llty)
+let val = ret_cx.load_range_assert(retslot, 0, 2, llvm::False);
+ret_cx.trunc(val, llty)
 } else {
-Load(ret_cx, retslot)
+ret_cx.load(retslot)
 };
-Ret(ret_cx, retval, ret_debug_location)
+ret_cx.ret(retval)
 }
 }
 }
@@ -1056,7 +1059,7 @@ pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
 adt::trans_set_discr(&bcx, sig.output(), dest, disr);
 }

-fcx.finish(&bcx, DebugLoc::None);
+fcx.finish(&bcx);
 }

 pub fn llvm_linkage_by_name(name: &str) -> Option<Linkage> {
--- a/src/librustc_trans/build.rs
+++ /dev/null
@@ -1,734 +0,0 @@
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![allow(dead_code)] // FFI wrappers
-#![allow(non_snake_case)]
-
-use llvm;
-use llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect};
-use llvm::{Opcode, IntPredicate, RealPredicate};
-use llvm::{ValueRef, BasicBlockRef};
-use common::*;
-use syntax_pos::Span;
-
-use type_::Type;
-use value::Value;
-use debuginfo::DebugLoc;
-
-use libc::{c_uint, c_char};
-
-pub fn RetVoid(cx: &BlockAndBuilder, debug_loc: DebugLoc) {
-debug_loc.apply(cx.fcx());
-cx.ret_void();
-}
-
-pub fn Ret(cx: &BlockAndBuilder, v: ValueRef, debug_loc: DebugLoc) {
-debug_loc.apply(cx.fcx());
-cx.ret(v);
-}
-
-pub fn AggregateRet(cx: &BlockAndBuilder,
-ret_vals: &[ValueRef],
-debug_loc: DebugLoc) {
-debug_loc.apply(cx.fcx());
-cx.aggregate_ret(ret_vals);
-}
-
-pub fn Br(cx: &BlockAndBuilder, dest: BasicBlockRef, debug_loc: DebugLoc) {
-debug_loc.apply(cx.fcx());
-cx.br(dest);
-}
-
-pub fn CondBr(cx: &BlockAndBuilder,
-if_: ValueRef,
-then: BasicBlockRef,
-else_: BasicBlockRef,
-debug_loc: DebugLoc) {
-debug_loc.apply(cx.fcx());
-cx.cond_br(if_, then, else_);
-}
-
-pub fn Switch(cx: &BlockAndBuilder, v: ValueRef, else_: BasicBlockRef, num_cases: usize)
--> ValueRef {
-cx.switch(v, else_, num_cases)
-}
-
-pub fn AddCase(s: ValueRef, on_val: ValueRef, dest: BasicBlockRef) {
-unsafe {
-if llvm::LLVMIsUndef(s) == llvm::True { return; }
-llvm::LLVMAddCase(s, on_val, dest);
-}
-}
-
-pub fn IndirectBr(cx: &BlockAndBuilder,
-addr: ValueRef,
-num_dests: usize,
-debug_loc: DebugLoc) {
-debug_loc.apply(cx.fcx());
-cx.indirect_br(addr, num_dests);
-}
-
-pub fn Invoke(cx: &BlockAndBuilder,
-fn_: ValueRef,
-args: &[ValueRef],
-then: BasicBlockRef,
-catch: BasicBlockRef,
-debug_loc: DebugLoc)
--> ValueRef {
-debug!("Invoke({:?} with arguments ({}))",
-Value(fn_),
-args.iter().map(|a| {
-format!("{:?}", Value(*a))
-}).collect::<Vec<String>>().join(", "));
-debug_loc.apply(cx.fcx());
-let bundle = cx.lpad().and_then(|b| b.bundle());
-cx.invoke(fn_, args, then, catch, bundle)
-}
-
-/* Arithmetic */
-pub fn Add(cx: &BlockAndBuilder,
-lhs: ValueRef,
-rhs: ValueRef,
-debug_loc: DebugLoc)
--> ValueRef {
-debug_loc.apply(cx.fcx());
-cx.add(lhs, rhs)
-}
-
-pub fn NSWAdd(cx: &BlockAndBuilder,
-lhs: ValueRef,
-rhs: ValueRef,
-debug_loc: DebugLoc)
--> ValueRef {
-debug_loc.apply(cx.fcx());
-cx.nswadd(lhs, rhs)
-}
-
-pub fn NUWAdd(cx: &BlockAndBuilder,
-lhs: ValueRef,
-rhs: ValueRef,
-debug_loc: DebugLoc)
--> ValueRef {
-debug_loc.apply(cx.fcx());
-cx.nuwadd(lhs, rhs)
-}
-
-pub fn FAdd(cx: &BlockAndBuilder,
-lhs: ValueRef,
-rhs: ValueRef,
-debug_loc: DebugLoc)
--> ValueRef {
-debug_loc.apply(cx.fcx());
-cx.fadd(lhs, rhs)
-}
-
-pub fn FAddFast(cx: &BlockAndBuilder,
-lhs: ValueRef,
-rhs: ValueRef,
-debug_loc: DebugLoc)
--> ValueRef {
-debug_loc.apply(cx.fcx());
-cx.fadd_fast(lhs, rhs)
-}
-
-pub fn Sub(cx: &BlockAndBuilder,
-lhs: ValueRef,
-rhs: ValueRef,
-debug_loc: DebugLoc)
--> ValueRef {
-debug_loc.apply(cx.fcx());
-cx.sub(lhs, rhs)
-}
-
-pub fn NSWSub(cx: &BlockAndBuilder,
-lhs: ValueRef,
-rhs: ValueRef,
-debug_loc: DebugLoc)
--> ValueRef {
-debug_loc.apply(cx.fcx());
-cx.nswsub(lhs, rhs)
-}
-
-pub fn NUWSub(cx: &BlockAndBuilder,
-lhs: ValueRef,
-rhs: ValueRef,
-debug_loc: DebugLoc)
--> ValueRef {
-debug_loc.apply(cx.fcx());
-cx.nuwsub(lhs, rhs)
-}
-
-pub fn FSub(cx: &BlockAndBuilder,
-lhs: ValueRef,
-rhs: ValueRef,
-debug_loc: DebugLoc)
--> ValueRef {
-debug_loc.apply(cx.fcx());
-cx.fsub(lhs, rhs)
-}
-
-pub fn FSubFast(cx: &BlockAndBuilder,
-lhs: ValueRef,
-rhs: ValueRef,
-debug_loc: DebugLoc)
--> ValueRef {
-debug_loc.apply(cx.fcx());
-cx.fsub_fast(lhs, rhs)
-}
-
-pub fn Mul(cx: &BlockAndBuilder,
-lhs: ValueRef,
-rhs: ValueRef,
-debug_loc: DebugLoc)
--> ValueRef {
-debug_loc.apply(cx.fcx());
-cx.mul(lhs, rhs)
-}
-
-pub fn NSWMul(cx: &BlockAndBuilder,
-lhs: ValueRef,
-rhs: ValueRef,
-debug_loc: DebugLoc)
--> ValueRef {
-debug_loc.apply(cx.fcx());
-cx.nswmul(lhs, rhs)
-}
-
-pub fn NUWMul(cx: &BlockAndBuilder,
-lhs: ValueRef,
-rhs: ValueRef,
-debug_loc: DebugLoc)
--> ValueRef {
-debug_loc.apply(cx.fcx());
-cx.nuwmul(lhs, rhs)
-}
-
-pub fn FMul(cx: &BlockAndBuilder,
-lhs: ValueRef,
-rhs: ValueRef,
-debug_loc: DebugLoc)
--> ValueRef {
-debug_loc.apply(cx.fcx());
-cx.fmul(lhs, rhs)
-}
-
-pub fn FMulFast(cx: &BlockAndBuilder,
-lhs: ValueRef,
-rhs: ValueRef,
-debug_loc: DebugLoc)
--> ValueRef {
-debug_loc.apply(cx.fcx());
-cx.fmul_fast(lhs, rhs)
-}
-
-pub fn UDiv(cx: &BlockAndBuilder,
-lhs: ValueRef,
-rhs: ValueRef,
-debug_loc: DebugLoc)
--> ValueRef {
-debug_loc.apply(cx.fcx());
-cx.udiv(lhs, rhs)
-}
-
-pub fn SDiv(cx: &BlockAndBuilder,
-lhs: ValueRef,
-rhs: ValueRef,
-debug_loc: DebugLoc)
--> ValueRef {
-debug_loc.apply(cx.fcx());
-cx.sdiv(lhs, rhs)
-}
-
-pub fn ExactSDiv(cx: &BlockAndBuilder,
-lhs: ValueRef,
-rhs: ValueRef,
-debug_loc: DebugLoc)
--> ValueRef {
-debug_loc.apply(cx.fcx());
-cx.exactsdiv(lhs, rhs)
-}
-
-pub fn FDiv(cx: &BlockAndBuilder,
-lhs: ValueRef,
-rhs: ValueRef,
-debug_loc: DebugLoc)
--> ValueRef {
-debug_loc.apply(cx.fcx());
-cx.fdiv(lhs, rhs)
-}
-
-pub fn FDivFast(cx: &BlockAndBuilder,
-lhs: ValueRef,
-rhs: ValueRef,
-debug_loc: DebugLoc)
--> ValueRef {
-debug_loc.apply(cx.fcx());
-cx.fdiv_fast(lhs, rhs)
-}
-
-pub fn URem(cx: &BlockAndBuilder,
-lhs: ValueRef,
-rhs: ValueRef,
-debug_loc: DebugLoc)
--> ValueRef {
-debug_loc.apply(cx.fcx());
-cx.urem(lhs, rhs)
-}
-
-pub fn SRem(cx: &BlockAndBuilder,
-lhs: ValueRef,
-rhs: ValueRef,
-debug_loc: DebugLoc)
--> ValueRef {
-debug_loc.apply(cx.fcx());
-cx.srem(lhs, rhs)
-}
-
-pub fn FRem(cx: &BlockAndBuilder,
-lhs: ValueRef,
-rhs: ValueRef,
-debug_loc: DebugLoc)
--> ValueRef {
-debug_loc.apply(cx.fcx());
-cx.frem(lhs, rhs)
-}
-
-pub fn FRemFast(cx: &BlockAndBuilder,
-lhs: ValueRef,
-rhs: ValueRef,
-debug_loc: DebugLoc)
--> ValueRef {
-debug_loc.apply(cx.fcx());
-cx.frem_fast(lhs, rhs)
-}
-
-pub fn Shl(cx: &BlockAndBuilder,
-lhs: ValueRef,
-rhs: ValueRef,
-debug_loc: DebugLoc)
--> ValueRef {
-debug_loc.apply(cx.fcx());
-cx.shl(lhs, rhs)
-}
-
-pub fn LShr(cx: &BlockAndBuilder,
-lhs: ValueRef,
-rhs: ValueRef,
-debug_loc: DebugLoc)
--> ValueRef {
-debug_loc.apply(cx.fcx());
-cx.lshr(lhs, rhs)
-}
-
-pub fn AShr(cx: &BlockAndBuilder,
-lhs: ValueRef,
-rhs: ValueRef,
-debug_loc: DebugLoc)
--> ValueRef {
-debug_loc.apply(cx.fcx());
-cx.ashr(lhs, rhs)
-}
-
-pub fn And(cx: &BlockAndBuilder,
-lhs: ValueRef,
-rhs: ValueRef,
-debug_loc: DebugLoc)
--> ValueRef {
-debug_loc.apply(cx.fcx());
-cx.and(lhs, rhs)
-}
-
-pub fn Or(cx: &BlockAndBuilder,
-lhs: ValueRef,
-rhs: ValueRef,
-debug_loc: DebugLoc)
--> ValueRef {
-debug_loc.apply(cx.fcx());
-cx.or(lhs, rhs)
-}
-
-pub fn Xor(cx: &BlockAndBuilder,
-lhs: ValueRef,
-rhs: ValueRef,
-debug_loc: DebugLoc)
--> ValueRef {
-debug_loc.apply(cx.fcx());
-cx.xor(lhs, rhs)
-}
-
-pub fn BinOp(cx: &BlockAndBuilder,
-op: Opcode,
-lhs: ValueRef,
-rhs: ValueRef,
-debug_loc: DebugLoc)
--> ValueRef {
-debug_loc.apply(cx.fcx());
-cx.binop(op, lhs, rhs)
-}
-
-pub fn Neg(cx: &BlockAndBuilder, v: ValueRef, debug_loc: DebugLoc) -> ValueRef {
-debug_loc.apply(cx.fcx());
-cx.neg(v)
-}
-
-pub fn NSWNeg(cx: &BlockAndBuilder, v: ValueRef, debug_loc: DebugLoc) -> ValueRef {
-debug_loc.apply(cx.fcx());
-cx.nswneg(v)
-}
-
-pub fn NUWNeg(cx: &BlockAndBuilder, v: ValueRef, debug_loc: DebugLoc) -> ValueRef {
-debug_loc.apply(cx.fcx());
-cx.nuwneg(v)
-}
-pub fn FNeg(cx: &BlockAndBuilder, v: ValueRef, debug_loc: DebugLoc) -> ValueRef {
-debug_loc.apply(cx.fcx());
-cx.fneg(v)
-}
-
-pub fn Not(cx: &BlockAndBuilder, v: ValueRef, debug_loc: DebugLoc) -> ValueRef {
-debug_loc.apply(cx.fcx());
-cx.not(v)
-}
-
-pub fn Alloca(cx: &BlockAndBuilder, ty: Type, name: &str) -> ValueRef {
-AllocaFcx(cx.fcx(), ty, name)
-}
-
-pub fn AllocaFcx(fcx: &FunctionContext, ty: Type, name: &str) -> ValueRef {
-let b = fcx.ccx.builder();
-b.position_before(fcx.alloca_insert_pt.get().unwrap());
-DebugLoc::None.apply(fcx);
-b.alloca(ty, name)
-}
-
-pub fn Free(cx: &BlockAndBuilder, pointer_val: ValueRef) {
-cx.free(pointer_val)
-}
-
-pub fn Load(cx: &BlockAndBuilder, pointer_val: ValueRef) -> ValueRef {
-cx.load(pointer_val)
-}
-
-pub fn VolatileLoad(cx: &BlockAndBuilder, pointer_val: ValueRef) -> ValueRef {
-cx.volatile_load(pointer_val)
-}
-
-pub fn AtomicLoad(cx: &BlockAndBuilder, pointer_val: ValueRef, order: AtomicOrdering) -> ValueRef {
-cx.atomic_load(pointer_val, order)
-}
-
-
-pub fn LoadRangeAssert(cx: &BlockAndBuilder, pointer_val: ValueRef, lo: u64,
-hi: u64, signed: llvm::Bool) -> ValueRef {
-cx.load_range_assert(pointer_val, lo, hi, signed)
-}
-
-pub fn LoadNonNull(cx: &BlockAndBuilder, ptr: ValueRef) -> ValueRef {
-cx.load_nonnull(ptr)
-}
-
-pub fn Store(cx: &BlockAndBuilder, val: ValueRef, ptr: ValueRef) -> ValueRef {
-cx.store(val, ptr)
-}
-
-pub fn VolatileStore(cx: &BlockAndBuilder, val: ValueRef, ptr: ValueRef) -> ValueRef {
-cx.volatile_store(val, ptr)
-}
-
-pub fn AtomicStore(cx: &BlockAndBuilder, val: ValueRef, ptr: ValueRef, order: AtomicOrdering) {
-cx.atomic_store(val, ptr, order)
-}
-
-pub fn GEP(cx: &BlockAndBuilder, pointer: ValueRef, indices: &[ValueRef]) -> ValueRef {
-cx.gep(pointer, indices)
-}
-
-// Simple wrapper around GEP that takes an array of ints and wraps them
-// in C_i32()
-#[inline]
-pub fn GEPi(cx: &BlockAndBuilder, base: ValueRef, ixs: &[usize]) -> ValueRef {
-cx.gepi(base, ixs)
-}
-
-pub fn InBoundsGEP(cx: &BlockAndBuilder, pointer: ValueRef, indices: &[ValueRef]) -> ValueRef {
-cx.inbounds_gep(pointer, indices)
-}
-
-pub fn StructGEP(cx: &BlockAndBuilder, pointer: ValueRef, idx: usize) -> ValueRef {
-cx.struct_gep(pointer, idx)
-}
-
-pub fn GlobalString(cx: &BlockAndBuilder, _str: *const c_char) -> ValueRef {
-cx.global_string(_str)
-}
-
-pub fn GlobalStringPtr(cx: &BlockAndBuilder, _str: *const c_char) -> ValueRef {
-cx.global_string_ptr(_str)
-}
-
-/* Casts */
-pub fn Trunc(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef {
-cx.trunc(val, dest_ty)
-}
-
-pub fn ZExt(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef {
-cx.zext(val, dest_ty)
-}
-
-pub fn SExt(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef {
-cx.sext(val, dest_ty)
-}
-
-pub fn FPToUI(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef {
-cx.fptoui(val, dest_ty)
-}
-
-pub fn FPToSI(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef {
-cx.fptosi(val, dest_ty)
-}
-
-pub fn UIToFP(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef {
-cx.uitofp(val, dest_ty)
-}
-
-pub fn SIToFP(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef {
-cx.sitofp(val, dest_ty)
-}
-
-pub fn FPTrunc(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef {
-cx.fptrunc(val, dest_ty)
-}
-
-pub fn FPExt(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef {
-cx.fpext(val, dest_ty)
-}
-
-pub fn PtrToInt(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef {
-cx.ptrtoint(val, dest_ty)
-}
-
-pub fn IntToPtr(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef {
-cx.inttoptr(val, dest_ty)
-}
-
-pub fn BitCast(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef {
-cx.bitcast(val, dest_ty)
-}
-
-pub fn ZExtOrBitCast(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef {
-cx.zext_or_bitcast(val, dest_ty)
-}
-
-pub fn SExtOrBitCast(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef {
-cx.sext_or_bitcast(val, dest_ty)
-}
-
-pub fn TruncOrBitCast(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef {
-cx.trunc_or_bitcast(val, dest_ty)
-}
-
-pub fn Cast(cx: &BlockAndBuilder, op: Opcode, val: ValueRef, dest_ty: Type,
-_: *const u8)
--> ValueRef {
-cx.cast(op, val, dest_ty)
-}
-
-pub fn PointerCast(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef {
-cx.pointercast(val, dest_ty)
-}
-
-pub fn IntCast(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef {
-cx.intcast(val, dest_ty)
-}
-
-pub fn FPCast(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef {
-cx.fpcast(val, dest_ty)
-}
-
-
-/* Comparisons */
-pub fn ICmp(cx: &BlockAndBuilder,
-op: IntPredicate,
-lhs: ValueRef,
-rhs: ValueRef,
-debug_loc: DebugLoc)
--> ValueRef {
-debug_loc.apply(cx.fcx());
-cx.icmp(op, lhs, rhs)
-}
-
-pub fn FCmp(cx: &BlockAndBuilder,
-op: RealPredicate,
-lhs: ValueRef,
-rhs: ValueRef,
-debug_loc: DebugLoc)
--> ValueRef {
-debug_loc.apply(cx.fcx());
-cx.fcmp(op, lhs, rhs)
-}
-
-/* Miscellaneous instructions */
-pub fn EmptyPhi(cx: &BlockAndBuilder, ty: Type) -> ValueRef {
-cx.empty_phi(ty)
-}
-
-pub fn Phi(cx: &BlockAndBuilder, ty: Type, vals: &[ValueRef], bbs: &[BasicBlockRef]) -> ValueRef {
-cx.phi(ty, vals, bbs)
-}
-
-pub fn AddIncomingToPhi(phi: ValueRef, val: ValueRef, bb: BasicBlockRef) {
-unsafe {
-if llvm::LLVMIsUndef(phi) == llvm::True { return; }
-llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint);
-}
-}
-
-pub fn add_span_comment(cx: &BlockAndBuilder, sp: Span, text: &str) {
-cx.add_span_comment(sp, text)
-}
-
-pub fn add_comment(cx: &BlockAndBuilder, text: &str) {
-cx.add_comment(text)
-}
-
-pub fn InlineAsmCall(cx: &BlockAndBuilder, asm: *const c_char, cons: *const c_char,
-inputs: &[ValueRef], output: Type,
-volatile: bool, alignstack: bool,
-dia: AsmDialect) -> ValueRef {
-cx.inline_asm_call(asm, cons, inputs, output, volatile, alignstack, dia)
-}
-
-pub fn Call(cx: &BlockAndBuilder,
-fn_: ValueRef,
-args: &[ValueRef],
-debug_loc: DebugLoc)
--> ValueRef {
-debug_loc.apply(cx.fcx());
-let bundle = cx.lpad().and_then(|b| b.bundle());
-cx.call(fn_, args, bundle)
-}
-
-pub fn AtomicFence(cx: &BlockAndBuilder, order: AtomicOrdering, scope: SynchronizationScope) {
-cx.atomic_fence(order, scope)
-}
-
-pub fn Select(cx: &BlockAndBuilder, if_: ValueRef, then: ValueRef, else_: ValueRef) -> ValueRef {
-cx.select(if_, then, else_)
-}
-
-pub fn VAArg(cx: &BlockAndBuilder, list: ValueRef, ty: Type) -> ValueRef {
-cx.va_arg(list, ty)
-}
-
-pub fn ExtractElement(cx: &BlockAndBuilder, vec_val: ValueRef, index: ValueRef) -> ValueRef {
-cx.extract_element(vec_val, index)
-}
-
-pub fn InsertElement(cx: &BlockAndBuilder, vec_val: ValueRef, elt_val: ValueRef,
-index: ValueRef) -> ValueRef {
-cx.insert_element(vec_val, elt_val, index)
-}
-
-pub fn ShuffleVector(cx: &BlockAndBuilder, v1: ValueRef, v2: ValueRef,
-mask: ValueRef) -> ValueRef {
-cx.shuffle_vector(v1, v2, mask)
-}
-
-pub fn VectorSplat(cx: &BlockAndBuilder, num_elts: usize, elt_val: ValueRef) -> ValueRef {
-cx.vector_splat(num_elts, elt_val)
-}
-
-pub fn ExtractValue(cx: &BlockAndBuilder, agg_val: ValueRef, index: usize) -> ValueRef {
-cx.extract_value(agg_val, index)
-}
-
-pub fn InsertValue(cx: &BlockAndBuilder, agg_val: ValueRef, elt_val: ValueRef, index: usize) -> ValueRef {
-cx.insert_value(agg_val, elt_val, index)
-}
-
-pub fn IsNull(cx: &BlockAndBuilder, val: ValueRef) -> ValueRef {
-cx.is_null(val)
-}
-
-pub fn IsNotNull(cx: &BlockAndBuilder, val: ValueRef) -> ValueRef {
-cx.is_not_null(val)
-}
-
-pub fn PtrDiff(cx: &BlockAndBuilder, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
-cx.ptrdiff(lhs, rhs)
-}
-
-pub fn Trap(cx: &BlockAndBuilder) {
-cx.trap();
-}
-
-pub fn LandingPad(cx: &BlockAndBuilder, ty: Type, pers_fn: ValueRef,
-num_clauses: usize) -> ValueRef {
-cx.landing_pad(ty, pers_fn, num_clauses, cx.fcx().llfn)
-}
-
-pub fn AddClause(cx: &BlockAndBuilder, landing_pad: ValueRef, clause: ValueRef) {
-cx.add_clause(landing_pad, clause)
-}
-
-pub fn SetCleanup(cx: &BlockAndBuilder, landing_pad: ValueRef) {
-cx.set_cleanup(landing_pad)
-}
-
-pub fn SetPersonalityFn(cx: &BlockAndBuilder, f: ValueRef) {
-cx.set_personality_fn(f)
-}
-
-// Atomic Operations
-pub fn AtomicCmpXchg(cx: &BlockAndBuilder, dst: ValueRef,
-cmp: ValueRef, src: ValueRef,
-order: AtomicOrdering,
-failure_order: AtomicOrdering,
-weak: llvm::Bool) -> ValueRef {
-cx.atomic_cmpxchg(dst, cmp, src, order, failure_order, weak)
-}
-pub fn AtomicRMW(cx: &BlockAndBuilder, op: AtomicRmwBinOp,
-dst: ValueRef, src: ValueRef,
-order: AtomicOrdering) -> ValueRef {
-cx.atomic_rmw(op, dst, src, order)
-}
-
-pub fn CleanupPad(cx: &BlockAndBuilder,
-parent: Option<ValueRef>,
-args: &[ValueRef]) -> ValueRef {
-cx.cleanup_pad(parent, args)
-}
-
-pub fn CleanupRet(cx: &BlockAndBuilder,
-cleanup: ValueRef,
-unwind: Option<BasicBlockRef>) -> ValueRef {
-cx.cleanup_ret(cleanup, unwind)
-}
-
-pub fn CatchPad(cx: &BlockAndBuilder,
-parent: ValueRef,
-args: &[ValueRef]) -> ValueRef {
-cx.catch_pad(parent, args)
-}
-
-pub fn CatchRet(cx: &BlockAndBuilder, pad: ValueRef, unwind: BasicBlockRef) -> ValueRef {
-cx.catch_ret(pad, unwind)
-}
-
-pub fn CatchSwitch(cx: &BlockAndBuilder,
-parent: Option<ValueRef>,
-unwind: Option<BasicBlockRef>,
-num_handlers: usize) -> ValueRef {
-cx.catch_switch(parent, unwind, num_handlers)
-}
-
-pub fn AddHandler(cx: &BlockAndBuilder, catch_switch: ValueRef, handler: BasicBlockRef) {
-cx.add_handler(catch_switch, handler)
-}
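Two of the deleted wrappers (AddCase and AddIncomingToPhi) operate on raw LLVM values rather than on a builder, so they cannot become methods on the builder; the next hunk re-homes them in builder.rs as the snake_case functions add_case and add_incoming_to_phi, with their bodies unchanged.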
--- a/src/librustc_trans/builder.rs
+++ b/src/librustc_trans/builder.rs
@@ -1103,6 +1103,20 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
 }
 }

+pub fn add_case(s: ValueRef, on_val: ValueRef, dest: BasicBlockRef) {
+unsafe {
+if llvm::LLVMIsUndef(s) == llvm::True { return; }
+llvm::LLVMAddCase(s, on_val, dest)
+}
+}
+
+pub fn add_incoming_to_phi(phi: ValueRef, val: ValueRef, bb: BasicBlockRef) {
+unsafe {
+if llvm::LLVMIsUndef(phi) == llvm::True { return; }
+llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint);
+}
+}
+
 /// Returns the ptr value that should be used for storing `val`.
 fn check_store<'b>(&self,
 val: ValueRef,
--- a/src/librustc_trans/callee.rs
+++ b/src/librustc_trans/callee.rs
@@ -25,12 +25,10 @@ use abi::{Abi, FnType};
 use attributes;
 use base;
 use base::*;
-use build::*;
 use common::{
 self, Block, BlockAndBuilder, CrateContext, FunctionContext, SharedCrateContext
 };
 use consts;
-use debuginfo::DebugLoc;
 use declare;
 use value::Value;
 use meth;
@@ -210,11 +208,10 @@ impl<'tcx> Callee<'tcx> {
 /// into memory somewhere. Nonetheless we return the actual return value of the
 /// function.
 pub fn call<'a, 'blk>(self, bcx: BlockAndBuilder<'blk, 'tcx>,
-debug_loc: DebugLoc,
 args: &[ValueRef],
 dest: Option<ValueRef>)
 -> (BlockAndBuilder<'blk, 'tcx>, ValueRef) {
-trans_call_inner(bcx, debug_loc, self, args, dest)
+trans_call_inner(bcx, self, args, dest)
 }

 /// Turn the callee into a function pointer.
@@ -414,11 +411,11 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>(
 let self_scope = fcx.push_custom_cleanup_scope();
 fcx.schedule_drop_mem(self_scope, llenv, closure_ty);

-let bcx = callee.call(bcx, DebugLoc::None, &llargs[self_idx..], dest).0;
+let bcx = callee.call(bcx, &llargs[self_idx..], dest).0;

 let bcx = fcx.pop_and_trans_custom_cleanup_scope(bcx, self_scope);

-fcx.finish(&bcx, DebugLoc::None);
+fcx.finish(&bcx);

 ccx.instances().borrow_mut().insert(method_instance, lloncefn);

@@ -531,7 +528,7 @@ fn trans_fn_pointer_shim<'a, 'tcx>(
 let llfnpointer = llfnpointer.unwrap_or_else(|| {
 // the first argument (`self`) will be ptr to the fn pointer
 if is_by_ref {
-Load(&bcx, llargs[self_idx])
+bcx.load(llargs[self_idx])
 } else {
 llargs[self_idx]
 }
@@ -543,8 +540,8 @@ fn trans_fn_pointer_shim<'a, 'tcx>(
 data: Fn(llfnpointer),
 ty: bare_fn_ty
 };
-let bcx = callee.call(bcx, DebugLoc::None, &llargs[(self_idx + 1)..], dest).0;
-fcx.finish(&bcx, DebugLoc::None);
+let bcx = callee.call(bcx, &llargs[(self_idx + 1)..], dest).0;
+fcx.finish(&bcx);

 ccx.fn_pointer_shims().borrow_mut().insert(bare_fn_ty_maybe_ref, llfn);

@@ -654,7 +651,6 @@ fn get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
 // Translating calls

 fn trans_call_inner<'a, 'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
-debug_loc: DebugLoc,
 callee: Callee<'tcx>,
 args: &[ValueRef],
 opt_llretslot: Option<ValueRef>)
@@ -689,7 +685,7 @@ fn trans_call_inner<'a, 'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
 if fn_ty.ret.is_indirect() {
 let mut llretslot = opt_llretslot.unwrap();
 if let Some(ty) = fn_ty.ret.cast {
-llretslot = PointerCast(&bcx, llretslot, ty.ptr_to());
+llretslot = bcx.pointercast(llretslot, ty.ptr_to());
 }
 llargs.push(llretslot);
 }
@@ -700,7 +696,7 @@ fn trans_call_inner<'a, 'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,

 let fn_ptr = meth::get_virtual_method(&bcx, args[1], idx);
 let llty = fn_ty.llvm_type(&bcx.ccx()).ptr_to();
-callee = Fn(PointerCast(&bcx, fn_ptr, llty));
+callee = Fn(bcx.pointercast(fn_ptr, llty));
 llargs.extend_from_slice(&args[2..]);
 }
 _ => llargs.extend_from_slice(args)
@@ -711,7 +707,7 @@ fn trans_call_inner<'a, 'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
 _ => bug!("expected fn pointer callee, found {:?}", callee)
 };

-let (llret, bcx) = base::invoke(bcx, llfn, &llargs, debug_loc);
+let (llret, bcx) = base::invoke(bcx, llfn, &llargs);
 fn_ty.apply_attrs_callsite(llret);

 // If the function we just called does not use an outpointer,
--- a/src/librustc_trans/cleanup.rs
+++ b/src/librustc_trans/cleanup.rs
@@ -118,7 +118,6 @@ pub use self::EarlyExitLabel::*;

 use llvm::{BasicBlockRef, ValueRef};
 use base;
-use build;
 use common;
 use common::{BlockAndBuilder, FunctionContext, LandingPad};
 use debuginfo::{DebugLoc};
@@ -344,7 +343,7 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {

 let mut bcx = bcx;
 for cleanup in scope.cleanups.iter().rev() {
-bcx = cleanup.trans(bcx, scope.debug_loc);
+bcx = cleanup.trans(bcx);
 }
 bcx
 }
@@ -422,13 +421,13 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
 UnwindKind::LandingPad => {
 let addr = self.landingpad_alloca.get()
 .unwrap();
-let lp = build::Load(&bcx, addr);
+let lp = bcx.load(addr);
 base::call_lifetime_end(&bcx, addr);
 base::trans_unwind_resume(&bcx, lp);
 }
 UnwindKind::CleanupPad(_) => {
-let pad = build::CleanupPad(&bcx, None, &[]);
-build::CleanupRet(&bcx, pad, None);
+let pad = bcx.cleanup_pad(None, &[]);
+bcx.cleanup_ret(pad, None);
 }
 }
 prev_llbb = bcx.llbb();
@@ -488,7 +487,7 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
 let mut bcx_out = bcx_in;
 let len = scope.cleanups.len();
 for cleanup in scope.cleanups.iter().rev().take(len - skip) {
-bcx_out = cleanup.trans(bcx_out, scope.debug_loc);
+bcx_out = cleanup.trans(bcx_out);
 }
 skip = 0;
 exit_label.branch(&bcx_out, prev_llbb);
@@ -540,8 +539,8 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
 // creation of the landingpad instruction). We then create a
 // cleanuppad instruction which has no filters to run cleanup on all
 // exceptions.
-build::SetPersonalityFn(&pad_bcx, llpersonality);
-let llretval = build::CleanupPad(&pad_bcx, None, &[]);
+pad_bcx.set_personality_fn(llpersonality);
+let llretval = pad_bcx.cleanup_pad(None, &[]);
 UnwindKind::CleanupPad(llretval)
 } else {
 // The landing pad return type (the type being propagated). Not sure
@@ -552,10 +551,10 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
 false);

 // The only landing pad clause will be 'cleanup'
-let llretval = build::LandingPad(&pad_bcx, llretty, llpersonality, 1);
+let llretval = pad_bcx.landing_pad(llretty, llpersonality, 1, pad_bcx.fcx().llfn);

 // The landing pad block is a cleanup
-build::SetCleanup(&pad_bcx, llretval);
+pad_bcx.set_cleanup(llretval);

 let addr = match self.landingpad_alloca.get() {
 Some(addr) => addr,
@@ -567,7 +566,7 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
 addr
 }
 };
-build::Store(&pad_bcx, llretval, addr);
+pad_bcx.store(llretval, addr);
 UnwindKind::LandingPad
 };

@@ -629,9 +628,9 @@ impl EarlyExitLabel {
 /// the `cleanupret` instruction instead of the `br` instruction.
 fn branch(&self, from_bcx: &BlockAndBuilder, to_llbb: BasicBlockRef) {
 if let UnwindExit(UnwindKind::CleanupPad(pad)) = *self {
-build::CleanupRet(from_bcx, pad, Some(to_llbb));
+from_bcx.cleanup_ret(pad, Some(to_llbb));
 } else {
-build::Br(from_bcx, to_llbb, DebugLoc::None);
+from_bcx.br(to_llbb);
 }
 }

@@ -649,7 +648,7 @@ impl EarlyExitLabel {
 fn start(&self, bcx: &BlockAndBuilder) -> EarlyExitLabel {
 match *self {
 UnwindExit(UnwindKind::CleanupPad(..)) => {
-let pad = build::CleanupPad(bcx, None, &[]);
+let pad = bcx.cleanup_pad(None, &[]);
 bcx.set_lpad_ref(Some(bcx.fcx().lpad_arena.alloc(LandingPad::msvc(pad))));
 UnwindExit(UnwindKind::CleanupPad(pad))
 }
@@ -683,10 +682,7 @@ pub struct DropValue<'tcx> {
 }

 impl<'tcx> DropValue<'tcx> {
-fn trans<'blk>(&self,
-bcx: BlockAndBuilder<'blk, 'tcx>,
-debug_loc: DebugLoc)
--> BlockAndBuilder<'blk, 'tcx> {
+fn trans<'blk>(&self, bcx: BlockAndBuilder<'blk, 'tcx>) -> BlockAndBuilder<'blk, 'tcx> {
 let skip_dtor = self.skip_dtor;
 let _icx = if skip_dtor {
 base::push_ctxt("<DropValue as Cleanup>::trans skip_dtor=true")
@@ -694,9 +690,9 @@ impl<'tcx> DropValue<'tcx> {
 base::push_ctxt("<DropValue as Cleanup>::trans skip_dtor=false")
 };
 if self.is_immediate {
-glue::drop_ty_immediate(bcx, self.val, self.ty, debug_loc, self.skip_dtor)
+glue::drop_ty_immediate(bcx, self.val, self.ty, self.skip_dtor)
 } else {
-glue::drop_ty_core(bcx, self.val, self.ty, debug_loc, self.skip_dtor)
+glue::drop_ty_core(bcx, self.val, self.ty, self.skip_dtor)
 }
 }
 }
--- a/src/librustc_trans/common.rs
+++ b/src/librustc_trans/common.rs
@@ -26,12 +26,11 @@ use middle::lang_items::LangItem;
 use rustc::ty::subst::Substs;
 use abi::{Abi, FnType};
 use base;
-use build;
 use builder::Builder;
 use callee::Callee;
 use cleanup;
 use consts;
-use debuginfo::{self, DebugLoc};
+use debuginfo;
 use declare;
 use machine;
 use monomorphize;
@@ -434,6 +433,12 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
 unwresume.set(Some(llfn));
 Callee::ptr(llfn, ty)
 }
+
+pub fn alloca(&self, ty: Type, name: &str) -> ValueRef {
+let b = self.ccx.builder();
+b.position_before(self.alloca_insert_pt.get().unwrap());
+b.alloca(ty, name)
+}
 }

 // Basic block context. We create a block context for each basic block
@@ -998,35 +1003,32 @@ pub fn langcall(tcx: TyCtxt,

 pub fn build_unchecked_lshift<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
 lhs: ValueRef,
-rhs: ValueRef,
-binop_debug_loc: DebugLoc) -> ValueRef {
+rhs: ValueRef) -> ValueRef {
 let rhs = base::cast_shift_expr_rhs(bcx, hir::BinOp_::BiShl, lhs, rhs);
 // #1877, #10183: Ensure that input is always valid
-let rhs = shift_mask_rhs(bcx, rhs, binop_debug_loc);
-build::Shl(bcx, lhs, rhs, binop_debug_loc)
+let rhs = shift_mask_rhs(bcx, rhs);
+bcx.shl(lhs, rhs)
 }

 pub fn build_unchecked_rshift<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
 lhs_t: Ty<'tcx>,
 lhs: ValueRef,
-rhs: ValueRef,
-binop_debug_loc: DebugLoc) -> ValueRef {
+rhs: ValueRef) -> ValueRef {
 let rhs = base::cast_shift_expr_rhs(bcx, hir::BinOp_::BiShr, lhs, rhs);
 // #1877, #10183: Ensure that input is always valid
-let rhs = shift_mask_rhs(bcx, rhs, binop_debug_loc);
+let rhs = shift_mask_rhs(bcx, rhs);
 let is_signed = lhs_t.is_signed();
 if is_signed {
-build::AShr(bcx, lhs, rhs, binop_debug_loc)
+bcx.ashr(lhs, rhs)
 } else {
-build::LShr(bcx, lhs, rhs, binop_debug_loc)
+bcx.lshr(lhs, rhs)
 }
 }

 fn shift_mask_rhs<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
-rhs: ValueRef,
-debug_loc: DebugLoc) -> ValueRef {
+rhs: ValueRef) -> ValueRef {
 let rhs_llty = val_ty(rhs);
-build::And(bcx, rhs, shift_mask_val(bcx, rhs_llty, rhs_llty, false), debug_loc)
+bcx.and(rhs, shift_mask_val(bcx, rhs_llty, rhs_llty, false))
 }

 pub fn shift_mask_val<'blk, 'tcx>(
@@ -1048,7 +1050,7 @@ pub fn shift_mask_val<'blk, 'tcx>(
 },
 TypeKind::Vector => {
 let mask = shift_mask_val(bcx, llty.element_type(), mask_llty.element_type(), invert);
-build::VectorSplat(bcx, mask_llty.vector_length(), mask)
+bcx.vector_splat(mask_llty.vector_length(), mask)
 },
 _ => bug!("shift_mask_val: expected Integer or Vector, found {:?}", kind),
 }
|
@ -22,10 +22,9 @@ use rustc::traits;
use rustc::ty::{self, AdtKind, Ty, TyCtxt, TypeFoldable};
use adt;
use base::*;
use build::*;
use callee::{Callee};
use builder::Builder;
use common::*;
use debuginfo::DebugLoc;
use machine::*;
use monomorphize;
use trans_item::TransItem;
@ -41,35 +40,28 @@ use syntax_pos::DUMMY_SP;
pub fn trans_exchange_free_dyn<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
v: ValueRef,
size: ValueRef,
align: ValueRef,
debug_loc: DebugLoc)
align: ValueRef)
-> BlockAndBuilder<'blk, 'tcx> {
let _icx = push_ctxt("trans_exchange_free");

let def_id = langcall(bcx.tcx(), None, "", ExchangeFreeFnLangItem);
let args = [PointerCast(&bcx, v, Type::i8p(bcx.ccx())), size, align];
let args = [bcx.pointercast(v, Type::i8p(bcx.ccx())), size, align];
Callee::def(bcx.ccx(), def_id, bcx.tcx().intern_substs(&[]))
.call(bcx, debug_loc, &args, None).0
.call(bcx, &args, None).0
}

pub fn trans_exchange_free<'blk, 'tcx>(cx: BlockAndBuilder<'blk, 'tcx>,
v: ValueRef,
size: u64,
align: u32,
debug_loc: DebugLoc)
align: u32)
-> BlockAndBuilder<'blk, 'tcx> {
let ccx = cx.ccx();
trans_exchange_free_dyn(cx,
v,
C_uint(ccx, size),
C_uint(ccx, align),
debug_loc)
trans_exchange_free_dyn(cx, v, C_uint(ccx, size), C_uint(ccx, align))
}

pub fn trans_exchange_free_ty<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
ptr: ValueRef,
content_ty: Ty<'tcx>,
debug_loc: DebugLoc)
content_ty: Ty<'tcx>)
-> BlockAndBuilder<'blk, 'tcx> {
assert!(type_is_sized(bcx.ccx().tcx(), content_ty));
let sizing_type = sizing_type_of(bcx.ccx(), content_ty);
@ -78,7 +70,7 @@ pub fn trans_exchange_free_ty<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
// `Box<ZeroSizeType>` does not allocate.
if content_size != 0 {
let content_align = align_of(bcx.ccx(), content_ty);
trans_exchange_free(bcx, ptr, content_size, content_align, debug_loc)
trans_exchange_free(bcx, ptr, content_size, content_align)
} else {
bcx
}
@ -132,15 +124,13 @@ pub fn get_drop_glue_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,

pub fn drop_ty<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
v: ValueRef,
t: Ty<'tcx>,
debug_loc: DebugLoc) -> BlockAndBuilder<'blk, 'tcx> {
drop_ty_core(bcx, v, t, debug_loc, false)
t: Ty<'tcx>) -> BlockAndBuilder<'blk, 'tcx> {
drop_ty_core(bcx, v, t, false)
}

pub fn drop_ty_core<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
v: ValueRef,
t: Ty<'tcx>,
debug_loc: DebugLoc,
skip_dtor: bool)
-> BlockAndBuilder<'blk, 'tcx> {
// NB: v is an *alias* of type t here, not a direct value.
@ -156,13 +146,13 @@ pub fn drop_ty_core<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
let glue = get_drop_glue_core(ccx, g);
let glue_type = get_drop_glue_type(ccx.tcx(), t);
let ptr = if glue_type != t {
PointerCast(&bcx, v, type_of(ccx, glue_type).ptr_to())
bcx.pointercast(v, type_of(ccx, glue_type).ptr_to())
} else {
v
};

// No drop-hint ==> call standard drop glue
Call(&bcx, glue, &[ptr], debug_loc);
bcx.call(glue, &[ptr], bcx.lpad().and_then(|b| b.bundle()));
}
bcx
}
@ -170,14 +160,13 @@ pub fn drop_ty_core<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
pub fn drop_ty_immediate<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
v: ValueRef,
t: Ty<'tcx>,
debug_loc: DebugLoc,
skip_dtor: bool)
-> BlockAndBuilder<'blk, 'tcx> {
let _icx = push_ctxt("drop_ty_immediate");
let vp = alloc_ty(&bcx, t, "");
call_lifetime_start(&bcx, vp);
store_ty(&bcx, v, vp, t);
let bcx = drop_ty_core(bcx, vp, t, debug_loc, skip_dtor);
let bcx = drop_ty_core(bcx, vp, t, skip_dtor);
call_lifetime_end(&bcx, vp);
bcx
}
@ -249,7 +238,7 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
// type, so we don't need to explicitly cast the function parameter.

let bcx = make_drop_glue(bcx, get_param(llfn, 0), g);
fcx.finish(&bcx, DebugLoc::None);
fcx.finish(&bcx);
}

fn trans_custom_dtor<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
@ -285,8 +274,8 @@ fn trans_custom_dtor<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
} else {
// FIXME(#36457) -- we should pass unsized values to drop glue as two arguments
unsized_args = [
Load(&bcx, get_dataptr(&bcx, v0)),
Load(&bcx, get_meta(&bcx, v0))
bcx.load(get_dataptr(&bcx, v0)),
bcx.load(get_meta(&bcx, v0))
];
&unsized_args
};
@ -301,7 +290,7 @@ fn trans_custom_dtor<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
};
let dtor_did = def.destructor().unwrap();
bcx = Callee::def(bcx.ccx(), dtor_did, vtbl.substs)
.call(bcx, DebugLoc::None, args, None).0;
.call(bcx, args, None).0;

bcx.fcx().pop_and_trans_custom_cleanup_scope(bcx, contents_scope)
}
@ -436,29 +425,27 @@ fn make_drop_glue<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
assert!(!skip_dtor);
if !type_is_sized(bcx.tcx(), content_ty) {
let llval = get_dataptr(&bcx, v0);
let llbox = Load(&bcx, llval);
let bcx = drop_ty(bcx, v0, content_ty, DebugLoc::None);
let llbox = bcx.load(llval);
let bcx = drop_ty(bcx, v0, content_ty);
// FIXME(#36457) -- we should pass unsized values to drop glue as two arguments
let info = get_meta(&bcx, v0);
let info = Load(&bcx, info);
let info = bcx.load(info);
let (llsize, llalign) = size_and_align_of_dst(&bcx, content_ty, info);

// `Box<ZeroSizeType>` does not allocate.
let needs_free = ICmp(
&bcx,
let needs_free = bcx.icmp(
llvm::IntNE,
llsize,
C_uint(bcx.ccx(), 0u64),
DebugLoc::None
);
with_cond(bcx, needs_free, |bcx| {
trans_exchange_free_dyn(bcx, llbox, llsize, llalign, DebugLoc::None)
trans_exchange_free_dyn(bcx, llbox, llsize, llalign)
})
} else {
let llval = v0;
let llbox = Load(&bcx, llval);
let bcx = drop_ty(bcx, llbox, content_ty, DebugLoc::None);
trans_exchange_free_ty(bcx, llbox, content_ty, DebugLoc::None)
let llbox = bcx.load(llval);
let bcx = drop_ty(bcx, llbox, content_ty);
trans_exchange_free_ty(bcx, llbox, content_ty)
}
}
ty::TyDynamic(..) => {
@ -468,12 +455,11 @@ fn make_drop_glue<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
// FIXME(#36457) -- we should pass unsized values to drop glue as two arguments
assert!(!skip_dtor);
let data_ptr = get_dataptr(&bcx, v0);
let vtable_ptr = Load(&bcx, get_meta(&bcx, v0));
let dtor = Load(&bcx, vtable_ptr);
Call(&bcx,
dtor,
&[PointerCast(&bcx, Load(&bcx, data_ptr), Type::i8p(bcx.ccx()))],
DebugLoc::None);
let vtable_ptr = bcx.load(get_meta(&bcx, v0));
let dtor = bcx.load(vtable_ptr);
bcx.call(dtor,
&[bcx.pointercast(bcx.load(data_ptr), Type::i8p(bcx.ccx()))],
bcx.lpad().and_then(|b| b.bundle()));
bcx
}
ty::TyAdt(def, ..) if def.dtor_kind().is_present() && !skip_dtor => {
@ -512,7 +498,7 @@ fn drop_structural_ty<'blk, 'tcx>(cx: BlockAndBuilder<'blk, 'tcx>,
for (i, field) in variant.fields.iter().enumerate() {
let arg = monomorphize::field_ty(tcx, substs, field);
let field_ptr = adt::trans_field_ptr(&cx, t, av, Disr::from(variant.disr_val), i);
cx = drop_ty(cx, field_ptr, arg, DebugLoc::None);
cx = drop_ty(cx, field_ptr, arg);
}
return cx;
}
@ -521,8 +507,8 @@ fn drop_structural_ty<'blk, 'tcx>(cx: BlockAndBuilder<'blk, 'tcx>,
adt::MaybeSizedValue::sized(av)
} else {
// FIXME(#36457) -- we should pass unsized values as two arguments
let data = Load(&cx, get_dataptr(&cx, av));
let info = Load(&cx, get_meta(&cx, av));
let data = cx.load(get_dataptr(&cx, av));
let info = cx.load(get_meta(&cx, av));
adt::MaybeSizedValue::unsized_(data, info)
};

@ -531,7 +517,7 @@ fn drop_structural_ty<'blk, 'tcx>(cx: BlockAndBuilder<'blk, 'tcx>,
ty::TyClosure(def_id, substs) => {
for (i, upvar_ty) in substs.upvar_tys(def_id, cx.tcx()).enumerate() {
let llupvar = adt::trans_field_ptr(&cx, t, value, Disr(0), i);
cx = drop_ty(cx, llupvar, upvar_ty, DebugLoc::None);
cx = drop_ty(cx, llupvar, upvar_ty);
}
}
ty::TyArray(_, n) => {
@ -539,17 +525,17 @@ fn drop_structural_ty<'blk, 'tcx>(cx: BlockAndBuilder<'blk, 'tcx>,
let len = C_uint(cx.ccx(), n);
let unit_ty = t.sequence_element_type(cx.tcx());
cx = tvec::slice_for_each(cx, base, unit_ty, len,
|bb, vv| drop_ty(bb, vv, unit_ty, DebugLoc::None));
|bb, vv| drop_ty(bb, vv, unit_ty));
}
ty::TySlice(_) | ty::TyStr => {
let unit_ty = t.sequence_element_type(cx.tcx());
cx = tvec::slice_for_each(cx, value.value, unit_ty, value.meta,
|bb, vv| drop_ty(bb, vv, unit_ty, DebugLoc::None));
|bb, vv| drop_ty(bb, vv, unit_ty));
}
ty::TyTuple(ref args) => {
for (i, arg) in args.iter().enumerate() {
let llfld_a = adt::trans_field_ptr(&cx, t, value, Disr(0), i);
cx = drop_ty(cx, llfld_a, *arg, DebugLoc::None);
cx = drop_ty(cx, llfld_a, *arg);
}
}
ty::TyAdt(adt, substs) => match adt.adt_kind() {
@ -563,11 +549,11 @@ fn drop_structural_ty<'blk, 'tcx>(cx: BlockAndBuilder<'blk, 'tcx>,
} else {
// FIXME(#36457) -- we should pass unsized values as two arguments
let scratch = alloc_ty(&cx, field_ty, "__fat_ptr_iter");
Store(&cx, llfld_a, get_dataptr(&cx, scratch));
Store(&cx, value.meta, get_meta(&cx, scratch));
cx.store(llfld_a, get_dataptr(&cx, scratch));
cx.store(value.meta, get_meta(&cx, scratch));
scratch
};
cx = drop_ty(cx, val, field_ty, DebugLoc::None);
cx = drop_ty(cx, val, field_ty);
}
}
AdtKind::Union => {
@ -591,7 +577,7 @@ fn drop_structural_ty<'blk, 'tcx>(cx: BlockAndBuilder<'blk, 'tcx>,
}
(adt::BranchKind::Switch, Some(lldiscrim_a)) => {
let tcx = cx.tcx();
cx = drop_ty(cx, lldiscrim_a, tcx.types.isize, DebugLoc::None);
cx = drop_ty(cx, lldiscrim_a, tcx.types.isize);

// Create a fall-through basic block for the "else" case of
// the switch instruction we're about to generate. Note that
@ -607,8 +593,8 @@ fn drop_structural_ty<'blk, 'tcx>(cx: BlockAndBuilder<'blk, 'tcx>,
// call this for an already-valid enum in which case the `ret
// void` will never be hit.
let ret_void_cx = fcx.new_block("enum-iter-ret-void").build();
RetVoid(&ret_void_cx, DebugLoc::None);
let llswitch = Switch(&cx, lldiscrim_a, ret_void_cx.llbb(), n_variants);
ret_void_cx.ret_void();
let llswitch = cx.switch(lldiscrim_a, ret_void_cx.llbb(), n_variants);
let next_cx = fcx.new_block("enum-iter-next").build();

for variant in &adt.variants {
@ -616,9 +602,9 @@ fn drop_structural_ty<'blk, 'tcx>(cx: BlockAndBuilder<'blk, 'tcx>,
&variant.disr_val.to_string());
let variant_cx = fcx.new_block(&variant_cx_name).build();
let case_val = adt::trans_case(&cx, t, Disr::from(variant.disr_val));
AddCase(llswitch, case_val, variant_cx.llbb());
Builder::add_case(llswitch, case_val, variant_cx.llbb());
let variant_cx = iter_variant(variant_cx, t, value, variant, substs);
Br(&variant_cx, next_cx.llbb(), DebugLoc::None);
variant_cx.br(next_cx.llbb());
}
cx = next_cx;
}

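Stepping back from the individual hunks: the change in this file is mechanical, replacing the `build.rs` free functions (which took the block as an explicit first argument and usually a `DebugLoc`) with methods on `BlockAndBuilder`. A toy, self-contained model of the refactor, with every name invented purely for illustration:

```rust
// Toy model of the build.rs -> builder-method refactor.
struct Builder {
    insts: Vec<String>,
}

// Old style: a free function receiving the builder explicitly.
#[allow(non_snake_case)]
fn Store(b: &mut Builder, val: &str, ptr: &str) {
    b.insts.push(format!("store {}, {}", val, ptr));
}

impl Builder {
    // New style: the same operation as a method on the builder.
    fn store(&mut self, val: &str, ptr: &str) {
        self.insts.push(format!("store {}, {}", val, ptr));
    }
}

fn main() {
    let mut b = Builder { insts: Vec::new() };
    Store(&mut b, "%a", "%p"); // old call shape: Store(&bcx, ...)
    b.store("%a", "%p");       // new call shape: bcx.store(...)
    assert_eq!(b.insts[0], b.insts[1]);
}
```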
@ -18,7 +18,6 @@ use llvm::{ValueRef}
use abi::{Abi, FnType};
use adt;
use base::*;
use build::*;
use common::*;
use debuginfo::DebugLoc;
use declare;
@ -120,7 +119,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
// These are the only intrinsic functions that diverge.
if name == "abort" {
let llfn = ccx.get_intrinsic(&("llvm.trap"));
Call(bcx, llfn, &[], call_debug_location);
bcx.call(llfn, &[], bcx.lpad().and_then(|b| b.bundle()));
return;
} else if name == "unreachable" {
// FIXME: do nothing?
@ -132,23 +131,23 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
let simple = get_simple_intrinsic(ccx, name);
let llval = match (simple, name) {
(Some(llfn), _) => {
Call(bcx, llfn, &llargs, call_debug_location)
bcx.call(llfn, &llargs, bcx.lpad().and_then(|b| b.bundle()))
}
(_, "likely") => {
let expect = ccx.get_intrinsic(&("llvm.expect.i1"));
Call(bcx, expect, &[llargs[0], C_bool(ccx, true)], call_debug_location)
bcx.call(expect, &[llargs[0], C_bool(ccx, true)], bcx.lpad().and_then(|b| b.bundle()))
}
(_, "unlikely") => {
let expect = ccx.get_intrinsic(&("llvm.expect.i1"));
Call(bcx, expect, &[llargs[0], C_bool(ccx, false)], call_debug_location)
bcx.call(expect, &[llargs[0], C_bool(ccx, false)], bcx.lpad().and_then(|b| b.bundle()))
}
(_, "try") => {
try_intrinsic(bcx, llargs[0], llargs[1], llargs[2], llresult, call_debug_location);
try_intrinsic(bcx, llargs[0], llargs[1], llargs[2], llresult);
C_nil(ccx)
}
(_, "breakpoint") => {
let llfn = ccx.get_intrinsic(&("llvm.debugtrap"));
Call(bcx, llfn, &[], call_debug_location)
bcx.call(llfn, &[], bcx.lpad().and_then(|b| b.bundle()))
}
(_, "size_of") => {
let tp_ty = substs.type_at(0);
@ -213,12 +212,12 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
(_, "offset") => {
let ptr = llargs[0];
let offset = llargs[1];
InBoundsGEP(bcx, ptr, &[offset])
bcx.inbounds_gep(ptr, &[offset])
}
(_, "arith_offset") => {
let ptr = llargs[0];
let offset = llargs[1];
GEP(bcx, ptr, &[offset])
bcx.gep(ptr, &[offset])
}

(_, "copy_nonoverlapping") => {
@ -228,8 +227,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
substs.type_at(0),
llargs[1],
llargs[0],
llargs[2],
call_debug_location)
llargs[2])
}
(_, "copy") => {
copy_intrinsic(bcx,
@ -238,8 +236,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
substs.type_at(0),
llargs[1],
llargs[0],
llargs[2],
call_debug_location)
llargs[2])
}
(_, "write_bytes") => {
memset_intrinsic(bcx,
@ -247,8 +244,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
substs.type_at(0),
llargs[0],
llargs[1],
llargs[2],
call_debug_location)
llargs[2])
}

(_, "volatile_copy_nonoverlapping_memory") => {
@ -258,8 +254,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
substs.type_at(0),
llargs[0],
llargs[1],
llargs[2],
call_debug_location)
llargs[2])
}
(_, "volatile_copy_memory") => {
copy_intrinsic(bcx,
@ -268,8 +263,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
substs.type_at(0),
llargs[0],
llargs[1],
llargs[2],
call_debug_location)
llargs[2])
}
(_, "volatile_set_memory") => {
memset_intrinsic(bcx,
@ -277,16 +271,15 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
substs.type_at(0),
llargs[0],
llargs[1],
llargs[2],
call_debug_location)
llargs[2])
}
(_, "volatile_load") => {
let tp_ty = substs.type_at(0);
let mut ptr = llargs[0];
if let Some(ty) = fn_ty.ret.cast {
ptr = PointerCast(bcx, ptr, ty.ptr_to());
ptr = bcx.pointercast(ptr, ty.ptr_to());
}
let load = VolatileLoad(bcx, ptr);
let load = bcx.volatile_load(ptr);
unsafe {
llvm::LLVMSetAlignment(load, type_of::align_of(ccx, tp_ty));
}
@ -295,16 +288,16 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
(_, "volatile_store") => {
let tp_ty = substs.type_at(0);
if type_is_fat_ptr(bcx.tcx(), tp_ty) {
VolatileStore(bcx, llargs[1], get_dataptr(bcx, llargs[0]));
VolatileStore(bcx, llargs[2], get_meta(bcx, llargs[0]));
bcx.volatile_store(llargs[1], get_dataptr(bcx, llargs[0]));
bcx.volatile_store(llargs[2], get_meta(bcx, llargs[0]));
} else {
let val = if fn_ty.args[1].is_indirect() {
Load(bcx, llargs[1])
bcx.load(llargs[1])
} else {
from_immediate(bcx, llargs[1])
};
let ptr = PointerCast(bcx, llargs[0], val_ty(val).ptr_to());
let store = VolatileStore(bcx, val, ptr);
let ptr = bcx.pointercast(llargs[0], val_ty(val).ptr_to());
let store = bcx.volatile_store(val, ptr);
unsafe {
llvm::LLVMSetAlignment(store, type_of::align_of(ccx, tp_ty));
}
@ -321,40 +314,39 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
Some((width, signed)) =>
match name {
"ctlz" => count_zeros_intrinsic(bcx, &format!("llvm.ctlz.i{}", width),
llargs[0], call_debug_location),
llargs[0]),
"cttz" => count_zeros_intrinsic(bcx, &format!("llvm.cttz.i{}", width),
llargs[0], call_debug_location),
"ctpop" => Call(bcx, ccx.get_intrinsic(&format!("llvm.ctpop.i{}", width)),
&llargs, call_debug_location),
llargs[0]),
"ctpop" => bcx.call(ccx.get_intrinsic(&format!("llvm.ctpop.i{}", width)),
&llargs, bcx.lpad().and_then(|b| b.bundle())),
"bswap" => {
if width == 8 {
llargs[0] // byte swap a u8/i8 is just a no-op
} else {
Call(bcx, ccx.get_intrinsic(&format!("llvm.bswap.i{}", width)),
&llargs, call_debug_location)
bcx.call(ccx.get_intrinsic(&format!("llvm.bswap.i{}", width)),
&llargs, bcx.lpad().and_then(|b| b.bundle()))
}
}
"add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => {
let intrinsic = format!("llvm.{}{}.with.overflow.i{}",
if signed { 's' } else { 'u' },
&name[..3], width);
with_overflow_intrinsic(bcx, &intrinsic, llargs[0], llargs[1], llresult,
call_debug_location)
with_overflow_intrinsic(bcx, &intrinsic, llargs[0], llargs[1], llresult)
},
"overflowing_add" => Add(bcx, llargs[0], llargs[1], call_debug_location),
"overflowing_sub" => Sub(bcx, llargs[0], llargs[1], call_debug_location),
"overflowing_mul" => Mul(bcx, llargs[0], llargs[1], call_debug_location),
"overflowing_add" => bcx.add(llargs[0], llargs[1]),
"overflowing_sub" => bcx.sub(llargs[0], llargs[1]),
"overflowing_mul" => bcx.mul(llargs[0], llargs[1]),
"unchecked_div" =>
if signed {
SDiv(bcx, llargs[0], llargs[1], call_debug_location)
bcx.sdiv(llargs[0], llargs[1])
} else {
UDiv(bcx, llargs[0], llargs[1], call_debug_location)
bcx.udiv(llargs[0], llargs[1])
},
"unchecked_rem" =>
if signed {
SRem(bcx, llargs[0], llargs[1], call_debug_location)
bcx.srem(llargs[0], llargs[1])
} else {
URem(bcx, llargs[0], llargs[1], call_debug_location)
bcx.urem(llargs[0], llargs[1])
},
_ => bug!(),
},
@ -374,11 +366,11 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
match float_type_width(sty) {
Some(_width) =>
match name {
"fadd_fast" => FAddFast(bcx, llargs[0], llargs[1], call_debug_location),
"fsub_fast" => FSubFast(bcx, llargs[0], llargs[1], call_debug_location),
"fmul_fast" => FMulFast(bcx, llargs[0], llargs[1], call_debug_location),
"fdiv_fast" => FDivFast(bcx, llargs[0], llargs[1], call_debug_location),
"frem_fast" => FRemFast(bcx, llargs[0], llargs[1], call_debug_location),
"fadd_fast" => bcx.fadd_fast(llargs[0], llargs[1]),
"fsub_fast" => bcx.fsub_fast(llargs[0], llargs[1]),
"fmul_fast" => bcx.fmul_fast(llargs[0], llargs[1]),
"fdiv_fast" => bcx.fdiv_fast(llargs[0], llargs[1]),
"frem_fast" => bcx.frem_fast(llargs[0], llargs[1]),
_ => bug!(),
},
None => {
@ -407,7 +399,6 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
callee_ty,
&llargs,
ret_ty, llret_ty,
call_debug_location,
span)
}
// This requires that atomic intrinsics follow a specific naming pattern:
@ -447,12 +438,12 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
let sty = &substs.type_at(0).sty;
if int_type_width_signed(sty, ccx).is_some() {
let weak = if split[1] == "cxchgweak" { llvm::True } else { llvm::False };
let val = AtomicCmpXchg(bcx, llargs[0], llargs[1], llargs[2],
order, failorder, weak);
let result = ExtractValue(bcx, val, 0);
let success = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
Store(bcx, result, StructGEP(bcx, llresult, 0));
Store(bcx, success, StructGEP(bcx, llresult, 1));
let val = bcx.atomic_cmpxchg(llargs[0], llargs[1], llargs[2], order,
failorder, weak);
let result = bcx.extract_value(val, 0);
let success = bcx.zext(bcx.extract_value(val, 1), Type::bool(bcx.ccx()));
bcx.store(result, bcx.struct_gep(llresult, 0));
bcx.store(success, bcx.struct_gep(llresult, 1));
} else {
span_invalid_monomorphization_error(
tcx.sess, span,
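For readers unfamiliar with the cmpxchg lowering above: the instruction yields an aggregate of the old value plus an `i1` success flag, which is why the code extracts two values, widens the flag to `bool`, and stores both fields of the result. The same pair is observable from stable Rust through `compare_exchange`:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

fn main() {
    let x = AtomicUsize::new(5);
    // Ok(old) on success, Err(current) on failure: the (value, success)
    // pair the atomic cxchg intrinsic unpacks above.
    assert_eq!(x.compare_exchange(5, 10, Ordering::SeqCst, Ordering::SeqCst), Ok(5));
    assert_eq!(x.compare_exchange(5, 99, Ordering::SeqCst, Ordering::SeqCst), Err(10));
    assert_eq!(x.load(Ordering::SeqCst), 10);
}
```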
@ -465,7 +456,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
"load" => {
let sty = &substs.type_at(0).sty;
if int_type_width_signed(sty, ccx).is_some() {
AtomicLoad(bcx, llargs[0], order)
bcx.atomic_load(llargs[0], order)
} else {
span_invalid_monomorphization_error(
tcx.sess, span,
@ -478,7 +469,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
"store" => {
let sty = &substs.type_at(0).sty;
if int_type_width_signed(sty, ccx).is_some() {
AtomicStore(bcx, llargs[1], llargs[0], order);
bcx.atomic_store(llargs[1], llargs[0], order);
} else {
span_invalid_monomorphization_error(
tcx.sess, span,
@ -489,12 +480,12 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
}

"fence" => {
AtomicFence(bcx, order, llvm::SynchronizationScope::CrossThread);
bcx.atomic_fence(order, llvm::SynchronizationScope::CrossThread);
C_nil(ccx)
}

"singlethreadfence" => {
AtomicFence(bcx, order, llvm::SynchronizationScope::SingleThread);
bcx.atomic_fence(order, llvm::SynchronizationScope::SingleThread);
C_nil(ccx)
}

@ -517,7 +508,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,

let sty = &substs.type_at(0).sty;
if int_type_width_signed(sty, ccx).is_some() {
AtomicRMW(bcx, atom_op, llargs[0], llargs[1], order)
bcx.atomic_rmw(atom_op, llargs[0], llargs[1], order)
} else {
span_invalid_monomorphization_error(
tcx.sess, span,
@ -609,25 +600,24 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
let arg = adt::MaybeSizedValue::sized(llarg);
(0..contents.len())
.map(|i| {
Load(bcx, adt::trans_field_ptr(bcx, arg_type, arg, Disr(0), i))
bcx.load(adt::trans_field_ptr(bcx, arg_type, arg, Disr(0), i))
})
.collect()
}
intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => {
let llvm_elem = one(ty_to_type(bcx.ccx(), llvm_elem, &mut false));
vec![PointerCast(bcx, llarg,
vec![bcx.pointercast(llarg,
llvm_elem.ptr_to())]
}
intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => {
let llvm_elem = one(ty_to_type(bcx.ccx(), llvm_elem, &mut false));
vec![BitCast(bcx, llarg,
Type::vector(&llvm_elem, length as u64))]
vec![bcx.bitcast(llarg, Type::vector(&llvm_elem, length as u64))]
}
intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => {
// the LLVM intrinsic uses a smaller integer
// size than the C intrinsic's signature, so
// we have to trim it down here.
vec![Trunc(bcx, llarg, Type::ix(bcx.ccx(), llvm_width as u64))]
vec![bcx.trunc(llarg, Type::ix(bcx.ccx(), llvm_width as u64))]
}
_ => vec![llarg],
}
@ -664,7 +654,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
let f = declare::declare_cfn(ccx,
name,
Type::func(&inputs, &outputs));
Call(bcx, f, &llargs, call_debug_location)
bcx.call(f, &llargs, bcx.lpad().and_then(|b| b.bundle()))
}
};

@ -674,8 +664,8 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
assert!(!flatten);

for i in 0..elems.len() {
let val = ExtractValue(bcx, val, i);
Store(bcx, val, StructGEP(bcx, llresult, i));
let val = bcx.extract_value(val, i);
bcx.store(val, bcx.struct_gep(llresult, i));
}
C_nil(ccx)
}
@ -687,8 +677,8 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
if val_ty(llval) != Type::void(ccx) &&
machine::llsize_of_alloc(ccx, val_ty(llval)) != 0 {
if let Some(ty) = fn_ty.ret.cast {
let ptr = PointerCast(bcx, llresult, ty.ptr_to());
let store = Store(bcx, llval, ptr);
let ptr = bcx.pointercast(llresult, ty.ptr_to());
let store = bcx.store(llval, ptr);
unsafe {
llvm::LLVMSetAlignment(store, type_of::align_of(ccx, ret_ty));
}
@ -704,8 +694,7 @@ fn copy_intrinsic<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
tp_ty: Ty<'tcx>,
dst: ValueRef,
src: ValueRef,
count: ValueRef,
call_debug_location: DebugLoc)
count: ValueRef)
-> ValueRef {
let ccx = bcx.ccx();
let lltp_ty = type_of::type_of(ccx, tp_ty);
@ -721,18 +710,17 @@ fn copy_intrinsic<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,

let name = format!("llvm.{}.p0i8.p0i8.i{}", operation, int_size);

let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
let src_ptr = PointerCast(bcx, src, Type::i8p(ccx));
let dst_ptr = bcx.pointercast(dst, Type::i8p(ccx));
let src_ptr = bcx.pointercast(src, Type::i8p(ccx));
let llfn = ccx.get_intrinsic(&name);

Call(bcx,
llfn,
&[dst_ptr,
src_ptr,
Mul(bcx, size, count, DebugLoc::None),
align,
C_bool(ccx, volatile)],
call_debug_location)
bcx.call(llfn,
&[dst_ptr,
src_ptr,
bcx.mul(size, count),
align,
C_bool(ccx, volatile)],
bcx.lpad().and_then(|b| b.bundle()))
}

fn memset_intrinsic<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
@ -740,8 +728,7 @@ fn memset_intrinsic<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
tp_ty: Ty<'tcx>,
dst: ValueRef,
val: ValueRef,
count: ValueRef,
call_debug_location: DebugLoc)
count: ValueRef)
-> ValueRef {
let ccx = bcx.ccx();
let lltp_ty = type_of::type_of(ccx, tp_ty);
@ -751,44 +738,42 @@ fn memset_intrinsic<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,

let name = format!("llvm.memset.p0i8.i{}", int_size);

let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
let dst_ptr = bcx.pointercast(dst, Type::i8p(ccx));
let llfn = ccx.get_intrinsic(&name);

Call(bcx,
llfn,
&[dst_ptr,
val,
Mul(bcx, size, count, DebugLoc::None),
align,
C_bool(ccx, volatile)],
call_debug_location)
bcx.call(
llfn,
&[dst_ptr,
val,
bcx.mul(size, count),
align,
C_bool(ccx, volatile)],
bcx.lpad().and_then(|b| b.bundle()))
}

fn count_zeros_intrinsic(bcx: &BlockAndBuilder,
name: &str,
val: ValueRef,
call_debug_location: DebugLoc)
val: ValueRef)
-> ValueRef {
let y = C_bool(bcx.ccx(), false);
let llfn = bcx.ccx().get_intrinsic(&name);
Call(bcx, llfn, &[val, y], call_debug_location)
bcx.call(llfn, &[val, y], bcx.lpad().and_then(|b| b.bundle()))
}

fn with_overflow_intrinsic<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
name: &str,
a: ValueRef,
b: ValueRef,
out: ValueRef,
call_debug_location: DebugLoc)
out: ValueRef)
-> ValueRef {
let llfn = bcx.ccx().get_intrinsic(&name);

// Convert `i1` to a `bool`, and write it to the out parameter
let val = Call(bcx, llfn, &[a, b], call_debug_location);
let result = ExtractValue(bcx, val, 0);
let overflow = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
Store(bcx, result, StructGEP(bcx, out, 0));
Store(bcx, overflow, StructGEP(bcx, out, 1));
let val = bcx.call(llfn, &[a, b], bcx.lpad().and_then(|b| b.bundle()));
let result = bcx.extract_value(val, 0);
let overflow = bcx.zext(bcx.extract_value(val, 1), Type::bool(bcx.ccx()));
bcx.store(result, bcx.struct_gep(out, 0));
bcx.store(overflow, bcx.struct_gep(out, 1));

C_nil(bcx.ccx())
}
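The `llvm.*.with.overflow.*` calls above return a `{ result, i1 overflowed }` pair, which `with_overflow_intrinsic` unpacks into the out-pointer; at the language level the same pair surfaces through the `overflowing_*` methods. A quick illustration:

```rust
fn main() {
    // Lowered through the with-overflow intrinsics translated above:
    // the wrapped result comes back together with the overflow flag.
    let (result, overflowed) = std::i32::MAX.overflowing_add(1);
    assert_eq!(result, std::i32::MIN);
    assert!(overflowed);
}
```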
@ -799,15 +784,14 @@ fn try_intrinsic<'blk, 'tcx>(
data: ValueRef,
local_ptr: ValueRef,
dest: ValueRef,
dloc: DebugLoc
) {
if bcx.sess().no_landing_pads() {
Call(bcx, func, &[data], dloc);
Store(bcx, C_null(Type::i8p(&bcx.ccx())), dest);
bcx.call(func, &[data], bcx.lpad().and_then(|b| b.bundle()));
bcx.store(C_null(Type::i8p(&bcx.ccx())), dest);
} else if wants_msvc_seh(bcx.sess()) {
trans_msvc_try(bcx, func, data, local_ptr, dest, dloc);
trans_msvc_try(bcx, func, data, local_ptr, dest);
} else {
trans_gnu_try(bcx, func, data, local_ptr, dest, dloc);
trans_gnu_try(bcx, func, data, local_ptr, dest);
}
}

@ -822,13 +806,11 @@ fn trans_msvc_try<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
func: ValueRef,
data: ValueRef,
local_ptr: ValueRef,
dest: ValueRef,
dloc: DebugLoc) {
dest: ValueRef) {
let llfn = get_rust_try_fn(bcx.fcx(), &mut |bcx| {
let ccx = bcx.ccx();
let dloc = DebugLoc::None;

SetPersonalityFn(&bcx, bcx.fcx().eh_personality());
bcx.set_personality_fn(bcx.fcx().eh_personality());

let normal = bcx.fcx().new_block("normal").build();
let catchswitch = bcx.fcx().new_block("catchswitch").build();
@ -879,36 +861,37 @@ fn trans_msvc_try<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
//
// More information can be found in libstd's seh.rs implementation.
let i64p = Type::i64(ccx).ptr_to();
let slot = Alloca(&bcx, i64p, "slot");
Invoke(&bcx, func, &[data], normal.llbb(), catchswitch.llbb(), dloc);
let slot = bcx.fcx().alloca(i64p, "slot");
bcx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(),
bcx.lpad().and_then(|b| b.bundle()));

Ret(&normal, C_i32(ccx, 0), dloc);
normal.ret(C_i32(ccx, 0));

let cs = CatchSwitch(&catchswitch, None, None, 1);
AddHandler(&catchswitch, cs, catchpad.llbb());
let cs = catchswitch.catch_switch(None, None, 1);
catchswitch.add_handler(cs, catchpad.llbb());

let tcx = ccx.tcx();
let tydesc = match tcx.lang_items.msvc_try_filter() {
Some(did) => ::consts::get_static(ccx, did),
None => bug!("msvc_try_filter not defined"),
};
let tok = CatchPad(&catchpad, cs, &[tydesc, C_i32(ccx, 0), slot]);
let addr = Load(&catchpad, slot);
let arg1 = Load(&catchpad, addr);
let tok = catchpad.catch_pad(cs, &[tydesc, C_i32(ccx, 0), slot]);
let addr = catchpad.load(slot);
let arg1 = catchpad.load(addr);
let val1 = C_i32(ccx, 1);
let arg2 = Load(&catchpad, InBoundsGEP(&catchpad, addr, &[val1]));
let local_ptr = BitCast(&catchpad, local_ptr, i64p);
Store(&catchpad, arg1, local_ptr);
Store(&catchpad, arg2, InBoundsGEP(&catchpad, local_ptr, &[val1]));
CatchRet(&catchpad, tok, caught.llbb());
let arg2 = catchpad.load(catchpad.inbounds_gep(addr, &[val1]));
let local_ptr = catchpad.bitcast(local_ptr, i64p);
catchpad.store(arg1, local_ptr);
catchpad.store(arg2, catchpad.inbounds_gep(local_ptr, &[val1]));
catchpad.catch_ret(tok, caught.llbb());

Ret(&caught, C_i32(ccx, 1), dloc);
caught.ret(C_i32(ccx, 1));
});

// Note that no invoke is used here because by definition this function
// can't panic (that's what it's catching).
let ret = Call(bcx, llfn, &[func, data, local_ptr], dloc);
Store(bcx, ret, dest);
let ret = bcx.call(llfn, &[func, data, local_ptr], bcx.lpad().and_then(|b| b.bundle()));
bcx.store(ret, dest);
}

// Definition of the standard "try" function for Rust using the GNU-like model
@ -926,11 +909,9 @@ fn trans_gnu_try<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
func: ValueRef,
data: ValueRef,
local_ptr: ValueRef,
dest: ValueRef,
dloc: DebugLoc) {
dest: ValueRef) {
let llfn = get_rust_try_fn(bcx.fcx(), &mut |bcx| {
let ccx = bcx.ccx();
let dloc = DebugLoc::None;

// Translates the shims described above:
//
@ -955,8 +936,8 @@ fn trans_gnu_try<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
let func = llvm::get_param(bcx.fcx().llfn, 0);
let data = llvm::get_param(bcx.fcx().llfn, 1);
let local_ptr = llvm::get_param(bcx.fcx().llfn, 2);
Invoke(&bcx, func, &[data], then.llbb(), catch.llbb(), dloc);
Ret(&then, C_i32(ccx, 0), dloc);
bcx.invoke(func, &[data], then.llbb(), catch.llbb(), bcx.lpad().and_then(|b| b.bundle()));
then.ret(C_i32(ccx, 0));

// Type indicator for the exception being thrown.
//
@ -966,17 +947,17 @@ fn trans_gnu_try<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
// rust_try ignores the selector.
let lpad_ty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)],
false);
let vals = LandingPad(&catch, lpad_ty, bcx.fcx().eh_personality(), 1);
AddClause(&catch, vals, C_null(Type::i8p(ccx)));
let ptr = ExtractValue(&catch, vals, 0);
Store(&catch, ptr, BitCast(&catch, local_ptr, Type::i8p(ccx).ptr_to()));
Ret(&catch, C_i32(ccx, 1), dloc);
let vals = catch.landing_pad(lpad_ty, bcx.fcx().eh_personality(), 1, catch.fcx().llfn);
catch.add_clause(vals, C_null(Type::i8p(ccx)));
let ptr = catch.extract_value(vals, 0);
catch.store(ptr, catch.bitcast(local_ptr, Type::i8p(ccx).ptr_to()));
catch.ret(C_i32(ccx, 1));
});

// Note that no invoke is used here because by definition this function
// can't panic (that's what it's catching).
let ret = Call(bcx, llfn, &[func, data, local_ptr], dloc);
Store(bcx, ret, dest);
let ret = bcx.call(llfn, &[func, data, local_ptr], bcx.lpad().and_then(|b| b.bundle()));
bcx.store(ret, dest);
}

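Both shims are the codegen half of `std::panic::catch_unwind`: invoke the callback, and if it unwinds, land in the pad (GNU) or catchpad (MSVC), stash the payload through `local_ptr`, and return nonzero. The observable behavior from safe code:

```rust
use std::panic;

fn main() {
    // catch_unwind sits on top of the rust_try shim translated above:
    // a zero return means the closure finished, nonzero means it unwound.
    assert_eq!(panic::catch_unwind(|| 40 + 2).ok(), Some(42));
    assert!(panic::catch_unwind(|| -> i32 { panic!("boom") }).is_err());
}
```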
// Helper function to give a Block to a closure to translate a shim function.
@ -1042,7 +1023,6 @@ fn generic_simd_intrinsic<'blk, 'tcx, 'a>(
llargs: &[ValueRef],
ret_ty: Ty<'tcx>,
llret_ty: Type,
call_debug_location: DebugLoc,
span: Span
) -> ValueRef {
// macros for error handling:
@ -1113,8 +1093,7 @@ fn generic_simd_intrinsic<'blk, 'tcx, 'a>(
llargs[1],
in_elem,
llret_ty,
cmp_op,
call_debug_location)
cmp_op)
}

if name.starts_with("simd_shuffle") {
@ -1163,20 +1142,20 @@ fn generic_simd_intrinsic<'blk, 'tcx, 'a>(
None => return C_null(llret_ty)
};

return ShuffleVector(bcx, llargs[0], llargs[1], C_vector(&indices))
return bcx.shuffle_vector(llargs[0], llargs[1], C_vector(&indices))
}

if name == "simd_insert" {
require!(in_elem == arg_tys[2],
"expected inserted type `{}` (element of input `{}`), found `{}`",
in_elem, in_ty, arg_tys[2]);
return InsertElement(bcx, llargs[0], llargs[2], llargs[1])
return bcx.insert_element(llargs[0], llargs[2], llargs[1])
}
if name == "simd_extract" {
require!(ret_ty == in_elem,
"expected return type `{}` (element of input `{}`), found `{}`",
in_elem, in_ty, ret_ty);
return ExtractElement(bcx, llargs[0], llargs[1])
return bcx.extract_element(llargs[0], llargs[1])
}

if name == "simd_cast" {
@ -1212,34 +1191,34 @@ fn generic_simd_intrinsic<'blk, 'tcx, 'a>(
match (in_style, out_style) {
(Style::Int(in_is_signed), Style::Int(_)) => {
return match in_width.cmp(&out_width) {
Ordering::Greater => Trunc(bcx, llargs[0], llret_ty),
Ordering::Greater => bcx.trunc(llargs[0], llret_ty),
Ordering::Equal => llargs[0],
Ordering::Less => if in_is_signed {
SExt(bcx, llargs[0], llret_ty)
bcx.sext(llargs[0], llret_ty)
} else {
ZExt(bcx, llargs[0], llret_ty)
bcx.zext(llargs[0], llret_ty)
}
}
}
(Style::Int(in_is_signed), Style::Float) => {
return if in_is_signed {
SIToFP(bcx, llargs[0], llret_ty)
bcx.sitofp(llargs[0], llret_ty)
} else {
UIToFP(bcx, llargs[0], llret_ty)
bcx.uitofp(llargs[0], llret_ty)
}
}
(Style::Float, Style::Int(out_is_signed)) => {
return if out_is_signed {
FPToSI(bcx, llargs[0], llret_ty)
bcx.fptosi(llargs[0], llret_ty)
} else {
FPToUI(bcx, llargs[0], llret_ty)
bcx.fptoui(llargs[0], llret_ty)
}
}
(Style::Float, Style::Float) => {
return match in_width.cmp(&out_width) {
Ordering::Greater => FPTrunc(bcx, llargs[0], llret_ty),
Ordering::Greater => bcx.fptrunc(llargs[0], llret_ty),
Ordering::Equal => llargs[0],
Ordering::Less => FPExt(bcx, llargs[0], llret_ty)
Ordering::Less => bcx.fpext(llargs[0], llret_ty)
}
}
_ => {/* Unsupported. Fallthrough. */}
@ -1250,13 +1229,13 @@ fn generic_simd_intrinsic<'blk, 'tcx, 'a>(
ret_ty, out_elem);
}
macro_rules! arith {
($($name: ident: $($($p: ident),* => $call: expr),*;)*) => {
($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
$(
if name == stringify!($name) {
match in_elem.sty {
$(
$(ty::$p(_))|* => {
return $call(bcx, llargs[0], llargs[1], call_debug_location)
return bcx.$call(llargs[0], llargs[1])
}
)*
_ => {},
@ -1269,15 +1248,15 @@ fn generic_simd_intrinsic<'blk, 'tcx, 'a>(
}
}
arith! {
simd_add: TyUint, TyInt => Add, TyFloat => FAdd;
simd_sub: TyUint, TyInt => Sub, TyFloat => FSub;
simd_mul: TyUint, TyInt => Mul, TyFloat => FMul;
simd_div: TyFloat => FDiv;
simd_shl: TyUint, TyInt => Shl;
simd_shr: TyUint => LShr, TyInt => AShr;
simd_and: TyUint, TyInt => And;
simd_or: TyUint, TyInt => Or;
simd_xor: TyUint, TyInt => Xor;
simd_add: TyUint, TyInt => add, TyFloat => fadd;
simd_sub: TyUint, TyInt => sub, TyFloat => fsub;
simd_mul: TyUint, TyInt => mul, TyFloat => fmul;
simd_div: TyFloat => fdiv;
simd_shl: TyUint, TyInt => shl;
simd_shr: TyUint => lshr, TyInt => ashr;
simd_and: TyUint, TyInt => and;
simd_or: TyUint, TyInt => or;
simd_xor: TyUint, TyInt => xor;
}
span_bug!(span, "unknown SIMD intrinsic");
}

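One subtle bit above: `arith!`'s matcher changed from `$call: expr` to `$call: ident` because the expansion now has to splice the fragment after a dot (`bcx.$call(...)`), and only an identifier fragment is valid in method position. A self-contained sketch of the same trick:

```rust
struct Bcx;

impl Bcx {
    fn add(&self, a: i32, b: i32) -> i32 { a + b }
    fn sub(&self, a: i32, b: i32) -> i32 { a - b }
}

// An `ident` fragment can form a method call after `.`; an `expr` cannot.
macro_rules! arith {
    ($bcx:expr, $call:ident, $a:expr, $b:expr) => {
        $bcx.$call($a, $b)
    };
}

fn main() {
    let bcx = Bcx;
    assert_eq!(arith!(bcx, add, 2, 3), 5);
    assert_eq!(arith!(bcx, sub, 2, 3), -1);
}
```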
@ -96,7 +96,6 @@ mod assert_module_sources;
mod attributes;
mod base;
mod basic_block;
mod build;
mod builder;
mod cabi_aarch64;
mod cabi_arm;

@ -14,11 +14,9 @@ use llvm::{ValueRef, get_params};
use rustc::traits;
use abi::FnType;
use base::*;
use build::*;
use callee::Callee;
use common::*;
use consts;
use debuginfo::DebugLoc;
use declare;
use glue;
use machine;
@ -40,7 +38,7 @@ pub fn get_virtual_method<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
debug!("get_virtual_method(vtable_index={}, llvtable={:?})",
vtable_index, Value(llvtable));

Load(bcx, GEPi(bcx, llvtable, &[vtable_index + VTABLE_OFFSET]))
bcx.load(bcx.gepi(llvtable, &[vtable_index + VTABLE_OFFSET]))
}

/// Generate a shim function that allows an object type like `SomeTrait` to
@ -93,10 +91,9 @@ pub fn trans_object_shim<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>,

let dest = fcx.llretslotptr.get();
let llargs = get_params(fcx.llfn);
bcx = callee.call(bcx, DebugLoc::None,
&llargs[fcx.fn_ty.ret.is_indirect() as usize..], dest).0;
bcx = callee.call(bcx, &llargs[fcx.fn_ty.ret.is_indirect() as usize..], dest).0;

fcx.finish(&bcx, DebugLoc::None);
fcx.finish(&bcx);

llfn
}

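The vtable lookup in `get_virtual_method` is a plain array index past a fixed header: in this era of the layout the first vtable slots hold the drop glue, size, and alignment, and `VTABLE_OFFSET` skips them (treat the exact offset below as an assumption made for illustration). A safe, self-contained model:

```rust
// Model of a vtable as an array of function pointers with reserved
// header slots; the offset and contents here are illustrative only.
const VTABLE_OFFSET: usize = 3;

fn reserved() -> &'static str { "header slot" }
fn m0() -> &'static str { "method 0" }
fn m1() -> &'static str { "method 1" }

fn main() {
    let vtable: [fn() -> &'static str; 5] = [reserved, reserved, reserved, m0, m1];
    // get_virtual_method(index) == load(gep(llvtable, index + VTABLE_OFFSET))
    let method = vtable[1 + VTABLE_OFFSET];
    assert_eq!(method(), "method 1");
}
```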
@ -16,10 +16,10 @@ use rustc::mir;
use abi::{Abi, FnType, ArgType};
use adt;
use base;
use build;
use callee::{Callee, CalleeData, Fn, Intrinsic, NamedTupleConstructor, Virtual};
use common::{self, Block, BlockAndBuilder, LandingPad};
use common::{C_bool, C_str_slice, C_struct, C_u32, C_undef};
use builder::Builder;
use consts;
use debuginfo::DebugLoc;
use Disr;
@ -167,7 +167,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
if default_bb != Some(target) {
let llbb = llblock(self, target);
let llval = adt::trans_case(&bcx, ty, Disr::from(adt_variant.disr_val));
build::AddCase(switch, llval, llbb)
Builder::add_case(switch, llval, llbb)
}
}
}
@ -180,7 +180,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
for (value, target) in values.iter().zip(targets) {
let val = Const::from_constval(bcx.ccx(), value.clone(), switch_ty);
let llbb = llblock(self, *target);
build::AddCase(switch, val.llval, llbb)
Builder::add_case(switch, val.llval, llbb)
}
}

@ -204,7 +204,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
};
let llslot = match op.val {
Immediate(_) | Pair(..) => {
let llscratch = build::AllocaFcx(bcx.fcx(), ret.original_ty, "ret");
let llscratch = bcx.fcx().alloca(ret.original_ty, "ret");
self.store_operand(&bcx, llscratch, op);
llscratch
}
@ -257,8 +257,8 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
// I want to avoid touching all of trans.
let scratch = base::alloc_ty(&bcx, ty, "drop");
base::call_lifetime_start(&bcx, scratch);
build::Store(&bcx, lvalue.llval, base::get_dataptr(&bcx, scratch));
build::Store(&bcx, lvalue.llextra, base::get_meta(&bcx, scratch));
bcx.store(lvalue.llval, base::get_dataptr(&bcx, scratch));
bcx.store(lvalue.llextra, base::get_meta(&bcx, scratch));
scratch
};
if let Some(unwind) = unwind {
@ -479,8 +479,8 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
// I want to avoid touching all of trans.
let scratch = base::alloc_ty(&bcx, ty, "drop");
base::call_lifetime_start(&bcx, scratch);
build::Store(&bcx, llval, base::get_dataptr(&bcx, scratch));
build::Store(&bcx, llextra, base::get_meta(&bcx, scratch));
bcx.store(llval, base::get_dataptr(&bcx, scratch));
bcx.store(llextra, base::get_meta(&bcx, scratch));
scratch
};
if let Some(unwind) = *cleanup {
@ -702,7 +702,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
let (mut llval, by_ref) = match op.val {
Immediate(_) | Pair(..) => {
if arg.is_indirect() || arg.cast.is_some() {
let llscratch = build::AllocaFcx(bcx.fcx(), arg.original_ty, "arg");
let llscratch = bcx.fcx().alloca(arg.original_ty, "arg");
self.store_operand(bcx, llscratch, op);
(llscratch, true)
} else {

@ -478,10 +478,9 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
// environment into its components so it ends up out of bounds.
let env_ptr = if !env_ref {
use base::*;
use build::*;
use common::*;
let alloc = alloca(bcx, val_ty(llval), "__debuginfo_env_ptr");
Store(bcx, llval, alloc);
bcx.store(llval, alloc);
alloc
} else {
llval

@ -264,11 +264,10 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
OperandValue::Ref(r) => base::memcpy_ty(bcx, lldest, r, operand.ty),
OperandValue::Immediate(s) => base::store_ty(bcx, s, lldest, operand.ty),
OperandValue::Pair(a, b) => {
use build::*;
let a = base::from_immediate(bcx, a);
let b = base::from_immediate(bcx, b);
Store(bcx, a, StructGEP(bcx, lldest, 0));
Store(bcx, b, StructGEP(bcx, lldest, 1));
bcx.store(a, bcx.struct_gep(lldest, 0));
bcx.store(b, bcx.struct_gep(lldest, 1));
}
}
}

@ -19,7 +19,6 @@ use base;
use callee::Callee;
use common::{self, val_ty, C_bool, C_null, C_uint, BlockAndBuilder};
use common::{C_integral};
use debuginfo::DebugLoc;
use adt;
use machine;
use type_::Type;
@ -37,8 +36,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
pub fn trans_rvalue(&mut self,
bcx: BlockAndBuilder<'bcx, 'tcx>,
dest: LvalueRef<'tcx>,
rvalue: &mir::Rvalue<'tcx>,
debug_loc: DebugLoc)
rvalue: &mir::Rvalue<'tcx>)
-> BlockAndBuilder<'bcx, 'tcx>
{
debug!("trans_rvalue(dest.llval={:?}, rvalue={:?})",
@ -59,7 +57,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
if common::type_is_fat_ptr(bcx.tcx(), cast_ty) {
// into-coerce of a thin pointer to a fat pointer - just
// use the operand path.
let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue, debug_loc);
let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
self.store_operand(&bcx, dest.llval, temp);
return bcx;
}
@ -171,7 +169,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {

_ => {
assert!(rvalue_creates_operand(&self.mir, &bcx, rvalue));
let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue, debug_loc);
let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
self.store_operand(&bcx, dest.llval, temp);
bcx
}
@ -180,8 +178,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {

pub fn trans_rvalue_operand(&mut self,
bcx: BlockAndBuilder<'bcx, 'tcx>,
rvalue: &mir::Rvalue<'tcx>,
debug_loc: DebugLoc)
rvalue: &mir::Rvalue<'tcx>)
-> (BlockAndBuilder<'bcx, 'tcx>, OperandRef<'tcx>)
{
assert!(rvalue_creates_operand(&self.mir, &bcx, rvalue),
@ -455,14 +452,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
let llalign = C_uint(bcx.ccx(), align);
let llty_ptr = llty.ptr_to();
let box_ty = bcx.tcx().mk_box(content_ty);
let val = base::malloc_raw_dyn(
&bcx,
llty_ptr,
box_ty,
llsize,
llalign,
debug_loc
);
let val = base::malloc_raw_dyn(&bcx, llty_ptr, box_ty, llsize, llalign);
let operand = OperandRef {
val: OperandValue::Immediate(val),
ty: box_ty,
@ -526,23 +516,8 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
mir::BinOp::BitOr => bcx.or(lhs, rhs),
mir::BinOp::BitAnd => bcx.and(lhs, rhs),
mir::BinOp::BitXor => bcx.xor(lhs, rhs),
mir::BinOp::Shl => {
common::build_unchecked_lshift(
&bcx,
lhs,
rhs,
DebugLoc::None
)
}
mir::BinOp::Shr => {
common::build_unchecked_rshift(
bcx,
input_ty,
lhs,
rhs,
DebugLoc::None
)
}
mir::BinOp::Shl => common::build_unchecked_lshift(bcx, lhs, rhs),
mir::BinOp::Shr => common::build_unchecked_rshift(bcx, input_ty, lhs, rhs),
mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt |
mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => if is_nil {
C_bool(bcx.ccx(), match op {

@ -33,11 +33,10 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
if let mir::Lvalue::Local(index) = *lvalue {
match self.locals[index] {
LocalRef::Lvalue(tr_dest) => {
self.trans_rvalue(bcx, tr_dest, rvalue, debug_loc)
self.trans_rvalue(bcx, tr_dest, rvalue)
}
LocalRef::Operand(None) => {
let (bcx, operand) = self.trans_rvalue_operand(bcx, rvalue,
debug_loc);
let (bcx, operand) = self.trans_rvalue_operand(bcx, rvalue);
self.locals[index] = LocalRef::Operand(Some(operand));
bcx
}
@ -51,13 +50,13 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
} else {
// If the type is zero-sized, it's already been set here,
// but we still need to make sure we translate the operand
self.trans_rvalue_operand(bcx, rvalue, debug_loc).0
self.trans_rvalue_operand(bcx, rvalue).0
}
}
}
} else {
let tr_dest = self.trans_lvalue(&bcx, lvalue);
self.trans_rvalue(bcx, tr_dest, rvalue, debug_loc)
self.trans_rvalue(bcx, tr_dest, rvalue)
}
}
mir::StatementKind::SetDiscriminant{ref lvalue, variant_index} => {

@ -13,9 +13,8 @@
use llvm;
use llvm::ValueRef;
use base::*;
use build::*;
use common::*;
use debuginfo::DebugLoc;
use builder::Builder;
use rustc::ty::Ty;

pub fn slice_for_each<'blk, 'tcx, F>(bcx: BlockAndBuilder<'blk, 'tcx>,
@ -31,10 +30,10 @@ pub fn slice_for_each<'blk, 'tcx, F>(bcx: BlockAndBuilder<'blk, 'tcx>,

// Special-case vectors with elements of size 0 so they don't go out of bounds (#9890)
let zst = type_is_zero_size(bcx.ccx(), unit_ty);
let add = |bcx, a, b| if zst {
Add(bcx, a, b, DebugLoc::None)
let add = |bcx: &BlockAndBuilder, a, b| if zst {
bcx.add(a, b)
} else {
InBoundsGEP(bcx, a, &[b])
bcx.inbounds_gep(a, &[b])
};

let body_bcx = fcx.new_block("slice_loop_body").build();
@ -42,28 +41,27 @@ pub fn slice_for_each<'blk, 'tcx, F>(bcx: BlockAndBuilder<'blk, 'tcx>,
let header_bcx = fcx.new_block("slice_loop_header").build();

let start = if zst {
C_uint(bcx.ccx(), 0 as usize)
C_uint(bcx.ccx(), 0usize)
} else {
data_ptr
};
let end = add(&bcx, start, len);

Br(&bcx, header_bcx.llbb(), DebugLoc::None);
let current = Phi(&header_bcx, val_ty(start), &[start], &[bcx.llbb()]);
bcx.br(header_bcx.llbb());
let current = header_bcx.phi(val_ty(start), &[start], &[bcx.llbb()]);

let keep_going =
ICmp(&header_bcx, llvm::IntNE, current, end, DebugLoc::None);
CondBr(&header_bcx, keep_going, body_bcx.llbb(), next_bcx.llbb(), DebugLoc::None);
let keep_going = header_bcx.icmp(llvm::IntNE, current, end);
header_bcx.cond_br(keep_going, body_bcx.llbb(), next_bcx.llbb());

let body_bcx = f(body_bcx, if zst { data_ptr } else { current });
// FIXME(simulacrum): The code below is identical to the closure (add) above, but using the
// closure doesn't compile due to body_bcx still being borrowed when dropped.
let next = if zst {
Add(&body_bcx, current, C_uint(bcx.ccx(), 1usize), DebugLoc::None)
body_bcx.add(current, C_uint(bcx.ccx(), 1usize))
} else {
InBoundsGEP(&body_bcx, current, &[C_uint(bcx.ccx(), 1usize)])
body_bcx.inbounds_gep(current, &[C_uint(bcx.ccx(), 1usize)])
};
AddIncomingToPhi(current, next, body_bcx.llbb());
Br(&body_bcx, header_bcx.llbb(), DebugLoc::None);
Builder::add_incoming_to_phi(current, next, body_bcx.llbb());
body_bcx.br(header_bcx.llbb());
next_bcx
}

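The blocks stitched together in `slice_for_each` (header with a phi, body, increment, back-edge) amount to a pointer-bump loop, with the ZST special case counting indices because every zero-sized element shares one address. A sketch of the same two modes in ordinary Rust:

```rust
// Model of slice_for_each: pointer-bumping for sized elements,
// index-counting for zero-sized types whose data pointer never moves.
fn slice_for_each<T, F: FnMut(*const T)>(s: &[T], mut f: F) {
    if std::mem::size_of::<T>() == 0 {
        for _ in 0..s.len() {
            f(s.as_ptr()); // same address for every ZST element
        }
    } else {
        let mut cur = s.as_ptr();
        let end = unsafe { cur.add(s.len()) };
        while cur != end {
            f(cur);
            cur = unsafe { cur.add(1) };
        }
    }
}

fn main() {
    let mut sum = 0;
    slice_for_each(&[1, 2, 3], |p| sum += unsafe { *p });
    assert_eq!(sum, 6);
}
```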