rustc_trans: promote constant rvalues in functions as an optimization.

Eduard Burtescu 2015-01-29 14:03:34 +02:00
parent df3cc0c55f
commit f4473a4664
19 changed files with 880 additions and 777 deletions
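
For context (an annotation, not part of the original commit message): the change below lets a constant-valued rvalue inside a function body be emitted once as a private LLVM global, cached by (expression id, substitutions), and loaded or memcpy'd from that global instead of being rebuilt instruction-by-instruction on every call. A made-up sketch of the kind of user code this affects, assuming nothing beyond plain Rust:

    // Hypothetical user code, not part of the diff.
    fn crc4(byte: u8) -> u8 {
        // The array literal is a constant rvalue in a function body. With this
        // change, trans can promote it to a read-only global and index into
        // that, rather than building the array on the stack at runtime.
        let table = [0u8, 3, 6, 5, 12, 15, 10, 9];
        table[(byte & 7) as usize]
    }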


@ -881,6 +881,7 @@ extern {
/* Operations on global variables */
pub fn LLVMIsAGlobalVariable(GlobalVar: ValueRef) -> ValueRef;
pub fn LLVMAddGlobal(M: ModuleRef, Ty: TypeRef, Name: *const c_char)
-> ValueRef;
pub fn LLVMAddGlobalInAddressSpace(M: ModuleRef,


@ -278,14 +278,14 @@ impl<'a, 'tcx> Opt<'a, 'tcx> {
match *self {
ConstantValue(ConstantExpr(lit_expr), _) => {
let lit_ty = ty::node_id_to_type(bcx.tcx(), lit_expr.id);
let (llval, _) = consts::const_expr(ccx, &*lit_expr);
let (llval, _) = consts::const_expr(ccx, &*lit_expr, bcx.fcx.param_substs);
let lit_datum = immediate_rvalue(llval, lit_ty);
let lit_datum = unpack_datum!(bcx, lit_datum.to_appropriate_datum(bcx));
SingleResult(Result::new(bcx, lit_datum.val))
}
ConstantRange(ConstantExpr(ref l1), ConstantExpr(ref l2), _) => {
let (l1, _) = consts::const_expr(ccx, &**l1);
let (l2, _) = consts::const_expr(ccx, &**l2);
let (l1, _) = consts::const_expr(ccx, &**l1, bcx.fcx.param_substs);
let (l2, _) = consts::const_expr(ccx, &**l2, bcx.fcx.param_substs);
RangeResult(Result::new(bcx, l1), Result::new(bcx, l2))
}
Variant(disr_val, ref repr, _, _) => {
@ -832,8 +832,8 @@ fn compare_values<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
let _icx = push_ctxt("compare_values");
if ty::type_is_scalar(rhs_t) {
let rs = compare_scalar_types(cx, lhs, rhs, rhs_t, ast::BiEq, debug_loc);
return Result::new(rs.bcx, rs.val);
let cmp = compare_scalar_types(cx, lhs, rhs, rhs_t, ast::BiEq, debug_loc);
return Result::new(cx, cmp);
}
match rhs_t.sty {
@ -1163,29 +1163,16 @@ fn compile_submatch_continue<'a, 'p, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
}
RangeResult(Result { val: vbegin, .. },
Result { bcx, val: vend }) => {
let Result { bcx, val: llge } =
compare_scalar_types(bcx,
test_val,
vbegin,
t,
ast::BiGe,
debug_loc);
let Result { bcx, val: llle } =
compare_scalar_types(bcx,
test_val,
vend,
t,
ast::BiLe,
debug_loc);
Result::new(bcx, And(bcx, llge, llle, debug_loc))
let llge = compare_scalar_types(bcx, test_val, vbegin,
t, ast::BiGe, debug_loc);
let llle = compare_scalar_types(bcx, test_val, vend,
t, ast::BiLe, debug_loc);
Result::new(bcx, And(bcx, llge, llle, DebugLoc::None))
}
LowerBound(Result { bcx, val }) => {
compare_scalar_types(bcx,
test_val,
val,
t,
ast::BiGe,
debug_loc)
Result::new(bcx, compare_scalar_types(bcx, test_val,
val, t, ast::BiGe,
debug_loc))
}
}
};


@ -26,7 +26,6 @@
#![allow(non_camel_case_types)]
pub use self::ValueOrigin::*;
pub use self::scalar_type::*;
use super::CrateTranslation;
use super::ModuleTranslation;
@ -40,7 +39,6 @@ use metadata::{csearch, encoder, loader};
use middle::astencode;
use middle::cfg;
use middle::lang_items::{LangItem, ExchangeMallocFnLangItem, StartFnLangItem};
use middle::subst;
use middle::weak_lang_items;
use middle::subst::{Subst, Substs};
use middle::ty::{self, Ty, ClosureTyper};
@ -498,7 +496,7 @@ pub fn get_res_dtor<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
did: ast::DefId,
t: Ty<'tcx>,
parent_id: ast::DefId,
substs: &subst::Substs<'tcx>)
substs: &Substs<'tcx>)
-> ValueRef {
let _icx = push_ctxt("trans_res_dtor");
let did = inline::maybe_instantiate_inline(ccx, did);
@ -507,9 +505,9 @@ pub fn get_res_dtor<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
assert_eq!(did.krate, ast::LOCAL_CRATE);
// Since we're in trans we don't care for any region parameters
let substs = subst::Substs::erased(substs.types.clone());
let substs = ccx.tcx().mk_substs(Substs::erased(substs.types.clone()));
let (val, _, _) = monomorphize::monomorphic_fn(ccx, did, &substs, None);
let (val, _, _) = monomorphize::monomorphic_fn(ccx, did, substs, None);
val
} else if did.krate == ast::LOCAL_CRATE {
@ -532,137 +530,100 @@ pub fn get_res_dtor<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
}
}
// Used only for creating scalar comparison glue.
#[derive(Copy)]
pub enum scalar_type { nil_type, signed_int, unsigned_int, floating_point, }
pub fn compare_scalar_types<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
lhs: ValueRef,
rhs: ValueRef,
t: Ty<'tcx>,
op: ast::BinOp_,
debug_loc: DebugLoc)
-> Result<'blk, 'tcx> {
let f = |a| Result::new(cx, compare_scalar_values(cx, lhs, rhs, a, op, debug_loc));
match t.sty {
ty::ty_tup(ref tys) if tys.is_empty() => f(nil_type),
ty::ty_bool | ty::ty_uint(_) | ty::ty_char => f(unsigned_int),
ty::ty_ptr(mt) if common::type_is_sized(cx.tcx(), mt.ty) => f(unsigned_int),
ty::ty_int(_) => f(signed_int),
ty::ty_float(_) => f(floating_point),
// Should never get here, because t is scalar.
_ => cx.sess().bug("non-scalar type passed to compare_scalar_types")
}
}
// A helper function to do the actual comparison of scalar values.
pub fn compare_scalar_values<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
lhs: ValueRef,
rhs: ValueRef,
nt: scalar_type,
op: ast::BinOp_,
debug_loc: DebugLoc)
-> ValueRef {
let _icx = push_ctxt("compare_scalar_values");
fn die(cx: Block) -> ! {
cx.sess().bug("compare_scalar_values: must be a comparison operator");
}
match nt {
nil_type => {
// We don't need to do actual comparisons for nil.
// () == () holds but () < () does not.
pub fn bin_op_to_icmp_predicate(ccx: &CrateContext, op: ast::BinOp_, signed: bool)
-> llvm::IntPredicate {
match op {
ast::BiEq | ast::BiLe | ast::BiGe => return C_bool(cx.ccx(), true),
ast::BiNe | ast::BiLt | ast::BiGt => return C_bool(cx.ccx(), false),
// refinements would be nice
_ => die(cx)
ast::BiEq => llvm::IntEQ,
ast::BiNe => llvm::IntNE,
ast::BiLt => if signed { llvm::IntSLT } else { llvm::IntULT },
ast::BiLe => if signed { llvm::IntSLE } else { llvm::IntULE },
ast::BiGt => if signed { llvm::IntSGT } else { llvm::IntUGT },
ast::BiGe => if signed { llvm::IntSGE } else { llvm::IntUGE },
op => {
ccx.sess().bug(&format!("comparison_op_to_icmp_predicate: expected \
comparison operator, found {:?}", op)[]);
}
}
floating_point => {
let cmp = match op {
}
pub fn bin_op_to_fcmp_predicate(ccx: &CrateContext, op: ast::BinOp_)
-> llvm::RealPredicate {
match op {
ast::BiEq => llvm::RealOEQ,
ast::BiNe => llvm::RealUNE,
ast::BiLt => llvm::RealOLT,
ast::BiLe => llvm::RealOLE,
ast::BiGt => llvm::RealOGT,
ast::BiGe => llvm::RealOGE,
_ => die(cx)
};
return FCmp(cx, cmp, lhs, rhs, debug_loc);
}
signed_int => {
let cmp = match op {
ast::BiEq => llvm::IntEQ,
ast::BiNe => llvm::IntNE,
ast::BiLt => llvm::IntSLT,
ast::BiLe => llvm::IntSLE,
ast::BiGt => llvm::IntSGT,
ast::BiGe => llvm::IntSGE,
_ => die(cx)
};
return ICmp(cx, cmp, lhs, rhs, debug_loc);
}
unsigned_int => {
let cmp = match op {
ast::BiEq => llvm::IntEQ,
ast::BiNe => llvm::IntNE,
ast::BiLt => llvm::IntULT,
ast::BiLe => llvm::IntULE,
ast::BiGt => llvm::IntUGT,
ast::BiGe => llvm::IntUGE,
_ => die(cx)
};
return ICmp(cx, cmp, lhs, rhs, debug_loc);
op => {
ccx.sess().bug(&format!("comparison_op_to_fcmp_predicate: expected \
comparison operator, found {:?}", op)[]);
}
}
}
pub fn compare_simd_types<'blk, 'tcx>(
cx: Block<'blk, 'tcx>,
pub fn compare_scalar_types<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
lhs: ValueRef,
rhs: ValueRef,
t: Ty<'tcx>,
size: uint,
op: ast::BinOp_,
debug_loc: DebugLoc)
-> ValueRef {
let cmp = match t.sty {
match t.sty {
ty::ty_tup(ref tys) if tys.is_empty() => {
// We don't need to do actual comparisons for nil.
// () == () holds but () < () does not.
match op {
ast::BiEq | ast::BiLe | ast::BiGe => return C_bool(bcx.ccx(), true),
ast::BiNe | ast::BiLt | ast::BiGt => return C_bool(bcx.ccx(), false),
// refinements would be nice
_ => bcx.sess().bug("compare_scalar_types: must be a comparison operator")
}
}
ty::ty_bool | ty::ty_uint(_) | ty::ty_char => {
ICmp(bcx, bin_op_to_icmp_predicate(bcx.ccx(), op, false), lhs, rhs, debug_loc)
}
ty::ty_ptr(mt) if common::type_is_sized(bcx.tcx(), mt.ty) => {
ICmp(bcx, bin_op_to_icmp_predicate(bcx.ccx(), op, false), lhs, rhs, debug_loc)
}
ty::ty_int(_) => {
ICmp(bcx, bin_op_to_icmp_predicate(bcx.ccx(), op, true), lhs, rhs, debug_loc)
}
ty::ty_float(_) => {
FCmp(bcx, bin_op_to_fcmp_predicate(bcx.ccx(), op), lhs, rhs, debug_loc)
}
// Should never get here, because t is scalar.
_ => bcx.sess().bug("non-scalar type passed to compare_scalar_types")
}
}
pub fn compare_simd_types<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
lhs: ValueRef,
rhs: ValueRef,
t: Ty<'tcx>,
op: ast::BinOp_,
debug_loc: DebugLoc)
-> ValueRef {
let signed = match t.sty {
ty::ty_float(_) => {
// The comparison operators for floating point vectors are challenging.
// LLVM outputs a `< size x i1 >`, but if we perform a sign extension
// then bitcast to a floating point vector, the result will be `-NaN`
// for each truth value. Because of this they are unsupported.
cx.sess().bug("compare_simd_types: comparison operators \
bcx.sess().bug("compare_simd_types: comparison operators \
not supported for floating point SIMD types")
},
ty::ty_uint(_) => match op {
ast::BiEq => llvm::IntEQ,
ast::BiNe => llvm::IntNE,
ast::BiLt => llvm::IntULT,
ast::BiLe => llvm::IntULE,
ast::BiGt => llvm::IntUGT,
ast::BiGe => llvm::IntUGE,
_ => cx.sess().bug("compare_simd_types: must be a comparison operator"),
},
ty::ty_int(_) => match op {
ast::BiEq => llvm::IntEQ,
ast::BiNe => llvm::IntNE,
ast::BiLt => llvm::IntSLT,
ast::BiLe => llvm::IntSLE,
ast::BiGt => llvm::IntSGT,
ast::BiGe => llvm::IntSGE,
_ => cx.sess().bug("compare_simd_types: must be a comparison operator"),
},
_ => cx.sess().bug("compare_simd_types: invalid SIMD type"),
ty::ty_uint(_) => false,
ty::ty_int(_) => true,
_ => bcx.sess().bug("compare_simd_types: invalid SIMD type"),
};
let return_ty = Type::vector(&type_of(cx.ccx(), t), size as u64);
let cmp = bin_op_to_icmp_predicate(bcx.ccx(), op, signed);
// LLVM outputs an `< size x i1 >`, so we need to perform a sign extension
// to get the correctly sized type. This will compile to a single instruction
// once the IR is converted to assembly if the SIMD instruction is supported
// by the target architecture.
SExt(cx, ICmp(cx, cmp, lhs, rhs, debug_loc), return_ty)
SExt(bcx, ICmp(bcx, cmp, lhs, rhs, debug_loc), val_ty(lhs))
}
// Iterates through the elements of a structural type.
@ -679,7 +640,7 @@ pub fn iter_structural_ty<'blk, 'tcx, F>(cx: Block<'blk, 'tcx>,
repr: &adt::Repr<'tcx>,
av: ValueRef,
variant: &ty::VariantInfo<'tcx>,
substs: &subst::Substs<'tcx>,
substs: &Substs<'tcx>,
f: &mut F)
-> Block<'blk, 'tcx> where
F: FnMut(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>,
@ -1034,23 +995,41 @@ pub fn load_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
ptr: ValueRef, t: Ty<'tcx>) -> ValueRef {
if type_is_zero_size(cx.ccx(), t) {
C_undef(type_of::type_of(cx.ccx(), t))
} else if ty::type_is_bool(t) {
Trunc(cx, LoadRangeAssert(cx, ptr, 0, 2, llvm::False), Type::i1(cx.ccx()))
} else if type_is_immediate(cx.ccx(), t) && type_of::type_of(cx.ccx(), t).is_aggregate() {
// We want to pass small aggregates as immediate values, but using an aggregate LLVM type
// for this leads to bad optimizations, so its arg type is an appropriately sized integer
// and we have to convert it
Load(cx, BitCast(cx, ptr, type_of::arg_type_of(cx.ccx(), t).ptr_to()))
} else if ty::type_is_region_ptr(t) || ty::type_is_unique(t) {
LoadNonNull(cx, ptr)
} else {
unsafe {
let global = llvm::LLVMIsAGlobalVariable(ptr);
if !global.is_null() && llvm::LLVMIsGlobalConstant(global) == llvm::True {
let val = llvm::LLVMGetInitializer(global);
if !val.is_null() {
// This could go into its own function, for DRY.
// (something like "pre-store packing/post-load unpacking")
if ty::type_is_bool(t) {
return Trunc(cx, val, Type::i1(cx.ccx()));
} else {
return val;
}
}
}
}
if ty::type_is_bool(t) {
Trunc(cx, LoadRangeAssert(cx, ptr, 0, 2, llvm::False), Type::i1(cx.ccx()))
} else if ty::type_is_char(t) {
// a char is a Unicode codepoint, and so takes values from 0
// to 0x10FFFF inclusive only.
LoadRangeAssert(cx, ptr, 0, 0x10FFFF + 1, llvm::False)
} else if (ty::type_is_region_ptr(t) || ty::type_is_unique(t))
&& !common::type_is_fat_ptr(cx.tcx(), t) {
LoadNonNull(cx, ptr)
} else {
Load(cx, ptr)
}
}
}
/// Helper for storing values in memory. Does the necessary conversion if the in-memory type
/// differs from the type used for SSA values.
@ -1064,7 +1043,7 @@ pub fn store_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, v: ValueRef, dst: ValueRef, t
Store(cx, v, BitCast(cx, dst, type_of::arg_type_of(cx.ccx(), t).ptr_to()));
} else {
Store(cx, v, dst);
};
}
}
pub fn init_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, local: &ast::Local)
@ -1162,7 +1141,7 @@ pub fn memcpy_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
let llalign = type_of::align_of(ccx, t);
call_memcpy(bcx, dst, src, llsz, llalign as u32);
} else {
store_ty(bcx, Load(bcx, src), dst, t);
store_ty(bcx, load_ty(bcx, src, t), dst, t);
}
}
@ -1425,7 +1404,7 @@ pub fn new_fn_ctxt<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>,
id: ast::NodeId,
has_env: bool,
output_type: ty::FnOutput<'tcx>,
param_substs: &'a Substs<'tcx>,
param_substs: &'tcx Substs<'tcx>,
sp: Option<Span>,
block_arena: &'a TypedArena<common::BlockS<'a, 'tcx>>)
-> FunctionContext<'a, 'tcx> {
@ -1793,7 +1772,7 @@ pub fn trans_closure<'a, 'b, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
decl: &ast::FnDecl,
body: &ast::Block,
llfndecl: ValueRef,
param_substs: &Substs<'tcx>,
param_substs: &'tcx Substs<'tcx>,
fn_ast_id: ast::NodeId,
_attributes: &[ast::Attribute],
output_type: ty::FnOutput<'tcx>,
@ -1942,7 +1921,7 @@ pub fn trans_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
decl: &ast::FnDecl,
body: &ast::Block,
llfndecl: ValueRef,
param_substs: &Substs<'tcx>,
param_substs: &'tcx Substs<'tcx>,
id: ast::NodeId,
attrs: &[ast::Attribute]) {
let _s = StatRecorder::new(ccx, ccx.tcx().map.path_to_string(id).to_string());
@ -1968,7 +1947,7 @@ pub fn trans_enum_variant<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
variant: &ast::Variant,
_args: &[ast::VariantArg],
disr: ty::Disr,
param_substs: &Substs<'tcx>,
param_substs: &'tcx Substs<'tcx>,
llfndecl: ValueRef) {
let _icx = push_ctxt("trans_enum_variant");
@ -2049,7 +2028,7 @@ pub fn trans_named_tuple_constructor<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
pub fn trans_tuple_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
_fields: &[ast::StructField],
ctor_id: ast::NodeId,
param_substs: &Substs<'tcx>,
param_substs: &'tcx Substs<'tcx>,
llfndecl: ValueRef) {
let _icx = push_ctxt("trans_tuple_struct");
@ -2064,7 +2043,7 @@ pub fn trans_tuple_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
fn trans_enum_variant_or_tuple_like_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
ctor_id: ast::NodeId,
disr: ty::Disr,
param_substs: &Substs<'tcx>,
param_substs: &'tcx Substs<'tcx>,
llfndecl: ValueRef) {
let ctor_ty = ty::node_id_to_type(ccx.tcx(), ctor_id);
let ctor_ty = monomorphize::apply_param_substs(ccx.tcx(), param_substs, &ctor_ty);
@ -2302,13 +2281,14 @@ pub fn trans_item(ccx: &CrateContext, item: &ast::Item) {
// translated everywhere it's needed.
for (ref ccx, is_origin) in ccx.maybe_iter(!from_external && trans_everywhere) {
let llfn = get_item_val(ccx, item.id);
let empty_substs = ccx.tcx().mk_substs(Substs::trans_empty());
if abi != Rust {
foreign::trans_rust_fn_with_foreign_abi(ccx,
&**decl,
&**body,
&item.attrs[],
llfn,
&Substs::trans_empty(),
empty_substs,
item.id,
None);
} else {
@ -2316,7 +2296,7 @@ pub fn trans_item(ccx: &CrateContext, item: &ast::Item) {
&**decl,
&**body,
llfn,
&Substs::trans_empty(),
empty_substs,
item.id,
&item.attrs[]);
}
@ -2792,7 +2772,8 @@ pub fn get_item_val(ccx: &CrateContext, id: ast::NodeId) -> ValueRef {
// We need the translated value here, because for enums the
// LLVM type is not fully determined by the Rust type.
let (v, ty) = consts::const_expr(ccx, &**expr);
let empty_substs = ccx.tcx().mk_substs(Substs::trans_empty());
let (v, ty) = consts::const_expr(ccx, &**expr, empty_substs);
ccx.static_values().borrow_mut().insert(id, v);
unsafe {
// boolean SSA values are i1, but they have to be stored in i8 slots,
@ -2820,12 +2801,6 @@ pub fn get_item_val(ccx: &CrateContext, id: ast::NodeId) -> ValueRef {
}
}
ast::ItemConst(_, ref expr) => {
let (v, _) = consts::const_expr(ccx, &**expr);
ccx.const_values().borrow_mut().insert(id, v);
v
}
ast::ItemFn(_, _, abi, _, _) => {
let sym = sym();
let llfn = if abi == Rust {


@ -226,7 +226,7 @@ fn trans<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, expr: &ast::Expr)
pub fn trans_fn_ref<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
def_id: ast::DefId,
node: ExprOrMethodCall,
param_substs: &subst::Substs<'tcx>)
param_substs: &'tcx subst::Substs<'tcx>)
-> Datum<'tcx, Rvalue> {
let _icx = push_ctxt("trans_fn_ref");
@ -326,7 +326,7 @@ pub fn trans_fn_pointer_shim<'a, 'tcx>(
&function_name[]);
//
let empty_substs = Substs::trans_empty();
let empty_substs = tcx.mk_substs(Substs::trans_empty());
let (block_arena, fcx): (TypedArena<_>, FunctionContext);
block_arena = TypedArena::new();
fcx = new_fn_ctxt(ccx,
@ -334,7 +334,7 @@ pub fn trans_fn_pointer_shim<'a, 'tcx>(
ast::DUMMY_NODE_ID,
false,
sig.output,
&empty_substs,
empty_substs,
None,
&block_arena);
let mut bcx = init_function(&fcx, false, sig.output);
@ -386,7 +386,7 @@ pub fn trans_fn_ref_with_substs<'a, 'tcx>(
ccx: &CrateContext<'a, 'tcx>,
def_id: ast::DefId,
node: ExprOrMethodCall,
param_substs: &subst::Substs<'tcx>,
param_substs: &'tcx subst::Substs<'tcx>,
substs: subst::Substs<'tcx>)
-> Datum<'tcx, Rvalue>
{
@ -416,7 +416,9 @@ pub fn trans_fn_ref_with_substs<'a, 'tcx>(
// We need to modify the def_id and our substs in order to monomorphize
// the function.
let (is_default, def_id, substs) = match ty::provided_source(tcx, def_id) {
None => (false, def_id, substs),
None => {
(false, def_id, tcx.mk_substs(substs))
}
Some(source_id) => {
// There are two relevant substitutions when compiling
// default methods. First, there is the substitution for
@ -444,7 +446,7 @@ pub fn trans_fn_ref_with_substs<'a, 'tcx>(
.erase_regions();
// And compose them
let new_substs = first_subst.subst(tcx, &substs);
let new_substs = tcx.mk_substs(first_subst.subst(tcx, &substs));
debug!("trans_fn_with_vtables - default method: \
substs = {}, trait_subst = {}, \
@ -463,7 +465,7 @@ pub fn trans_fn_ref_with_substs<'a, 'tcx>(
};
// If this is a closure, redirect to it.
match closure::get_or_create_declaration_if_closure(ccx, def_id, &substs) {
match closure::get_or_create_declaration_if_closure(ccx, def_id, substs) {
None => {}
Some(llfn) => return llfn,
}
@ -505,7 +507,7 @@ pub fn trans_fn_ref_with_substs<'a, 'tcx>(
};
let (val, fn_ty, must_cast) =
monomorphize::monomorphic_fn(ccx, def_id, &substs, opt_ref_id);
monomorphize::monomorphic_fn(ccx, def_id, substs, opt_ref_id);
if must_cast && node != ExprId(0) {
// Monotype of the REFERENCE to the function (type params
// are subst'd)


@ -9,6 +9,7 @@
// except according to those terms.
use back::link::mangle_internal_name_by_path_and_seq;
use llvm::ValueRef;
use middle::mem_categorization::Typer;
use trans::adt;
use trans::base::*;
@ -137,7 +138,7 @@ pub fn get_or_create_declaration_if_closure<'a, 'tcx>(ccx: &CrateContext<'a, 'tc
// duplicate declarations
let function_type = erase_regions(ccx.tcx(), &function_type);
let params = match function_type.sty {
ty::ty_closure(_, _, ref substs) => substs.types.clone(),
ty::ty_closure(_, _, substs) => &substs.types,
_ => unreachable!()
};
let mono_id = MonoId {
@ -171,41 +172,51 @@ pub fn get_or_create_declaration_if_closure<'a, 'tcx>(ccx: &CrateContext<'a, 'tc
Some(Datum::new(llfn, function_type, Rvalue::new(ByValue)))
}
pub fn trans_closure_expr<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
pub enum Dest<'a, 'tcx: 'a> {
SaveIn(Block<'a, 'tcx>, ValueRef),
Ignore(&'a CrateContext<'a, 'tcx>)
}
pub fn trans_closure_expr<'a, 'tcx>(dest: Dest<'a, 'tcx>,
decl: &ast::FnDecl,
body: &ast::Block,
id: ast::NodeId,
dest: expr::Dest)
-> Block<'blk, 'tcx>
param_substs: &'tcx Substs<'tcx>)
-> Option<Block<'a, 'tcx>>
{
let ccx = match dest {
Dest::SaveIn(bcx, _) => bcx.ccx(),
Dest::Ignore(ccx) => ccx
};
let tcx = ccx.tcx();
let _icx = push_ctxt("closure::trans_closure");
debug!("trans_closure()");
let closure_id = ast_util::local_def(id);
let llfn = get_or_create_declaration_if_closure(
bcx.ccx(),
ccx,
closure_id,
bcx.fcx.param_substs).unwrap();
param_substs).unwrap();
// Get the type of this closure. Use the current `param_substs` as
// the closure substitutions. This makes sense because the closure
// takes the same set of type arguments as the enclosing fn, and
// this function (`trans_closure`) is invoked at the point
// of the closure expression.
let typer = NormalizingClosureTyper::new(bcx.tcx());
let function_type = typer.closure_type(closure_id, bcx.fcx.param_substs);
let typer = NormalizingClosureTyper::new(tcx);
let function_type = typer.closure_type(closure_id, param_substs);
let freevars: Vec<ty::Freevar> =
ty::with_freevars(bcx.tcx(), id, |fv| fv.iter().map(|&fv| fv).collect());
ty::with_freevars(tcx, id, |fv| fv.iter().map(|&fv| fv).collect());
let sig = ty::erase_late_bound_regions(bcx.tcx(), &function_type.sig);
let sig = ty::erase_late_bound_regions(tcx, &function_type.sig);
trans_closure(bcx.ccx(),
trans_closure(ccx,
decl,
body,
llfn.val,
bcx.fcx.param_substs,
param_substs,
id,
&[],
sig.output,
@ -215,15 +226,15 @@ pub fn trans_closure_expr<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
// Don't hoist this to the top of the function. It's perfectly legitimate
// to have a zero-size closure (in which case dest will be `Ignore`) and
// we must still generate the closure body.
let dest_addr = match dest {
expr::SaveIn(p) => p,
expr::Ignore => {
let (mut bcx, dest_addr) = match dest {
Dest::SaveIn(bcx, p) => (bcx, p),
Dest::Ignore(_) => {
debug!("trans_closure() ignoring result");
return bcx
return None;
}
};
let repr = adt::represent_type(bcx.ccx(), node_id_type(bcx, id));
let repr = adt::represent_type(ccx, node_id_type(bcx, id));
// Create the closure.
for (i, freevar) in freevars.iter().enumerate() {
@ -235,8 +246,7 @@ pub fn trans_closure_expr<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
i);
let upvar_id = ty::UpvarId { var_id: freevar.def.local_node_id(),
closure_expr_id: id };
let upvar_capture = bcx.tcx().upvar_capture(upvar_id).unwrap();
match upvar_capture {
match tcx.upvar_capture(upvar_id).unwrap() {
ty::UpvarCapture::ByValue => {
bcx = datum.store_to(bcx, upvar_slot_dest);
}
@ -247,6 +257,6 @@ pub fn trans_closure_expr<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
}
adt::trans_set_discr(bcx, &*repr, dest_addr, 0);
bcx
Some(bcx)
}


@ -410,7 +410,7 @@ pub struct FunctionContext<'a, 'tcx: 'a> {
// If this function is being monomorphized, this contains the type
// substitutions used.
pub param_substs: &'a Substs<'tcx>,
pub param_substs: &'tcx Substs<'tcx>,
// The source span and nesting context where this function comes from, for
// error reporting and symbol generation.
@ -858,25 +858,6 @@ pub fn C_str_slice(cx: &CrateContext, s: InternedString) -> ValueRef {
C_named_struct(cx.tn().find_type("str_slice").unwrap(), &[cs, C_uint(cx, len)])
}
pub fn C_binary_slice(cx: &CrateContext, data: &[u8]) -> ValueRef {
unsafe {
let len = data.len();
let lldata = C_bytes(cx, data);
let gsym = token::gensym("binary");
let name = format!("binary{}", gsym.usize());
let name = CString::from_vec(name.into_bytes());
let g = llvm::LLVMAddGlobal(cx.llmod(), val_ty(lldata).to_ref(),
name.as_ptr());
llvm::LLVMSetInitializer(g, lldata);
llvm::LLVMSetGlobalConstant(g, True);
llvm::SetLinkage(g, llvm::InternalLinkage);
let cs = consts::ptrcast(g, Type::i8p(cx));
C_struct(cx, &[cs, C_uint(cx, len)], false)
}
}
pub fn C_struct(cx: &CrateContext, elts: &[ValueRef], packed: bool) -> ValueRef {
C_struct_in_context(cx.llcx(), elts, packed)
}
@ -901,6 +882,12 @@ pub fn C_array(ty: Type, elts: &[ValueRef]) -> ValueRef {
}
}
pub fn C_vector(elts: &[ValueRef]) -> ValueRef {
unsafe {
return llvm::LLVMConstVector(elts.as_ptr(), elts.len() as c_uint);
}
}
pub fn C_bytes(cx: &CrateContext, bytes: &[u8]) -> ValueRef {
C_bytes_in_context(cx.llcx(), bytes)
}


@ -11,13 +11,13 @@
use back::abi;
use llvm;
use llvm::{ConstFCmp, ConstICmp, SetLinkage, PrivateLinkage, ValueRef, Bool, True, False};
use llvm::{IntEQ, IntNE, IntUGT, IntUGE, IntULT, IntULE, IntSGT, IntSGE, IntSLT, IntSLE,
RealOEQ, RealOGT, RealOGE, RealOLT, RealOLE, RealONE};
use middle::{const_eval, def};
use trans::{adt, consts, debuginfo, expr, inline, machine};
use llvm::{ConstFCmp, ConstICmp, SetLinkage, SetUnnamedAddr};
use llvm::{PrivateLinkage, ValueRef, Bool, True};
use middle::{check_const, const_eval, def};
use trans::{adt, closure, debuginfo, expr, inline, machine};
use trans::base::{self, push_ctxt};
use trans::common::*;
use trans::monomorphize;
use trans::type_::Type;
use trans::type_of;
use middle::subst::Substs;
@ -74,7 +74,16 @@ pub fn const_lit(cx: &CrateContext, e: &ast::Expr, lit: &ast::Lit)
}
ast::LitBool(b) => C_bool(cx, b),
ast::LitStr(ref s, _) => C_str_slice(cx, (*s).clone()),
ast::LitBinary(ref data) => C_binary_slice(cx, &data[]),
ast::LitBinary(ref data) => {
let g = addr_of(cx, C_bytes(cx, &data[]), "binary", e.id);
let base = ptrcast(g, Type::i8p(cx));
let prev_const = cx.const_unsized().borrow_mut()
.insert(base, g);
assert!(prev_const.is_none() || prev_const == Some(g));
assert_eq!(abi::FAT_PTR_ADDR, 0);
assert_eq!(abi::FAT_PTR_EXTRA, 1);
C_struct(cx, &[base, C_uint(cx, data.len())], false)
}
}
}
@ -84,36 +93,41 @@ pub fn ptrcast(val: ValueRef, ty: Type) -> ValueRef {
}
}
fn const_vec(cx: &CrateContext, e: &ast::Expr,
es: &[P<ast::Expr>]) -> (ValueRef, Type) {
let vec_ty = ty::expr_ty(cx.tcx(), e);
let unit_ty = ty::sequence_element_type(cx.tcx(), vec_ty);
let llunitty = type_of::type_of(cx, unit_ty);
let vs = es.iter().map(|e| const_expr(cx, &**e).0)
.collect::<Vec<_>>();
// If the vector contains enums, an LLVM array won't work.
let v = if vs.iter().any(|vi| val_ty(*vi) != llunitty) {
C_struct(cx, &vs[], false)
} else {
C_array(llunitty, &vs[])
};
(v, llunitty)
}
pub fn const_addr_of(cx: &CrateContext, cv: ValueRef, mutbl: ast::Mutability) -> ValueRef {
fn addr_of_mut(ccx: &CrateContext,
cv: ValueRef,
kind: &str,
id: ast::NodeId)
-> ValueRef {
unsafe {
let gv = llvm::LLVMAddGlobal(cx.llmod(), val_ty(cv).to_ref(),
"const\0".as_ptr() as *const _);
let name = format!("{}{}\0", kind, id);
let gv = llvm::LLVMAddGlobal(ccx.llmod(), val_ty(cv).to_ref(),
name.as_ptr() as *const _);
llvm::LLVMSetInitializer(gv, cv);
llvm::LLVMSetGlobalConstant(gv,
if mutbl == ast::MutImmutable {True} else {False});
SetLinkage(gv, PrivateLinkage);
SetUnnamedAddr(gv, true);
gv
}
}
pub fn addr_of(ccx: &CrateContext,
cv: ValueRef,
kind: &str,
id: ast::NodeId)
-> ValueRef {
match ccx.const_globals().borrow().get(&cv) {
Some(&gv) => return gv,
None => {}
}
let gv = addr_of_mut(ccx, cv, kind, id);
unsafe {
llvm::LLVMSetGlobalConstant(gv, True);
}
ccx.const_globals().borrow_mut().insert(cv, gv);
gv
}
fn const_deref_ptr(cx: &CrateContext, v: ValueRef) -> ValueRef {
let v = match cx.const_globals().borrow().get(&(v as int)) {
let v = match cx.const_unsized().borrow().get(&v) {
Some(&v) => v,
None => v
};
@ -122,19 +136,12 @@ fn const_deref_ptr(cx: &CrateContext, v: ValueRef) -> ValueRef {
}
}
fn const_deref_newtype<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, v: ValueRef, t: Ty<'tcx>)
-> ValueRef {
let repr = adt::represent_type(cx, t);
adt::const_get_field(cx, &*repr, v, 0, 0)
}
fn const_deref<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, v: ValueRef,
t: Ty<'tcx>, explicit: bool)
fn const_deref<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
v: ValueRef,
ty: Ty<'tcx>)
-> (ValueRef, Ty<'tcx>) {
match ty::deref(t, explicit) {
Some(ref mt) => {
match t.sty {
ty::ty_ptr(mt) | ty::ty_rptr(_, mt) => {
match ty::deref(ty, true) {
Some(mt) => {
if type_is_sized(cx.tcx(), mt.ty) {
(const_deref_ptr(cx, v), mt.ty)
} else {
@ -143,87 +150,138 @@ fn const_deref<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, v: ValueRef,
(v, ty::mk_open(cx.tcx(), mt.ty))
}
}
ty::ty_enum(..) | ty::ty_struct(..) => {
assert!(mt.mutbl != ast::MutMutable);
(const_deref_newtype(cx, v, t), mt.ty)
}
_ => {
cx.sess().bug(&format!("unexpected dereferenceable type {}",
ty_to_string(cx.tcx(), t))[])
}
}
}
None => {
cx.sess().bug(&format!("cannot dereference const of type {}",
ty_to_string(cx.tcx(), t))[])
cx.sess().bug(&format!("unexpected dereferenceable type {}",
ty_to_string(cx.tcx(), ty))[])
}
}
}
pub fn get_const_val(cx: &CrateContext,
mut def_id: ast::DefId) -> ValueRef {
let contains_key = cx.const_values().borrow().contains_key(&def_id.node);
if !ast_util::is_local(def_id) || !contains_key {
if !ast_util::is_local(def_id) {
def_id = inline::maybe_instantiate_inline(cx, def_id);
pub fn get_const_expr<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
def_id: ast::DefId,
ref_expr: &ast::Expr)
-> &'tcx ast::Expr {
let def_id = inline::maybe_instantiate_inline(ccx, def_id);
if def_id.krate != ast::LOCAL_CRATE {
ccx.sess().span_bug(ref_expr.span,
"cross crate constant could not be inlined");
}
if let ast::ItemConst(..) = cx.tcx().map.expect_item(def_id.node).node {
base::get_item_val(cx, def_id.node);
let item = ccx.tcx().map.expect_item(def_id.node);
if let ast::ItemConst(_, ref expr) = item.node {
&**expr
} else {
ccx.sess().span_bug(ref_expr.span,
&format!("get_const_val given non-constant item {}",
item.repr(ccx.tcx()))[]);
}
}
cx.const_values().borrow()[def_id.node].clone()
fn get_const_val(ccx: &CrateContext,
def_id: ast::DefId,
ref_expr: &ast::Expr) -> ValueRef {
let expr = get_const_expr(ccx, def_id, ref_expr);
let empty_substs = ccx.tcx().mk_substs(Substs::trans_empty());
get_const_expr_as_global(ccx, expr, check_const::PURE_CONST, empty_substs)
}
pub fn const_expr<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, e: &ast::Expr)
pub fn get_const_expr_as_global<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
expr: &ast::Expr,
qualif: check_const::ConstQualif,
param_substs: &'tcx Substs<'tcx>)
-> ValueRef {
// Special-case constants to cache a common global for all uses.
match expr.node {
ast::ExprPath(_) => {
let def = ccx.tcx().def_map.borrow()[expr.id];
match def {
def::DefConst(def_id) => {
if !ccx.tcx().adjustments.borrow().contains_key(&expr.id) {
return get_const_val(ccx, def_id, expr);
}
}
_ => {}
}
}
_ => {}
}
let key = (expr.id, param_substs);
match ccx.const_values().borrow().get(&key) {
Some(&val) => return val,
None => {}
}
let val = if qualif.intersects(check_const::NON_STATIC_BORROWS) {
// Avoid autorefs as they would create global instead of stack
// references, even when only the latter are correct.
let ty = monomorphize::apply_param_substs(ccx.tcx(), param_substs,
&ty::expr_ty(ccx.tcx(), expr));
const_expr_unadjusted(ccx, expr, ty, param_substs)
} else {
const_expr(ccx, expr, param_substs).0
};
// boolean SSA values are i1, but they have to be stored in i8 slots,
// otherwise some LLVM optimization passes don't work as expected
let val = unsafe {
if llvm::LLVMTypeOf(val) == Type::i1(ccx).to_ref() {
llvm::LLVMConstZExt(val, Type::i8(ccx).to_ref())
} else {
val
}
};
let lvalue = addr_of(ccx, val, "const", expr.id);
ccx.const_values().borrow_mut().insert(key, lvalue);
lvalue
}
pub fn const_expr<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
e: &ast::Expr,
param_substs: &'tcx Substs<'tcx>)
-> (ValueRef, Ty<'tcx>) {
let llconst = const_expr_unadjusted(cx, e);
let ety = monomorphize::apply_param_substs(cx.tcx(), param_substs,
&ty::expr_ty(cx.tcx(), e));
let llconst = const_expr_unadjusted(cx, e, ety, param_substs);
let mut llconst = llconst;
let ety = ty::expr_ty(cx.tcx(), e);
let mut ety_adjusted = ty::expr_ty_adjusted(cx.tcx(), e);
let mut ety_adjusted = monomorphize::apply_param_substs(cx.tcx(), param_substs,
&ty::expr_ty_adjusted(cx.tcx(), e));
let opt_adj = cx.tcx().adjustments.borrow().get(&e.id).cloned();
match opt_adj {
None => { }
Some(adj) => {
match adj {
ty::AdjustReifyFnPointer(_def_id) => {
Some(ty::AdjustReifyFnPointer(_def_id)) => {
// FIXME(#19925) once fn item types are
// zero-sized, we'll need to do something here
}
ty::AdjustDerefRef(ref adj) => {
Some(ty::AdjustDerefRef(adj)) => {
let mut ty = ety;
// Save the last autoderef in case we can avoid it.
if adj.autoderefs > 0 {
for _ in 0..adj.autoderefs-1 {
let (dv, dt) = const_deref(cx, llconst, ty, false);
let (dv, dt) = const_deref(cx, llconst, ty);
llconst = dv;
ty = dt;
}
}
match adj.autoref {
let second_autoref = match adj.autoref {
None => {
let (dv, dt) = const_deref(cx, llconst, ty, false);
let (dv, dt) = const_deref(cx, llconst, ty);
llconst = dv;
// If we derefed a fat pointer then we will have an
// open type here. So we need to update the type with
// the one returned from const_deref.
ety_adjusted = dt;
None
}
Some(ref autoref) => {
match *autoref {
ty::AutoUnsafe(_, None) |
ty::AutoPtr(ty::ReStatic, _, None) => {
Some(ty::AutoUnsafe(_, opt_autoref)) |
Some(ty::AutoPtr(_, _, opt_autoref)) => {
if adj.autoderefs == 0 {
// Don't copy data to do a deref+ref
// (i.e., skip the last auto-deref).
if adj.autoderefs == 0 {
llconst = const_addr_of(cx, llconst, ast::MutImmutable);
}
}
ty::AutoPtr(ty::ReStatic, _, Some(box ty::AutoUnsize(..))) => {
if adj.autoderefs > 0 {
llconst = addr_of(cx, llconst, "autoref", e.id);
} else {
// Seeing as we are deref'ing here and take a reference
// again to make the pointer part of the far pointer below,
// we just skip the whole thing. We still need the type
@ -231,46 +289,56 @@ pub fn const_expr<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, e: &ast::Expr)
// because of byref semantics. Note that this is not just
// an optimisation, it is necessary for mutable vectors to
// work properly.
let (_, dt) = const_deref(cx, llconst, ty, false);
ty = dt;
ty = match ty::deref(ty, true) {
Some(mt) => {
if type_is_sized(cx.tcx(), mt.ty) {
mt.ty
} else {
llconst = const_addr_of(cx, llconst, ast::MutImmutable)
// Derefing a fat pointer does not change the representation,
// just the type to ty_open.
ty::mk_open(cx.tcx(), mt.ty)
}
match ty.sty {
ty::ty_vec(unit_ty, Some(len)) => {
let llunitty = type_of::type_of(cx, unit_ty);
let llptr = ptrcast(llconst, llunitty.ptr_to());
let prev_const = cx.const_globals().borrow_mut()
.insert(llptr as int, llconst);
assert!(prev_const.is_none() ||
prev_const == Some(llconst));
assert_eq!(abi::FAT_PTR_ADDR, 0);
assert_eq!(abi::FAT_PTR_EXTRA, 1);
llconst = C_struct(cx, &[
llptr,
C_uint(cx, len)
], false);
}
_ => cx.sess().span_bug(e.span,
&format!("unimplemented type in const unsize: {}",
None => {
cx.sess().bug(&format!("unexpected dereferenceable type {}",
ty_to_string(cx.tcx(), ty))[])
}
}
_ => {
cx.sess()
.span_bug(e.span,
&format!("unimplemented const \
autoref {:?}",
autoref)[])
}
}
}
}
}
}
opt_autoref
}
Some(autoref) => {
cx.sess().span_bug(e.span,
&format!("unimplemented const first autoref {:?}", autoref)[])
}
};
match second_autoref {
None => {}
Some(box ty::AutoUnsafe(_, None)) |
Some(box ty::AutoPtr(_, _, None)) => {
llconst = addr_of(cx, llconst, "autoref", e.id);
}
Some(box ty::AutoUnsize(ref k)) => {
let unsized_ty = ty::unsize_ty(cx.tcx(), ty, k, e.span);
let info = expr::unsized_info(cx, k, e.id, ty, param_substs,
|t| ty::mk_imm_rptr(cx.tcx(), cx.tcx().mk_region(ty::ReStatic), t));
let base = ptrcast(llconst, type_of::type_of(cx, unsized_ty).ptr_to());
let prev_const = cx.const_unsized().borrow_mut()
.insert(base, llconst);
assert!(prev_const.is_none() || prev_const == Some(llconst));
assert_eq!(abi::FAT_PTR_ADDR, 0);
assert_eq!(abi::FAT_PTR_EXTRA, 1);
llconst = C_struct(cx, &[base, info], false);
}
Some(autoref) => {
cx.sess().span_bug(e.span,
&format!("unimplemented const second autoref {:?}", autoref)[])
}
}
}
None => {}
};
let llty = type_of::sizing_type_of(cx, ety_adjusted);
let csize = machine::llsize_of_alloc(cx, val_ty(llconst));
@ -282,36 +350,42 @@ pub fn const_expr<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, e: &ast::Expr)
llvm::LLVMDumpValue(C_undef(llty));
}
cx.sess().bug(&format!("const {} of type {} has size {} instead of {}",
e.repr(cx.tcx()), ty_to_string(cx.tcx(), ety),
e.repr(cx.tcx()), ty_to_string(cx.tcx(), ety_adjusted),
csize, tsize)[]);
}
(llconst, ety_adjusted)
}
// the bool returned is whether this expression can be inlined into other crates
// if it's assigned to a static.
fn const_expr_unadjusted(cx: &CrateContext, e: &ast::Expr) -> ValueRef {
fn const_expr_unadjusted<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
e: &ast::Expr,
ety: Ty<'tcx>,
param_substs: &'tcx Substs<'tcx>) -> ValueRef {
let map_list = |exprs: &[P<ast::Expr>]| {
exprs.iter().map(|e| const_expr(cx, &**e).0)
exprs.iter().map(|e| const_expr(cx, &**e, param_substs).0)
.fold(Vec::new(), |mut l, val| { l.push(val); l })
};
unsafe {
let _icx = push_ctxt("const_expr");
return match e.node {
ast::ExprLit(ref lit) => {
consts::const_lit(cx, e, &**lit)
const_lit(cx, e, &**lit)
}
ast::ExprBinary(b, ref e1, ref e2) => {
let (te1, _) = const_expr(cx, &**e1);
let (te2, _) = const_expr(cx, &**e2);
let te2 = base::cast_shift_const_rhs(b, te1, te2);
/* Neither type is bottom, and we expect them to be unified
* already, so the following is safe. */
let ty = ty::expr_ty(cx.tcx(), &**e1);
let is_float = ty::type_is_fp(ty);
let signed = ty::type_is_signed(ty);
let (te1, ty) = const_expr(cx, &**e1, param_substs);
let is_simd = ty::type_is_simd(cx.tcx(), ty);
let intype = if is_simd {
ty::simd_type(cx.tcx(), ty)
} else {
ty
};
let is_float = ty::type_is_fp(intype);
let signed = ty::type_is_signed(intype);
let (te2, _) = const_expr(cx, &**e2, param_substs);
let te2 = base::cast_shift_const_rhs(b, te1, te2);
return match b.node {
ast::BiAdd => {
if is_float { llvm::LLVMConstFAdd(te1, te2) }
@ -345,52 +419,30 @@ fn const_expr_unadjusted(cx: &CrateContext, e: &ast::Expr) -> ValueRef {
if signed { llvm::LLVMConstAShr(te1, te2) }
else { llvm::LLVMConstLShr(te1, te2) }
}
ast::BiEq => {
if is_float { ConstFCmp(RealOEQ, te1, te2) }
else { ConstICmp(IntEQ, te1, te2) }
},
ast::BiLt => {
if is_float { ConstFCmp(RealOLT, te1, te2) }
else {
if signed { ConstICmp(IntSLT, te1, te2) }
else { ConstICmp(IntULT, te1, te2) }
ast::BiEq | ast::BiNe | ast::BiLt | ast::BiLe | ast::BiGt | ast::BiGe => {
if is_float {
let cmp = base::bin_op_to_fcmp_predicate(cx, b.node);
ConstFCmp(cmp, te1, te2)
} else {
let cmp = base::bin_op_to_icmp_predicate(cx, b.node, signed);
let bool_val = ConstICmp(cmp, te1, te2);
if is_simd {
// LLVM outputs an `< size x i1 >`, so we need to perform
// a sign extension to get the correctly sized type.
llvm::LLVMConstIntCast(bool_val, val_ty(te1).to_ref(), True)
} else {
bool_val
}
},
ast::BiLe => {
if is_float { ConstFCmp(RealOLE, te1, te2) }
else {
if signed { ConstICmp(IntSLE, te1, te2) }
else { ConstICmp(IntULE, te1, te2) }
}
},
ast::BiNe => {
if is_float { ConstFCmp(RealONE, te1, te2) }
else { ConstICmp(IntNE, te1, te2) }
},
ast::BiGe => {
if is_float { ConstFCmp(RealOGE, te1, te2) }
else {
if signed { ConstICmp(IntSGE, te1, te2) }
else { ConstICmp(IntUGE, te1, te2) }
}
},
ast::BiGt => {
if is_float { ConstFCmp(RealOGT, te1, te2) }
else {
if signed { ConstICmp(IntSGT, te1, te2) }
else { ConstICmp(IntUGT, te1, te2) }
}
},
}
},
ast::ExprUnary(u, ref e) => {
let (te, _) = const_expr(cx, &**e);
let ty = ty::expr_ty(cx.tcx(), &**e);
let (te, ty) = const_expr(cx, &**e, param_substs);
let is_float = ty::type_is_fp(ty);
return match u {
ast::UnUniq | ast::UnDeref => {
let (dv, _dt) = const_deref(cx, te, ty, true);
dv
const_deref(cx, te, ty).0
}
ast::UnNot => llvm::LLVMConstNot(te),
ast::UnNeg => {
@ -400,7 +452,7 @@ fn const_expr_unadjusted(cx: &CrateContext, e: &ast::Expr) -> ValueRef {
}
}
ast::ExprField(ref base, field) => {
let (bv, bt) = const_expr(cx, &**base);
let (bv, bt) = const_expr(cx, &**base, param_substs);
let brepr = adt::represent_type(cx, bt);
expr::with_field_tys(cx.tcx(), bt, None, |discr, field_tys| {
let ix = ty::field_idx_strict(cx.tcx(), field.node.name, field_tys);
@ -408,7 +460,7 @@ fn const_expr_unadjusted(cx: &CrateContext, e: &ast::Expr) -> ValueRef {
})
}
ast::ExprTupField(ref base, idx) => {
let (bv, bt) = const_expr(cx, &**base);
let (bv, bt) = const_expr(cx, &**base, param_substs);
let brepr = adt::represent_type(cx, bt);
expr::with_field_tys(cx.tcx(), bt, None, |discr, _| {
adt::const_get_field(cx, &*brepr, bv, discr, idx.node)
@ -416,7 +468,7 @@ fn const_expr_unadjusted(cx: &CrateContext, e: &ast::Expr) -> ValueRef {
}
ast::ExprIndex(ref base, ref index) => {
let (bv, bt) = const_expr(cx, &**base);
let (bv, bt) = const_expr(cx, &**base, param_substs);
let iv = match const_eval::eval_const_expr(cx.tcx(), &**index) {
const_eval::const_int(i) => i as u64,
const_eval::const_uint(u) => u,
@ -470,9 +522,11 @@ fn const_expr_unadjusted(cx: &CrateContext, e: &ast::Expr) -> ValueRef {
const_get_elt(cx, arr, &[iv as c_uint])
}
ast::ExprCast(ref base, _) => {
let ety = ty::expr_ty(cx.tcx(), e);
let llty = type_of::type_of(cx, ety);
let (v, basety) = const_expr(cx, &**base);
let (v, basety) = const_expr(cx, &**base, param_substs);
if expr::cast_is_noop(basety, ety) {
return v;
}
return match (expr::cast_type_kind(cx.tcx(), basety),
expr::cast_type_kind(cx.tcx(), ety)) {
@ -523,7 +577,7 @@ fn const_expr_unadjusted(cx: &CrateContext, e: &ast::Expr) -> ValueRef {
}
}
}
ast::ExprAddrOf(mutbl, ref sub) => {
ast::ExprAddrOf(ast::MutImmutable, ref sub) => {
// If this is the address of some static, then we need to return
// the actual address of the static itself (short circuit the rest
// of const eval).
@ -531,41 +585,48 @@ fn const_expr_unadjusted(cx: &CrateContext, e: &ast::Expr) -> ValueRef {
loop {
match cur.node {
ast::ExprParen(ref sub) => cur = sub,
ast::ExprBlock(ref blk) => {
if let Some(ref sub) = blk.expr {
cur = sub;
} else {
break;
}
}
_ => break,
}
}
let opt_def = cx.tcx().def_map.borrow().get(&cur.id).cloned();
if let Some(def::DefStatic(def_id, _)) = opt_def {
let ty = ty::expr_ty(cx.tcx(), e);
return get_static_val(cx, def_id, ty);
return get_static_val(cx, def_id, ety);
}
// If this isn't the address of a static, then keep going through
// normal constant evaluation.
let (e, _) = const_expr(cx, &**sub);
const_addr_of(cx, e, mutbl)
let (v, _) = const_expr(cx, &**sub, param_substs);
addr_of(cx, v, "ref", e.id)
}
ast::ExprAddrOf(ast::MutMutable, ref sub) => {
let (v, _) = const_expr(cx, &**sub, param_substs);
addr_of_mut(cx, v, "ref_mut_slice", e.id)
}
ast::ExprTup(ref es) => {
let ety = ty::expr_ty(cx.tcx(), e);
let repr = adt::represent_type(cx, ety);
let vals = map_list(&es[]);
adt::trans_const(cx, &*repr, 0, &vals[])
}
ast::ExprStruct(_, ref fs, ref base_opt) => {
let ety = ty::expr_ty(cx.tcx(), e);
let repr = adt::represent_type(cx, ety);
let tcx = cx.tcx();
let base_val = match *base_opt {
Some(ref base) => Some(const_expr(cx, &**base)),
Some(ref base) => Some(const_expr(cx, &**base, param_substs)),
None => None
};
expr::with_field_tys(tcx, ety, Some(e.id), |discr, field_tys| {
expr::with_field_tys(cx.tcx(), ety, Some(e.id), |discr, field_tys| {
let cs = field_tys.iter().enumerate()
.map(|(ix, &field_ty)| {
match fs.iter().find(|f| field_ty.name == f.ident.node.name) {
Some(ref f) => const_expr(cx, &*f.expr).0,
Some(ref f) => const_expr(cx, &*f.expr, param_substs).0,
None => {
match base_val {
Some((bv, _)) => {
@ -580,23 +641,36 @@ fn const_expr_unadjusted(cx: &CrateContext, e: &ast::Expr) -> ValueRef {
}
}
}).collect::<Vec<_>>();
if ty::type_is_simd(cx.tcx(), ety) {
C_vector(&cs[])
} else {
adt::trans_const(cx, &*repr, discr, &cs[])
}
})
}
ast::ExprVec(ref es) => {
const_vec(cx, e, es).0
let unit_ty = ty::sequence_element_type(cx.tcx(), ety);
let llunitty = type_of::type_of(cx, unit_ty);
let vs = es.iter().map(|e| const_expr(cx, &**e, param_substs).0)
.collect::<Vec<_>>();
// If the vector contains enums, an LLVM array won't work.
if vs.iter().any(|vi| val_ty(*vi) != llunitty) {
C_struct(cx, &vs[], false)
} else {
C_array(llunitty, &vs[])
}
}
ast::ExprRepeat(ref elem, ref count) => {
let vec_ty = ty::expr_ty(cx.tcx(), e);
let unit_ty = ty::sequence_element_type(cx.tcx(), vec_ty);
let unit_ty = ty::sequence_element_type(cx.tcx(), ety);
let llunitty = type_of::type_of(cx, unit_ty);
let n = match const_eval::eval_const_expr(cx.tcx(), &**count) {
const_eval::const_int(i) => i as uint,
const_eval::const_uint(i) => i as uint,
_ => cx.sess().span_bug(count.span, "count must be integral const expression.")
};
let vs: Vec<_> = repeat(const_expr(cx, &**elem).0).take(n).collect();
if vs.iter().any(|vi| val_ty(*vi) != llunitty) {
let unit_val = const_expr(cx, &**elem, param_substs).0;
let vs: Vec<_> = repeat(unit_val).take(n).collect();
if val_ty(unit_val) != llunitty {
C_struct(cx, &vs[], false)
} else {
C_array(llunitty, &vs[])
@ -606,10 +680,10 @@ fn const_expr_unadjusted(cx: &CrateContext, e: &ast::Expr) -> ValueRef {
let def = cx.tcx().def_map.borrow()[e.id];
match def {
def::DefFn(..) | def::DefStaticMethod(..) | def::DefMethod(..) => {
expr::trans_def_fn_unadjusted(cx, e, def, &Substs::trans_empty()).val
expr::trans_def_fn_unadjusted(cx, e, def, param_substs).val
}
def::DefConst(def_id) => {
get_const_val(cx, def_id)
const_deref_ptr(cx, get_const_val(cx, def_id, e))
}
def::DefVariant(enum_did, variant_did, _) => {
let vinfo = ty::enum_variant_with_id(cx.tcx(),
@ -617,19 +691,17 @@ fn const_expr_unadjusted(cx: &CrateContext, e: &ast::Expr) -> ValueRef {
variant_did);
if vinfo.args.len() > 0 {
// N-ary variant.
expr::trans_def_fn_unadjusted(cx, e, def, &Substs::trans_empty()).val
expr::trans_def_fn_unadjusted(cx, e, def, param_substs).val
} else {
// Nullary variant.
let ety = ty::expr_ty(cx.tcx(), e);
let repr = adt::represent_type(cx, ety);
adt::trans_const(cx, &*repr, vinfo.disr_val, &[])
}
}
def::DefStruct(_) => {
let ety = ty::expr_ty(cx.tcx(), e);
if let ty::ty_bare_fn(..) = ety.sty {
// Tuple struct.
expr::trans_def_fn_unadjusted(cx, e, def, &Substs::trans_empty()).val
expr::trans_def_fn_unadjusted(cx, e, def, param_substs).val
} else {
// Unit struct.
C_null(type_of::type_of(cx, ety))
@ -643,20 +715,21 @@ fn const_expr_unadjusted(cx: &CrateContext, e: &ast::Expr) -> ValueRef {
}
ast::ExprCall(ref callee, ref args) => {
let opt_def = cx.tcx().def_map.borrow().get(&callee.id).cloned();
let arg_vals = map_list(&args[]);
match opt_def {
Some(def::DefStruct(_)) => {
let ety = ty::expr_ty(cx.tcx(), e);
if ty::type_is_simd(cx.tcx(), ety) {
C_vector(&arg_vals[])
} else {
let repr = adt::represent_type(cx, ety);
let arg_vals = map_list(&args[]);
adt::trans_const(cx, &*repr, 0, &arg_vals[])
}
}
Some(def::DefVariant(enum_did, variant_did, _)) => {
let ety = ty::expr_ty(cx.tcx(), e);
let repr = adt::represent_type(cx, ety);
let vinfo = ty::enum_variant_with_id(cx.tcx(),
enum_did,
variant_did);
let arg_vals = map_list(&args[]);
adt::trans_const(cx,
&*repr,
vinfo.disr_val,
@ -665,13 +738,19 @@ fn const_expr_unadjusted(cx: &CrateContext, e: &ast::Expr) -> ValueRef {
_ => cx.sess().span_bug(e.span, "expected a struct or variant def")
}
}
ast::ExprParen(ref e) => const_expr(cx, &**e).0,
ast::ExprParen(ref e) => const_expr(cx, &**e, param_substs).0,
ast::ExprBlock(ref block) => {
match block.expr {
Some(ref expr) => const_expr(cx, &**expr).0,
Some(ref expr) => const_expr(cx, &**expr, param_substs).0,
None => C_nil(cx)
}
}
ast::ExprClosure(_, ref decl, ref body) => {
closure::trans_closure_expr(closure::Dest::Ignore(cx),
&**decl, &**body, e.id,
param_substs);
C_null(type_of::type_of(cx, ety))
}
_ => cx.sess().span_bug(e.span,
"bad constant expression type in consts::const_expr")
};
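
Annotation (not part of the diff): the new ast::ExprClosure arm above presumably covers closures that capture nothing. Such a closure value is zero-sized, so it can appear inside a promotable constant rvalue, and only its body needs to be translated (hence Dest::Ignore and a null of the closure type). A made-up sketch of such user code:

    // Hypothetical user code: `double` captures no variables, so the closure
    // value is zero-sized; only its body requires code generation.
    fn forty_two() -> i32 {
        let double = |x: i32| x * 2;
        double(21)
    }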


@ -22,6 +22,7 @@ use trans::common::{ExternMap,tydesc_info,BuilderRef_res};
use trans::debuginfo;
use trans::monomorphize::MonoId;
use trans::type_::{Type, TypeNames};
use middle::subst::Substs;
use middle::ty::{self, Ty};
use session::config::NoDebugInfo;
use session::Session;
@ -105,17 +106,20 @@ pub struct LocalCrateContext<'tcx> {
const_cstr_cache: RefCell<FnvHashMap<InternedString, ValueRef>>,
/// Reverse-direction for const ptrs cast from globals.
/// Key is an int, cast from a ValueRef holding a *T,
/// Key is a ValueRef holding a *T,
/// Val is a ValueRef holding a *[T].
///
/// Needed because LLVM loses pointer->pointee association
/// when we ptrcast, and we have to ptrcast during translation
/// of a [T] const because we form a slice, a [*T,int] pair, not
/// a pointer to an LLVM array type.
const_globals: RefCell<FnvHashMap<int, ValueRef>>,
/// of a [T] const because we form a slice, a (*T,usize) pair, not
/// a pointer to an LLVM array type. Similar for trait objects.
const_unsized: RefCell<FnvHashMap<ValueRef, ValueRef>>,
/// Cache of emitted const globals (value -> global)
const_globals: RefCell<FnvHashMap<ValueRef, ValueRef>>,
/// Cache of emitted const values
const_values: RefCell<NodeMap<ValueRef>>,
const_values: RefCell<FnvHashMap<(ast::NodeId, &'tcx Substs<'tcx>), ValueRef>>,
/// Cache of emitted static values
static_values: RefCell<NodeMap<ValueRef>>,
@ -400,8 +404,9 @@ impl<'tcx> LocalCrateContext<'tcx> {
monomorphizing: RefCell::new(DefIdMap()),
vtables: RefCell::new(FnvHashMap()),
const_cstr_cache: RefCell::new(FnvHashMap()),
const_unsized: RefCell::new(FnvHashMap()),
const_globals: RefCell::new(FnvHashMap()),
const_values: RefCell::new(NodeMap()),
const_values: RefCell::new(FnvHashMap()),
static_values: RefCell::new(NodeMap()),
extern_const_values: RefCell::new(DefIdMap()),
impl_method_cache: RefCell::new(FnvHashMap()),
@ -615,11 +620,16 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> {
&self.local.const_cstr_cache
}
pub fn const_globals<'a>(&'a self) -> &'a RefCell<FnvHashMap<int, ValueRef>> {
pub fn const_unsized<'a>(&'a self) -> &'a RefCell<FnvHashMap<ValueRef, ValueRef>> {
&self.local.const_unsized
}
pub fn const_globals<'a>(&'a self) -> &'a RefCell<FnvHashMap<ValueRef, ValueRef>> {
&self.local.const_globals
}
pub fn const_values<'a>(&'a self) -> &'a RefCell<NodeMap<ValueRef>> {
pub fn const_values<'a>(&'a self) -> &'a RefCell<FnvHashMap<(ast::NodeId, &'tcx Substs<'tcx>),
ValueRef>> {
&self.local.const_values
}


@ -372,7 +372,8 @@ pub fn trans_fail<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
let filename = C_str_slice(ccx, filename);
let line = C_uint(ccx, loc.line);
let expr_file_line_const = C_struct(ccx, &[v_str, filename, line], false);
let expr_file_line = consts::const_addr_of(ccx, expr_file_line_const, ast::MutImmutable);
let expr_file_line = consts::addr_of(ccx, expr_file_line_const,
"panic_loc", call_info.id);
let args = vec!(expr_file_line);
let did = langcall(bcx, Some(call_info.span), "", PanicFnLangItem);
let bcx = callee::trans_lang_call(bcx,
@ -400,7 +401,8 @@ pub fn trans_fail_bounds_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
let filename = C_str_slice(ccx, filename);
let line = C_uint(ccx, loc.line);
let file_line_const = C_struct(ccx, &[filename, line], false);
let file_line = consts::const_addr_of(ccx, file_line_const, ast::MutImmutable);
let file_line = consts::addr_of(ccx, file_line_const,
"panic_bounds_check_loc", call_info.id);
let args = vec!(file_line, index, len);
let did = langcall(bcx, Some(call_info.span), "", PanicBoundsCheckFnLangItem);
let bcx = callee::trans_lang_call(bcx,


@ -54,6 +54,7 @@ use self::lazy_binop_ty::*;
use back::abi;
use llvm::{self, ValueRef};
use middle::check_const;
use middle::def;
use middle::mem_categorization::Typer;
use middle::subst::{self, Substs};
@ -68,7 +69,6 @@ use trans::glue;
use trans::machine;
use trans::meth;
use trans::monomorphize;
use trans::inline;
use trans::tvec;
use trans::type_of;
use middle::ty::{struct_fields, tup_fields};
@ -84,8 +84,9 @@ use trans::type_::Type;
use syntax::{ast, ast_util, codemap};
use syntax::ptr::P;
use syntax::parse::token;
use std::rc::Rc;
use std::iter::repeat;
use std::mem;
use std::rc::Rc;
// Destinations
@ -115,11 +116,56 @@ pub fn trans_into<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-> Block<'blk, 'tcx> {
let mut bcx = bcx;
debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
if bcx.tcx().adjustments.borrow().contains_key(&expr.id) {
// use trans, which may be less efficient but
// which will perform the adjustments:
let datum = unpack_datum!(bcx, trans(bcx, expr));
return datum.store_to_dest(bcx, dest, expr.id)
return datum.store_to_dest(bcx, dest, expr.id);
}
let qualif = bcx.tcx().const_qualif_map.borrow()[expr.id];
if !qualif.intersects(check_const::NOT_CONST | check_const::NEEDS_DROP) {
if !qualif.intersects(check_const::PREFER_IN_PLACE) {
if let SaveIn(lldest) = dest {
let global = consts::get_const_expr_as_global(bcx.ccx(), expr, qualif,
bcx.fcx.param_substs);
// Cast pointer to destination, because constants
// have different types.
let lldest = PointerCast(bcx, lldest, val_ty(global));
memcpy_ty(bcx, lldest, global, expr_ty_adjusted(bcx, expr));
}
// Don't do anything in the Ignore case, consts don't need drop.
return bcx;
} else {
// The only way we're going to see a `const` at this point is if
// it prefers in-place instantiation, likely because it contains
// `[x; N]` somewhere within.
match expr.node {
ast::ExprPath(_) | ast::ExprQPath(_) => {
match bcx.def(expr.id) {
def::DefConst(did) => {
let expr = consts::get_const_expr(bcx.ccx(), did, expr);
// Temporarily get cleanup scopes out of the way,
// as they require sub-expressions to be contained
// inside the current AST scope.
// These should record no cleanups anyways, `const`
// can't have destructors.
let scopes = mem::replace(&mut *bcx.fcx.scopes.borrow_mut(),
vec![]);
bcx = trans_into(bcx, expr, dest);
let scopes = mem::replace(&mut *bcx.fcx.scopes.borrow_mut(),
scopes);
assert!(scopes.is_empty());
return bcx;
}
_ => {}
}
}
_ => {}
}
}
}
debug!("trans_into() expr={}", expr.repr(bcx.tcx()));
@ -130,7 +176,6 @@ pub fn trans_into<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
false);
bcx.fcx.push_ast_cleanup_scope(cleanup_debug_loc);
debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
let kind = ty::expr_kind(bcx.tcx(), expr);
bcx = match kind {
ty::LvalueExpr | ty::RvalueDatumExpr => {
@ -157,14 +202,70 @@ pub fn trans<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
let mut bcx = bcx;
let fcx = bcx.fcx;
let qualif = bcx.tcx().const_qualif_map.borrow()[expr.id];
let adjusted_global = !qualif.intersects(check_const::NON_STATIC_BORROWS);
let global = if !qualif.intersects(check_const::NOT_CONST | check_const::NEEDS_DROP) {
let global = consts::get_const_expr_as_global(bcx.ccx(), expr, qualif,
bcx.fcx.param_substs);
if qualif.intersects(check_const::HAS_STATIC_BORROWS) {
// Is borrowed as 'static, must return lvalue.
// Cast pointer to global, because constants have different types.
let const_ty = expr_ty_adjusted(bcx, expr);
let llty = type_of::type_of(bcx.ccx(), const_ty);
let global = PointerCast(bcx, global, llty.ptr_to());
let datum = Datum::new(global, const_ty, Lvalue);
return DatumBlock::new(bcx, datum.to_expr_datum());
}
// Otherwise, keep around and perform adjustments, if needed.
let const_ty = if adjusted_global {
expr_ty_adjusted(bcx, expr)
} else {
expr_ty(bcx, expr)
};
// This could use a better heuristic.
Some(if type_is_immediate(bcx.ccx(), const_ty) {
// Cast pointer to global, because constants have different types.
let llty = type_of::type_of(bcx.ccx(), const_ty);
let global = PointerCast(bcx, global, llty.ptr_to());
// Maybe just get the value directly, instead of loading it?
immediate_rvalue(load_ty(bcx, global, const_ty), const_ty)
} else {
let llty = type_of::type_of(bcx.ccx(), const_ty);
// HACK(eddyb) get around issues with lifetime intrinsics.
let scratch = alloca_no_lifetime(bcx, llty, "const");
let lldest = if !ty::type_is_structural(const_ty) {
// Cast pointer to slot, because constants have different types.
PointerCast(bcx, scratch, val_ty(global))
} else {
// In this case, memcpy_ty calls llvm.memcpy after casting both
// source and destination to i8*, so we don't need any casts.
scratch
};
memcpy_ty(bcx, lldest, global, const_ty);
Datum::new(scratch, const_ty, Rvalue::new(ByRef))
})
} else {
None
};
let cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(),
expr.id,
expr.span,
false);
fcx.push_ast_cleanup_scope(cleanup_debug_loc);
let datum = unpack_datum!(bcx, trans_unadjusted(bcx, expr));
let datum = unpack_datum!(bcx, apply_adjustments(bcx, expr, datum));
let datum = match global {
Some(rvalue) => rvalue.to_expr_datum(),
None => unpack_datum!(bcx, trans_unadjusted(bcx, expr))
};
let datum = if adjusted_global {
datum // trans::consts already performed adjustments.
} else {
unpack_datum!(bcx, apply_adjustments(bcx, expr, datum))
};
bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, expr.id);
return DatumBlock::new(bcx, datum);
}
@ -177,6 +278,54 @@ pub fn get_dataptr(bcx: Block, fat_ptr: ValueRef) -> ValueRef {
GEPi(bcx, fat_ptr, &[0, abi::FAT_PTR_ADDR])
}
// Retrieve the information we are losing (making dynamic) in an unsizing
// adjustment.
// When making a dtor, we need to do different things depending on the
// ownership of the object.. mk_ty is a function for turning `unadjusted_ty`
// into a type to be destructed. If we want to end up with a Box pointer,
// then mk_ty should make a Box pointer (T -> Box<T>), if we want a
// borrowed reference then it should be T -> &T.
pub fn unsized_info<'a, 'tcx, F>(ccx: &CrateContext<'a, 'tcx>,
kind: &ty::UnsizeKind<'tcx>,
id: ast::NodeId,
unadjusted_ty: Ty<'tcx>,
param_substs: &'tcx subst::Substs<'tcx>,
mk_ty: F) -> ValueRef where
F: FnOnce(Ty<'tcx>) -> Ty<'tcx>,
{
// FIXME(#19596) workaround: `|t| t` causes monomorphization recursion
fn identity<T>(t: T) -> T { t }
debug!("unsized_info(kind={:?}, id={}, unadjusted_ty={})",
kind, id, unadjusted_ty.repr(ccx.tcx()));
match kind {
&ty::UnsizeLength(len) => C_uint(ccx, len),
&ty::UnsizeStruct(box ref k, tp_index) => match unadjusted_ty.sty {
ty::ty_struct(_, ref substs) => {
let ty_substs = substs.types.get_slice(subst::TypeSpace);
// The dtor for a field treats it like a value, so mk_ty
// should just be the identity function.
unsized_info(ccx, k, id, ty_substs[tp_index], param_substs, identity)
}
_ => ccx.sess().bug(&format!("UnsizeStruct with bad sty: {}",
unadjusted_ty.repr(ccx.tcx()))[])
},
&ty::UnsizeVtable(ty::TyTrait { ref principal, .. }, _) => {
// Note that we preserve binding levels here:
let substs = principal.0.substs.with_self_ty(unadjusted_ty).erase_regions();
let substs = ccx.tcx().mk_substs(substs);
let trait_ref = ty::Binder(Rc::new(ty::TraitRef { def_id: principal.def_id(),
substs: substs }));
let trait_ref = monomorphize::apply_param_substs(ccx.tcx(),
param_substs,
&trait_ref);
let box_ty = mk_ty(unadjusted_ty);
consts::ptrcast(meth::get_vtable(ccx, box_ty, trait_ref, param_substs),
Type::vtable_ptr(ccx))
}
}
}
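As a reminder of what `unsized_info` computes, here is a small self-contained sketch (illustrative only, written in the Rust of this era) of the two everyday unsizing kinds: the fat pointer's extra word is a length for `UnsizeLength` and a vtable pointer for `UnsizeVtable`.

    trait Speak { fn speak(&self) -> u32; }
    impl Speak for u8 { fn speak(&self) -> u32 { *self as u32 } }

    fn demo(xs: &[u8; 3]) -> (usize, u32) {
        // &[u8; 3] -> &[u8] is UnsizeLength: the extra word is the constant
        // length 3, i.e. the C_uint(ccx, len) case above.
        let slice: &[u8] = xs;
        // &u8 -> &Speak is UnsizeVtable: the extra word points at the vtable
        // that meth::get_vtable emits for this (type, trait) pair.
        let object: &Speak = &xs[0];
        (slice.len(), object.speak())
    }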
/// Helper for trans that apply adjustments from `expr` to `datum`, which should be the unadjusted
/// translation of `expr`.
fn apply_adjustments<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
@ -262,13 +411,17 @@ fn apply_adjustments<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
let datum = match autoref {
&AutoPtr(_, _, ref a) | &AutoUnsafe(_, ref a) => {
debug!(" AutoPtr");
match a {
&Some(box ref a) => {
if let &Some(box ref a) = a {
datum = unpack_datum!(bcx, apply_autoref(a, bcx, expr, datum));
}
&None => {}
if !type_is_sized(bcx.tcx(), datum.ty) {
// Arrange cleanup
let lval = unpack_datum!(bcx,
datum.to_lvalue_datum(bcx, "ref_fat_ptr", expr.id));
unpack_datum!(bcx, ref_fat_ptr(bcx, lval))
} else {
unpack_datum!(bcx, auto_ref(bcx, datum, expr))
}
unpack_datum!(bcx, ref_ptr(bcx, expr, datum))
}
&ty::AutoUnsize(ref k) => {
debug!(" AutoUnsize");
@ -288,139 +441,35 @@ fn apply_adjustments<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
DatumBlock::new(bcx, datum)
}
fn ref_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &ast::Expr,
datum: Datum<'tcx, Expr>)
-> DatumBlock<'blk, 'tcx, Expr> {
debug!("ref_ptr(expr={}, datum={})",
expr.repr(bcx.tcx()),
datum.to_string(bcx.ccx()));
if !type_is_sized(bcx.tcx(), datum.ty) {
debug!("Taking address of unsized type {}",
bcx.ty_to_string(datum.ty));
ref_fat_ptr(bcx, expr, datum)
} else {
debug!("Taking address of sized type {}",
bcx.ty_to_string(datum.ty));
auto_ref(bcx, datum, expr)
}
}
// Retrieve the information we are losing (making dynamic) in an unsizing
// adjustment.
// When making a dtor, we need to do different things depending on the
// ownership of the object. mk_ty is a function for turning `unadjusted_ty`
// into a type to be destructed. If we want to end up with a Box pointer,
// then mk_ty should make a Box pointer (T -> Box<T>), if we want a
// borrowed reference then it should be T -> &T.
fn unsized_info<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
kind: &ty::UnsizeKind<'tcx>,
id: ast::NodeId,
unadjusted_ty: Ty<'tcx>,
mk_ty: F) -> ValueRef where
F: FnOnce(Ty<'tcx>) -> Ty<'tcx>,
{
// FIXME(#19596) workaround: `|t| t` causes monomorphization recursion
fn identity<T>(t: T) -> T { t }
debug!("unsized_info(kind={:?}, id={}, unadjusted_ty={})",
kind, id, unadjusted_ty.repr(bcx.tcx()));
match kind {
&ty::UnsizeLength(len) => C_uint(bcx.ccx(), len),
&ty::UnsizeStruct(box ref k, tp_index) => match unadjusted_ty.sty {
ty::ty_struct(_, ref substs) => {
let ty_substs = substs.types.get_slice(subst::TypeSpace);
// The dtor for a field treats it like a value, so mk_ty
// should just be the identity function.
unsized_info(bcx, k, id, ty_substs[tp_index], identity)
}
_ => bcx.sess().bug(&format!("UnsizeStruct with bad sty: {}",
bcx.ty_to_string(unadjusted_ty))[])
},
&ty::UnsizeVtable(ty::TyTrait { ref principal, .. }, _) => {
// Note that we preserve binding levels here:
let substs = principal.0.substs.with_self_ty(unadjusted_ty).erase_regions();
let substs = bcx.tcx().mk_substs(substs);
let trait_ref =
ty::Binder(Rc::new(ty::TraitRef { def_id: principal.def_id(),
substs: substs }));
let trait_ref = bcx.monomorphize(&trait_ref);
let box_ty = mk_ty(unadjusted_ty);
PointerCast(bcx,
meth::get_vtable(bcx, box_ty, trait_ref),
Type::vtable_ptr(bcx.ccx()))
}
}
}
fn unsize_expr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &ast::Expr,
datum: Datum<'tcx, Expr>,
k: &ty::UnsizeKind<'tcx>)
-> DatumBlock<'blk, 'tcx, Expr> {
let mut bcx = bcx;
let tcx = bcx.tcx();
let datum_ty = datum.ty;
let unsized_ty = ty::unsize_ty(tcx, datum_ty, k, expr.span);
debug!("unsized_ty={}", unsized_ty.repr(bcx.tcx()));
let dest_ty = ty::mk_open(tcx, unsized_ty);
debug!("dest_ty={}", unsized_ty.repr(bcx.tcx()));
// Closures for extracting and manipulating the data and payload parts of
// the fat pointer.
let info = |bcx, _val| unsized_info(bcx,
k,
expr.id,
datum_ty,
|t| ty::mk_rptr(tcx,
tcx.mk_region(ty::ReStatic),
ty::mt{
ty: t,
mutbl: ast::MutImmutable
}));
match *k {
ty::UnsizeStruct(..) =>
into_fat_ptr(bcx, expr, datum, dest_ty, |bcx, val| {
PointerCast(bcx, val, type_of::type_of(bcx.ccx(), unsized_ty).ptr_to())
}, info),
ty::UnsizeLength(..) =>
into_fat_ptr(bcx, expr, datum, dest_ty, |bcx, val| {
GEPi(bcx, val, &[0, 0])
}, info),
ty::UnsizeVtable(..) =>
into_fat_ptr(bcx, expr, datum, dest_ty, |_bcx, val| {
PointerCast(bcx, val, Type::i8p(bcx.ccx()))
}, info),
}
}
fn ref_fat_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &ast::Expr,
datum: Datum<'tcx, Expr>)
-> DatumBlock<'blk, 'tcx, Expr> {
let tcx = bcx.tcx();
let dest_ty = ty::close_type(tcx, datum.ty);
let base = |bcx, val| Load(bcx, get_dataptr(bcx, val));
let len = |bcx, val| Load(bcx, get_len(bcx, val));
into_fat_ptr(bcx, expr, datum, dest_ty, base, len)
}
fn into_fat_ptr<'blk, 'tcx, F, G>(bcx: Block<'blk, 'tcx>,
expr: &ast::Expr,
datum: Datum<'tcx, Expr>,
dest_ty: Ty<'tcx>,
base: F,
info: G)
-> DatumBlock<'blk, 'tcx, Expr> where
F: FnOnce(Block<'blk, 'tcx>, ValueRef) -> ValueRef,
G: FnOnce(Block<'blk, 'tcx>, ValueRef) -> ValueRef,
{
let mut bcx = bcx;
let info = unsized_info(bcx.ccx(), k, expr.id, datum_ty, bcx.fcx.param_substs,
|t| ty::mk_imm_rptr(tcx, tcx.mk_region(ty::ReStatic), t));
// Arrange cleanup
let lval = unpack_datum!(bcx,
datum.to_lvalue_datum(bcx, "into_fat_ptr", expr.id));
let base = base(bcx, lval.val);
let info = info(bcx, lval.val);
// Compute the base pointer. This doesn't change the pointer value,
// but merely its type.
let base = match *k {
ty::UnsizeStruct(..) | ty::UnsizeVtable(..) => {
PointerCast(bcx, lval.val, type_of::type_of(bcx.ccx(), unsized_ty).ptr_to())
}
ty::UnsizeLength(..) => {
GEPi(bcx, lval.val, &[0u, 0u])
}
};
let scratch = rvalue_scratch_datum(bcx, dest_ty, "__fat_ptr");
Store(bcx, base, get_dataptr(bcx, scratch.val));
@ -490,7 +539,8 @@ fn apply_adjustments<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
let base = PointerCast(bcx, get_dataptr(bcx, scratch.val), llbox_ty.ptr_to());
bcx = datum.store_to(bcx, base);
let info = unsized_info(bcx, k, expr.id, unboxed_ty, |t| ty::mk_uniq(tcx, t));
let info = unsized_info(bcx.ccx(), k, expr.id, unboxed_ty, bcx.fcx.param_substs,
|t| ty::mk_uniq(tcx, t));
Store(bcx, info, get_len(bcx, scratch.val));
DatumBlock::new(bcx, scratch.to_expr_datum())
@ -847,10 +897,8 @@ fn trans_def<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
// pointer to that.
let const_ty = expr_ty(bcx, ref_expr);
fn get_val<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, did: ast::DefId,
const_ty: Ty<'tcx>) -> ValueRef {
// For external constants, we don't inline.
if did.krate == ast::LOCAL_CRATE {
let val = if did.krate == ast::LOCAL_CRATE {
// Case 1.
// The LLVM global has the type of its initializer,
@ -862,38 +910,12 @@ fn trans_def<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
} else {
// Case 2.
base::get_extern_const(bcx.ccx(), did, const_ty)
}
}
let val = get_val(bcx, did, const_ty);
};
DatumBlock::new(bcx, Datum::new(val, const_ty, LvalueExpr))
}
def::DefConst(did) => {
// First, inline any external constants into the local crate so we
// can be sure to get the LLVM value corresponding to it.
let did = inline::maybe_instantiate_inline(bcx.ccx(), did);
if did.krate != ast::LOCAL_CRATE {
bcx.tcx().sess.span_bug(ref_expr.span,
"cross crate constant could not \
be inlined");
}
let val = base::get_item_val(bcx.ccx(), did.node);
// Next, we need to create a ByRef rvalue datum to return. We can't
// use the normal .to_ref_datum() function because the type of
// `val` is not actually the same as `const_ty`.
//
// To get around this, we make a custom alloca slot with the
// appropriate type (const_ty), and then we cast it to a pointer of
// typeof(val), store the value, and then hand this slot over to
// the datum infrastructure.
let const_ty = expr_ty(bcx, ref_expr);
let llty = type_of::type_of(bcx.ccx(), const_ty);
let slot = alloca(bcx, llty, "const");
let pty = Type::from_ref(unsafe { llvm::LLVMTypeOf(val) }).ptr_to();
Store(bcx, val, PointerCast(bcx, slot, pty));
let datum = Datum::new(slot, const_ty, Rvalue::new(ByRef));
DatumBlock::new(bcx, datum.to_expr_datum())
def::DefConst(_) => {
bcx.sess().span_bug(ref_expr.span,
"constant expression should not reach expr::trans_def")
}
_ => {
DatumBlock::new(bcx, trans_local_var(bcx, def).to_expr_datum())
@ -1119,7 +1141,12 @@ fn trans_rvalue_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
tvec::trans_fixed_vstore(bcx, expr, dest)
}
ast::ExprClosure(_, ref decl, ref body) => {
closure::trans_closure_expr(bcx, &**decl, &**body, expr.id, dest)
let dest = match dest {
SaveIn(lldest) => closure::Dest::SaveIn(bcx, lldest),
Ignore => closure::Dest::Ignore(bcx.ccx())
};
closure::trans_closure_expr(dest, &**decl, &**body, expr.id, bcx.fcx.param_substs)
.unwrap_or(bcx)
}
ast::ExprCall(ref f, ref args) => {
if bcx.tcx().is_method_call(expr.id) {
@ -1247,7 +1274,7 @@ fn trans_def_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
pub fn trans_def_fn_unadjusted<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
ref_expr: &ast::Expr,
def: def::Def,
param_substs: &subst::Substs<'tcx>)
param_substs: &'tcx subst::Substs<'tcx>)
-> Datum<'tcx, Rvalue> {
let _icx = push_ctxt("trans_def_datum_unadjusted");
@ -1641,6 +1668,16 @@ fn trans_uniq_expr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
immediate_rvalue_bcx(bcx, val, box_ty).to_expr_datumblock()
}
fn ref_fat_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
lval: Datum<'tcx, Lvalue>)
-> DatumBlock<'blk, 'tcx, Expr> {
let dest_ty = ty::close_type(bcx.tcx(), lval.ty);
let scratch = rvalue_scratch_datum(bcx, dest_ty, "__fat_ptr");
memcpy_ty(bcx, scratch.val, lval.val, scratch.ty);
DatumBlock::new(bcx, scratch.to_expr_datum())
}
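A hypothetical surface-level example (not from the patch) of when this helper is reached: dereferencing an existing fat pointer yields an opened DST lvalue, and taking its address again goes through `ref_fat_ptr`, which simply memcpys the two-word pair into a fresh fat-pointer slot.

    fn readdress(s: &str) -> &str {
        // `*s` is an opened DST lvalue; `&*s` closes it back into a fat
        // pointer, which at trans level is the ref_fat_ptr path above.
        &*s
    }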
fn trans_addr_of<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &ast::Expr,
subexpr: &ast::Expr)
@ -1651,18 +1688,7 @@ fn trans_addr_of<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
match sub_datum.ty.sty {
ty::ty_open(_) => {
// Opened DST value, close to a fat pointer
debug!("Closing fat pointer {}", bcx.ty_to_string(sub_datum.ty));
let scratch = rvalue_scratch_datum(bcx,
ty::close_type(bcx.tcx(), sub_datum.ty),
"fat_addr_of");
let base = Load(bcx, get_dataptr(bcx, sub_datum.val));
Store(bcx, base, get_dataptr(bcx, scratch.val));
let len = Load(bcx, get_len(bcx, sub_datum.val));
Store(bcx, len, get_len(bcx, scratch.val));
DatumBlock::new(bcx, scratch.to_expr_datum())
ref_fat_ptr(bcx, sub_datum)
}
_ => {
// Sized value, ref to a thin pointer
@ -1687,9 +1713,10 @@ fn trans_eager_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
let tcx = bcx.tcx();
let is_simd = ty::type_is_simd(tcx, lhs_t);
let intype = {
if is_simd { ty::simd_type(tcx, lhs_t) }
else { lhs_t }
let intype = if is_simd {
ty::simd_type(tcx, lhs_t)
} else {
lhs_t
};
let is_float = ty::type_is_fp(intype);
let is_signed = ty::type_is_signed(intype);
@ -1766,24 +1793,10 @@ fn trans_eager_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
}
}
ast::BiEq | ast::BiNe | ast::BiLt | ast::BiGe | ast::BiLe | ast::BiGt => {
if ty::type_is_scalar(rhs_t) {
unpack_result!(bcx,
base::compare_scalar_types(bcx,
lhs,
rhs,
rhs_t,
op.node,
binop_debug_loc))
} else if is_simd {
base::compare_simd_types(bcx,
lhs,
rhs,
intype,
ty::simd_size(tcx, lhs_t),
op.node,
binop_debug_loc)
if is_simd {
base::compare_simd_types(bcx, lhs, rhs, intype, op.node, binop_debug_loc)
} else {
bcx.tcx().sess.span_bug(binop_expr.span, "comparison operator unsupported for type")
base::compare_scalar_types(bcx, lhs, rhs, intype, op.node, binop_debug_loc)
}
}
_ => {
@ -1997,7 +2010,7 @@ pub fn cast_type_kind<'tcx>(tcx: &ty::ctxt<'tcx>, t: Ty<'tcx>) -> cast_kind {
}
}
fn cast_is_noop<'tcx>(t_in: Ty<'tcx>, t_out: Ty<'tcx>) -> bool {
pub fn cast_is_noop<'tcx>(t_in: Ty<'tcx>, t_out: Ty<'tcx>) -> bool {
match (ty::deref(t_in, true), ty::deref(t_out, true)) {
(Some(ty::mt{ ty: t_in, .. }), Some(ty::mt{ ty: t_out, .. })) => {
t_in == t_out
@ -557,7 +557,7 @@ pub fn trans_rust_fn_with_foreign_abi<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
body: &ast::Block,
attrs: &[ast::Attribute],
llwrapfn: ValueRef,
param_substs: &Substs<'tcx>,
param_substs: &'tcx Substs<'tcx>,
id: ast::NodeId,
hash: Option<&str>) {
let _icx = push_ctxt("foreign::build_foreign_fn");
@ -577,7 +577,7 @@ pub fn trans_rust_fn_with_foreign_abi<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
fn build_rust_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
decl: &ast::FnDecl,
body: &ast::Block,
param_substs: &Substs<'tcx>,
param_substs: &'tcx Substs<'tcx>,
attrs: &[ast::Attribute],
id: ast::NodeId,
hash: Option<&str>)
@ -558,12 +558,12 @@ fn make_generic_glue<'a, 'tcx, F>(ccx: &CrateContext<'a, 'tcx>,
let glue_name = format!("glue {} {}", name, ty_to_short_str(ccx.tcx(), t));
let _s = StatRecorder::new(ccx, glue_name);
let empty_param_substs = Substs::trans_empty();
let empty_substs = ccx.tcx().mk_substs(Substs::trans_empty());
let (arena, fcx): (TypedArena<_>, FunctionContext);
arena = TypedArena::new();
fcx = new_fn_ctxt(ccx, llfn, ast::DUMMY_NODE_ID, false,
ty::FnConverging(ty::mk_nil(ccx.tcx())),
&empty_param_substs, None, &arena);
empty_substs, None, &arena);
let bcx = init_function(&fcx, false, ty::FnConverging(ty::mk_nil(ccx.tcx())));
@ -159,13 +159,14 @@ fn instantiate_inline(ccx: &CrateContext, fn_id: ast::DefId)
let unparameterized = impl_tpt.generics.types.is_empty() &&
mth.pe_generics().ty_params.is_empty();
let empty_substs = ccx.tcx().mk_substs(Substs::trans_empty());
if unparameterized {
let llfn = get_item_val(ccx, mth.id);
trans_fn(ccx,
&*mth.pe_fn_decl(),
&*mth.pe_body(),
llfn,
&Substs::trans_empty(),
empty_substs,
mth.id,
&[]);
// Use InternalLinkage so LLVM can optimize more

use arena::TypedArena;
use back::abi;
use back::link;
use llvm::{self, ValueRef, get_param};
use llvm::{ValueRef, get_param};
use metadata::csearch;
use middle::subst::Substs;
use middle::subst::VecPerParamSpace;
@ -23,6 +23,7 @@ use trans::callee::*;
use trans::callee;
use trans::cleanup;
use trans::common::*;
use trans::consts;
use trans::datum::*;
use trans::debuginfo::DebugLoc;
use trans::expr::{SaveIn, Ignore};
@ -36,7 +37,6 @@ use middle::ty::{self, Ty};
use middle::ty::MethodCall;
use util::ppaux::Repr;
use std::ffi::CString;
use std::rc::Rc;
use syntax::abi::{Rust, RustCall};
use syntax::parse::token;
@ -82,11 +82,12 @@ pub fn trans_impl(ccx: &CrateContext,
let trans_everywhere = attr::requests_inline(&method.attrs[]);
for (ref ccx, is_origin) in ccx.maybe_iter(trans_everywhere) {
let llfn = get_item_val(ccx, method.id);
let empty_substs = tcx.mk_substs(Substs::trans_empty());
trans_fn(ccx,
method.pe_fn_decl(),
method.pe_body(),
llfn,
&Substs::trans_empty(),
empty_substs,
method.id,
&[]);
update_linkage(ccx,
@ -174,7 +175,7 @@ pub fn trans_static_method_callee<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
method_id: ast::DefId,
trait_id: ast::DefId,
expr_id: ast::NodeId,
param_substs: &subst::Substs<'tcx>)
param_substs: &'tcx subst::Substs<'tcx>)
-> Datum<'tcx, Rvalue>
{
let _icx = push_ctxt("meth::trans_static_method_callee");
@ -599,7 +600,7 @@ pub fn trans_object_shim<'a, 'tcx>(
let sig = ty::erase_late_bound_regions(ccx.tcx(), &fty.sig);
let empty_substs = Substs::trans_empty();
let empty_substs = tcx.mk_substs(Substs::trans_empty());
let (block_arena, fcx): (TypedArena<_>, FunctionContext);
block_arena = TypedArena::new();
fcx = new_fn_ctxt(ccx,
@ -607,7 +608,7 @@ pub fn trans_object_shim<'a, 'tcx>(
ast::DUMMY_NODE_ID,
false,
sig.output,
&empty_substs,
empty_substs,
None,
&block_arena);
let mut bcx = init_function(&fcx, false, sig.output);
@ -689,19 +690,19 @@ pub fn trans_object_shim<'a, 'tcx>(
/// `trait_ref` would map `T:Trait`, but `box_ty` would be
/// `Foo<T>`. This `box_ty` is primarily used to encode the destructor.
/// This will hopefully change now that DST is underway.
pub fn get_vtable<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
pub fn get_vtable<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
box_ty: Ty<'tcx>,
trait_ref: ty::PolyTraitRef<'tcx>)
trait_ref: ty::PolyTraitRef<'tcx>,
param_substs: &'tcx subst::Substs<'tcx>)
-> ValueRef
{
debug!("get_vtable(box_ty={}, trait_ref={})",
box_ty.repr(bcx.tcx()),
trait_ref.repr(bcx.tcx()));
let tcx = bcx.tcx();
let ccx = bcx.ccx();
let tcx = ccx.tcx();
let _icx = push_ctxt("meth::get_vtable");
debug!("get_vtable(box_ty={}, trait_ref={})",
box_ty.repr(tcx),
trait_ref.repr(tcx));
// Check the cache.
let cache_key = (box_ty, trait_ref.clone());
match ccx.vtables().borrow().get(&cache_key) {
@ -711,9 +712,7 @@ pub fn get_vtable<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
// Not in the cache. Build it.
let methods = traits::supertraits(tcx, trait_ref.clone()).flat_map(|trait_ref| {
let vtable = fulfill_obligation(bcx.ccx(),
DUMMY_SP,
trait_ref.clone());
let vtable = fulfill_obligation(ccx, DUMMY_SP, trait_ref.clone());
match vtable {
traits::VtableBuiltin(_) => {
Vec::new().into_iter()
@ -723,83 +722,61 @@ pub fn get_vtable<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
impl_def_id: id,
substs,
nested: _ }) => {
emit_vtable_methods(bcx, id, substs).into_iter()
emit_vtable_methods(ccx, id, substs, param_substs).into_iter()
}
traits::VtableClosure(closure_def_id, substs) => {
let llfn = trans_fn_ref_with_substs(
bcx.ccx(),
ccx,
closure_def_id,
ExprId(0),
bcx.fcx.param_substs,
substs.clone()).val;
param_substs,
substs).val;
(vec!(llfn)).into_iter()
vec![llfn].into_iter()
}
traits::VtableFnPointer(bare_fn_ty) => {
let llfn = vec![trans_fn_pointer_shim(bcx.ccx(), bare_fn_ty)];
llfn.into_iter()
vec![trans_fn_pointer_shim(ccx, bare_fn_ty)].into_iter()
}
traits::VtableObject(ref data) => {
// this would imply that the Self type being erased is
// an object type; this cannot happen because we
// cannot cast an unsized type into a trait object
bcx.sess().bug(
tcx.sess.bug(
&format!("cannot get vtable for an object type: {}",
data.repr(bcx.tcx())));
data.repr(tcx)));
}
traits::VtableParam(..) => {
bcx.sess().bug(
tcx.sess.bug(
&format!("resolved vtable for {} to bad vtable {} in trans",
trait_ref.repr(bcx.tcx()),
vtable.repr(bcx.tcx()))[]);
trait_ref.repr(tcx),
vtable.repr(tcx))[]);
}
}
});
let size_ty = sizing_type_of(ccx, trait_ref.self_ty());
let size = machine::llsize_of_alloc(ccx, size_ty);
let ll_size = C_uint(ccx, size);
let align = align_of(ccx, trait_ref.self_ty());
let ll_align = C_uint(ccx, align);
let components: Vec<_> = vec![
// Generate a destructor for the vtable.
let drop_glue = glue::get_drop_glue(ccx, box_ty);
let vtable = make_vtable(ccx, drop_glue, ll_size, ll_align, methods);
glue::get_drop_glue(ccx, box_ty),
C_uint(ccx, size),
C_uint(ccx, align)
].into_iter().chain(methods).collect();
let vtable = consts::addr_of(ccx, C_struct(ccx, &components, false),
"vtable", trait_ref.def_id().node);
ccx.vtables().borrow_mut().insert(cache_key, vtable);
vtable
}
/// Helper function to declare and initialize the vtable.
pub fn make_vtable<I: Iterator<Item=ValueRef>>(ccx: &CrateContext,
drop_glue: ValueRef,
size: ValueRef,
align: ValueRef,
ptrs: I)
-> ValueRef {
let _icx = push_ctxt("meth::make_vtable");
let head = vec![drop_glue, size, align];
let components: Vec<_> = head.into_iter().chain(ptrs).collect();
unsafe {
let tbl = C_struct(ccx, &components[], false);
let sym = token::gensym("vtable");
let buf = CString::from_vec(format!("vtable{}", sym.usize()).into_bytes());
let vt_gvar = llvm::LLVMAddGlobal(ccx.llmod(), val_ty(tbl).to_ref(),
buf.as_ptr());
llvm::LLVMSetInitializer(vt_gvar, tbl);
llvm::LLVMSetGlobalConstant(vt_gvar, llvm::True);
llvm::SetLinkage(vt_gvar, llvm::InternalLinkage);
vt_gvar
}
}
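For orientation, the global that `get_vtable` now emits directly through `consts::addr_of` (replacing the `make_vtable` helper above) can be pictured as a fixed header followed by the trait's method pointers; the struct below is illustrative only (assumed names, not rustc types).

    // The emitted vtable starts with these three words, followed by the
    // trait's method pointers in declaration order -- the same order as the
    // `components` vector built above.
    #[repr(C)]
    struct VtableHeader {
        drop_glue: fn(*mut ()), // glue::get_drop_glue(ccx, box_ty)
        size: usize,            // llsize_of_alloc of the erased self type
        align: usize,           // align_of of the erased self type
    }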
fn emit_vtable_methods<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
fn emit_vtable_methods<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
impl_id: ast::DefId,
substs: subst::Substs<'tcx>)
substs: subst::Substs<'tcx>,
param_substs: &'tcx subst::Substs<'tcx>)
-> Vec<ValueRef> {
let ccx = bcx.ccx();
let tcx = ccx.tcx();
let trt_id = match ty::impl_trait_ref(tcx, impl_id) {
@ -808,7 +785,7 @@ fn emit_vtable_methods<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
make a vtable for a type impl!")
};
ty::populate_implementations_for_trait_if_necessary(bcx.tcx(), trt_id);
ty::populate_implementations_for_trait_if_necessary(tcx, trt_id);
let trait_item_def_ids = ty::trait_item_def_ids(tcx, trt_id);
trait_item_def_ids.iter().flat_map(|method_def_id| {
@ -835,7 +812,7 @@ fn emit_vtable_methods<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
ccx,
m_id,
ExprId(0),
bcx.fcx.param_substs,
param_substs,
substs.clone()).val;
// currently, at least, by-value self is not object safe
@ -882,7 +859,7 @@ pub fn trans_trait_cast<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
bcx = datum.store_to(bcx, llboxdest);
// Store the vtable into the second half of pair.
let vtable = get_vtable(bcx, datum_ty, trait_ref);
let vtable = get_vtable(bcx.ccx(), datum_ty, trait_ref, bcx.fcx.param_substs);
let llvtabledest = GEPi(bcx, lldest, &[0, abi::FAT_PTR_EXTRA]);
let llvtabledest = PointerCast(bcx, llvtabledest, val_ty(vtable).ptr_to());
Store(bcx, vtable, llvtabledest);
@ -36,7 +36,7 @@ use std::hash::{Hasher, Hash, SipHasher};
pub fn monomorphic_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
fn_id: ast::DefId,
psubsts: &subst::Substs<'tcx>,
psubsts: &'tcx subst::Substs<'tcx>,
ref_id: Option<ast::NodeId>)
-> (ValueRef, Ty<'tcx>, bool) {
debug!("monomorphic_fn(\
@ -55,7 +55,7 @@ pub fn monomorphic_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
let hash_id = MonoId {
def: fn_id,
params: psubsts.types.clone()
params: &psubsts.types
};
let item_ty = ty::lookup_item_type(ccx.tcx(), fn_id).ty;
@ -289,7 +289,7 @@ pub fn monomorphic_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
#[derive(PartialEq, Eq, Hash, Debug)]
pub struct MonoId<'tcx> {
pub def: ast::DefId,
pub params: subst::VecPerParamSpace<Ty<'tcx>>
pub params: &'tcx subst::VecPerParamSpace<Ty<'tcx>>
}
/// Monomorphizes a type from the AST by first applying the in-scope
@ -309,8 +309,6 @@ pub fn write_content<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
|set_bcx, lleltptr, _| {
elem.shallow_copy(set_bcx, lleltptr)
});
elem.add_clean_if_rvalue(bcx, element.id);
bcx
}
}
@ -0,0 +1,55 @@
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::{int, i8, i16, i32, i64};
use std::thread::Thread;
fn main() {
assert!(Thread::scoped(move|| int::MIN / -1).join().is_err());
//~^ ERROR attempted to divide with overflow in a constant expression
assert!(Thread::scoped(move|| i8::MIN / -1).join().is_err());
//~^ ERROR attempted to divide with overflow in a constant expression
assert!(Thread::scoped(move|| i16::MIN / -1).join().is_err());
//~^ ERROR attempted to divide with overflow in a constant expression
assert!(Thread::scoped(move|| i32::MIN / -1).join().is_err());
//~^ ERROR attempted to divide with overflow in a constant expression
assert!(Thread::scoped(move|| i64::MIN / -1).join().is_err());
//~^ ERROR attempted to divide with overflow in a constant expression
assert!(Thread::scoped(move|| 1is / 0).join().is_err());
//~^ ERROR attempted to divide by zero in a constant expression
assert!(Thread::scoped(move|| 1i8 / 0).join().is_err());
//~^ ERROR attempted to divide by zero in a constant expression
assert!(Thread::scoped(move|| 1i16 / 0).join().is_err());
//~^ ERROR attempted to divide by zero in a constant expression
assert!(Thread::scoped(move|| 1i32 / 0).join().is_err());
//~^ ERROR attempted to divide by zero in a constant expression
assert!(Thread::scoped(move|| 1i64 / 0).join().is_err());
//~^ ERROR attempted to divide by zero in a constant expression
assert!(Thread::scoped(move|| int::MIN % -1).join().is_err());
//~^ ERROR attempted remainder with overflow in a constant expression
assert!(Thread::scoped(move|| i8::MIN % -1).join().is_err());
//~^ ERROR attempted remainder with overflow in a constant expression
assert!(Thread::scoped(move|| i16::MIN % -1).join().is_err());
//~^ ERROR attempted remainder with overflow in a constant expression
assert!(Thread::scoped(move|| i32::MIN % -1).join().is_err());
//~^ ERROR attempted remainder with overflow in a constant expression
assert!(Thread::scoped(move|| i64::MIN % -1).join().is_err());
//~^ ERROR attempted remainder with overflow in a constant expression
assert!(Thread::scoped(move|| 1is % 0).join().is_err());
//~^ ERROR attempted remainder with a divisor of zero in a constant expression
assert!(Thread::scoped(move|| 1i8 % 0).join().is_err());
//~^ ERROR attempted remainder with a divisor of zero in a constant expression
assert!(Thread::scoped(move|| 1i16 % 0).join().is_err());
//~^ ERROR attempted remainder with a divisor of zero in a constant expression
assert!(Thread::scoped(move|| 1i32 % 0).join().is_err());
//~^ ERROR attempted remainder with a divisor of zero in a constant expression
assert!(Thread::scoped(move|| 1i64 % 0).join().is_err());
//~^ ERROR attempted remainder with a divisor of zero in a constant expression
}
@ -25,7 +25,9 @@ fn main() {
let mut dropped = false;
{
let leak = Leak { dropped: &mut dropped };
for ((), leaked) in Some(((),leak)).into_iter() {}
// FIXME(#21721) "hack" used to be () but that can cause
// certain LLVM versions to abort during optimizations.
for (_, leaked) in Some(("hack", leak)).into_iter() {}
}
assert!(dropped);
@ -8,28 +8,32 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::{int, i8, i16, i32, i64};
use std::num::Int;
use std::thread::Thread;
// Avoid using constants, which would trigger compile-time errors.
fn min_val<T: Int>() -> T { Int::min_value() }
fn zero<T: Int>() -> T { Int::zero() }
fn main() {
assert!(Thread::scoped(move|| int::MIN / -1).join().is_err());
assert!(Thread::scoped(move|| i8::MIN / -1).join().is_err());
assert!(Thread::scoped(move|| i16::MIN / -1).join().is_err());
assert!(Thread::scoped(move|| i32::MIN / -1).join().is_err());
assert!(Thread::scoped(move|| i64::MIN / -1).join().is_err());
assert!(Thread::scoped(move|| 1 / 0).join().is_err());
assert!(Thread::scoped(move|| 1i8 / 0).join().is_err());
assert!(Thread::scoped(move|| 1i16 / 0).join().is_err());
assert!(Thread::scoped(move|| 1i32 / 0).join().is_err());
assert!(Thread::scoped(move|| 1i64 / 0).join().is_err());
assert!(Thread::scoped(move|| int::MIN % -1).join().is_err());
assert!(Thread::scoped(move|| i8::MIN % -1).join().is_err());
assert!(Thread::scoped(move|| i16::MIN % -1).join().is_err());
assert!(Thread::scoped(move|| i32::MIN % -1).join().is_err());
assert!(Thread::scoped(move|| i64::MIN % -1).join().is_err());
assert!(Thread::scoped(move|| 1 % 0).join().is_err());
assert!(Thread::scoped(move|| 1i8 % 0).join().is_err());
assert!(Thread::scoped(move|| 1i16 % 0).join().is_err());
assert!(Thread::scoped(move|| 1i32 % 0).join().is_err());
assert!(Thread::scoped(move|| 1i64 % 0).join().is_err());
assert!(Thread::scoped(move|| min_val::<isize>() / -1).join().is_err());
assert!(Thread::scoped(move|| min_val::<i8>() / -1).join().is_err());
assert!(Thread::scoped(move|| min_val::<i16>() / -1).join().is_err());
assert!(Thread::scoped(move|| min_val::<i32>() / -1).join().is_err());
assert!(Thread::scoped(move|| min_val::<i64>() / -1).join().is_err());
assert!(Thread::scoped(move|| 1is / zero()).join().is_err());
assert!(Thread::scoped(move|| 1i8 / zero()).join().is_err());
assert!(Thread::scoped(move|| 1i16 / zero()).join().is_err());
assert!(Thread::scoped(move|| 1i32 / zero()).join().is_err());
assert!(Thread::scoped(move|| 1i64 / zero()).join().is_err());
assert!(Thread::scoped(move|| min_val::<isize>() % -1).join().is_err());
assert!(Thread::scoped(move|| min_val::<i8>() % -1).join().is_err());
assert!(Thread::scoped(move|| min_val::<i16>() % -1).join().is_err());
assert!(Thread::scoped(move|| min_val::<i32>() % -1).join().is_err());
assert!(Thread::scoped(move|| min_val::<i64>() % -1).join().is_err());
assert!(Thread::scoped(move|| 1is % zero()).join().is_err());
assert!(Thread::scoped(move|| 1i8 % zero()).join().is_err());
assert!(Thread::scoped(move|| 1i16 % zero()).join().is_err());
assert!(Thread::scoped(move|| 1i32 % zero()).join().is_err());
assert!(Thread::scoped(move|| 1i64 % zero()).join().is_err());
}
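The reason this run-pass test now routes its operands through `min_val` and `zero` is the same distinction the companion compile-fail test above exercises: once the operands are no longer constant expressions, the checks happen at run time as panics (observed via `Thread::scoped(..).join().is_err()`) instead of being rejected at compile time by const-eval. A minimal sketch of the distinction, under the era's semantics:

    fn runtime_div(n: i32, d: i32) -> i32 {
        // Not a constant expression: dividing by zero here panics at run
        // time, which Thread::scoped(..).join().is_err() can observe.
        n / d
    }

    // By contrast, a literal constant expression such as `1i32 / 0` is now
    // rejected at compile time ("attempted to divide by zero in a constant
    // expression"), which is exactly what the compile-fail test checks.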