use llvm::{self, AttributePlace};
use rustc_codegen_ssa::MemFlags;
use builder::Builder;
use context::CodegenCx;
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::mir::operand::OperandValue;
use type_::Type;
use type_of::{LayoutLlvmExt, PointerKind};
use value::Value;
use rustc_target::abi::call::ArgType;

use rustc_codegen_ssa::traits::*;

use rustc_target::abi::{HasDataLayout, LayoutOf, Size, TyLayout, Abi as LayoutAbi};
use rustc::ty::{self, Ty, Instance};
use rustc::ty::layout;

use libc::c_uint;

pub use rustc_target::spec::abi::Abi;
pub use rustc::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA};
pub use rustc_target::abi::call::*;
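
// Expands to one `if` per listed attribute kind; e.g. `for_each_kind!(flags, f, ZExt)`
// becomes `if flags.contains(ArgAttribute::ZExt) { f(llvm::Attribute::ZExt) }`.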
macro_rules! for_each_kind {
    ($flags: ident, $f: ident, $($kind: ident),+) => ({
        $(if $flags.contains(ArgAttribute::$kind) { $f(llvm::Attribute::$kind) })+
    })
}

trait ArgAttributeExt {
    fn for_each_kind<F>(&self, f: F) where F: FnMut(llvm::Attribute);
}

impl ArgAttributeExt for ArgAttribute {
    fn for_each_kind<F>(&self, mut f: F) where F: FnMut(llvm::Attribute) {
        for_each_kind!(self, f,
                       ByVal, NoAlias, NoCapture, NonNull, ReadOnly, SExt, StructRet, ZExt, InReg)
    }
}
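
// Attaches the attributes recorded in an `ArgAttributes` (including the
// `dereferenceable`/`align` pointee metadata) to a function or a call site.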
pub trait ArgAttributesExt {
    fn apply_llfn(&self, idx: AttributePlace, llfn: &Value);
    fn apply_callsite(&self, idx: AttributePlace, callsite: &Value);
}

impl ArgAttributesExt for ArgAttributes {
    fn apply_llfn(&self, idx: AttributePlace, llfn: &Value) {
        let mut regular = self.regular;
        unsafe {
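            // `dereferenceable(N)` is only emitted when the pointer is also known
            // to be non-null; otherwise the weaker `dereferenceable_or_null(N)` is
            // used. The `NonNull` bit is consumed here rather than re-applied below.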
            let deref = self.pointee_size.bytes();
            if deref != 0 {
                if regular.contains(ArgAttribute::NonNull) {
                    llvm::LLVMRustAddDereferenceableAttr(llfn,
                                                         idx.as_uint(),
                                                         deref);
                } else {
                    llvm::LLVMRustAddDereferenceableOrNullAttr(llfn,
                                                               idx.as_uint(),
                                                               deref);
                }
                regular -= ArgAttribute::NonNull;
            }
            if let Some(align) = self.pointee_align {
                llvm::LLVMRustAddAlignmentAttr(llfn,
                                               idx.as_uint(),
                                               align.bytes() as u32);
            }
            regular.for_each_kind(|attr| attr.apply_llfn(idx, llfn));
        }
    }

    fn apply_callsite(&self, idx: AttributePlace, callsite: &Value) {
        let mut regular = self.regular;
        unsafe {
            let deref = self.pointee_size.bytes();
            if deref != 0 {
                if regular.contains(ArgAttribute::NonNull) {
                    llvm::LLVMRustAddDereferenceableCallSiteAttr(callsite,
                                                                 idx.as_uint(),
                                                                 deref);
                } else {
                    llvm::LLVMRustAddDereferenceableOrNullCallSiteAttr(callsite,
                                                                       idx.as_uint(),
                                                                       deref);
                }
                regular -= ArgAttribute::NonNull;
            }
            if let Some(align) = self.pointee_align {
                llvm::LLVMRustAddAlignmentCallSiteAttr(callsite,
                                                       idx.as_uint(),
                                                       align.bytes() as u32);
            }
            regular.for_each_kind(|attr| attr.apply_callsite(idx, callsite));
        }
    }
}
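
// Lowers the target-independent `Reg`/`CastTarget` descriptions produced by
// the ABI-computation code into concrete LLVM types.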
pub trait LlvmType {
    fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type;
}

impl LlvmType for Reg {
    fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
        match self.kind {
            RegKind::Integer => cx.type_ix(self.size.bits()),
            RegKind::Float => {
                match self.size.bits() {
                    32 => cx.type_f32(),
                    64 => cx.type_f64(),
                    _ => bug!("unsupported float: {:?}", self)
                }
            }
            RegKind::Vector => {
                cx.type_vector(cx.type_i8(), self.size.bytes())
            }
        }
    }
}

impl LlvmType for CastTarget {
    fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
        let rest_ll_unit = self.rest.unit.llvm_type(cx);
        let (rest_count, rem_bytes) = if self.rest.unit.size.bytes() == 0 {
            (0, 0)
        } else {
            (self.rest.total.bytes() / self.rest.unit.size.bytes(),
             self.rest.total.bytes() % self.rest.unit.size.bytes())
        };

        if self.prefix.iter().all(|x| x.is_none()) {
            // Simplify to a single unit when there is no prefix and size <= unit size
            if self.rest.total <= self.rest.unit.size {
                return rest_ll_unit;
            }

            // Simplify to array when all chunks are the same size and type
            if rem_bytes == 0 {
                return cx.type_array(rest_ll_unit, rest_count);
            }
        }

        // Create list of fields in the main structure
        let mut args: Vec<_> =
            self.prefix.iter().flat_map(|option_kind| option_kind.map(
                |kind| Reg { kind, size: self.prefix_chunk }.llvm_type(cx)))
            .chain((0..rest_count).map(|_| rest_ll_unit))
            .collect();

        // Append final integer
        if rem_bytes != 0 {
            // Only integers can be really split further.
            assert_eq!(self.rest.unit.kind, RegKind::Integer);
            args.push(cx.type_ix(rem_bytes * 8));
        }

        cx.type_struct(&args, false)
    }
}
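
// Store helpers for a single argument/return value: `memory_ty` is the LLVM
// type of its in-memory representation, and the `store*` methods move the
// raw LLVM parameter value(s) into a Rust-level place.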
pub trait ArgTypeExt<'ll, 'tcx> {
    fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    fn store(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        val: &'ll Value,
        dst: PlaceRef<'tcx, &'ll Value>,
    );
    fn store_fn_arg(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        idx: &mut usize,
        dst: PlaceRef<'tcx, &'ll Value>,
    );
}

impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
    /// Gets the LLVM type for a place of the original Rust type of
    /// this argument/return, i.e., the result of `type_of::type_of`.
    fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        self.layout.llvm_type(cx)
    }

    /// Stores a direct/indirect value described by this ArgType into a
    /// place for the original Rust type of this argument/return.
    /// Can be used for both storing formal arguments into Rust variables
    /// or results of call/invoke instructions into their destinations.
    fn store(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        val: &'ll Value,
        dst: PlaceRef<'tcx, &'ll Value>,
    ) {
        if self.is_ignore() {
            return;
        }
        if self.is_sized_indirect() {
            OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst)
        } else if self.is_unsized_indirect() {
            bug!("unsized ArgType must be handled through store_fn_arg");
        } else if let PassMode::Cast(cast) = self.mode {
            // FIXME(eddyb): Figure out when the simpler Store is safe, clang
            // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
            let can_store_through_cast_ptr = false;
            if can_store_through_cast_ptr {
                let cast_ptr_llty = bx.type_ptr_to(cast.llvm_type(bx));
                let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty);
                bx.store(val, cast_dst, self.layout.align.abi);
            } else {
                // The actual return type is a struct, but the ABI
                // adaptation code has cast it into some scalar type. The
                // code that follows is the only reliable way I have
                // found to do a transform like i64 -> {i32,i32}.
                // Basically we dump the data onto the stack then memcpy it.
                //
                // Other approaches I tried:
                // - Casting rust ret pointer to the foreign type and using Store
                //   is (a) unsafe if size of foreign type > size of rust type and
                //   (b) runs afoul of strict aliasing rules, yielding invalid
                //   assembly under -O (specifically, the store gets removed).
                // - Truncating foreign type to correct integral type and then
                //   bitcasting to the struct type yields invalid cast errors.

                // We instead thus allocate some scratch space...
                let scratch_size = cast.size(bx);
                let scratch_align = cast.align(bx);
                let llscratch = bx.alloca(cast.llvm_type(bx), "abi_cast", scratch_align);
                bx.lifetime_start(llscratch, scratch_size);

                // ...where we first store the value...
                bx.store(val, llscratch, scratch_align);

                // ...and then memcpy it to the intended destination.
                bx.memcpy(
                    dst.llval,
                    self.layout.align.abi,
                    llscratch,
                    scratch_align,
                    bx.const_usize(self.layout.size.bytes()),
                    MemFlags::empty()
                );

                bx.lifetime_end(llscratch, scratch_size);
            }
        } else {
            OperandValue::Immediate(val).store(bx, dst);
        }
    }

    fn store_fn_arg(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        idx: &mut usize,
        dst: PlaceRef<'tcx, &'ll Value>,
    ) {
        let mut next = || {
            let val = llvm::get_param(bx.llfn(), *idx as c_uint);
            *idx += 1;
            val
        };
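        // Unsized indirect arguments arrive as a (data pointer, extra) pair of
        // parameters and pairs as two scalars; every other mode is a single
        // LLVM value that `store` knows how to handle.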
        match self.mode {
            PassMode::Ignore => {},
            PassMode::Pair(..) => {
                OperandValue::Pair(next(), next()).store(bx, dst);
            }
            PassMode::Indirect(_, Some(_)) => {
                OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
            }
            PassMode::Direct(_) | PassMode::Indirect(_, None) | PassMode::Cast(_) => {
                self.store(bx, next(), dst);
            }
        }
    }
}

impl ArgTypeMethods<'tcx> for Builder<'a, 'll, 'tcx> {
    fn store_fn_arg(
        &mut self,
        ty: &ArgType<'tcx, Ty<'tcx>>,
        idx: &mut usize, dst: PlaceRef<'tcx, Self::Value>
    ) {
        ty.store_fn_arg(self, idx, dst)
    }
    fn store_arg_ty(
        &mut self,
        ty: &ArgType<'tcx, Ty<'tcx>>,
        val: &'ll Value,
        dst: PlaceRef<'tcx, &'ll Value>
    ) {
        ty.store(self, val, dst)
    }
    fn memory_ty(&self, ty: &ArgType<'tcx, Ty<'tcx>>) -> &'ll Type {
        ty.memory_ty(self)
    }
}
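
// Computes the ABI of a full function signature: how the return value and
// each argument are passed, plus the calling convention.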
pub trait FnTypeExt<'tcx> {
    fn of_instance(cx: &CodegenCx<'ll, 'tcx>, instance: &ty::Instance<'tcx>) -> Self;
    fn new(cx: &CodegenCx<'ll, 'tcx>,
           sig: ty::FnSig<'tcx>,
           extra_args: &[Ty<'tcx>]) -> Self;
    fn new_vtable(cx: &CodegenCx<'ll, 'tcx>,
                  sig: ty::FnSig<'tcx>,
                  extra_args: &[Ty<'tcx>]) -> Self;
    fn new_internal(
        cx: &CodegenCx<'ll, 'tcx>,
        sig: ty::FnSig<'tcx>,
        extra_args: &[Ty<'tcx>],
        mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgType<'tcx, Ty<'tcx>>,
    ) -> Self;
    fn adjust_for_abi(&mut self,
                      cx: &CodegenCx<'ll, 'tcx>,
                      abi: Abi);
    fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    fn llvm_cconv(&self) -> llvm::CallConv;
    fn apply_attrs_llfn(&self, llfn: &'ll Value);
    fn apply_attrs_callsite(&self, bx: &mut Builder<'a, 'll, 'tcx>, callsite: &'ll Value);
}

impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> {
    fn of_instance(cx: &CodegenCx<'ll, 'tcx>, instance: &ty::Instance<'tcx>) -> Self {
        let sig = instance.fn_sig(cx.tcx);
        let sig = cx.tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
        FnType::new(cx, sig, &[])
    }

    fn new(cx: &CodegenCx<'ll, 'tcx>,
           sig: ty::FnSig<'tcx>,
           extra_args: &[Ty<'tcx>]) -> Self {
        FnType::new_internal(cx, sig, extra_args, |ty, _| {
            ArgType::new(cx.layout_of(ty))
        })
    }

    fn new_vtable(cx: &CodegenCx<'ll, 'tcx>,
                  sig: ty::FnSig<'tcx>,
                  extra_args: &[Ty<'tcx>]) -> Self {
        FnType::new_internal(cx, sig, extra_args, |ty, arg_idx| {
            let mut layout = cx.layout_of(ty);
            // Don't pass the vtable, it's not an argument of the virtual fn.
            // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
            // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
            if arg_idx == Some(0) {
                let fat_pointer_ty = if layout.is_unsized() {
                    // unsized `self` is passed as a pointer to `self`
                    // FIXME (mikeyhew) change this to use &own if it is ever added to the language
                    cx.tcx.mk_mut_ptr(layout.ty)
                } else {
                    match layout.abi {
                        LayoutAbi::ScalarPair(..) => (),
                        _ => bug!("receiver type has unsupported layout: {:?}", layout)
                    }

                    // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
                    // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
                    // elsewhere in the compiler as a method on a `dyn Trait`.
                    // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
                    // get a built-in pointer type
                    let mut fat_pointer_layout = layout;
                    'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
                        && !fat_pointer_layout.ty.is_region_ptr()
                    {
                        'iter_fields: for i in 0..fat_pointer_layout.fields.count() {
                            let field_layout = fat_pointer_layout.field(cx, i);

                            if !field_layout.is_zst() {
                                fat_pointer_layout = field_layout;
                                continue 'descend_newtypes
                            }
                        }

                        bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
                    }

                    fat_pointer_layout.ty
                };

                // we now have a type like `*mut RcBox<dyn Trait>`
                // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
                // this is understood as a special case elsewhere in the compiler
                let unit_pointer_ty = cx.tcx.mk_mut_ptr(cx.tcx.mk_unit());
                layout = cx.layout_of(unit_pointer_ty);
                layout.ty = fat_pointer_ty;
            }
            ArgType::new(layout)
        })
    }

    fn new_internal(
        cx: &CodegenCx<'ll, 'tcx>,
        sig: ty::FnSig<'tcx>,
        extra_args: &[Ty<'tcx>],
        mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgType<'tcx, Ty<'tcx>>,
    ) -> Self {
        debug!("FnType::new_internal({:?}, {:?})", sig, extra_args);
        use self::Abi::*;
        let conv = match cx.sess().target.target.adjust_abi(sig.abi) {
            RustIntrinsic | PlatformIntrinsic |
            Rust | RustCall => Conv::C,

            // It's the ABI's job to select this, not ours.
            System => bug!("system abi should be selected elsewhere"),

            Stdcall => Conv::X86Stdcall,
            Fastcall => Conv::X86Fastcall,
            Vectorcall => Conv::X86VectorCall,
            Thiscall => Conv::X86ThisCall,
            C => Conv::C,
            Unadjusted => Conv::C,
            Win64 => Conv::X86_64Win64,
            SysV64 => Conv::X86_64SysV,
            Aapcs => Conv::ArmAapcs,
            PtxKernel => Conv::PtxKernel,
            Msp430Interrupt => Conv::Msp430Intr,
            X86Interrupt => Conv::X86Intr,
            AmdGpuKernel => Conv::AmdGpuKernel,

            // These API constants ought to be more specific...
            Cdecl => Conv::C,
        };

        let mut inputs = sig.inputs();
        let extra_args = if sig.abi == RustCall {
            assert!(!sig.variadic && extra_args.is_empty());

            match sig.inputs().last().unwrap().sty {
                ty::Tuple(ref tupled_arguments) => {
                    inputs = &sig.inputs()[0..sig.inputs().len() - 1];
                    tupled_arguments
                }
                _ => {
                    bug!("argument to function with \"rust-call\" ABI \
                          is not a tuple");
                }
            }
        } else {
            assert!(sig.variadic || extra_args.is_empty());
            extra_args
        };

        let target = &cx.sess().target.target;
        let win_x64_gnu = target.target_os == "windows"
            && target.arch == "x86_64"
            && target.target_env == "gnu";
        let linux_s390x = target.target_os == "linux"
            && target.arch == "s390x"
            && target.target_env == "gnu";
        let linux_sparc64 = target.target_os == "linux"
            && target.arch == "sparc64"
            && target.target_env == "gnu";
        let rust_abi = match sig.abi {
            RustIntrinsic | PlatformIntrinsic | Rust | RustCall => true,
            _ => false
        };

        // Handle safe Rust thin and fat pointers.
        let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
                                      scalar: &layout::Scalar,
                                      layout: TyLayout<'tcx, Ty<'tcx>>,
                                      offset: Size,
                                      is_return: bool| {
            // Booleans are always an i1 that needs to be zero-extended.
            if scalar.is_bool() {
                attrs.set(ArgAttribute::ZExt);
                return;
            }

            // Only pointer types are handled below.
            if scalar.value != layout::Pointer {
                return;
            }

            if scalar.valid_range.start() < scalar.valid_range.end() {
                if *scalar.valid_range.start() > 0 {
                    attrs.set(ArgAttribute::NonNull);
                }
            }

            if let Some(pointee) = layout.pointee_info_at(cx, offset) {
                if let Some(kind) = pointee.safe {
                    attrs.pointee_size = pointee.size;
                    attrs.pointee_align = Some(pointee.align);

                    // `Box` pointer parameters never alias because ownership is transferred.
                    // `&mut` pointer parameters never alias other parameters or
                    // mutable global data.
                    //
                    // `&T` where `T` contains no `UnsafeCell<U>` is immutable, and can
                    // be marked as both `readonly` and `noalias`, as LLVM's definition
                    // of `noalias` is based solely on memory dependencies rather than
                    // pointer equality.
                    let no_alias = match kind {
                        PointerKind::Shared => false,
                        PointerKind::UniqueOwned => true,
                        PointerKind::Frozen |
                        PointerKind::UniqueBorrowed => !is_return
                    };
                    if no_alias {
                        attrs.set(ArgAttribute::NoAlias);
                    }

                    if kind == PointerKind::Frozen && !is_return {
                        attrs.set(ArgAttribute::ReadOnly);
                    }
                }
            }
        };
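
        // Builds the `ArgType` for a single argument, or for the return value
        // when `arg_idx` is `None`.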
        let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| {
            let is_return = arg_idx.is_none();
            let mut arg = mk_arg_type(ty, arg_idx);
            if arg.layout.is_zst() {
                // For some forsaken reason, x86_64-pc-windows-gnu
                // doesn't ignore zero-sized struct arguments.
                // The same is true for s390x-unknown-linux-gnu
                // and sparc64-unknown-linux-gnu.
                if is_return || rust_abi || (!win_x64_gnu && !linux_s390x && !linux_sparc64) {
                    arg.mode = PassMode::Ignore;
                }
            }

            // FIXME(eddyb) other ABIs don't have logic for scalar pairs.
            if !is_return && rust_abi {
                if let layout::Abi::ScalarPair(ref a, ref b) = arg.layout.abi {
                    let mut a_attrs = ArgAttributes::new();
                    let mut b_attrs = ArgAttributes::new();
                    adjust_for_rust_scalar(&mut a_attrs,
                                           a,
                                           arg.layout,
                                           Size::ZERO,
                                           false);
                    adjust_for_rust_scalar(&mut b_attrs,
                                           b,
                                           arg.layout,
                                           a.value.size(cx).align_to(b.value.align(cx).abi),
                                           false);
                    arg.mode = PassMode::Pair(a_attrs, b_attrs);
                    return arg;
                }
            }

            if let layout::Abi::Scalar(ref scalar) = arg.layout.abi {
                if let PassMode::Direct(ref mut attrs) = arg.mode {
                    adjust_for_rust_scalar(attrs,
                                           scalar,
                                           arg.layout,
                                           Size::ZERO,
                                           is_return);
                }
            }

            arg
        };

        let mut fn_ty = FnType {
            ret: arg_of(sig.output(), None),
            args: inputs.iter().chain(extra_args).enumerate().map(|(i, ty)| {
                arg_of(ty, Some(i))
            }).collect(),
            variadic: sig.variadic,
            conv,
        };
        fn_ty.adjust_for_abi(cx, sig.abi);
        fn_ty
    }

    fn adjust_for_abi(&mut self,
                      cx: &CodegenCx<'ll, 'tcx>,
                      abi: Abi) {
        if abi == Abi::Unadjusted { return }

        if abi == Abi::Rust || abi == Abi::RustCall ||
           abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic {
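            // For the Rust ABIs: pass pointer-sized-or-smaller aggregates as an
            // integer of the same size, and anything larger (or unsized, or a
            // SIMD vector) indirectly.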
            let fixup = |arg: &mut ArgType<'tcx, Ty<'tcx>>| {
                if arg.is_ignore() { return; }

                match arg.layout.abi {
                    layout::Abi::Aggregate { .. } => {}

                    // This is a fun case! The gist of what this is doing is
                    // that we want callers and callees to always agree on the
                    // ABI of how they pass SIMD arguments. If we were to *not*
                    // make these arguments indirect then they'd be immediates
                    // in LLVM, which means that they'd use whatever the
                    // appropriate ABI is for the callee and the caller. That
                    // means, for example, if the caller doesn't have AVX
                    // enabled but the callee does, then passing an AVX argument
                    // across this boundary would cause corrupt data to show up.
                    //
                    // This problem is fixed by unconditionally passing SIMD
                    // arguments through memory between callers and callees
                    // which should get them all to agree on ABI regardless of
                    // target feature sets. Some more information about this
                    // issue can be found in #44367.
                    //
                    // Note that the platform intrinsic ABI is exempt here as
                    // that's how we connect up to LLVM and it's unstable
                    // anyway, we control all calls to it in libstd.
                    layout::Abi::Vector { .. }
                        if abi != Abi::PlatformIntrinsic &&
                           cx.sess().target.target.options.simd_types_indirect =>
                    {
                        arg.make_indirect();
                        return
                    }

                    _ => return
                }

                let size = arg.layout.size;
                if arg.layout.is_unsized() || size > layout::Pointer.size(cx) {
                    arg.make_indirect();
                } else {
                    // We want to pass small aggregates as immediates, but using
                    // an LLVM aggregate type for this leads to bad optimizations,
                    // so we pick an appropriately sized integer type instead.
                    arg.cast_to(Reg {
                        kind: RegKind::Integer,
                        size
                    });
                }
            };
            fixup(&mut self.ret);
            for arg in &mut self.args {
                fixup(arg);
            }
            if let PassMode::Indirect(ref mut attrs, _) = self.ret.mode {
                attrs.set(ArgAttribute::StructRet);
            }
            return;
        }

        if let Err(msg) = self.adjust_for_cabi(cx, abi) {
            cx.sess().fatal(&msg);
        }
    }

    fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        let args_capacity: usize = self.args.iter().map(|arg|
            if arg.pad.is_some() { 1 } else { 0 } +
            if let PassMode::Pair(_, _) = arg.mode { 2 } else { 1 }
        ).sum();
        let mut llargument_tys = Vec::with_capacity(
            if let PassMode::Indirect(..) = self.ret.mode { 1 } else { 0 } + args_capacity
        );
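
        // An `Indirect` return is lowered to an sret-style out-pointer passed
        // as the first LLVM argument, with the LLVM return type set to `void`.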
        let llreturn_ty = match self.ret.mode {
            PassMode::Ignore => cx.type_void(),
            PassMode::Direct(_) | PassMode::Pair(..) => {
                self.ret.layout.immediate_llvm_type(cx)
            }
            PassMode::Cast(cast) => cast.llvm_type(cx),
            PassMode::Indirect(..) => {
                llargument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx)));
                cx.type_void()
            }
        };

        for arg in &self.args {
            // add padding
            if let Some(ty) = arg.pad {
                llargument_tys.push(ty.llvm_type(cx));
            }

            let llarg_ty = match arg.mode {
                PassMode::Ignore => continue,
                PassMode::Direct(_) => arg.layout.immediate_llvm_type(cx),
                PassMode::Pair(..) => {
                    llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 0, true));
                    llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 1, true));
                    continue;
                }
                PassMode::Indirect(_, Some(_)) => {
                    let ptr_ty = cx.tcx.mk_mut_ptr(arg.layout.ty);
                    let ptr_layout = cx.layout_of(ptr_ty);
                    llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 0, true));
                    llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 1, true));
                    continue;
                }
                PassMode::Cast(cast) => cast.llvm_type(cx),
                PassMode::Indirect(_, None) => cx.type_ptr_to(arg.memory_ty(cx)),
            };
            llargument_tys.push(llarg_ty);
        }

        if self.variadic {
            cx.type_variadic_func(&llargument_tys, llreturn_ty)
        } else {
            cx.type_func(&llargument_tys, llreturn_ty)
        }
    }

    fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        unsafe {
            llvm::LLVMPointerType(self.llvm_type(cx),
                                  cx.data_layout().instruction_address_space as c_uint)
        }
    }

    fn llvm_cconv(&self) -> llvm::CallConv {
        match self.conv {
            Conv::C => llvm::CCallConv,
            Conv::AmdGpuKernel => llvm::AmdGpuKernel,
            Conv::ArmAapcs => llvm::ArmAapcsCallConv,
            Conv::Msp430Intr => llvm::Msp430Intr,
            Conv::PtxKernel => llvm::PtxKernel,
            Conv::X86Fastcall => llvm::X86FastcallCallConv,
            Conv::X86Intr => llvm::X86_Intr,
            Conv::X86Stdcall => llvm::X86StdcallCallConv,
            Conv::X86ThisCall => llvm::X86_ThisCall,
            Conv::X86VectorCall => llvm::X86_VectorCall,
            Conv::X86_64SysV => llvm::X86_64_SysV,
            Conv::X86_64Win64 => llvm::X86_64_Win64,
        }
    }

    fn apply_attrs_llfn(&self, llfn: &'ll Value) {
        let mut i = 0;
        let mut apply = |attrs: &ArgAttributes| {
            attrs.apply_llfn(llvm::AttributePlace::Argument(i), llfn);
            i += 1;
        };
        match self.ret.mode {
            PassMode::Direct(ref attrs) => {
                attrs.apply_llfn(llvm::AttributePlace::ReturnValue, llfn);
            }
            PassMode::Indirect(ref attrs, _) => apply(attrs),
            _ => {}
        }
        for arg in &self.args {
            if arg.pad.is_some() {
                apply(&ArgAttributes::new());
            }
            match arg.mode {
                PassMode::Ignore => {}
                PassMode::Direct(ref attrs) |
                PassMode::Indirect(ref attrs, None) => apply(attrs),
                PassMode::Indirect(ref attrs, Some(ref extra_attrs)) => {
                    apply(attrs);
                    apply(extra_attrs);
                }
                PassMode::Pair(ref a, ref b) => {
                    apply(a);
                    apply(b);
                }
                PassMode::Cast(_) => apply(&ArgAttributes::new()),
            }
        }
    }

    fn apply_attrs_callsite(&self, bx: &mut Builder<'a, 'll, 'tcx>, callsite: &'ll Value) {
        let mut i = 0;
        let mut apply = |attrs: &ArgAttributes| {
            attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite);
            i += 1;
        };
        match self.ret.mode {
            PassMode::Direct(ref attrs) => {
                attrs.apply_callsite(llvm::AttributePlace::ReturnValue, callsite);
            }
            PassMode::Indirect(ref attrs, _) => apply(attrs),
            _ => {}
        }
        if let layout::Abi::Scalar(ref scalar) = self.ret.layout.abi {
            // If the value is a boolean, the range is 0..2 and that ultimately
            // becomes 0..0 when the type becomes i1, which would be rejected
            // by the LLVM verifier.
            if let layout::Int(..) = scalar.value {
                if !scalar.is_bool() {
                    let range = scalar.valid_range_exclusive(bx);
                    if range.start != range.end {
                        bx.range_metadata(callsite, range);
                    }
                }
            }
        }
        for arg in &self.args {
            if arg.pad.is_some() {
                apply(&ArgAttributes::new());
            }
            match arg.mode {
                PassMode::Ignore => {}
                PassMode::Direct(ref attrs) |
                PassMode::Indirect(ref attrs, None) => apply(attrs),
                PassMode::Indirect(ref attrs, Some(ref extra_attrs)) => {
                    apply(attrs);
                    apply(extra_attrs);
                }
                PassMode::Pair(ref a, ref b) => {
                    apply(a);
                    apply(b);
                }
                PassMode::Cast(_) => apply(&ArgAttributes::new()),
            }
        }

        let cconv = self.llvm_cconv();
        if cconv != llvm::CCallConv {
            llvm::SetInstructionCallConv(callsite, cconv);
        }
    }
}

impl AbiMethods<'tcx> for CodegenCx<'ll, 'tcx> {
    fn new_fn_type(&self, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> FnType<'tcx, Ty<'tcx>> {
        FnType::new(&self, sig, extra_args)
    }
    fn new_vtable(
        &self,
        sig: ty::FnSig<'tcx>,
        extra_args: &[Ty<'tcx>]
    ) -> FnType<'tcx, Ty<'tcx>> {
        FnType::new_vtable(&self, sig, extra_args)
    }
    fn fn_type_of_instance(&self, instance: &Instance<'tcx>) -> FnType<'tcx, Ty<'tcx>> {
        FnType::of_instance(&self, instance)
    }
}

impl AbiBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
    fn apply_attrs_callsite(
        &mut self,
        ty: &FnType<'tcx, Ty<'tcx>>,
        callsite: Self::Value
    ) {
        ty.apply_attrs_callsite(self, callsite)
    }
}