use crate::attributes;
use crate::builder::Builder;
use crate::context::CodegenCx;
use crate::llvm::{self, Attribute, AttributePlace};
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;

use rustc_codegen_ssa::mir::operand::OperandValue;
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::MemFlags;
use rustc_middle::bug;
use rustc_middle::ty::layout::LayoutOf;
pub use rustc_middle::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA};
use rustc_middle::ty::Ty;
use rustc_session::config;
use rustc_target::abi::call::ArgAbi;
pub use rustc_target::abi::call::*;
use rustc_target::abi::{self, HasDataLayout, Int};
pub use rustc_target::spec::abi::Abi;

use libc::c_uint;
use smallvec::SmallVec;

pub trait ArgAttributesExt {
    fn apply_attrs_to_llfn(&self, idx: AttributePlace, cx: &CodegenCx<'_, '_>, llfn: &Value);
    fn apply_attrs_to_callsite(
        &self,
        idx: AttributePlace,
        cx: &CodegenCx<'_, '_>,
        callsite: &Value,
    );
}

fn should_use_mutable_noalias(cx: &CodegenCx<'_, '_>) -> bool {
    // LLVM prior to version 12 had known miscompiles in the presence of
    // noalias attributes (see #54878), but we don't support earlier
    // versions at all anymore. We now enable mutable noalias by default.
    cx.tcx.sess.opts.debugging_opts.mutable_noalias.unwrap_or(true)
}

const ABI_AFFECTING_ATTRIBUTES: [(ArgAttribute, llvm::AttributeKind); 1] =
    [(ArgAttribute::InReg, llvm::AttributeKind::InReg)];

const OPTIMIZATION_ATTRIBUTES: [(ArgAttribute, llvm::AttributeKind); 5] = [
    (ArgAttribute::NoAlias, llvm::AttributeKind::NoAlias),
    (ArgAttribute::NoCapture, llvm::AttributeKind::NoCapture),
    (ArgAttribute::NonNull, llvm::AttributeKind::NonNull),
    (ArgAttribute::ReadOnly, llvm::AttributeKind::ReadOnly),
    (ArgAttribute::NoUndef, llvm::AttributeKind::NoUndef),
];
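
// How `ArgAttributes` lower to LLVM attributes (an editorial sketch of the function below,
// not normative documentation): ABI-affecting attributes (`inreg`), pointee alignment, and
// zext/sext are always emitted; purely optimization-oriented attributes are emitted only
// when optimizing. For example, a NonNull + ReadOnly pointer argument with
// `pointee_size == 4` would get `dereferenceable(4)` and `readonly` when optimizations are
// enabled (the NonNull bit is folded into `dereferenceable`), and neither at -O0.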
fn get_attrs<'ll>(this: &ArgAttributes, cx: &CodegenCx<'ll, '_>) -> SmallVec<[&'ll Attribute; 8]> {
    let mut regular = this.regular;

    let mut attrs = SmallVec::new();

    // ABI-affecting attributes must always be applied
    for (attr, llattr) in ABI_AFFECTING_ATTRIBUTES {
        if regular.contains(attr) {
            attrs.push(llattr.create_attr(cx.llcx));
        }
    }
    if let Some(align) = this.pointee_align {
        attrs.push(llvm::CreateAlignmentAttr(cx.llcx, align.bytes()));
    }
    match this.arg_ext {
        ArgExtension::None => {}
        ArgExtension::Zext => attrs.push(llvm::AttributeKind::ZExt.create_attr(cx.llcx)),
        ArgExtension::Sext => attrs.push(llvm::AttributeKind::SExt.create_attr(cx.llcx)),
    }

    // Only apply remaining attributes when optimizing
    if cx.sess().opts.optimize != config::OptLevel::No {
        let deref = this.pointee_size.bytes();
        if deref != 0 {
            if regular.contains(ArgAttribute::NonNull) {
                attrs.push(llvm::CreateDereferenceableAttr(cx.llcx, deref));
            } else {
                attrs.push(llvm::CreateDereferenceableOrNullAttr(cx.llcx, deref));
            }
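            // `dereferenceable(N)` already conveys non-nullness to LLVM for these pointers,
            // so drop NonNull here rather than also emitting a redundant `nonnull` below.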
            regular -= ArgAttribute::NonNull;
        }
        for (attr, llattr) in OPTIMIZATION_ATTRIBUTES {
            if regular.contains(attr) {
                attrs.push(llattr.create_attr(cx.llcx));
            }
        }
        if regular.contains(ArgAttribute::NoAliasMutRef) && should_use_mutable_noalias(cx) {
            attrs.push(llvm::AttributeKind::NoAlias.create_attr(cx.llcx));
        }
    }

    attrs
}

impl ArgAttributesExt for ArgAttributes {
    fn apply_attrs_to_llfn(&self, idx: AttributePlace, cx: &CodegenCx<'_, '_>, llfn: &Value) {
        let attrs = get_attrs(self, cx);
        attributes::apply_to_llfn(llfn, idx, &attrs);
    }

    fn apply_attrs_to_callsite(
        &self,
        idx: AttributePlace,
        cx: &CodegenCx<'_, '_>,
        callsite: &Value,
    ) {
        let attrs = get_attrs(self, cx);
        attributes::apply_to_callsite(callsite, idx, &attrs);
    }
}

pub trait LlvmType {
    fn llvm_type<'ll>(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type;
}

impl LlvmType for Reg {
    fn llvm_type<'ll>(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
        match self.kind {
            RegKind::Integer => cx.type_ix(self.size.bits()),
            RegKind::Float => match self.size.bits() {
                32 => cx.type_f32(),
                64 => cx.type_f64(),
                _ => bug!("unsupported float: {:?}", self),
            },
            RegKind::Vector => cx.type_vector(cx.type_i8(), self.size.bytes()),
        }
    }
}
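
// Worked example of the `CastTarget` lowering below (an illustrative note, not a normative
// rule): with an empty prefix, `rest.unit = i64`, and `rest.total = 20` bytes, the cast type
// becomes `{ i64, i64, i32 }`: two full 8-byte units plus a 4-byte remainder integer
// (`rem_bytes * 8` bits). With no prefix and `rest.total <= unit size` the unit type itself
// is returned, and with no remainder a plain array of units is used instead.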
impl LlvmType for CastTarget {
    fn llvm_type<'ll>(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
        let rest_ll_unit = self.rest.unit.llvm_type(cx);
        let (rest_count, rem_bytes) = if self.rest.unit.size.bytes() == 0 {
            (0, 0)
        } else {
            (
                self.rest.total.bytes() / self.rest.unit.size.bytes(),
                self.rest.total.bytes() % self.rest.unit.size.bytes(),
            )
        };

        if self.prefix.iter().all(|x| x.is_none()) {
            // Simplify to a single unit when there is no prefix and size <= unit size
            if self.rest.total <= self.rest.unit.size {
                return rest_ll_unit;
            }

            // Simplify to array when all chunks are the same size and type
            if rem_bytes == 0 {
                return cx.type_array(rest_ll_unit, rest_count);
            }
        }

        // Create list of fields in the main structure
        let mut args: Vec<_> = self
            .prefix
            .iter()
            .flat_map(|option_reg| option_reg.map(|reg| reg.llvm_type(cx)))
            .chain((0..rest_count).map(|_| rest_ll_unit))
            .collect();

        // Append final integer
        if rem_bytes != 0 {
            // Only integers can be really split further.
            assert_eq!(self.rest.unit.kind, RegKind::Integer);
            args.push(cx.type_ix(rem_bytes * 8));
        }

        cx.type_struct(&args, false)
    }
}

pub trait ArgAbiExt<'ll, 'tcx> {
    fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    fn store(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        val: &'ll Value,
        dst: PlaceRef<'tcx, &'ll Value>,
    );
    fn store_fn_arg(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        idx: &mut usize,
        dst: PlaceRef<'tcx, &'ll Value>,
    );
}

impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
    /// Gets the LLVM type for a place of the original Rust type of
    /// this argument/return, i.e., the result of `type_of::type_of`.
    fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        self.layout.llvm_type(cx)
    }

    /// Stores a direct/indirect value described by this ArgAbi into a
    /// place for the original Rust type of this argument/return.
    /// Can be used both for storing formal arguments into Rust variables
    /// and for storing the results of call/invoke instructions into their destinations.
    fn store(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        val: &'ll Value,
        dst: PlaceRef<'tcx, &'ll Value>,
    ) {
        if self.is_ignore() {
            return;
        }
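        // For a sized indirect argument, `val` is a pointer to the value in memory, so the
        // `Ref` store below copies from that memory into `dst` rather than storing a scalar.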
        if self.is_sized_indirect() {
            OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst)
        } else if self.is_unsized_indirect() {
            bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
        } else if let PassMode::Cast(cast) = self.mode {
            // FIXME(eddyb): Figure out when the simpler Store is safe, clang
            // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
            let can_store_through_cast_ptr = false;
            if can_store_through_cast_ptr {
                let cast_ptr_llty = bx.type_ptr_to(cast.llvm_type(bx));
                let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty);
                bx.store(val, cast_dst, self.layout.align.abi);
            } else {
                // The actual return type is a struct, but the ABI
                // adaptation code has cast it into some scalar type. The
                // code that follows is the only reliable way I have
                // found to do a transform like i64 -> {i32,i32}.
                // Basically we dump the data onto the stack then memcpy it.
                //
                // Other approaches I tried:
                // - Casting rust ret pointer to the foreign type and using Store
                //   is (a) unsafe if size of foreign type > size of rust type and
                //   (b) runs afoul of strict aliasing rules, yielding invalid
                //   assembly under -O (specifically, the store gets removed).
                // - Truncating foreign type to correct integral type and then
                //   bitcasting to the struct type yields invalid cast errors.

                // We instead thus allocate some scratch space...
                let scratch_size = cast.size(bx);
                let scratch_align = cast.align(bx);
                let llscratch = bx.alloca(cast.llvm_type(bx), scratch_align);
                bx.lifetime_start(llscratch, scratch_size);

                // ... where we first store the value...
                bx.store(val, llscratch, scratch_align);

                // ... and then memcpy it to the intended destination.
                bx.memcpy(
                    dst.llval,
                    self.layout.align.abi,
                    llscratch,
                    scratch_align,
                    bx.const_usize(self.layout.size.bytes()),
                    MemFlags::empty(),
                );

                bx.lifetime_end(llscratch, scratch_size);
            }
        } else {
            OperandValue::Immediate(val).store(bx, dst);
        }
    }
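
    /// Reads this argument's LLVM formal parameter(s) starting at `*idx` and stores them
    /// into `dst`, advancing `*idx` past however many parameters the argument occupies:
    /// two for scalar pairs and unsized (pointer plus metadata) arguments, one otherwise,
    /// and none for ignored arguments.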
    fn store_fn_arg(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        idx: &mut usize,
        dst: PlaceRef<'tcx, &'ll Value>,
    ) {
        let mut next = || {
            let val = llvm::get_param(bx.llfn(), *idx as c_uint);
            *idx += 1;
            val
        };
        match self.mode {
            PassMode::Ignore => {}
            PassMode::Pair(..) => {
                OperandValue::Pair(next(), next()).store(bx, dst);
            }
            PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
                OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
            }
            PassMode::Direct(_)
            | PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ }
            | PassMode::Cast(_) => {
                let next_arg = next();
                self.store(bx, next_arg, dst);
            }
        }
    }
}

impl<'ll, 'tcx> ArgAbiMethods<'tcx> for Builder<'_, 'll, 'tcx> {
    fn store_fn_arg(
        &mut self,
        arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
        idx: &mut usize,
        dst: PlaceRef<'tcx, Self::Value>,
    ) {
        arg_abi.store_fn_arg(self, idx, dst)
    }
    fn store_arg(
        &mut self,
        arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
        val: &'ll Value,
        dst: PlaceRef<'tcx, &'ll Value>,
    ) {
        arg_abi.store(self, val, dst)
    }
    fn arg_memory_ty(&self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>) -> &'ll Type {
        arg_abi.memory_ty(self)
    }
}

pub trait FnAbiLlvmExt<'ll, 'tcx> {
    fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    fn llvm_cconv(&self) -> llvm::CallConv;
    fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value);
    fn apply_attrs_callsite(&self, bx: &mut Builder<'_, 'll, 'tcx>, callsite: &'ll Value);
}

impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
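    // Illustrative sketch (not a normative rule; the PassModes are chosen by the target ABI
    // code, not here): a signature like `fn(&u8, (u32, u32)) -> bool`, with the reference
    // passed Direct, the tuple passed as a scalar Pair, and the bool returned Direct, lowers
    // to an LLVM signature roughly like `i1 (i8*, i32, i32)`.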
    fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        // Ignore "extra" args from the call site for C variadic functions.
        // Only the "fixed" args are part of the LLVM function signature.
        let args = if self.c_variadic { &self.args[..self.fixed_count] } else { &self.args };

        let args_capacity: usize = args.iter().map(|arg|
            if arg.pad.is_some() { 1 } else { 0 } +
            if let PassMode::Pair(_, _) = arg.mode { 2 } else { 1 }
        ).sum();
        let mut llargument_tys = Vec::with_capacity(
            if let PassMode::Indirect { .. } = self.ret.mode { 1 } else { 0 } + args_capacity,
        );

        let llreturn_ty = match self.ret.mode {
            PassMode::Ignore => cx.type_void(),
            PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_llvm_type(cx),
            PassMode::Cast(cast) => cast.llvm_type(cx),
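            // An indirectly-returned value becomes a leading out-pointer parameter, and the
            // LLVM return type collapses to void.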
            PassMode::Indirect { .. } => {
                llargument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx)));
                cx.type_void()
            }
        };

        for arg in args {
            // add padding
            if let Some(ty) = arg.pad {
                llargument_tys.push(ty.llvm_type(cx));
            }

            let llarg_ty = match arg.mode {
                PassMode::Ignore => continue,
                PassMode::Direct(_) => arg.layout.immediate_llvm_type(cx),
                PassMode::Pair(..) => {
                    llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 0, true));
                    llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 1, true));
                    continue;
                }
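                // An unsized indirect argument is passed like a fat pointer: one parameter
                // for the data pointer and one for the metadata (length or vtable).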
                PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
                    let ptr_ty = cx.tcx.mk_mut_ptr(arg.layout.ty);
                    let ptr_layout = cx.layout_of(ptr_ty);
                    llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 0, true));
                    llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 1, true));
                    continue;
                }
                PassMode::Cast(cast) => cast.llvm_type(cx),
                PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
                    cx.type_ptr_to(arg.memory_ty(cx))
                }
            };
            llargument_tys.push(llarg_ty);
        }

        if self.c_variadic {
            cx.type_variadic_func(&llargument_tys, llreturn_ty)
        } else {
            cx.type_func(&llargument_tys, llreturn_ty)
        }
    }
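
    // The pointer type below is created in the data layout's instruction address space, so
    // the result is usable as a function-pointer type even on targets where code and data
    // live in different address spaces (e.g. AVR-style Harvard architectures).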
    fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        unsafe {
            llvm::LLVMPointerType(
                self.llvm_type(cx),
                cx.data_layout().instruction_address_space.0 as c_uint,
            )
        }
    }
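
    // Note that Rust's own calling convention (`Conv::Rust`) is currently lowered to LLVM's
    // C calling convention; only explicitly target-specific conventions map to distinct
    // LLVM ones.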
    fn llvm_cconv(&self) -> llvm::CallConv {
        match self.conv {
            Conv::C | Conv::Rust | Conv::CCmseNonSecureCall => llvm::CCallConv,
            Conv::AmdGpuKernel => llvm::AmdGpuKernel,
            Conv::AvrInterrupt => llvm::AvrInterrupt,
            Conv::AvrNonBlockingInterrupt => llvm::AvrNonBlockingInterrupt,
            Conv::ArmAapcs => llvm::ArmAapcsCallConv,
            Conv::Msp430Intr => llvm::Msp430Intr,
            Conv::PtxKernel => llvm::PtxKernel,
            Conv::X86Fastcall => llvm::X86FastcallCallConv,
            Conv::X86Intr => llvm::X86_Intr,
            Conv::X86Stdcall => llvm::X86StdcallCallConv,
            Conv::X86ThisCall => llvm::X86_ThisCall,
            Conv::X86VectorCall => llvm::X86_VectorCall,
            Conv::X86_64SysV => llvm::X86_64_SysV,
            Conv::X86_64Win64 => llvm::X86_64_Win64,
        }
    }

    fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value) {
        let mut func_attrs = SmallVec::<[_; 2]>::new();
        if self.ret.layout.abi.is_uninhabited() {
            func_attrs.push(llvm::AttributeKind::NoReturn.create_attr(cx.llcx));
        }
        if !self.can_unwind {
            func_attrs.push(llvm::AttributeKind::NoUnwind.create_attr(cx.llcx));
        }
        attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &{ func_attrs });
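
        // `apply` attaches the given attributes to the next LLVM argument and returns that
        // argument's index, so that `sret`/`byval` below can be attached to the same slot.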
        let mut i = 0;
        let mut apply = |attrs: &ArgAttributes| {
            attrs.apply_attrs_to_llfn(llvm::AttributePlace::Argument(i), cx, llfn);
            i += 1;
            i - 1
        };
        match self.ret.mode {
            PassMode::Direct(ref attrs) => {
                attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
            }
            PassMode::Indirect { ref attrs, extra_attrs: _, on_stack } => {
                assert!(!on_stack);
                let i = apply(attrs);
                let sret = llvm::CreateStructRetAttr(cx.llcx, self.ret.layout.llvm_type(cx));
                attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[sret]);
            }
            PassMode::Cast(cast) => {
                cast.attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
            }
            _ => {}
        }
        for arg in &self.args {
            if arg.pad.is_some() {
                apply(&ArgAttributes::new());
            }
            match arg.mode {
                PassMode::Ignore => {}
                PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: true } => {
                    let i = apply(attrs);
                    let byval = llvm::CreateByValAttr(cx.llcx, arg.layout.llvm_type(cx));
                    attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[byval]);
                }
                PassMode::Direct(ref attrs)
                | PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: false } => {
                    apply(attrs);
                }
                PassMode::Indirect { ref attrs, extra_attrs: Some(ref extra_attrs), on_stack } => {
                    assert!(!on_stack);
                    apply(attrs);
                    apply(extra_attrs);
                }
                PassMode::Pair(ref a, ref b) => {
                    apply(a);
                    apply(b);
                }
                PassMode::Cast(cast) => {
                    apply(&cast.attrs);
                }
            }
        }
    }
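
    // Call-site attributes mirror the ones applied to the declaration above; the callee may
    // live in another codegen unit or be reached through a function pointer, in which case
    // LLVM cannot recover these guarantees from the declaration alone.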
    fn apply_attrs_callsite(&self, bx: &mut Builder<'_, 'll, 'tcx>, callsite: &'ll Value) {
        let mut func_attrs = SmallVec::<[_; 2]>::new();
        if self.ret.layout.abi.is_uninhabited() {
            func_attrs.push(llvm::AttributeKind::NoReturn.create_attr(bx.cx.llcx));
        }
        if !self.can_unwind {
            func_attrs.push(llvm::AttributeKind::NoUnwind.create_attr(bx.cx.llcx));
        }
        attributes::apply_to_callsite(callsite, llvm::AttributePlace::Function, &{ func_attrs });

        let mut i = 0;
        let mut apply = |cx: &CodegenCx<'_, '_>, attrs: &ArgAttributes| {
            attrs.apply_attrs_to_callsite(llvm::AttributePlace::Argument(i), cx, callsite);
            i += 1;
            i - 1
        };
        match self.ret.mode {
            PassMode::Direct(ref attrs) => {
                attrs.apply_attrs_to_callsite(llvm::AttributePlace::ReturnValue, bx.cx, callsite);
            }
            PassMode::Indirect { ref attrs, extra_attrs: _, on_stack } => {
                assert!(!on_stack);
                let i = apply(bx.cx, attrs);
                let sret = llvm::CreateStructRetAttr(bx.cx.llcx, self.ret.layout.llvm_type(bx));
                attributes::apply_to_callsite(callsite, llvm::AttributePlace::Argument(i), &[sret]);
            }
            PassMode::Cast(cast) => {
                cast.attrs.apply_attrs_to_callsite(
                    llvm::AttributePlace::ReturnValue,
                    &bx.cx,
                    callsite,
                );
            }
            _ => {}
        }
        if let abi::Abi::Scalar(scalar) = self.ret.layout.abi {
            // If the value is a boolean, the range is 0..2 and that ultimately
            // becomes 0..0 when the type becomes i1, which would be rejected
            // by the LLVM verifier.
            if let Int(..) = scalar.value {
                if !scalar.is_bool() && !scalar.is_always_valid(bx) {
                    bx.range_metadata(callsite, scalar.valid_range);
                }
            }
        }
        for arg in &self.args {
            if arg.pad.is_some() {
                apply(bx.cx, &ArgAttributes::new());
            }
            match arg.mode {
                PassMode::Ignore => {}
                PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: true } => {
                    let i = apply(bx.cx, attrs);
                    let byval = llvm::CreateByValAttr(bx.cx.llcx, arg.layout.llvm_type(bx));
                    attributes::apply_to_callsite(
                        callsite,
                        llvm::AttributePlace::Argument(i),
                        &[byval],
                    );
                }
                PassMode::Direct(ref attrs)
                | PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: false } => {
                    apply(bx.cx, attrs);
                }
                PassMode::Indirect {
                    ref attrs,
                    extra_attrs: Some(ref extra_attrs),
                    on_stack: _,
                } => {
                    apply(bx.cx, attrs);
                    apply(bx.cx, extra_attrs);
                }
                PassMode::Pair(ref a, ref b) => {
                    apply(bx.cx, a);
                    apply(bx.cx, b);
                }
                PassMode::Cast(cast) => {
                    apply(bx.cx, &cast.attrs);
                }
            }
        }

        let cconv = self.llvm_cconv();
        if cconv != llvm::CCallConv {
            llvm::SetInstructionCallConv(callsite, cconv);
        }

        if self.conv == Conv::CCmseNonSecureCall {
            // This will probably get ignored on all targets but those supporting the TrustZone-M
            // extension (thumbv8m targets).
            let cmse_nonsecure_call =
                llvm::CreateAttrString(bx.cx.llcx, cstr::cstr!("cmse_nonsecure_call"));
            attributes::apply_to_callsite(
                callsite,
                llvm::AttributePlace::Function,
                &[cmse_nonsecure_call],
            );
        }
    }
}

impl<'tcx> AbiBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
    fn apply_attrs_callsite(&mut self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, callsite: Self::Value) {
        fn_abi.apply_attrs_callsite(self, callsite)
    }

    fn get_param(&mut self, index: usize) -> Self::Value {
        llvm::get_param(self.llfn(), index as c_uint)
    }
}