// compiler/rustc_codegen_llvm/src/abi.rs
use crate::attributes;
use crate::builder::Builder;
use crate::context::CodegenCx;
use crate::llvm::{self, Attribute, AttributePlace};
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;

use rustc_codegen_ssa::mir::operand::OperandValue;
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::MemFlags;
use rustc_middle::bug;
use rustc_middle::ty::layout::LayoutOf;
pub use rustc_middle::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA};
use rustc_middle::ty::Ty;
use rustc_session::config;
use rustc_target::abi::call::ArgAbi;
pub use rustc_target::abi::call::*;
use rustc_target::abi::{self, HasDataLayout, Int};
pub use rustc_target::spec::abi::Abi;

use libc::c_uint;
use smallvec::SmallVec;

pub trait ArgAttributesExt {
    fn apply_attrs_to_llfn(&self, idx: AttributePlace, cx: &CodegenCx<'_, '_>, llfn: &Value);
    fn apply_attrs_to_callsite(
        &self,
        idx: AttributePlace,
        cx: &CodegenCx<'_, '_>,
        callsite: &Value,
    );
}

fn should_use_mutable_noalias(cx: &CodegenCx<'_, '_>) -> bool {
    // LLVM prior to version 12 had known miscompiles in the presence of
    // noalias attributes (see #54878), but we don't support earlier
    // versions at all anymore. We now enable mutable noalias by default.
    cx.tcx.sess.opts.debugging_opts.mutable_noalias.unwrap_or(true)
}

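// Mappings from `ArgAttribute` flags to the corresponding LLVM attribute kinds.
// The ABI-affecting set below must always be emitted; the optimization set is
// only emitted when optimizations are enabled (see `get_attrs`).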
const ABI_AFFECTING_ATTRIBUTES: [(ArgAttribute, llvm::AttributeKind); 1] =
    [(ArgAttribute::InReg, llvm::AttributeKind::InReg)];

const OPTIMIZATION_ATTRIBUTES: [(ArgAttribute, llvm::AttributeKind); 5] = [
    (ArgAttribute::NoAlias, llvm::AttributeKind::NoAlias),
    (ArgAttribute::NoCapture, llvm::AttributeKind::NoCapture),
    (ArgAttribute::NonNull, llvm::AttributeKind::NonNull),
    (ArgAttribute::ReadOnly, llvm::AttributeKind::ReadOnly),
    (ArgAttribute::NoUndef, llvm::AttributeKind::NoUndef),
];

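// Lowers an `ArgAttributes` value to the list of LLVM attributes to attach to an
// argument or return slot. ABI-affecting attributes, pointee alignment, and
// sign/zero extension are always emitted; the purely optimization-related
// attributes are skipped at `-C opt-level=0`.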
fn get_attrs<'ll>(this: &ArgAttributes, cx: &CodegenCx<'ll, '_>) -> SmallVec<[&'ll Attribute; 8]> {
    let mut regular = this.regular;

    let mut attrs = SmallVec::new();

    // ABI-affecting attributes must always be applied
    for (attr, llattr) in ABI_AFFECTING_ATTRIBUTES {
        if regular.contains(attr) {
            attrs.push(llattr.create_attr(cx.llcx));
        }
    }
    if let Some(align) = this.pointee_align {
        attrs.push(llvm::CreateAlignmentAttr(cx.llcx, align.bytes()));
    }
    match this.arg_ext {
        ArgExtension::None => {}
        ArgExtension::Zext => attrs.push(llvm::AttributeKind::ZExt.create_attr(cx.llcx)),
        ArgExtension::Sext => attrs.push(llvm::AttributeKind::SExt.create_attr(cx.llcx)),
    }

    // Only apply remaining attributes when optimizing
    if cx.sess().opts.optimize != config::OptLevel::No {
        let deref = this.pointee_size.bytes();
        if deref != 0 {
            if regular.contains(ArgAttribute::NonNull) {
                attrs.push(llvm::CreateDereferenceableAttr(cx.llcx, deref));
            } else {
                attrs.push(llvm::CreateDereferenceableOrNullAttr(cx.llcx, deref));
            }
            regular -= ArgAttribute::NonNull;
        }
        for (attr, llattr) in OPTIMIZATION_ATTRIBUTES {
            if regular.contains(attr) {
                attrs.push(llattr.create_attr(cx.llcx));
            }
        }
        if regular.contains(ArgAttribute::NoAliasMutRef) && should_use_mutable_noalias(cx) {
            attrs.push(llvm::AttributeKind::NoAlias.create_attr(cx.llcx));
        }
    }

    attrs
}

impl ArgAttributesExt for ArgAttributes {
    fn apply_attrs_to_llfn(&self, idx: AttributePlace, cx: &CodegenCx<'_, '_>, llfn: &Value) {
        let attrs = get_attrs(self, cx);
        attributes::apply_to_llfn(llfn, idx, &attrs);
    }

    fn apply_attrs_to_callsite(
        &self,
        idx: AttributePlace,
        cx: &CodegenCx<'_, '_>,
        callsite: &Value,
    ) {
        let attrs = get_attrs(self, cx);
        attributes::apply_to_callsite(callsite, idx, &attrs);
    }
}

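// Conversion of ABI register and cast descriptions (`Reg`, `CastTarget`) into
// concrete LLVM types.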
pub trait LlvmType {
    fn llvm_type<'ll>(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type;
}

impl LlvmType for Reg {
    fn llvm_type<'ll>(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
        match self.kind {
            RegKind::Integer => cx.type_ix(self.size.bits()),
            RegKind::Float => match self.size.bits() {
                32 => cx.type_f32(),
                64 => cx.type_f64(),
                _ => bug!("unsupported float: {:?}", self),
            },
            RegKind::Vector => cx.type_vector(cx.type_i8(), self.size.bytes()),
        }
    }
}

impl LlvmType for CastTarget {
    fn llvm_type<'ll>(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
        let rest_ll_unit = self.rest.unit.llvm_type(cx);
        let (rest_count, rem_bytes) = if self.rest.unit.size.bytes() == 0 {
            (0, 0)
        } else {
            (
                self.rest.total.bytes() / self.rest.unit.size.bytes(),
                self.rest.total.bytes() % self.rest.unit.size.bytes(),
            )
        };

        if self.prefix.iter().all(|x| x.is_none()) {
            // Simplify to a single unit when there is no prefix and size <= unit size
            if self.rest.total <= self.rest.unit.size {
                return rest_ll_unit;
            }

            // Simplify to array when all chunks are the same size and type
            if rem_bytes == 0 {
                return cx.type_array(rest_ll_unit, rest_count);
            }
        }

        // Create list of fields in the main structure
        let mut args: Vec<_> = self
            .prefix
            .iter()
            .flat_map(|option_reg| option_reg.map(|reg| reg.llvm_type(cx)))
            .chain((0..rest_count).map(|_| rest_ll_unit))
            .collect();

        // Append final integer
        if rem_bytes != 0 {
            // Only integers can be really split further.
            assert_eq!(self.rest.unit.kind, RegKind::Integer);
            args.push(cx.type_ix(rem_bytes * 8));
        }

        cx.type_struct(&args, false)
    }
}

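// Helpers for a single argument or return slot: its in-memory LLVM type, and how
// to store an incoming value (a formal parameter or a call result) into a place
// of the original Rust type.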
pub trait ArgAbiExt<'ll, 'tcx> {
    fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    fn store(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        val: &'ll Value,
        dst: PlaceRef<'tcx, &'ll Value>,
    );
    fn store_fn_arg(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        idx: &mut usize,
        dst: PlaceRef<'tcx, &'ll Value>,
    );
}

impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
    /// Gets the LLVM type for a place of the original Rust type of
    /// this argument/return, i.e., the result of `type_of::type_of`.
    fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        self.layout.llvm_type(cx)
    }

    /// Stores a direct/indirect value described by this ArgAbi into a
    /// place for the original Rust type of this argument/return.
    /// Can be used to store formal arguments into Rust variables, or the
    /// results of call/invoke instructions into their destinations.
    fn store(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        val: &'ll Value,
        dst: PlaceRef<'tcx, &'ll Value>,
    ) {
        if self.is_ignore() {
            return;
        }
        if self.is_sized_indirect() {
            OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst)
        } else if self.is_unsized_indirect() {
            bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
        } else if let PassMode::Cast(cast) = self.mode {
            // FIXME(eddyb): Figure out when the simpler Store is safe, clang
            // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
            let can_store_through_cast_ptr = false;
            if can_store_through_cast_ptr {
                let cast_ptr_llty = bx.type_ptr_to(cast.llvm_type(bx));
                let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty);
                bx.store(val, cast_dst, self.layout.align.abi);
            } else {
                // The actual return type is a struct, but the ABI
                // adaptation code has cast it into some scalar type. The
                // code that follows is the only reliable way I have
                // found to do a transform like i64 -> {i32,i32}.
                // Basically we dump the data onto the stack then memcpy it.
                //
                // Other approaches I tried:
                // - Casting rust ret pointer to the foreign type and using Store
                //   is (a) unsafe if size of foreign type > size of rust type and
                //   (b) runs afoul of strict aliasing rules, yielding invalid
                //   assembly under -O (specifically, the store gets removed).
                // - Truncating foreign type to correct integral type and then
                //   bitcasting to the struct type yields invalid cast errors.
                // We instead thus allocate some scratch space...
                let scratch_size = cast.size(bx);
                let scratch_align = cast.align(bx);
                let llscratch = bx.alloca(cast.llvm_type(bx), scratch_align);
                bx.lifetime_start(llscratch, scratch_size);

                // ... where we first store the value...
                bx.store(val, llscratch, scratch_align);

                // ... and then memcpy it to the intended destination.
                bx.memcpy(
                    dst.llval,
                    self.layout.align.abi,
                    llscratch,
                    scratch_align,
                    bx.const_usize(self.layout.size.bytes()),
                    MemFlags::empty(),
                );

                bx.lifetime_end(llscratch, scratch_size);
            }
        } else {
            OperandValue::Immediate(val).store(bx, dst);
        }
    }

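    // Stores the LLVM formal parameter(s) for this argument into `dst`, reading
    // them from `bx.llfn()` starting at `*idx` and advancing `idx` past each
    // parameter consumed: none for ignored arguments, two for pairs and for
    // indirect arguments with extra attributes, one otherwise.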
    fn store_fn_arg(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        idx: &mut usize,
        dst: PlaceRef<'tcx, &'ll Value>,
    ) {
        let mut next = || {
            let val = llvm::get_param(bx.llfn(), *idx as c_uint);
            *idx += 1;
            val
        };
        match self.mode {
            PassMode::Ignore => {}
            PassMode::Pair(..) => {
                OperandValue::Pair(next(), next()).store(bx, dst);
            }
            PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
                OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
            }
            PassMode::Direct(_)
            | PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ }
            | PassMode::Cast(_) => {
                let next_arg = next();
                self.store(bx, next_arg, dst);
            }
        }
    }
}

impl<'ll, 'tcx> ArgAbiMethods<'tcx> for Builder<'_, 'll, 'tcx> {
    fn store_fn_arg(
        &mut self,
        arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
        idx: &mut usize,
        dst: PlaceRef<'tcx, Self::Value>,
    ) {
        arg_abi.store_fn_arg(self, idx, dst)
    }
    fn store_arg(
        &mut self,
        arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
        val: &'ll Value,
        dst: PlaceRef<'tcx, &'ll Value>,
    ) {
        arg_abi.store(self, val, dst)
    }
    fn arg_memory_ty(&self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>) -> &'ll Type {
        arg_abi.memory_ty(self)
    }
}

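// Whole-signature helpers: building the LLVM function type for a `FnAbi` and
// applying ABI attributes both to function declarations and to call sites.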
pub trait FnAbiLlvmExt<'ll, 'tcx> {
    fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    fn llvm_cconv(&self) -> llvm::CallConv;
    fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value);
    fn apply_attrs_callsite(&self, bx: &mut Builder<'_, 'll, 'tcx>, callsite: &'ll Value);
}

impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
    fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        // Ignore "extra" args from the call site for C variadic functions.
        // Only the "fixed" args are part of the LLVM function signature.
        let args = if self.c_variadic { &self.args[..self.fixed_count] } else { &self.args };

        let args_capacity: usize = args.iter().map(|arg|
            if arg.pad.is_some() { 1 } else { 0 } +
            if let PassMode::Pair(_, _) = arg.mode { 2 } else { 1 }
        ).sum();
        let mut llargument_tys = Vec::with_capacity(
            if let PassMode::Indirect { .. } = self.ret.mode { 1 } else { 0 } + args_capacity,
        );

        let llreturn_ty = match self.ret.mode {
            PassMode::Ignore => cx.type_void(),
            PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_llvm_type(cx),
            PassMode::Cast(cast) => cast.llvm_type(cx),
            PassMode::Indirect { .. } => {
                llargument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx)));
                cx.type_void()
            }
        };

        for arg in args {
            // add padding
            if let Some(ty) = arg.pad {
                llargument_tys.push(ty.llvm_type(cx));
            }

            let llarg_ty = match arg.mode {
                PassMode::Ignore => continue,
                PassMode::Direct(_) => arg.layout.immediate_llvm_type(cx),
                PassMode::Pair(..) => {
                    llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 0, true));
                    llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 1, true));
                    continue;
                }
                PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
                    let ptr_ty = cx.tcx.mk_mut_ptr(arg.layout.ty);
                    let ptr_layout = cx.layout_of(ptr_ty);
                    llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 0, true));
                    llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 1, true));
                    continue;
                }
                PassMode::Cast(cast) => cast.llvm_type(cx),
                PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
                    cx.type_ptr_to(arg.memory_ty(cx))
                }
            };
            llargument_tys.push(llarg_ty);
        }

        if self.c_variadic {
            cx.type_variadic_func(&llargument_tys, llreturn_ty)
        } else {
            cx.type_func(&llargument_tys, llreturn_ty)
        }
    }

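    // Pointer to the function type above, in the target's instruction address space.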
    fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        unsafe {
            llvm::LLVMPointerType(
                self.llvm_type(cx),
                cx.data_layout().instruction_address_space.0 as c_uint,
            )
        }
    }

    fn llvm_cconv(&self) -> llvm::CallConv {
        match self.conv {
            Conv::C | Conv::Rust | Conv::CCmseNonSecureCall => llvm::CCallConv,
            Conv::AmdGpuKernel => llvm::AmdGpuKernel,
            Conv::AvrInterrupt => llvm::AvrInterrupt,
            Conv::AvrNonBlockingInterrupt => llvm::AvrNonBlockingInterrupt,
            Conv::ArmAapcs => llvm::ArmAapcsCallConv,
            Conv::Msp430Intr => llvm::Msp430Intr,
            Conv::PtxKernel => llvm::PtxKernel,
            Conv::X86Fastcall => llvm::X86FastcallCallConv,
            Conv::X86Intr => llvm::X86_Intr,
            Conv::X86Stdcall => llvm::X86StdcallCallConv,
            Conv::X86ThisCall => llvm::X86_ThisCall,
            Conv::X86VectorCall => llvm::X86_VectorCall,
            Conv::X86_64SysV => llvm::X86_64_SysV,
            Conv::X86_64Win64 => llvm::X86_64_Win64,
        }
    }

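    // Applies function-level attributes (noreturn, nounwind) and per-argument /
    // return-value attributes (sret, byval, and the `ArgAttributes` sets) to a
    // function declaration.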
    fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value) {
        let mut func_attrs = SmallVec::<[_; 2]>::new();
        if self.ret.layout.abi.is_uninhabited() {
            func_attrs.push(llvm::AttributeKind::NoReturn.create_attr(cx.llcx));
        }
        if !self.can_unwind {
            func_attrs.push(llvm::AttributeKind::NoUnwind.create_attr(cx.llcx));
        }
        attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &{ func_attrs });

        let mut i = 0;
        let mut apply = |attrs: &ArgAttributes| {
            attrs.apply_attrs_to_llfn(llvm::AttributePlace::Argument(i), cx, llfn);
            i += 1;
            i - 1
        };
        match self.ret.mode {
            PassMode::Direct(ref attrs) => {
                attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
            }
            PassMode::Indirect { ref attrs, extra_attrs: _, on_stack } => {
                assert!(!on_stack);
                let i = apply(attrs);
                let sret = llvm::CreateStructRetAttr(cx.llcx, self.ret.layout.llvm_type(cx));
                attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[sret]);
            }
            PassMode::Cast(cast) => {
                cast.attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
            }
            _ => {}
        }
        for arg in &self.args {
            if arg.pad.is_some() {
                apply(&ArgAttributes::new());
            }
            match arg.mode {
                PassMode::Ignore => {}
                PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: true } => {
                    let i = apply(attrs);
                    let byval = llvm::CreateByValAttr(cx.llcx, arg.layout.llvm_type(cx));
                    attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[byval]);
                }
                PassMode::Direct(ref attrs)
                | PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: false } => {
                    apply(attrs);
                }
                PassMode::Indirect { ref attrs, extra_attrs: Some(ref extra_attrs), on_stack } => {
                    assert!(!on_stack);
                    apply(attrs);
                    apply(extra_attrs);
                }
                PassMode::Pair(ref a, ref b) => {
                    apply(a);
                    apply(b);
                }
                PassMode::Cast(cast) => {
                    apply(&cast.attrs);
                }
            }
        }
    }

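    // Same as `apply_attrs_llfn`, but at a call or invoke instruction: it also
    // attaches range metadata for scalar integer returns and overrides the call's
    // calling convention when it differs from the C default.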
    fn apply_attrs_callsite(&self, bx: &mut Builder<'_, 'll, 'tcx>, callsite: &'ll Value) {
        let mut func_attrs = SmallVec::<[_; 2]>::new();
        if self.ret.layout.abi.is_uninhabited() {
            func_attrs.push(llvm::AttributeKind::NoReturn.create_attr(bx.cx.llcx));
        }
        if !self.can_unwind {
            func_attrs.push(llvm::AttributeKind::NoUnwind.create_attr(bx.cx.llcx));
        }
        attributes::apply_to_callsite(callsite, llvm::AttributePlace::Function, &{ func_attrs });

        let mut i = 0;
        let mut apply = |cx: &CodegenCx<'_, '_>, attrs: &ArgAttributes| {
            attrs.apply_attrs_to_callsite(llvm::AttributePlace::Argument(i), cx, callsite);
            i += 1;
            i - 1
        };
        match self.ret.mode {
            PassMode::Direct(ref attrs) => {
                attrs.apply_attrs_to_callsite(llvm::AttributePlace::ReturnValue, bx.cx, callsite);
            }
            PassMode::Indirect { ref attrs, extra_attrs: _, on_stack } => {
                assert!(!on_stack);
                let i = apply(bx.cx, attrs);
                let sret = llvm::CreateStructRetAttr(bx.cx.llcx, self.ret.layout.llvm_type(bx));
                attributes::apply_to_callsite(callsite, llvm::AttributePlace::Argument(i), &[sret]);
            }
            PassMode::Cast(cast) => {
                cast.attrs.apply_attrs_to_callsite(
                    llvm::AttributePlace::ReturnValue,
                    &bx.cx,
                    callsite,
                );
            }
            _ => {}
        }
        if let abi::Abi::Scalar(scalar) = self.ret.layout.abi {
            // If the value is a boolean, the range is 0..2 and that ultimately
            // becomes 0..0 when the type becomes i1, which would be rejected
            // by the LLVM verifier.
            if let Int(..) = scalar.value {
                if !scalar.is_bool() && !scalar.is_always_valid(bx) {
                    bx.range_metadata(callsite, scalar.valid_range);
                }
            }
        }

        for arg in &self.args {
            if arg.pad.is_some() {
                apply(bx.cx, &ArgAttributes::new());
            }
            match arg.mode {
                PassMode::Ignore => {}
                PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: true } => {
                    let i = apply(bx.cx, attrs);
                    let byval = llvm::CreateByValAttr(bx.cx.llcx, arg.layout.llvm_type(bx));
                    attributes::apply_to_callsite(
                        callsite,
                        llvm::AttributePlace::Argument(i),
                        &[byval],
                    );
                }
                PassMode::Direct(ref attrs)
                | PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: false } => {
                    apply(bx.cx, attrs);
                }
                PassMode::Indirect {
                    ref attrs,
                    extra_attrs: Some(ref extra_attrs),
                    on_stack: _,
                } => {
                    apply(bx.cx, attrs);
                    apply(bx.cx, extra_attrs);
                }
                PassMode::Pair(ref a, ref b) => {
                    apply(bx.cx, a);
                    apply(bx.cx, b);
                }
                PassMode::Cast(cast) => {
                    apply(bx.cx, &cast.attrs);
                }
            }
        }

        let cconv = self.llvm_cconv();
        if cconv != llvm::CCallConv {
            llvm::SetInstructionCallConv(callsite, cconv);
        }

        if self.conv == Conv::CCmseNonSecureCall {
            // This will probably get ignored on all targets but those supporting the TrustZone-M
            // extension (thumbv8m targets).
            let cmse_nonsecure_call =
                llvm::CreateAttrString(bx.cx.llcx, cstr::cstr!("cmse_nonsecure_call"));
            attributes::apply_to_callsite(
                callsite,
                llvm::AttributePlace::Function,
                &[cmse_nonsecure_call],
            );
        }
    }
}

impl<'tcx> AbiBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
    fn apply_attrs_callsite(&mut self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, callsite: Self::Value) {
        fn_abi.apply_attrs_callsite(self, callsite)
    }

    fn get_param(&mut self, index: usize) -> Self::Value {
        llvm::get_param(self.llfn(), index as c_uint)
    }
}