use crate::attributes;
use crate::builder::Builder;
use crate::context::CodegenCx;
use crate::llvm::{self, Attribute, AttributePlace};
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;
use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
use rustc_codegen_ssa::mir::place::{PlaceRef, PlaceValue};
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::MemFlags;
use rustc_middle::bug;
use rustc_middle::ty::layout::LayoutOf;
pub use rustc_middle::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA};
use rustc_middle::ty::Ty;
use rustc_session::config;
pub use rustc_target::abi::call::*;
use rustc_target::abi::{self, HasDataLayout, Int, Size};
pub use rustc_target::spec::abi::Abi;
use rustc_target::spec::SanitizerSet;

use libc::c_uint;
use smallvec::SmallVec;

use std::cmp;

pub trait ArgAttributesExt {
    fn apply_attrs_to_llfn(&self, idx: AttributePlace, cx: &CodegenCx<'_, '_>, llfn: &Value);
    fn apply_attrs_to_callsite(
        &self,
        idx: AttributePlace,
        cx: &CodegenCx<'_, '_>,
        callsite: &Value,
    );
}

const ABI_AFFECTING_ATTRIBUTES: [(ArgAttribute, llvm::AttributeKind); 1] =
    [(ArgAttribute::InReg, llvm::AttributeKind::InReg)];

const OPTIMIZATION_ATTRIBUTES: [(ArgAttribute, llvm::AttributeKind); 5] = [
    (ArgAttribute::NoAlias, llvm::AttributeKind::NoAlias),
    (ArgAttribute::NoCapture, llvm::AttributeKind::NoCapture),
    (ArgAttribute::NonNull, llvm::AttributeKind::NonNull),
    (ArgAttribute::ReadOnly, llvm::AttributeKind::ReadOnly),
    (ArgAttribute::NoUndef, llvm::AttributeKind::NoUndef),
];

fn get_attrs<'ll>(this: &ArgAttributes, cx: &CodegenCx<'ll, '_>) -> SmallVec<[&'ll Attribute; 8]> {
    let mut regular = this.regular;

    let mut attrs = SmallVec::new();

    // ABI-affecting attributes must always be applied
    for (attr, llattr) in ABI_AFFECTING_ATTRIBUTES {
        if regular.contains(attr) {
            attrs.push(llattr.create_attr(cx.llcx));
        }
    }
    if let Some(align) = this.pointee_align {
        attrs.push(llvm::CreateAlignmentAttr(cx.llcx, align.bytes()));
    }
    match this.arg_ext {
        ArgExtension::None => {}
        ArgExtension::Zext => attrs.push(llvm::AttributeKind::ZExt.create_attr(cx.llcx)),
        ArgExtension::Sext => attrs.push(llvm::AttributeKind::SExt.create_attr(cx.llcx)),
    }

    // Only apply remaining attributes when optimizing
    if cx.sess().opts.optimize != config::OptLevel::No {
        let deref = this.pointee_size.bytes();
        if deref != 0 {
            if regular.contains(ArgAttribute::NonNull) {
                attrs.push(llvm::CreateDereferenceableAttr(cx.llcx, deref));
            } else {
                attrs.push(llvm::CreateDereferenceableOrNullAttr(cx.llcx, deref));
            }
            regular -= ArgAttribute::NonNull;
        }
        for (attr, llattr) in OPTIMIZATION_ATTRIBUTES {
            if regular.contains(attr) {
                attrs.push(llattr.create_attr(cx.llcx));
            }
        }
    } else if cx.tcx.sess.opts.unstable_opts.sanitizer.contains(SanitizerSet::MEMORY) {
        // If we're not optimising, *but* memory sanitizer is on, emit noundef, since it affects
        // memory sanitizer's behavior.
        if regular.contains(ArgAttribute::NoUndef) {
            attrs.push(llvm::AttributeKind::NoUndef.create_attr(cx.llcx));
        }
    }

    attrs
}
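
// An illustrative outcome (not from the original source): for a `&mut u64`
// argument compiled with optimizations enabled, the attribute set assembled
// above typically comes out as
//
//     noalias noundef align 8 dereferenceable(8)
//
// `ArgAttribute::NonNull` is subtracted from `regular` once `dereferenceable`
// has been pushed, so no separate `nonnull` attribute is emitted in that case.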

impl ArgAttributesExt for ArgAttributes {
    fn apply_attrs_to_llfn(&self, idx: AttributePlace, cx: &CodegenCx<'_, '_>, llfn: &Value) {
        let attrs = get_attrs(self, cx);
        attributes::apply_to_llfn(llfn, idx, &attrs);
    }

    fn apply_attrs_to_callsite(
        &self,
        idx: AttributePlace,
        cx: &CodegenCx<'_, '_>,
        callsite: &Value,
    ) {
        let attrs = get_attrs(self, cx);
        attributes::apply_to_callsite(callsite, idx, &attrs);
    }
}

pub trait LlvmType {
    fn llvm_type<'ll>(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type;
}

impl LlvmType for Reg {
    fn llvm_type<'ll>(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
        match self.kind {
            RegKind::Integer => cx.type_ix(self.size.bits()),
            RegKind::Float => match self.size.bits() {
                32 => cx.type_f32(),
                64 => cx.type_f64(),
                _ => bug!("unsupported float: {:?}", self),
            },
            RegKind::Vector => cx.type_vector(cx.type_i8(), self.size.bytes()),
        }
    }
}
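
// A quick illustration (not from the original source): `Reg::i64()` lowers to
// `i64`, a 64-bit float register lowers to `double`, and a hypothetical
// 16-byte vector register lowers to `<16 x i8>`.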

impl LlvmType for CastTarget {
    fn llvm_type<'ll>(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
        let rest_ll_unit = self.rest.unit.llvm_type(cx);
        let rest_count = if self.rest.total == Size::ZERO {
            0
        } else {
            assert_ne!(
                self.rest.unit.size,
                Size::ZERO,
                "total size {:?} cannot be divided into units of zero size",
                self.rest.total
            );
            if self.rest.total.bytes() % self.rest.unit.size.bytes() != 0 {
                assert_eq!(self.rest.unit.kind, RegKind::Integer, "only int regs can be split");
            }
            self.rest.total.bytes().div_ceil(self.rest.unit.size.bytes())
        };

        // Simplify to a single unit or an array if there's no prefix.
        // This produces the same layout, but using a simpler type.
        if self.prefix.iter().all(|x| x.is_none()) {
            // We can't do this if is_consecutive is set and the unit would get
            // split on the target. Currently, this is only relevant for i128
            // registers.
            if rest_count == 1 && (!self.rest.is_consecutive || self.rest.unit != Reg::i128()) {
                return rest_ll_unit;
            }

            return cx.type_array(rest_ll_unit, rest_count);
        }

        // Generate a struct type with the prefix and the "rest" arguments.
        let prefix_args =
            self.prefix.iter().flat_map(|option_reg| option_reg.map(|reg| reg.llvm_type(cx)));
        let rest_args = (0..rest_count).map(|_| rest_ll_unit);
        let args: Vec<_> = prefix_args.chain(rest_args).collect();
        cx.type_struct(&args, false)
    }
}
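
// A worked example (illustrative, not from the original source): a struct of
// five `u16` fields occupies 10 bytes. With an empty prefix and
// `rest = Uniform { unit: i64, total: 10 bytes }`, `rest_count` is
// `div_ceil(10, 8) = 2`, so the function returns the array type `[2 x i64]`,
// matching what Clang uses for the same aggregate.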

pub trait ArgAbiExt<'ll, 'tcx> {
    fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    fn store(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        val: &'ll Value,
        dst: PlaceRef<'tcx, &'ll Value>,
    );
    fn store_fn_arg(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        idx: &mut usize,
        dst: PlaceRef<'tcx, &'ll Value>,
    );
}

impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
    /// Gets the LLVM type for a place of the original Rust type of
    /// this argument/return, i.e., the result of `type_of::type_of`.
    fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        self.layout.llvm_type(cx)
    }

    /// Stores a direct/indirect value described by this ArgAbi into a
    /// place for the original Rust type of this argument/return.
    /// Can be used both for storing formal arguments into Rust variables
    /// and for storing the results of call/invoke instructions into their
    /// destinations.
    fn store(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        val: &'ll Value,
        dst: PlaceRef<'tcx, &'ll Value>,
    ) {
        match &self.mode {
            PassMode::Ignore => {}
            // Sized indirect arguments
            PassMode::Indirect { attrs, meta_attrs: None, on_stack: _ } => {
                let align = attrs.pointee_align.unwrap_or(self.layout.align.abi);
                OperandValue::Ref(PlaceValue::new_sized(val, align)).store(bx, dst);
            }
            // Unsized indirect arguments
            PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => {
                bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
            }
            PassMode::Cast { cast, pad_i32: _ } => {
                // The ABI mandates that the value is passed as a different struct representation.
                // Spill and reload it from the stack to convert from the ABI representation to
                // the Rust representation.
                let scratch_size = cast.size(bx);
                let scratch_align = cast.align(bx);
                // Note that the ABI type may be either larger or smaller than the Rust type,
                // due to the presence or absence of trailing padding. For example:
                // - On some ABIs, the Rust layout { f64, f32, <f32 padding> } may omit padding
                //   when passed by value, making it smaller.
                // - On some ABIs, the Rust layout { u16, u16, u16 } may be padded up to 8 bytes
                //   when passed by value, making it larger.
                let copy_bytes = cmp::min(scratch_size.bytes(), self.layout.size.bytes());
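                // Worked example (illustrative, not from the original source): for a
                // Rust layout `{ u16, u16, u16 }` (6 bytes) whose ABI cast type is
                // padded to 8 bytes, `copy_bytes = min(8, 6) = 6`, so the memcpy below
                // never touches memory beyond the smaller of the two buffers.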
                // Allocate some scratch space...
                let llscratch = bx.alloca(cast.llvm_type(bx), scratch_align);
                bx.lifetime_start(llscratch, scratch_size);
                // ...store the value...
                bx.store(val, llscratch, scratch_align);
                // ... and then memcpy it to the intended destination.
                bx.memcpy(
                    dst.val.llval,
                    self.layout.align.abi,
                    llscratch,
                    scratch_align,
                    bx.const_usize(copy_bytes),
                    MemFlags::empty(),
                );
                bx.lifetime_end(llscratch, scratch_size);
            }
            _ => {
                OperandRef::from_immediate_or_packed_pair(bx, val, self.layout).val.store(bx, dst);
            }
        }
    }

    fn store_fn_arg(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        idx: &mut usize,
        dst: PlaceRef<'tcx, &'ll Value>,
    ) {
        let mut next = || {
            let val = llvm::get_param(bx.llfn(), *idx as c_uint);
            *idx += 1;
            val
        };
        match self.mode {
            PassMode::Ignore => {}
            PassMode::Pair(..) => {
                OperandValue::Pair(next(), next()).store(bx, dst);
            }
            PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => {
                let place_val = PlaceValue {
                    llval: next(),
                    llextra: Some(next()),
                    align: self.layout.align.abi,
                };
                OperandValue::Ref(place_val).store(bx, dst);
            }
            PassMode::Direct(_)
            | PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ }
            | PassMode::Cast { .. } => {
                let next_arg = next();
                self.store(bx, next_arg, dst);
            }
        }
    }
}

impl<'ll, 'tcx> ArgAbiMethods<'tcx> for Builder<'_, 'll, 'tcx> {
    fn store_fn_arg(
        &mut self,
        arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
        idx: &mut usize,
        dst: PlaceRef<'tcx, Self::Value>,
    ) {
        arg_abi.store_fn_arg(self, idx, dst)
    }

    fn store_arg(
        &mut self,
        arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
        val: &'ll Value,
        dst: PlaceRef<'tcx, &'ll Value>,
    ) {
        arg_abi.store(self, val, dst)
    }

    fn arg_memory_ty(&self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>) -> &'ll Type {
        arg_abi.memory_ty(self)
    }
}

pub trait FnAbiLlvmExt<'ll, 'tcx> {
    fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    fn llvm_cconv(&self) -> llvm::CallConv;
    fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value);
    fn apply_attrs_callsite(&self, bx: &mut Builder<'_, 'll, 'tcx>, callsite: &'ll Value);
}

impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
    fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        // Ignore "extra" args from the call site for C variadic functions.
        // Only the "fixed" args are part of the LLVM function signature.
        let args =
            if self.c_variadic { &self.args[..self.fixed_count as usize] } else { &self.args };

        // This capacity calculation is approximate.
        let mut llargument_tys = Vec::with_capacity(
            self.args.len() + if let PassMode::Indirect { .. } = self.ret.mode { 1 } else { 0 },
        );

        let llreturn_ty = match &self.ret.mode {
            PassMode::Ignore => cx.type_void(),
            PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_llvm_type(cx),
            PassMode::Cast { cast, pad_i32: _ } => cast.llvm_type(cx),
            PassMode::Indirect { .. } => {
                llargument_tys.push(cx.type_ptr());
                cx.type_void()
            }
        };

        for arg in args {
            // Note that the exact number of arguments pushed here is carefully synchronized with
            // code all over the place, both in the codegen_llvm and codegen_ssa crates. That's how
            // other code then knows which LLVM argument(s) correspond to the n-th Rust argument.
            let llarg_ty = match &arg.mode {
                PassMode::Ignore => continue,
                PassMode::Direct(_) => {
                    // ABI-compatible Rust types have the same `layout.abi` (up to validity ranges),
                    // and for Scalar ABIs the LLVM type is fully determined by `layout.abi`,
                    // guaranteeing that we generate ABI-compatible LLVM IR.
                    arg.layout.immediate_llvm_type(cx)
                }
                PassMode::Pair(..) => {
                    // ABI-compatible Rust types have the same `layout.abi` (up to validity ranges),
                    // so for ScalarPair we can easily be sure that we are generating ABI-compatible
                    // LLVM IR.
                    llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 0, true));
                    llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 1, true));
                    continue;
                }
                PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => {
                    // Construct the type of a (wide) pointer to `ty`, and pass its two fields.
                    // Any two ABI-compatible unsized types have the same metadata type and
                    // moreover the same metadata value leads to the same dynamic size and
                    // alignment, so this respects ABI compatibility.
                    let ptr_ty = Ty::new_mut_ptr(cx.tcx, arg.layout.ty);
                    let ptr_layout = cx.layout_of(ptr_ty);
                    llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 0, true));
                    llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 1, true));
                    continue;
                }
                PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ } => cx.type_ptr(),
                PassMode::Cast { cast, pad_i32 } => {
                    // add padding
                    if *pad_i32 {
                        llargument_tys.push(Reg::i32().llvm_type(cx));
                    }
                    // Compute the LLVM type we use for this function from the cast type.
                    // We assume here that ABI-compatible Rust types have the same cast type.
                    cast.llvm_type(cx)
                }
            };
            llargument_tys.push(llarg_ty);
        }

        if self.c_variadic {
            cx.type_variadic_func(&llargument_tys, llreturn_ty)
        } else {
            cx.type_func(&llargument_tys, llreturn_ty)
        }
    }
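
    // An illustrative lowering (not from the original source): on a typical
    // 64-bit target, `fn(&[u8]) -> bool` comes out of the rules above as
    //
    //     i1 (ptr, i64)
    //
    // The slice argument is a ScalarPair, so it contributes two LLVM parameters
    // (data pointer and length), while the `bool` return uses its immediate
    // type `i1`.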

    fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        cx.type_ptr_ext(cx.data_layout().instruction_address_space)
    }

    fn llvm_cconv(&self) -> llvm::CallConv {
        self.conv.into()
    }

    fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value) {
        let mut func_attrs = SmallVec::<[_; 3]>::new();
        if self.ret.layout.abi.is_uninhabited() {
            func_attrs.push(llvm::AttributeKind::NoReturn.create_attr(cx.llcx));
        }
        if !self.can_unwind {
            func_attrs.push(llvm::AttributeKind::NoUnwind.create_attr(cx.llcx));
        }
        if let Conv::RiscvInterrupt { kind } = self.conv {
            func_attrs.push(llvm::CreateAttrStringValue(cx.llcx, "interrupt", kind.as_str()));
        }
        attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &{ func_attrs });

        let mut i = 0;
        let mut apply = |attrs: &ArgAttributes| {
            attrs.apply_attrs_to_llfn(llvm::AttributePlace::Argument(i), cx, llfn);
            i += 1;
            i - 1
        };
        match &self.ret.mode {
            PassMode::Direct(attrs) => {
                attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
            }
            PassMode::Indirect { attrs, meta_attrs: _, on_stack } => {
                assert!(!on_stack);
                let i = apply(attrs);
                let sret = llvm::CreateStructRetAttr(
                    cx.llcx,
                    cx.type_array(cx.type_i8(), self.ret.layout.size.bytes()),
                );
                attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[sret]);
            }
            PassMode::Cast { cast, pad_i32: _ } => {
                cast.attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
            }
            _ => {}
        }
        for arg in self.args.iter() {
            match &arg.mode {
                PassMode::Ignore => {}
                PassMode::Indirect { attrs, meta_attrs: None, on_stack: true } => {
                    let i = apply(attrs);
                    let byval = llvm::CreateByValAttr(
                        cx.llcx,
                        cx.type_array(cx.type_i8(), arg.layout.size.bytes()),
                    );
                    attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[byval]);
                }
                PassMode::Direct(attrs)
                | PassMode::Indirect { attrs, meta_attrs: None, on_stack: false } => {
                    apply(attrs);
                }
                PassMode::Indirect { attrs, meta_attrs: Some(meta_attrs), on_stack } => {
                    assert!(!on_stack);
                    apply(attrs);
                    apply(meta_attrs);
                }
                PassMode::Pair(a, b) => {
                    apply(a);
                    apply(b);
                }
                PassMode::Cast { cast, pad_i32 } => {
                    if *pad_i32 {
                        apply(&ArgAttributes::new());
                    }
                    apply(&cast.attrs);
                }
            }
        }
    }
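
    // An illustrative result (not from the original source): if the return
    // value is `PassMode::Indirect` with a 24-byte layout, the code above marks
    // LLVM parameter 0 with `sret([24 x i8])`, so the declaration reads roughly
    //
    //     declare void @f(ptr sret([24 x i8]) %ret_slot, ...)
    //
    // where `%ret_slot` is a hypothetical name for the caller-provided buffer.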

    fn apply_attrs_callsite(&self, bx: &mut Builder<'_, 'll, 'tcx>, callsite: &'ll Value) {
        let mut func_attrs = SmallVec::<[_; 2]>::new();
        if self.ret.layout.abi.is_uninhabited() {
            func_attrs.push(llvm::AttributeKind::NoReturn.create_attr(bx.cx.llcx));
        }
        if !self.can_unwind {
            func_attrs.push(llvm::AttributeKind::NoUnwind.create_attr(bx.cx.llcx));
        }
        attributes::apply_to_callsite(callsite, llvm::AttributePlace::Function, &{ func_attrs });

        let mut i = 0;
        let mut apply = |cx: &CodegenCx<'_, '_>, attrs: &ArgAttributes| {
            attrs.apply_attrs_to_callsite(llvm::AttributePlace::Argument(i), cx, callsite);
            i += 1;
            i - 1
        };
        match &self.ret.mode {
            PassMode::Direct(attrs) => {
                attrs.apply_attrs_to_callsite(llvm::AttributePlace::ReturnValue, bx.cx, callsite);
            }
            PassMode::Indirect { attrs, meta_attrs: _, on_stack } => {
                assert!(!on_stack);
                let i = apply(bx.cx, attrs);
                let sret = llvm::CreateStructRetAttr(
                    bx.cx.llcx,
                    bx.cx.type_array(bx.cx.type_i8(), self.ret.layout.size.bytes()),
                );
                attributes::apply_to_callsite(callsite, llvm::AttributePlace::Argument(i), &[sret]);
            }
            PassMode::Cast { cast, pad_i32: _ } => {
                cast.attrs.apply_attrs_to_callsite(
                    llvm::AttributePlace::ReturnValue,
                    bx.cx,
                    callsite,
                );
            }
            _ => {}
        }
        if let abi::Abi::Scalar(scalar) = self.ret.layout.abi {
            // If the value is a boolean, the range is 0..2 and that ultimately
            // becomes 0..0 when the type becomes i1, which would be rejected
            // by the LLVM verifier.
            if let Int(..) = scalar.primitive() {
                if !scalar.is_bool() && !scalar.is_always_valid(bx) {
                    bx.range_metadata(callsite, scalar.valid_range(bx));
                }
            }
        }
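        // An illustrative case (not from the original source): for a callee
        // returning `NonZeroU32`, the valid range is 1..=u32::MAX, so the call
        // gets annotated roughly as
        //
        //     %r = call i32 @f(), !range !{i32 1, i32 0}
        //
        // LLVM encodes wrapping ranges as half-open [lo, hi) pairs, so `[1, 0)`
        // means "any value except 0".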

        for arg in self.args.iter() {
            match &arg.mode {
                PassMode::Ignore => {}
                PassMode::Indirect { attrs, meta_attrs: None, on_stack: true } => {
                    let i = apply(bx.cx, attrs);
                    let byval = llvm::CreateByValAttr(
                        bx.cx.llcx,
                        bx.cx.type_array(bx.cx.type_i8(), arg.layout.size.bytes()),
                    );
                    attributes::apply_to_callsite(
                        callsite,
                        llvm::AttributePlace::Argument(i),
                        &[byval],
                    );
                }
                PassMode::Direct(attrs)
                | PassMode::Indirect { attrs, meta_attrs: None, on_stack: false } => {
                    apply(bx.cx, attrs);
                }
                PassMode::Indirect { attrs, meta_attrs: Some(meta_attrs), on_stack: _ } => {
                    apply(bx.cx, attrs);
                    apply(bx.cx, meta_attrs);
                }
                PassMode::Pair(a, b) => {
                    apply(bx.cx, a);
                    apply(bx.cx, b);
                }
                PassMode::Cast { cast, pad_i32 } => {
                    if *pad_i32 {
                        apply(bx.cx, &ArgAttributes::new());
                    }
                    apply(bx.cx, &cast.attrs);
                }
            }
        }

        let cconv = self.llvm_cconv();
        if cconv != llvm::CCallConv {
            llvm::SetInstructionCallConv(callsite, cconv);
        }
|
2021-01-24 17:15:05 +00:00
|
|
|
|
|
|
|
if self.conv == Conv::CCmseNonSecureCall {
|
|
|
|
// This will probably get ignored on all targets but those supporting the TrustZone-M
|
|
|
|
// extension (thumbv8m targets).
|
2022-03-03 00:00:00 +00:00
|
|
|
let cmse_nonsecure_call = llvm::CreateAttrString(bx.cx.llcx, "cmse_nonsecure_call");
|
2022-02-21 16:19:16 +00:00
|
|
|
attributes::apply_to_callsite(
|
Improve `unused_unsafe` lint
Main motivation: Fixes some issues with the current behavior. This PR is
more-or-less completely re-implementing the unused_unsafe lint; it’s also only
done in the MIR-version of the lint, the set of tests for the `-Zthir-unsafeck`
version no longer succeeds (and is thus disabled, see `lint-unused-unsafe.rs`).
On current nightly,
```rs
unsafe fn unsf() {}
fn inner_ignored() {
unsafe {
#[allow(unused_unsafe)]
unsafe {
unsf()
}
}
}
```
doesn’t create any warnings. This situation is not unrealistic to come by, the
inner `unsafe` block could e.g. come from a macro. Actually, this PR even
includes removal of one unused `unsafe` in the standard library that was missed
in a similar situation. (The inner `unsafe` coming from an external macro hides
the warning, too.)
The reason behind this problem is how the check currently works:
* While generating MIR, it already skips nested unsafe blocks (i.e. unsafe
nested in other unsafe) so that the inner one is always the one considered
unused
* To differentiate the cases of no unsafe operations inside the `unsafe` vs.
a surrounding `unsafe` block, there’s some ad-hoc magic walking up the HIR to
look for surrounding used `unsafe` blocks.
There’s a lot of problems with this approach besides the one presented above.
E.g. the MIR-building uses checks for `unsafe_op_in_unsafe_fn` lint to decide
early whether or not `unsafe` blocks in an `unsafe fn` are redundant and ought
to be removed.
```rs
unsafe fn granular_disallow_op_in_unsafe_fn() {
unsafe {
#[deny(unsafe_op_in_unsafe_fn)]
{
unsf();
}
}
}
```
```
error: call to unsafe function is unsafe and requires unsafe block (error E0133)
--> src/main.rs:13:13
|
13 | unsf();
| ^^^^^^ call to unsafe function
|
note: the lint level is defined here
--> src/main.rs:11:16
|
11 | #[deny(unsafe_op_in_unsafe_fn)]
| ^^^^^^^^^^^^^^^^^^^^^^
= note: consult the function's documentation for information on how to avoid undefined behavior
warning: unnecessary `unsafe` block
--> src/main.rs:10:5
|
9 | unsafe fn granular_disallow_op_in_unsafe_fn() {
| --------------------------------------------- because it's nested under this `unsafe` fn
10 | unsafe {
| ^^^^^^ unnecessary `unsafe` block
|
= note: `#[warn(unused_unsafe)]` on by default
```
Here, the intermediate `unsafe` was ignored, even though it contains a unsafe
operation that is not allowed to happen in an `unsafe fn` without an additional `unsafe` block.
Also closures were problematic and the workaround/algorithms used on current
nightly didn’t work properly. (I skipped trying to fully understand what it was
supposed to do, because this PR uses a completely different approach.)
```rs
fn nested() {
unsafe {
unsafe { unsf() }
}
}
```
```
warning: unnecessary `unsafe` block
--> src/main.rs:10:9
|
9 | unsafe {
| ------ because it's nested under this `unsafe` block
10 | unsafe { unsf() }
| ^^^^^^ unnecessary `unsafe` block
|
= note: `#[warn(unused_unsafe)]` on by default
```
vs
```rs
fn nested() {
let _ = || unsafe {
let _ = || unsafe { unsf() };
};
}
```
```
warning: unnecessary `unsafe` block
--> src/main.rs:9:16
|
9 | let _ = || unsafe {
| ^^^^^^ unnecessary `unsafe` block
|
= note: `#[warn(unused_unsafe)]` on by default
warning: unnecessary `unsafe` block
--> src/main.rs:10:20
|
10 | let _ = || unsafe { unsf() };
| ^^^^^^ unnecessary `unsafe` block
```
*note that this warning kind-of suggests that **both** unsafe blocks are redundant*
--------------------------------------------------------------------------------
I also dislike the fact that it always suggests keeping the outermost `unsafe`.
E.g. for
```rs
fn granularity() {
unsafe {
unsafe { unsf() }
unsafe { unsf() }
unsafe { unsf() }
}
}
```
I prefer if `rustc` suggests removing the more-course outer-level `unsafe`
instead of the fine-grained inner `unsafe` blocks, which it currently does on nightly:
```
warning: unnecessary `unsafe` block
--> src/main.rs:10:9
|
9 | unsafe {
| ------ because it's nested under this `unsafe` block
10 | unsafe { unsf() }
| ^^^^^^ unnecessary `unsafe` block
|
= note: `#[warn(unused_unsafe)]` on by default
warning: unnecessary `unsafe` block
--> src/main.rs:11:9
|
9 | unsafe {
| ------ because it's nested under this `unsafe` block
10 | unsafe { unsf() }
11 | unsafe { unsf() }
| ^^^^^^ unnecessary `unsafe` block
warning: unnecessary `unsafe` block
--> src/main.rs:12:9
|
9 | unsafe {
| ------ because it's nested under this `unsafe` block
...
12 | unsafe { unsf() }
| ^^^^^^ unnecessary `unsafe` block
```
--------------------------------------------------------------------------------
Needless to say, this PR addresses all these points. For context, as far as my
understanding goes, the main advantage of skipping inner unsafe blocks was that
a test case like
```rs
fn top_level_used() {
unsafe {
unsf();
unsafe { unsf() }
unsafe { unsf() }
unsafe { unsf() }
}
}
```
should generate some warning because there’s redundant nested `unsafe`, however
every single `unsafe` block _does_ contain some statement that uses it. Of course
this PR doesn’t aim change the warnings on this kind of code example, because
the current behavior, warning on all the inner `unsafe` blocks, makes sense in this case.
As mentioned, during MIR building all the unsafe blocks *are* kept now, and usage
is attributed to them. The way to still generate a warning like
```
warning: unnecessary `unsafe` block
--> src/main.rs:11:9
|
9 | unsafe {
| ------ because it's nested under this `unsafe` block
10 | unsf();
11 | unsafe { unsf() }
| ^^^^^^ unnecessary `unsafe` block
|
= note: `#[warn(unused_unsafe)]` on by default
warning: unnecessary `unsafe` block
--> src/main.rs:12:9
|
9 | unsafe {
| ------ because it's nested under this `unsafe` block
...
12 | unsafe { unsf() }
| ^^^^^^ unnecessary `unsafe` block
warning: unnecessary `unsafe` block
--> src/main.rs:13:9
|
9 | unsafe {
| ------ because it's nested under this `unsafe` block
...
13 | unsafe { unsf() }
| ^^^^^^ unnecessary `unsafe` block
```
in this case is by emitting a `unused_unsafe` warning for all of the `unsafe`
blocks that are _within a **used** unsafe block_.
The previous code had a little HIR traversal already anyways to collect a set of
all the unsafe blocks (in order to afterwards determine which ones are unused
afterwards). This PR uses such a traversal to do additional things including logic
like _always_ warn for an `unsafe` block that’s inside of another **used**
unsafe block. The traversal is expanded to include nested closures in the same go,
this simplifies a lot of things.
The whole logic around `unsafe_op_in_unsafe_fn` is a little complicated, there’s
some test cases of corner-cases in this PR. (The implementation involves
differentiating between whether a used unsafe block was used exclusively by
operations where `allow(unsafe_op_in_unsafe_fn)` was active.) The main goal was
to make sure that code should compile successfully if all the `unused_unsafe`-warnings
are addressed _simultaneously_ (by removing the respective `unsafe` blocks)
no matter how complicated the patterns of `unsafe_op_in_unsafe_fn` being
disallowed and allowed throughout the function are.
--------------------------------------------------------------------------------
One noteworthy design decision I took here: An `unsafe` block
with `allow(unused_unsafe)` **is considered used** for the purposes of
linting about redundant contained unsafe blocks. So while
```rs
fn granularity() {
unsafe { //~ ERROR: unnecessary `unsafe` block
unsafe { unsf() }
unsafe { unsf() }
unsafe { unsf() }
}
}
```
warns for the outer `unsafe` block,
```rs
fn top_level_ignored() {
#[allow(unused_unsafe)]
unsafe {
#[deny(unused_unsafe)]
{
unsafe { unsf() } //~ ERROR: unnecessary `unsafe` block
unsafe { unsf() } //~ ERROR: unnecessary `unsafe` block
unsafe { unsf() } //~ ERROR: unnecessary `unsafe` block
}
}
}
```
warns on the inner ones.
2022-02-03 21:16:06 +00:00
|
|
|
callsite,
|
|
|
|
llvm::AttributePlace::Function,
|
2022-02-21 16:19:16 +00:00
|
|
|
&[cmse_nonsecure_call],
|
2022-02-03 21:16:06 +00:00
|
|
|
);
|
2021-01-24 17:15:05 +00:00
|
|
|
}
|
2022-07-19 13:03:39 +00:00
|
|
|
|
|
|
|
// Some intrinsics require that an elementtype attribute (with the pointee type of a
|
|
|
|
// pointer argument) is added to the callsite.
|
|
|
|
let element_type_index = unsafe { llvm::LLVMRustGetElementTypeArgIndex(callsite) };
|
|
|
|
if element_type_index >= 0 {
|
|
|
|
let arg_ty = self.args[element_type_index as usize].layout.ty;
|
|
|
|
let pointee_ty = arg_ty.builtin_deref(true).expect("Must be pointer argument").ty;
|
|
|
|
let element_type_attr = unsafe {
|
|
|
|
llvm::LLVMRustCreateElementTypeAttr(bx.llcx, bx.layout_of(pointee_ty).llvm_type(bx))
|
|
|
|
};
|
|
|
|
attributes::apply_to_callsite(
|
|
|
|
callsite,
|
|
|
|
llvm::AttributePlace::Argument(element_type_index as u32),
|
|
|
|
&[element_type_attr],
|
|
|
|
);
|
|
|
|
}
|
2013-01-25 22:56:56 +00:00
|
|
|
}
|
|
|
|
}
|
2018-09-20 13:47:22 +00:00
|
|
|
|
2021-12-14 18:49:49 +00:00
|
|
|
impl<'tcx> AbiBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
|
2021-11-24 03:30:20 +00:00
|
|
|
fn get_param(&mut self, index: usize) -> Self::Value {
|
2018-12-04 19:20:45 +00:00
|
|
|
llvm::get_param(self.llfn(), index as c_uint)
|
|
|
|
}
|
2018-09-20 13:47:22 +00:00
|
|
|
}
|
2022-11-05 09:06:38 +00:00
|
|
|
|
|
|
|
impl From<Conv> for llvm::CallConv {
|
|
|
|
fn from(conv: Conv) -> Self {
|
|
|
|
match conv {
|
feat: `riscv-interrupt-{m,s}` calling conventions
Similar to prior support added for the msp430, avr, and x86 targets,
this change implements the rough equivalent of clang's
[`__attribute__((interrupt))`][clang-attr] for riscv targets, enabling
e.g.
```rust
static mut CNT: usize = 0;
pub extern "riscv-interrupt-m" fn isr_m() {
unsafe {
CNT += 1;
}
}
```
to produce highly effective assembly like:
```asm
pub extern "riscv-interrupt-m" fn isr_m() {
420003a0: 1141 addi sp,sp,-16
unsafe {
CNT += 1;
420003a2: c62a sw a0,12(sp)
420003a4: c42e sw a1,8(sp)
420003a6: 3fc80537 lui a0,0x3fc80
420003aa: 63c52583 lw a1,1596(a0) # 3fc8063c <_ZN12esp_riscv_rt3CNT17hcec3e3a214887d53E.0>
420003ae: 0585 addi a1,a1,1
420003b0: 62b52e23 sw a1,1596(a0)
}
}
420003b4: 4532 lw a0,12(sp)
420003b6: 45a2 lw a1,8(sp)
420003b8: 0141 addi sp,sp,16
420003ba: 30200073 mret
```
(disassembly via `riscv64-unknown-elf-objdump -C -S --disassemble ./esp32c3-hal/target/riscv32imc-unknown-none-elf/release/examples/gpio_interrupt`)
This outcome is superior to hand-coded interrupt routines which, lacking
visibility into any non-assembly body of the interrupt handler, have to
be very conservative and save the [entire CPU state to the stack
frame][full-frame-save]. By instead asking LLVM to only save the
registers that it uses, we defer the decision to the tool with the best
context: it can more accurately account for the cost of spills if it
knows that every additional register used is already at the cost of an
implicit spill.
At the LLVM level, this is apparently [implemented by] marking every
register as "[callee-save]," matching the semantics of an interrupt
handler nicely (it has to leave the CPU state just as it found it after
its `{m|s}ret`).
This approach is not suitable for every interrupt handler, as it makes
no attempt to e.g. save the state in a user-accessible stack frame. For
a full discussion of those challenges and tradeoffs, please refer to
[the interrupt calling conventions RFC][rfc].
Inside rustc, this implementation differs from prior art because LLVM
does not expose the "all-saved" function flavor as a calling convention
directly, instead preferring to use an attribute that allows for
differentiating between "machine-mode" and "supervisor-mode" interrupts.
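On the Rust side, the supervisor-mode flavor is spelled the same way as the
machine-mode example above; a minimal sketch (assuming a bare-metal riscv
target and the unstable ABI feature gate; `S_CNT` and the body are arbitrary
examples, not from this change):
```rust
static mut S_CNT: usize = 0;

// Supervisor-mode handler: LLVM tags it "interrupt"="supervisor" and the
// epilogue returns via `sret` instead of `mret`.
pub extern "riscv-interrupt-s" fn isr_s() {
    unsafe {
        S_CNT += 1;
    }
}
```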
Finally, some effort has been made to guide those who may not yet be
aware of the differences between machine-mode and supervisor-mode
interrupts as to why no `riscv-interrupt` calling convention is exposed
through rustc, and similarly for why `riscv-interrupt-u` makes no
appearance (as it would complicate future LLVM upgrades).
[clang-attr]: https://clang.llvm.org/docs/AttributeReference.html#interrupt-risc-v
[full-frame-save]: https://github.com/esp-rs/esp-riscv-rt/blob/9281af2ecffe13e40992917316f36920c26acaf3/src/lib.rs#L440-L469
[implemented by]: https://github.com/llvm/llvm-project/blob/b7fb2a3fec7c187d58a6d338ab512d9173bca987/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp#L61-L67
[callee-save]: https://github.com/llvm/llvm-project/blob/973f1fe7a8591c7af148e573491ab68cc15b6ecf/llvm/lib/Target/RISCV/RISCVCallingConv.td#L30-L37
[rfc]: https://github.com/rust-lang/rfcs/pull/3246
2023-05-23 22:08:23 +00:00
|
|
|
Conv::C | Conv::Rust | Conv::CCmseNonSecureCall | Conv::RiscvInterrupt { .. } => {
|
|
|
|
llvm::CCallConv
|
|
|
|
}
|
2023-08-27 00:42:59 +00:00
|
|
|
Conv::Cold => llvm::ColdCallConv,
|
|
|
|
Conv::PreserveMost => llvm::PreserveMost,
|
|
|
|
Conv::PreserveAll => llvm::PreserveAll,
|
2022-11-05 09:06:38 +00:00
|
|
|
Conv::AvrInterrupt => llvm::AvrInterrupt,
|
|
|
|
Conv::AvrNonBlockingInterrupt => llvm::AvrNonBlockingInterrupt,
|
|
|
|
Conv::ArmAapcs => llvm::ArmAapcsCallConv,
|
|
|
|
Conv::Msp430Intr => llvm::Msp430Intr,
|
|
|
|
Conv::PtxKernel => llvm::PtxKernel,
|
|
|
|
Conv::X86Fastcall => llvm::X86FastcallCallConv,
|
|
|
|
Conv::X86Intr => llvm::X86_Intr,
|
|
|
|
Conv::X86Stdcall => llvm::X86StdcallCallConv,
|
|
|
|
Conv::X86ThisCall => llvm::X86_ThisCall,
|
|
|
|
Conv::X86VectorCall => llvm::X86_VectorCall,
|
|
|
|
Conv::X86_64SysV => llvm::X86_64_SysV,
|
|
|
|
Conv::X86_64Win64 => llvm::X86_64_Win64,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
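For illustration, a self-contained toy mirroring the shape of the mapping
above (`Conv`/`CallConv` here are local stand-ins, not the real rustc/LLVM
types): implementing `From` lets call sites convert with `.into()` instead of
repeating the match.
```rust
#[derive(Clone, Copy)]
enum Conv {
    C,
    Rust,
    Cold,
}

#[derive(Debug, PartialEq)]
enum CallConv {
    CCallConv,
    ColdCallConv,
}

impl From<Conv> for CallConv {
    fn from(conv: Conv) -> Self {
        match conv {
            // Rust's default ABI lowers to the C calling convention.
            Conv::C | Conv::Rust => CallConv::CCallConv,
            Conv::Cold => CallConv::ColdCallConv,
        }
    }
}

fn main() {
    let cc: CallConv = Conv::Rust.into(); // `Into` comes for free from `From`
    assert_eq!(cc, CallConv::CCallConv);
}
```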