//@ compile-flags: -O -C no-prepopulate-passes

#![crate_type = "lib"]
#![feature(dyn_star)]
#![feature(allocator_api)]
use std::marker::PhantomPinned;
use std::mem::MaybeUninit;
use std::num::NonZero;
use std::ptr::NonNull;
// A struct too large to pass in registers; used to exercise indirect
// (by-pointer) argument passing and `sret` returns below.
pub struct S {
    _field: [i32; 8],
}
// A struct with interior mutability (`UnsafeCell`), so `&UnsafeInner`
// must not be marked `readonly`/`noalias` in the tests below.
pub struct UnsafeInner {
    _field: std::cell::UnsafeCell<i16>,
}
|
2022-06-21 03:51:15 +00:00
|
|
|
pub struct NotUnpin {
|
2024-05-29 04:11:20 +00:00
|
|
|
_field: i32,
|
|
|
|
_marker: PhantomPinned,
|
2022-06-21 03:51:15 +00:00
|
|
|
}
|
|
|
|
|
// A two-variant fieldless enum; its ABI should match `bool` (i1 with
// `zeroext`), which `enum_bool` below verifies.
pub enum MyBool {
    True,
    False,
}
// `bool` is passed/returned as i1 with `zeroext` and `noundef`.
// CHECK: noundef zeroext i1 @boolean(i1 noundef zeroext %x)
#[no_mangle]
pub fn boolean(x: bool) -> bool {
    x
}
// `MaybeUninit<bool>` may be uninitialized, so no `noundef`/`zeroext`.
// CHECK: i8 @maybeuninit_boolean(i8 %x)
#[no_mangle]
pub fn maybeuninit_boolean(x: MaybeUninit<bool>) -> MaybeUninit<bool> {
    x
}
2022-02-12 06:38:24 +00:00
|
|
|
// CHECK: noundef zeroext i1 @enum_bool(i1 noundef zeroext %x)
|
|
|
|
#[no_mangle]
|
|
|
|
pub fn enum_bool(x: MyBool) -> MyBool {
|
2024-05-29 04:11:20 +00:00
|
|
|
x
|
2022-02-12 06:38:24 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// CHECK: i8 @maybeuninit_enum_bool(i8 %x)
|
|
|
|
#[no_mangle]
|
|
|
|
pub fn maybeuninit_enum_bool(x: MaybeUninit<MyBool>) -> MaybeUninit<MyBool> {
|
2024-05-29 04:11:20 +00:00
|
|
|
x
|
2022-02-12 06:38:24 +00:00
|
|
|
}
|
|
|
|
// `char` is a valid-Unicode-scalar i32, passed with `noundef`.
// CHECK: noundef i32 @char(i32 noundef %x)
#[no_mangle]
pub fn char(x: char) -> char {
    x
}
// `MaybeUninit<char>` loses the `noundef` that plain `char` carries.
// CHECK: i32 @maybeuninit_char(i32 %x)
#[no_mangle]
pub fn maybeuninit_char(x: MaybeUninit<char>) -> MaybeUninit<char> {
    x
}
|
2022-12-30 14:55:05 +00:00
|
|
|
// CHECK: noundef i64 @int(i64 noundef %x)
|
2022-02-12 06:38:24 +00:00
|
|
|
#[no_mangle]
|
|
|
|
pub fn int(x: u64) -> u64 {
|
2024-05-29 04:11:20 +00:00
|
|
|
x
|
2022-02-12 06:38:24 +00:00
|
|
|
}
|
|
|
|
// `NonZero<u64>` has the same direct ABI as `u64` (the niche does not
// change the register-level representation).
// CHECK: noundef i64 @nonzero_int(i64 noundef %x)
#[no_mangle]
pub fn nonzero_int(x: NonZero<u64>) -> NonZero<u64> {
    x
}
|
2022-12-30 14:55:05 +00:00
|
|
|
// CHECK: noundef i64 @option_nonzero_int(i64 noundef %x)
|
2022-02-12 06:38:24 +00:00
|
|
|
#[no_mangle]
|
2024-02-22 13:59:52 +00:00
|
|
|
pub fn option_nonzero_int(x: Option<NonZero<u64>>) -> Option<NonZero<u64>> {
|
2024-05-29 04:11:20 +00:00
|
|
|
x
|
2022-02-12 06:38:24 +00:00
|
|
|
}
|
|
|
|
|
2023-07-27 21:44:13 +00:00
|
|
|
// CHECK: @readonly_borrow(ptr noalias noundef readonly align 4 dereferenceable(4) %_1)
|
2015-05-24 16:07:52 +00:00
|
|
|
// FIXME #25759 This should also have `nocapture`
|
|
|
|
#[no_mangle]
|
2024-05-29 04:11:20 +00:00
|
|
|
pub fn readonly_borrow(_: &i32) {}
|
2015-05-24 16:07:52 +00:00
|
|
|
|
2023-07-27 21:44:13 +00:00
|
|
|
// CHECK: noundef align 4 dereferenceable(4) ptr @readonly_borrow_ret()
|
2022-12-27 11:46:08 +00:00
|
|
|
#[no_mangle]
|
|
|
|
pub fn readonly_borrow_ret() -> &'static i32 {
|
2024-05-29 04:11:20 +00:00
|
|
|
loop {}
|
2022-12-27 11:46:08 +00:00
|
|
|
}
|
|
|
|
|
2023-07-27 21:44:13 +00:00
|
|
|
// CHECK: @static_borrow(ptr noalias noundef readonly align 4 dereferenceable(4) %_1)
|
2015-05-24 16:07:52 +00:00
|
|
|
// static borrow may be captured
|
|
|
|
#[no_mangle]
|
2024-05-29 04:11:20 +00:00
|
|
|
pub fn static_borrow(_: &'static i32) {}
|
2015-05-24 16:07:52 +00:00
|
|
|
|
2023-07-27 21:44:13 +00:00
|
|
|
// CHECK: @named_borrow(ptr noalias noundef readonly align 4 dereferenceable(4) %_1)
|
2015-05-24 16:07:52 +00:00
|
|
|
// borrow with named lifetime may be captured
|
|
|
|
#[no_mangle]
|
2024-05-29 04:11:20 +00:00
|
|
|
pub fn named_borrow<'r>(_: &'r i32) {}
|
2015-05-24 16:07:52 +00:00
|
|
|
|
2023-07-27 21:44:13 +00:00
|
|
|
// CHECK: @unsafe_borrow(ptr noundef nonnull align 2 %_1)
|
2015-05-24 16:07:52 +00:00
|
|
|
// unsafe interior means this isn't actually readonly and there may be aliases ...
|
|
|
|
#[no_mangle]
|
2024-05-29 04:11:20 +00:00
|
|
|
pub fn unsafe_borrow(_: &UnsafeInner) {}
|
2015-05-24 16:07:52 +00:00
|
|
|
|
2023-07-27 21:44:13 +00:00
|
|
|
// CHECK: @mutable_unsafe_borrow(ptr noalias noundef align 2 dereferenceable(2) %_1)
|
2015-05-24 16:07:52 +00:00
|
|
|
// ... unless this is a mutable borrow, those never alias
|
|
|
|
#[no_mangle]
|
2024-05-29 04:11:20 +00:00
|
|
|
pub fn mutable_unsafe_borrow(_: &mut UnsafeInner) {}
|
2015-05-24 16:07:52 +00:00
|
|
|
|
2023-07-27 21:44:13 +00:00
|
|
|
// CHECK: @mutable_borrow(ptr noalias noundef align 4 dereferenceable(4) %_1)
|
2015-05-24 16:07:52 +00:00
|
|
|
// FIXME #25759 This should also have `nocapture`
|
|
|
|
#[no_mangle]
|
2024-05-29 04:11:20 +00:00
|
|
|
pub fn mutable_borrow(_: &mut i32) {}
|
2015-05-24 16:07:52 +00:00
|
|
|
|
2023-07-27 21:44:13 +00:00
|
|
|
// CHECK: noundef align 4 dereferenceable(4) ptr @mutable_borrow_ret()
|
2022-12-27 11:46:08 +00:00
|
|
|
#[no_mangle]
|
|
|
|
pub fn mutable_borrow_ret() -> &'static mut i32 {
|
2024-05-29 04:11:20 +00:00
|
|
|
loop {}
|
2022-12-27 11:46:08 +00:00
|
|
|
}
|
|
|
|
|
2022-06-21 03:51:15 +00:00
|
|
|
#[no_mangle]
|
2023-07-27 21:44:13 +00:00
|
|
|
// CHECK: @mutable_notunpin_borrow(ptr noundef nonnull align 4 %_1)
|
2022-06-21 03:51:15 +00:00
|
|
|
// This one is *not* `noalias` because it might be self-referential.
|
2022-12-27 11:46:08 +00:00
|
|
|
// It is also not `dereferenceable` due to
|
|
|
|
// <https://github.com/rust-lang/unsafe-code-guidelines/issues/381>.
|
2024-05-29 04:11:20 +00:00
|
|
|
pub fn mutable_notunpin_borrow(_: &mut NotUnpin) {}
|
2022-06-21 03:51:15 +00:00
|
|
|
|
2023-07-27 21:44:13 +00:00
|
|
|
// CHECK: @notunpin_borrow(ptr noalias noundef readonly align 4 dereferenceable(4) %_1)
|
2022-06-21 03:51:15 +00:00
|
|
|
// But `&NotUnpin` behaves perfectly normal.
|
|
|
|
#[no_mangle]
|
2024-05-29 04:11:20 +00:00
|
|
|
pub fn notunpin_borrow(_: &NotUnpin) {}
|
2022-06-21 03:51:15 +00:00
|
|
|
|
2023-07-27 21:44:13 +00:00
|
|
|
// CHECK: @indirect_struct(ptr noalias nocapture noundef readonly align 4 dereferenceable(32) %_1)
|
2015-05-24 16:07:52 +00:00
|
|
|
#[no_mangle]
|
2024-05-29 04:11:20 +00:00
|
|
|
pub fn indirect_struct(_: S) {}
|
2015-05-24 16:07:52 +00:00
|
|
|
|
2023-07-27 21:44:13 +00:00
|
|
|
// CHECK: @borrowed_struct(ptr noalias noundef readonly align 4 dereferenceable(32) %_1)
|
2015-05-24 16:07:52 +00:00
|
|
|
// FIXME #25759 This should also have `nocapture`
|
|
|
|
#[no_mangle]
|
2024-05-29 04:11:20 +00:00
|
|
|
pub fn borrowed_struct(_: &S) {}
|
2015-05-24 16:07:52 +00:00
|
|
|
|
2023-07-27 21:44:13 +00:00
|
|
|
// CHECK: @option_borrow(ptr noalias noundef readonly align 4 dereferenceable_or_null(4) %x)
|
2022-12-30 20:11:30 +00:00
|
|
|
#[no_mangle]
|
2024-05-29 04:11:20 +00:00
|
|
|
pub fn option_borrow(x: Option<&i32>) {}
|
2022-12-30 20:11:30 +00:00
|
|
|
|
2023-07-27 21:44:13 +00:00
|
|
|
// CHECK: @option_borrow_mut(ptr noalias noundef align 4 dereferenceable_or_null(4) %x)
|
2022-12-30 20:11:30 +00:00
|
|
|
#[no_mangle]
|
2024-05-29 04:11:20 +00:00
|
|
|
pub fn option_borrow_mut(x: Option<&mut i32>) {}
|
2022-12-30 20:11:30 +00:00
|
|
|
|
2023-07-27 21:44:13 +00:00
|
|
|
// CHECK: @raw_struct(ptr noundef %_1)
|
2022-02-05 06:00:37 +00:00
|
|
|
#[no_mangle]
|
2024-05-29 04:11:20 +00:00
|
|
|
pub fn raw_struct(_: *const S) {}
|
2022-02-05 06:00:37 +00:00
|
|
|
|
2023-07-27 21:44:13 +00:00
|
|
|
// CHECK: @raw_option_nonnull_struct(ptr noundef %_1)
|
2022-12-30 20:11:30 +00:00
|
|
|
#[no_mangle]
|
2024-05-29 04:11:20 +00:00
|
|
|
pub fn raw_option_nonnull_struct(_: Option<NonNull<S>>) {}
|
2022-12-30 20:11:30 +00:00
|
|
|
|
2019-11-22 21:04:22 +00:00
|
|
|
// `Box` can get deallocated during execution of the function, so it should
|
|
|
|
// not get `dereferenceable`.
|
2023-07-27 21:44:13 +00:00
|
|
|
// CHECK: noundef nonnull align 4 ptr @_box(ptr noalias noundef nonnull align 4 %x)
|
2015-05-24 16:07:52 +00:00
|
|
|
#[no_mangle]
|
|
|
|
pub fn _box(x: Box<i32>) -> Box<i32> {
|
2024-05-29 04:11:20 +00:00
|
|
|
x
|
2015-05-24 16:07:52 +00:00
|
|
|
}
|
|
|
|
|
2024-03-05 10:32:03 +00:00
|
|
|
// With a custom allocator, it should *not* have `noalias`. (See
|
|
|
|
// <https://github.com/rust-lang/miri/issues/3341> for why.) The second argument is the allocator,
|
|
|
|
// which is a reference here that still carries `noalias` as usual.
|
|
|
|
// CHECK: @_box_custom(ptr noundef nonnull align 4 %x.0, ptr noalias noundef nonnull readonly align 1 %x.1)
|
|
|
|
#[no_mangle]
|
|
|
|
pub fn _box_custom(x: Box<i32, &std::alloc::Global>) {
|
2024-05-29 04:11:20 +00:00
|
|
|
drop(x)
|
2024-03-05 10:32:03 +00:00
|
|
|
}
|
|
|
|
|
2023-07-27 21:44:13 +00:00
|
|
|
// CHECK: noundef nonnull align 4 ptr @notunpin_box(ptr noundef nonnull align 4 %x)
|
2023-01-02 13:09:01 +00:00
|
|
|
#[no_mangle]
|
|
|
|
pub fn notunpin_box(x: Box<NotUnpin>) -> Box<NotUnpin> {
|
2024-05-29 04:11:20 +00:00
|
|
|
x
|
2023-01-02 13:09:01 +00:00
|
|
|
}
|
|
|
|
|
2024-02-19 14:02:49 +00:00
|
|
|
// CHECK: @struct_return(ptr{{( dead_on_unwind)?}} noalias nocapture noundef{{( writable)?}} sret([32 x i8]) align 4 dereferenceable(32){{( %_0)?}})
|
2015-05-24 16:07:52 +00:00
|
|
|
#[no_mangle]
|
|
|
|
pub fn struct_return() -> S {
|
2024-05-29 04:11:20 +00:00
|
|
|
S { _field: [0, 0, 0, 0, 0, 0, 0, 0] }
|
2015-05-24 16:07:52 +00:00
|
|
|
}
|
|
|
|
|
Pass fat pointers in two immediate arguments
This has a number of advantages compared to creating a copy in memory
and passing a pointer. The obvious one is that we don't have to put the
data into memory but can keep it in registers. Since we're currently
passing a pointer anyway (instead of using e.g. a known offset on the
stack, which is what the `byval` attribute would achieve), we only use a
single additional register for each fat pointer, but save at least two
pointers worth of stack in exchange (sometimes more because more than
one copy gets eliminated). On archs that pass arguments on the stack, we
save a pointer worth of stack even without considering the omitted
copies.
Additionally, LLVM can optimize the code a lot better, to a large degree
due to the fact that lots of copies are gone or can be optimized away.
Additionally, we can now emit attributes like nonnull on the data and/or
vtable pointers contained in the fat pointer, potentially allowing for
even more optimizations.
This results in LLVM passes being about 3-7% faster (depending on the
crate), and the resulting code is also a few percent smaller, for
example:
text data filename
5671479 3941461 before/librustc-d8ace771.so
5447663 3905745 after/librustc-d8ace771.so
1944425 2394024 before/libstd-d8ace771.so
1896769 2387610 after/libstd-d8ace771.so
I had to remove a call in the backtrace-debuginfo test, because LLVM can
now merge the tails of some blocks when optimizations are turned on,
which can't correctly preserve line info.
Fixes #22924
Cc #22891 (at least for fat pointers the code is good now)
2015-06-18 21:57:40 +00:00
|
|
|
// Hack to get the correct size for the length part in slices
|
2022-12-30 14:55:05 +00:00
|
|
|
// CHECK: @helper([[USIZE:i[0-9]+]] noundef %_1)
|
Pass fat pointers in two immediate arguments
This has a number of advantages compared to creating a copy in memory
and passing a pointer. The obvious one is that we don't have to put the
data into memory but can keep it in registers. Since we're currently
passing a pointer anyway (instead of using e.g. a known offset on the
stack, which is what the `byval` attribute would achieve), we only use a
single additional register for each fat pointer, but save at least two
pointers worth of stack in exchange (sometimes more because more than
one copy gets eliminated). On archs that pass arguments on the stack, we
save a pointer worth of stack even without considering the omitted
copies.
Additionally, LLVM can optimize the code a lot better, to a large degree
due to the fact that lots of copies are gone or can be optimized away.
Additionally, we can now emit attributes like nonnull on the data and/or
vtable pointers contained in the fat pointer, potentially allowing for
even more optimizations.
This results in LLVM passes being about 3-7% faster (depending on the
crate), and the resulting code is also a few percent smaller, for
example:
text data filename
5671479 3941461 before/librustc-d8ace771.so
5447663 3905745 after/librustc-d8ace771.so
1944425 2394024 before/libstd-d8ace771.so
1896769 2387610 after/libstd-d8ace771.so
I had to remove a call in the backtrace-debuginfo test, because LLVM can
now merge the tails of some blocks when optimizations are turned on,
which can't correctly preserve line info.
Fixes #22924
Cc #22891 (at least for fat pointers the code is good now)
2015-06-18 21:57:40 +00:00
|
|
|
#[no_mangle]
|
2024-05-29 04:11:20 +00:00
|
|
|
pub fn helper(_: usize) {}
|
Pass fat pointers in two immediate arguments
This has a number of advantages compared to creating a copy in memory
and passing a pointer. The obvious one is that we don't have to put the
data into memory but can keep it in registers. Since we're currently
passing a pointer anyway (instead of using e.g. a known offset on the
stack, which is what the `byval` attribute would achieve), we only use a
single additional register for each fat pointer, but save at least two
pointers worth of stack in exchange (sometimes more because more than
one copy gets eliminated). On archs that pass arguments on the stack, we
save a pointer worth of stack even without considering the omitted
copies.
Additionally, LLVM can optimize the code a lot better, to a large degree
due to the fact that lots of copies are gone or can be optimized away.
Additionally, we can now emit attributes like nonnull on the data and/or
vtable pointers contained in the fat pointer, potentially allowing for
even more optimizations.
This results in LLVM passes being about 3-7% faster (depending on the
crate), and the resulting code is also a few percent smaller, for
example:
text data filename
5671479 3941461 before/librustc-d8ace771.so
5447663 3905745 after/librustc-d8ace771.so
1944425 2394024 before/libstd-d8ace771.so
1896769 2387610 after/libstd-d8ace771.so
I had to remove a call in the backtrace-debuginfo test, because LLVM can
now merge the tails of some blocks when optimizations are turned on,
which can't correctly preserve line info.
Fixes #22924
Cc #22891 (at least for fat pointers the code is good now)
2015-06-18 21:57:40 +00:00
|
|
|
|
// A slice is passed as two scalars: data pointer + length.
// CHECK: @slice(ptr noalias noundef nonnull readonly align 1 %_1.0, [[USIZE]] noundef %_1.1)
// FIXME #25759 This should also have `nocapture`
#[no_mangle]
pub fn slice(_: &[u8]) {}
// Mutable slice: `noalias` but no `readonly` on the data pointer.
// CHECK: @mutable_slice(ptr noalias noundef nonnull align 1 %_1.0, [[USIZE]] noundef %_1.1)
// FIXME #25759 This should also have `nocapture`
#[no_mangle]
pub fn mutable_slice(_: &mut [u8]) {}
2023-07-27 21:44:13 +00:00
|
|
|
// CHECK: @unsafe_slice(ptr noundef nonnull align 2 %_1.0, [[USIZE]] noundef %_1.1)
|
Pass fat pointers in two immediate arguments
This has a number of advantages compared to creating a copy in memory
and passing a pointer. The obvious one is that we don't have to put the
data into memory but can keep it in registers. Since we're currently
passing a pointer anyway (instead of using e.g. a known offset on the
stack, which is what the `byval` attribute would achieve), we only use a
single additional register for each fat pointer, but save at least two
pointers worth of stack in exchange (sometimes more because more than
one copy gets eliminated). On archs that pass arguments on the stack, we
save a pointer worth of stack even without considering the omitted
copies.
Additionally, LLVM can optimize the code a lot better, to a large degree
due to the fact that lots of copies are gone or can be optimized away.
Additionally, we can now emit attributes like nonnull on the data and/or
vtable pointers contained in the fat pointer, potentially allowing for
even more optimizations.
This results in LLVM passes being about 3-7% faster (depending on the
crate), and the resulting code is also a few percent smaller, for
example:
text data filename
5671479 3941461 before/librustc-d8ace771.so
5447663 3905745 after/librustc-d8ace771.so
1944425 2394024 before/libstd-d8ace771.so
1896769 2387610 after/libstd-d8ace771.so
I had to remove a call in the backtrace-debuginfo test, because LLVM can
now merge the tails of some blocks when optimizations are turned on,
which can't correctly preserve line info.
Fixes #22924
Cc #22891 (at least for fat pointers the code is good now)
2015-06-18 21:57:40 +00:00
|
|
|
// unsafe interior means this isn't actually readonly and there may be aliases ...
|
|
|
|
#[no_mangle]
|
2024-05-29 04:11:20 +00:00
|
|
|
pub fn unsafe_slice(_: &[UnsafeInner]) {}
|
Pass fat pointers in two immediate arguments
This has a number of advantages compared to creating a copy in memory
and passing a pointer. The obvious one is that we don't have to put the
data into memory but can keep it in registers. Since we're currently
passing a pointer anyway (instead of using e.g. a known offset on the
stack, which is what the `byval` attribute would achieve), we only use a
single additional register for each fat pointer, but save at least two
pointers worth of stack in exchange (sometimes more because more than
one copy gets eliminated). On archs that pass arguments on the stack, we
save a pointer worth of stack even without considering the omitted
copies.
Additionally, LLVM can optimize the code a lot better, to a large degree
due to the fact that lots of copies are gone or can be optimized away.
Additionally, we can now emit attributes like nonnull on the data and/or
vtable pointers contained in the fat pointer, potentially allowing for
even more optimizations.
This results in LLVM passes being about 3-7% faster (depending on the
crate), and the resulting code is also a few percent smaller, for
example:
text data filename
5671479 3941461 before/librustc-d8ace771.so
5447663 3905745 after/librustc-d8ace771.so
1944425 2394024 before/libstd-d8ace771.so
1896769 2387610 after/libstd-d8ace771.so
I had to remove a call in the backtrace-debuginfo test, because LLVM can
now merge the tails of some blocks when optimizations are turned on,
which can't correctly preserve line info.
Fixes #22924
Cc #22891 (at least for fat pointers the code is good now)
2015-06-18 21:57:40 +00:00
|
|
|
|
// A raw slice pointer: still two scalars, but no validity attributes.
// CHECK: @raw_slice(ptr noundef %_1.0, [[USIZE]] noundef %_1.1)
#[no_mangle]
pub fn raw_slice(_: *const [u8]) {}
// CHECK: @str(ptr noalias noundef nonnull readonly align 1 %_1.0, [[USIZE]] noundef %_1.1)
// FIXME #25759 This should also have `nocapture`
#[no_mangle]
pub fn str(_: &[u8]) {}
|
2023-07-27 21:44:13 +00:00
|
|
|
// CHECK: @trait_borrow(ptr noundef nonnull align 1 %_1.0, {{.+}} noalias noundef readonly align {{.*}} dereferenceable({{.*}}) %_1.1)
|
Pass fat pointers in two immediate arguments
This has a number of advantages compared to creating a copy in memory
and passing a pointer. The obvious one is that we don't have to put the
data into memory but can keep it in registers. Since we're currently
passing a pointer anyway (instead of using e.g. a known offset on the
stack, which is what the `byval` attribute would achieve), we only use a
single additional register for each fat pointer, but save at least two
pointers worth of stack in exchange (sometimes more because more than
one copy gets eliminated). On archs that pass arguments on the stack, we
save a pointer worth of stack even without considering the omitted
copies.
Additionally, LLVM can optimize the code a lot better, to a large degree
due to the fact that lots of copies are gone or can be optimized away.
Additionally, we can now emit attributes like nonnull on the data and/or
vtable pointers contained in the fat pointer, potentially allowing for
even more optimizations.
This results in LLVM passes being about 3-7% faster (depending on the
crate), and the resulting code is also a few percent smaller, for
example:
text data filename
5671479 3941461 before/librustc-d8ace771.so
5447663 3905745 after/librustc-d8ace771.so
1944425 2394024 before/libstd-d8ace771.so
1896769 2387610 after/libstd-d8ace771.so
I had to remove a call in the backtrace-debuginfo test, because LLVM can
now merge the tails of some blocks when optimizations are turned on,
which can't correctly preserve line info.
Fixes #22924
Cc #22891 (at least for fat pointers the code is good now)
2015-06-18 21:57:40 +00:00
|
|
|
// FIXME #25759 This should also have `nocapture`
|
|
|
|
// `&dyn Trait` is a fat pointer split into two immediate arguments: the data
// pointer (`%_1.0`, nonnull align 1) and the vtable pointer (`%_1.1`, noalias
// readonly dereferenceable), as pinned by the CHECK line above.
#[no_mangle]
|
2024-05-29 04:11:20 +00:00
|
|
|
pub fn trait_borrow(_: &dyn Drop) {}
|
Pass fat pointers in two immediate arguments
This has a number of advantages compared to creating a copy in memory
and passing a pointer. The obvious one is that we don't have to put the
data into memory but can keep it in registers. Since we're currently
passing a pointer anyway (instead of using e.g. a known offset on the
stack, which is what the `byval` attribute would achieve), we only use a
single additional register for each fat pointer, but save at least two
pointers worth of stack in exchange (sometimes more because more than
one copy gets eliminated). On archs that pass arguments on the stack, we
save a pointer worth of stack even without considering the omitted
copies.
Additionally, LLVM can optimize the code a lot better, to a large degree
due to the fact that lots of copies are gone or can be optimized away.
Additionally, we can now emit attributes like nonnull on the data and/or
vtable pointers contained in the fat pointer, potentially allowing for
even more optimizations.
This results in LLVM passes being about 3-7% faster (depending on the
crate), and the resulting code is also a few percent smaller, for
example:
text data filename
5671479 3941461 before/librustc-d8ace771.so
5447663 3905745 after/librustc-d8ace771.so
1944425 2394024 before/libstd-d8ace771.so
1896769 2387610 after/libstd-d8ace771.so
I had to remove a call in the backtrace-debuginfo test, because LLVM can
now merge the tails of some blocks when optimizations are turned on,
which can't correctly preserve line info.
Fixes #22924
Cc #22891 (at least for fat pointers the code is good now)
2015-06-18 21:57:40 +00:00
|
|
|
|
2023-07-27 21:44:13 +00:00
|
|
|
// CHECK: @option_trait_borrow(ptr noundef align 1 %x.0, ptr %x.1)
|
2022-12-30 20:11:30 +00:00
|
|
|
// `Option<&dyn Trait>` is still passed as two pointers, but per the CHECK line
// above the data pointer `%x.0` no longer carries `nonnull` — presumably the
// `None` case occupies the null-data-pointer niche.
#[no_mangle]
|
2024-05-29 04:11:20 +00:00
|
|
|
pub fn option_trait_borrow(x: Option<&dyn Drop>) {}
|
2022-12-30 20:11:30 +00:00
|
|
|
|
2023-07-27 21:44:13 +00:00
|
|
|
// CHECK: @option_trait_borrow_mut(ptr noundef align 1 %x.0, ptr %x.1)
|
2022-12-30 20:11:30 +00:00
|
|
|
// Same as `option_trait_borrow`, but through `&mut`: the CHECK line above shows
// the data pointer `%x.0` without `nonnull` (and without `noalias`), plus the
// plain vtable pointer `%x.1`.
#[no_mangle]
|
2024-05-29 04:11:20 +00:00
|
|
|
pub fn option_trait_borrow_mut(x: Option<&mut dyn Drop>) {}
|
2022-12-30 20:11:30 +00:00
|
|
|
|
2023-07-27 21:44:13 +00:00
|
|
|
// CHECK: @trait_raw(ptr noundef %_1.0, {{.+}} noalias noundef readonly align {{.*}} dereferenceable({{.*}}) %_1.1)
|
2022-02-05 06:00:37 +00:00
|
|
|
// A raw `*const dyn Trait` is also split in two, but the data half gets only
// `noundef` (no nonnull/align — raw pointers may be null or dangling), while
// the vtable half keeps its noalias/readonly/dereferenceable attributes; see
// the CHECK line above.
#[no_mangle]
|
2024-05-29 04:11:20 +00:00
|
|
|
pub fn trait_raw(_: *const dyn Drop) {}
|
2022-02-05 06:00:37 +00:00
|
|
|
|
2023-07-27 21:44:13 +00:00
|
|
|
// CHECK: @trait_box(ptr noalias noundef nonnull align 1{{( %0)?}}, {{.+}} noalias noundef readonly align {{.*}} dereferenceable({{.*}}){{( %1)?}})
|
Pass fat pointers in two immediate arguments
This has a number of advantages compared to creating a copy in memory
and passing a pointer. The obvious one is that we don't have to put the
data into memory but can keep it in registers. Since we're currently
passing a pointer anyway (instead of using e.g. a known offset on the
stack, which is what the `byval` attribute would achieve), we only use a
single additional register for each fat pointer, but save at least two
pointers worth of stack in exchange (sometimes more because more than
one copy gets eliminated). On archs that pass arguments on the stack, we
save a pointer worth of stack even without considering the omitted
copies.
Additionally, LLVM can optimize the code a lot better, to a large degree
due to the fact that lots of copies are gone or can be optimized away.
Additionally, we can now emit attributes like nonnull on the data and/or
vtable pointers contained in the fat pointer, potentially allowing for
even more optimizations.
This results in LLVM passes being about 3-7% faster (depending on the
crate), and the resulting code is also a few percent smaller, for
example:
text data filename
5671479 3941461 before/librustc-d8ace771.so
5447663 3905745 after/librustc-d8ace771.so
1944425 2394024 before/libstd-d8ace771.so
1896769 2387610 after/libstd-d8ace771.so
I had to remove a call in the backtrace-debuginfo test, because LLVM can
now merge the tails of some blocks when optimizations are turned on,
which can't correctly preserve line info.
Fixes #22924
Cc #22891 (at least for fat pointers the code is good now)
2015-06-18 21:57:40 +00:00
|
|
|
// `Box<dyn Trait>` passes the owned data pointer as `noalias noundef nonnull
// align 1` plus the dereferenceable vtable pointer; the CHECK line above
// matches the parameter names optionally (`{{( %0)?}}`) since the argument is
// unnamed here.
#[no_mangle]
|
2024-05-29 04:11:20 +00:00
|
|
|
pub fn trait_box(_: Box<dyn Drop + Unpin>) {}
|
Pass fat pointers in two immediate arguments
This has a number of advantages compared to creating a copy in memory
and passing a pointer. The obvious one is that we don't have to put the
data into memory but can keep it in registers. Since we're currently
passing a pointer anyway (instead of using e.g. a known offset on the
stack, which is what the `byval` attribute would achieve), we only use a
single additional register for each fat pointer, but save at least two
pointers worth of stack in exchange (sometimes more because more than
one copy gets eliminated). On archs that pass arguments on the stack, we
save a pointer worth of stack even without considering the omitted
copies.
Additionally, LLVM can optimize the code a lot better, to a large degree
due to the fact that lots of copies are gone or can be optimized away.
Additionally, we can now emit attributes like nonnull on the data and/or
vtable pointers contained in the fat pointer, potentially allowing for
even more optimizations.
This results in LLVM passes being about 3-7% faster (depending on the
crate), and the resulting code is also a few percent smaller, for
example:
text data filename
5671479 3941461 before/librustc-d8ace771.so
5447663 3905745 after/librustc-d8ace771.so
1944425 2394024 before/libstd-d8ace771.so
1896769 2387610 after/libstd-d8ace771.so
I had to remove a call in the backtrace-debuginfo test, because LLVM can
now merge the tails of some blocks when optimizations are turned on,
which can't correctly preserve line info.
Fixes #22924
Cc #22891 (at least for fat pointers the code is good now)
2015-06-18 21:57:40 +00:00
|
|
|
|
2023-07-27 21:44:13 +00:00
|
|
|
// CHECK: { ptr, ptr } @trait_option(ptr noalias noundef align 1 %x.0, ptr %x.1)
|
2018-03-26 14:26:03 +00:00
|
|
|
// Identity function: checks that `Option<Box<dyn Trait>>` is both received and
// returned as an immediate `{ ptr, ptr }` pair, with `noalias` kept on the data
// pointer but no `nonnull` (see the CHECK line above).
#[no_mangle]
|
2023-01-02 13:09:01 +00:00
|
|
|
pub fn trait_option(x: Option<Box<dyn Drop + Unpin>>) -> Option<Box<dyn Drop + Unpin>> {
|
2024-05-29 04:11:20 +00:00
|
|
|
x
|
2018-03-26 14:26:03 +00:00
|
|
|
}
|
|
|
|
|
2023-07-27 21:44:13 +00:00
|
|
|
// CHECK: { ptr, [[USIZE]] } @return_slice(ptr noalias noundef nonnull readonly align 2 %x.0, [[USIZE]] noundef %x.1)
|
Pass fat pointers in two immediate arguments
This has a number of advantages compared to creating a copy in memory
and passing a pointer. The obvious one is that we don't have to put the
data into memory but can keep it in registers. Since we're currently
passing a pointer anyway (instead of using e.g. a known offset on the
stack, which is what the `byval` attribute would achieve), we only use a
single additional register for each fat pointer, but save at least two
pointers worth of stack in exchange (sometimes more because more than
one copy gets eliminated). On archs that pass arguments on the stack, we
save a pointer worth of stack even without considering the omitted
copies.
Additionally, LLVM can optimize the code a lot better, to a large degree
due to the fact that lots of copies are gone or can be optimized away.
Additionally, we can now emit attributes like nonnull on the data and/or
vtable pointers contained in the fat pointer, potentially allowing for
even more optimizations.
This results in LLVM passes being about 3-7% faster (depending on the
crate), and the resulting code is also a few percent smaller, for
example:
text data filename
5671479 3941461 before/librustc-d8ace771.so
5447663 3905745 after/librustc-d8ace771.so
1944425 2394024 before/libstd-d8ace771.so
1896769 2387610 after/libstd-d8ace771.so
I had to remove a call in the backtrace-debuginfo test, because LLVM can
now merge the tails of some blocks when optimizations are turned on,
which can't correctly preserve line info.
Fixes #22924
Cc #22891 (at least for fat pointers the code is good now)
2015-06-18 21:57:40 +00:00
|
|
|
// Identity function over a shared slice: received as data pointer (`noalias
// noundef nonnull readonly align 2` — align 2 from the u16 element) plus a
// `[[USIZE]]` length, and returned as the immediate pair `{ ptr, [[USIZE]] }`;
// see the CHECK line above.
#[no_mangle]
|
2017-10-30 17:18:00 +00:00
|
|
|
pub fn return_slice(x: &[u16]) -> &[u16] {
|
2024-05-29 04:11:20 +00:00
|
|
|
x
|
Pass fat pointers in two immediate arguments
This has a number of advantages compared to creating a copy in memory
and passing a pointer. The obvious one is that we don't have to put the
data into memory but can keep it in registers. Since we're currently
passing a pointer anyway (instead of using e.g. a known offset on the
stack, which is what the `byval` attribute would achieve), we only use a
single additional register for each fat pointer, but save at least two
pointers worth of stack in exchange (sometimes more because more than
one copy gets eliminated). On archs that pass arguments on the stack, we
save a pointer worth of stack even without considering the omitted
copies.
Additionally, LLVM can optimize the code a lot better, to a large degree
due to the fact that lots of copies are gone or can be optimized away.
Additionally, we can now emit attributes like nonnull on the data and/or
vtable pointers contained in the fat pointer, potentially allowing for
even more optimizations.
This results in LLVM passes being about 3-7% faster (depending on the
crate), and the resulting code is also a few percent smaller, for
example:
text data filename
5671479 3941461 before/librustc-d8ace771.so
5447663 3905745 after/librustc-d8ace771.so
1944425 2394024 before/libstd-d8ace771.so
1896769 2387610 after/libstd-d8ace771.so
I had to remove a call in the backtrace-debuginfo test, because LLVM can
now merge the tails of some blocks when optimizations are turned on,
which can't correctly preserve line info.
Fixes #22924
Cc #22891 (at least for fat pointers the code is good now)
2015-06-18 21:57:40 +00:00
|
|
|
}
|
|
|
|
|
2022-02-12 06:38:24 +00:00
|
|
|
// CHECK: { i16, i16 } @enum_id_1(i16 noundef %x.0, i16 %x.1)
|
2018-03-27 14:44:03 +00:00
|
|
|
// Identity function: `Option<Result<u16, u16>>` is a ScalarPair passed and
// returned as an immediate `{ i16, i16 }` (discriminant half gets `noundef`,
// payload half does not — see the CHECK line above).
#[no_mangle]
|
|
|
|
pub fn enum_id_1(x: Option<Result<u16, u16>>) -> Option<Result<u16, u16>> {
|
2024-05-29 04:11:20 +00:00
|
|
|
x
|
2018-03-27 14:44:03 +00:00
|
|
|
}
|
|
|
|
|
Separate immediate and in-memory ScalarPair representation
Currently, we assume that ScalarPair is always represented using
a two-element struct, both as an immediate value and when stored
in memory.
This currently works fairly well, but runs into problems with
https://github.com/rust-lang/rust/pull/116672, where a ScalarPair
involving an i128 type can no longer be represented as a two-element
struct in memory. For example, the tuple `(i32, i128)` needs to be
represented in-memory as `{ i32, [3 x i32], i128 }` to satisfy
alignment requirement. Using `{ i32, i128 }` instead will result in
the second element being stored at the wrong offset (prior to
LLVM 18).
Resolve this issue by no longer requiring that the immediate and
in-memory type for ScalarPair are the same. The in-memory type
will now look the same as for normal struct types (and will include
padding filler and similar), while the immediate type stays a
simple two-element struct type. This also means that booleans in
immediate ScalarPair are now represented as i1 rather than i8,
just like we do everywhere else.
The core change here is to llvm_type (which now treats ScalarPair
as a normal struct) and immediate_llvm_type (which returns the
two-element struct that llvm_type used to produce). The rest is
fixing things up to no longer assume these are the same. In
particular, this switches places that try to get pointers to the
ScalarPair elements to use byte-geps instead of struct-geps.
2023-12-15 11:14:39 +00:00
|
|
|
// CHECK: { i1, i8 } @enum_id_2(i1 noundef zeroext %x.0, i8 %x.1)
|
2018-03-27 14:44:03 +00:00
|
|
|
// Identity function: `Option<u8>` travels as the immediate pair `{ i1, i8 }` —
// the discriminant is an `i1 noundef zeroext` in the immediate ScalarPair
// representation (per the "Separate immediate and in-memory ScalarPair
// representation" change noted above), with the `u8` payload as the second
// element; see the CHECK line above.
#[no_mangle]
|
|
|
|
pub fn enum_id_2(x: Option<u8>) -> Option<u8> {
|
2024-05-29 04:11:20 +00:00
|
|
|
x
|
2018-03-27 14:44:03 +00:00
|
|
|
}
|
2023-02-10 23:54:05 +00:00
|
|
|
|
2023-07-27 21:44:13 +00:00
|
|
|
// CHECK: { ptr, {{.+}} } @dyn_star(ptr noundef %x.0, {{.+}} noalias noundef readonly align {{.*}} dereferenceable({{.*}}) %x.1)
|
2023-02-15 03:42:45 +00:00
|
|
|
// Expect an ABI something like `{ {}*, [3 x i64]* }`, but that's hard to match on generically,
|
|
|
|
// so do like the `trait_box` test and just match on `{{.+}}` for the vtable.
|
2023-02-10 23:54:05 +00:00
|
|
|
// Identity function: `dyn* Trait` is passed and returned by value as a pair of
// a pointer-sized data word and a vtable pointer (the vtable half is matched
// generically with `{{.+}}` — see the CHECK line and the explanatory comments
// above).
#[no_mangle]
|
|
|
|
pub fn dyn_star(x: dyn* Drop) -> dyn* Drop {
|
2024-05-29 04:11:20 +00:00
|
|
|
x
|
2023-02-10 23:54:05 +00:00
|
|
|
}
|