Rollup merge of #127168 - DianQK:cast-size, r=workingjubilee

Use the aligned size for alloca at args/ret when the pass mode is cast

Fixes #75839. Fixes #121028.

LLVM's `load` and `store` instructions access the full aligned size of the accessed type, so a scratch alloca sized only to the unaligned struct size can be read past its end. For example, `load { i64, i32 }` accesses 16 bytes on x86_64: https://alive2.llvm.org/ce/z/n8CHAp.

As an aside, this example is expected to be optimized into immediate UB, as Alive2 shows: https://rust.godbolt.org/z/b7xK7hv1c and https://alive2.llvm.org/ce/z/vZDtZH.
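
To make the failure mode concrete, here is a minimal, self-contained Rust sketch of the shape the new regression test exercises (the names mirror the added codegen test below; the sketch itself is illustrative and not part of the patch):

```rust
#[repr(C)]
pub struct Three32s {
    a: u32,
    b: u32,
    c: u32,
}

// A no-op extern "C" callee is enough to force `PassMode::Cast` for the
// argument on x86_64, where this struct is passed as the cast type `{ i64, i32 }`.
#[inline(never)]
pub extern "C" fn receives_three32s(_x: Three32s) {}

fn main() {
    // The IR `load`/`store` of `{ i64, i32 }` access 16 bytes, so the scratch
    // alloca the value is copied through must be 16 bytes, not the struct's 12.
    receives_three32s(Three32s { a: 1, b: 2, c: 3 });
}
```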

r? compiler

Commit 33b0238586 by Matthias Krüger, 2024-07-02 17:47:48 +02:00, committed by GitHub.
7 changed files with 381 additions and 39 deletions


@@ -226,7 +226,8 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
// when passed by value, making it smaller.
// - On some ABIs, the Rust layout { u16, u16, u16 } may be padded up to 8 bytes
// when passed by value, making it larger.
let copy_bytes = cmp::min(scratch_size.bytes(), self.layout.size.bytes());
let copy_bytes =
cmp::min(cast.unaligned_size(bx).bytes(), self.layout.size.bytes());
// Allocate some scratch space...
let llscratch = bx.alloca(scratch_size, scratch_align);
bx.lifetime_start(llscratch, scratch_size);


@@ -1521,7 +1521,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// when passed by value, making it smaller.
// - On some ABIs, the Rust layout { u16, u16, u16 } may be padded up to 8 bytes
// when passed by value, making it larger.
let copy_bytes = cmp::min(scratch_size.bytes(), arg.layout.size.bytes());
let copy_bytes = cmp::min(cast.unaligned_size(bx).bytes(), arg.layout.size.bytes());
// Allocate some scratch space...
let llscratch = bx.alloca(scratch_size, scratch_align);
bx.lifetime_start(llscratch, scratch_size);


@@ -230,10 +230,20 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
let layout = start_bx.layout_of(fx.monomorphize(decl.ty));
assert!(!layout.ty.has_erasable_regions());
if local == mir::RETURN_PLACE && fx.fn_abi.ret.is_indirect() {
debug!("alloc: {:?} (return place) -> place", local);
let llretptr = start_bx.get_param(0);
return LocalRef::Place(PlaceRef::new_sized(llretptr, layout));
if local == mir::RETURN_PLACE {
match fx.fn_abi.ret.mode {
PassMode::Indirect { .. } => {
debug!("alloc: {:?} (return place) -> place", local);
let llretptr = start_bx.get_param(0);
return LocalRef::Place(PlaceRef::new_sized(llretptr, layout));
}
PassMode::Cast { ref cast, .. } => {
debug!("alloc: {:?} (return place) -> place", local);
let size = cast.size(&start_bx);
return LocalRef::Place(PlaceRef::alloca_size(&mut start_bx, size, layout));
}
_ => {}
};
}
if memory_locals.contains(local) {


@@ -108,9 +108,17 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
pub fn alloca<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
bx: &mut Bx,
layout: TyAndLayout<'tcx>,
) -> Self {
Self::alloca_size(bx, layout.size, layout)
}
pub fn alloca_size<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
bx: &mut Bx,
size: Size,
layout: TyAndLayout<'tcx>,
) -> Self {
assert!(layout.is_sized(), "tried to statically allocate unsized place");
PlaceValue::alloca(bx, layout.size, layout.align.abi).with_type(layout)
PlaceValue::alloca(bx, size, layout.align.abi).with_type(layout)
}
/// Returns a place for an indirect reference to an unsized place.


@@ -339,7 +339,9 @@ impl CastTarget {
}
}
pub fn size<C: HasDataLayout>(&self, _cx: &C) -> Size {
/// When you only access the range containing valid data, you can use this unaligned size;
/// otherwise, use the safer `size` method.
pub fn unaligned_size<C: HasDataLayout>(&self, _cx: &C) -> Size {
// Prefix arguments are passed in specific designated registers
let prefix_size = self
.prefix
@@ -353,6 +355,10 @@
prefix_size + rest_size
}
pub fn size<C: HasDataLayout>(&self, cx: &C) -> Size {
self.unaligned_size(cx).align_to(self.align(cx))
}
pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
self.prefix
.iter()
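
Rough sketch of the arithmetic behind the new `size`/`unaligned_size` split (illustrative only, using plain integers rather than rustc's `Size`/`Align` types):

```rust
// Round `size` up to the next multiple of `align`, like `Size::align_to`.
fn align_to(size: u64, align: u64) -> u64 {
    size.div_ceil(align) * align
}

fn main() {
    // x86_64 cast type `{ i64, i32 }`: 8 + 4 = 12 bytes of valid data,
    // 8-byte alignment.
    let unaligned_size = 8 + 4;
    let align = 8;
    // `CastTarget::size` now reports the aligned size, which is what the
    // generated `load`/`store` of the cast type actually access; the alloca
    // is sized from this, while copies still use the unaligned 12 bytes.
    assert_eq!(align_to(unaligned_size, align), 16);
}
```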


@@ -1,6 +1,7 @@
// ignore-tidy-linelength
//@ revisions:aarch64 loongarch64 powerpc64 sparc64
//@ compile-flags: -O -C no-prepopulate-passes
//@ revisions:aarch64 loongarch64 powerpc64 sparc64 x86_64
// FIXME: Add `-Cllvm-args=--lint-abort-on-error` after LLVM 19
//@ compile-flags: -O -C no-prepopulate-passes -C passes=lint
//@[aarch64] compile-flags: --target aarch64-unknown-linux-gnu
//@[aarch64] needs-llvm-components: arm
@@ -10,6 +11,8 @@
//@[powerpc64] needs-llvm-components: powerpc
//@[sparc64] compile-flags: --target sparc64-unknown-linux-gnu
//@[sparc64] needs-llvm-components: sparc
//@[x86_64] compile-flags: --target x86_64-unknown-linux-gnu
//@[x86_64] needs-llvm-components: x86
// Tests that arguments with `PassMode::Cast` are handled correctly.
@@ -60,30 +63,261 @@ pub struct DoubleFloat {
g: f32,
}
extern "C" {
fn receives_twou16s(x: TwoU16s);
fn returns_twou16s() -> TwoU16s;
fn receives_fiveu16s(x: FiveU16s);
fn returns_fiveu16s() -> FiveU16s;
fn receives_doubledouble(x: DoubleDouble);
fn returns_doubledouble() -> DoubleDouble;
// These functions cause an ICE in sparc64 ABI code (https://github.com/rust-lang/rust/issues/122620)
#[cfg(not(target_arch = "sparc64"))]
fn receives_doublefloat(x: DoubleFloat);
#[cfg(not(target_arch = "sparc64"))]
fn returns_doublefloat() -> DoubleFloat;
// On x86_64, this struct will be passed as `{ i64, i32 }`.
// The load and store instructions will access 16 bytes, so we should allocate 16 bytes.
#[repr(C)]
pub struct Three32s {
a: u32,
b: u32,
c: u32,
}
// CHECK-LABEL: @call_twou16s
// CHECK-LABEL: @receives_twou16s
// aarch64-SAME: ([[ABI_TYPE:i64]] {{.*}}[[ABI_VALUE:%.+]])
// loongarch64-SAME: ([[ABI_TYPE:i64]] {{.*}}[[ABI_VALUE:%.+]])
// powerpc64-SAME: ([[ABI_TYPE:i32]] {{.*}}[[ABI_VALUE:%.+]])
// sparc64-SAME: ([[ABI_TYPE:i64]] {{.*}}[[ABI_VALUE:%.+]])
// x86_64-SAME: ([[ABI_TYPE:i32]] {{.*}}[[ABI_VALUE:%.+]])
#[no_mangle]
pub unsafe fn call_twou16s() {
#[inline(never)]
pub extern "C" fn receives_twou16s(x: TwoU16s) {
// aarch64: [[ABI_ALLOCA:%.+]] = alloca [8 x i8], align [[ABI_ALIGN:8]]
// loongarch64: [[ABI_ALLOCA:%.+]] = alloca [8 x i8], align [[ABI_ALIGN:8]]
// powerpc64: [[ABI_ALLOCA:%.+]] = alloca [4 x i8], align [[ABI_ALIGN:4]]
// sparc64: [[ABI_ALLOCA:%.+]] = alloca [8 x i8], align [[ABI_ALIGN:8]]
// x86_64: [[ABI_ALLOCA:%.+]] = alloca [4 x i8], align [[ABI_ALIGN:4]]
// CHECK: [[RUST_ALLOCA:%.+]] = alloca [4 x i8], align [[RUST_ALIGN:2]]
// CHECK: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// CHECK: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 4, i1 false)
}
// CHECK-LABEL: @returns_twou16s
// powerpc64-SAME: sret([4 x i8]) align [[RUST_ALIGN:2]] {{.*}}[[RET_PTR:%.*]])
#[no_mangle]
#[inline(never)]
pub extern "C" fn returns_twou16s() -> TwoU16s {
// powerpc returns this struct via sret pointer, it doesn't use the cast ABI.
// The other targets copy the cast ABI type to an alloca.
// aarch64: [[ABI_ALLOCA:%.+]] = alloca [8 x i8], align [[ABI_ALIGN:2]]
// loongarch64: [[ABI_ALLOCA:%.+]] = alloca [8 x i8], align [[ABI_ALIGN:2]]
// sparc64: [[ABI_ALLOCA:%.+]] = alloca [8 x i8], align [[ABI_ALIGN:2]]
// x86_64: [[ABI_ALLOCA:%.+]] = alloca [4 x i8], align [[ABI_ALIGN:2]]
// aarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:i64]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// loongarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:i64]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// sparc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:i64]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// x86_64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:i32]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// aarch64: ret [[ABI_TYPE]] [[ABI_VALUE]]
// loongarch64: ret [[ABI_TYPE]] [[ABI_VALUE]]
// sparc64: ret [[ABI_TYPE]] [[ABI_VALUE]]
// x86_64: ret [[ABI_TYPE]] [[ABI_VALUE]]
TwoU16s { a: 0, b: 1 }
}
// CHECK-LABEL: @receives_fiveu16s
// aarch64-SAME: ([[ABI_TYPE:\[2 x i64\]]] {{.*}}[[ABI_VALUE:%.+]])
// loongarch64-SAME: ([[ABI_TYPE:\[2 x i64\]]] {{.*}}[[ABI_VALUE:%.+]])
// powerpc64-SAME: ([[ABI_TYPE:\[2 x i64\]]] {{.*}}[[ABI_VALUE:%.+]])
// sparc64-SAME: ([[ABI_TYPE:\[2 x i64\]]] {{.*}}[[ABI_VALUE:%.+]])
// x86_64-SAME: ([[ABI_TYPE:{ i64, i16 }]] {{.*}}[[ABI_VALUE:%.+]])
#[no_mangle]
#[inline(never)]
pub extern "C" fn receives_fiveu16s(x: FiveU16s) {
// CHECK: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
// CHECK: [[RUST_ALLOCA:%.+]] = alloca [10 x i8], align [[RUST_ALIGN:2]]
// CHECK: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// CHECK: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 10, i1 false)
}
// CHECK-LABEL: @returns_fiveu16s
// powerpc64-SAME: sret([10 x i8]) align [[RUST_ALIGN:2]] {{.*}}[[RET_PTR:%.*]])
#[no_mangle]
#[inline(never)]
pub extern "C" fn returns_fiveu16s() -> FiveU16s {
// powerpc returns this struct via sret pointer, it doesn't use the cast ABI.
// The other targets copy the cast ABI type to an alloca.
// aarch64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:2]]
// loongarch64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:2]]
// sparc64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:2]]
// x86_64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:2]]
// aarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// loongarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// sparc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// x86_64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ i64, i16 }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// aarch64: ret [[ABI_TYPE]] [[ABI_VALUE]]
// loongarch64: ret [[ABI_TYPE]] [[ABI_VALUE]]
// sparc64: ret [[ABI_TYPE]] [[ABI_VALUE]]
// x86_64: ret [[ABI_TYPE]] [[ABI_VALUE]]
FiveU16s { a: 0, b: 1, c: 2, d: 3, e: 4 }
}
// CHECK-LABEL: @receives_doubledouble
// aarch64-SAME: ([[ABI_TYPE:\[2 x double\]]] {{.*}}[[ABI_VALUE:%.+]])
// loongarch64-SAME: ([[ABI_TYPE:{ double, double }]] {{.*}}[[ABI_VALUE:%.+]])
// powerpc64-SAME: ([[ABI_TYPE:\[2 x i64\]]] {{.*}}[[ABI_VALUE:%.+]])
// sparc64-SAME: ([[ABI_TYPE:{ double, double }]] {{.*}}[[ABI_VALUE:%.+]])
// x86_64-SAME: ([[ABI_TYPE:{ double, double }]] {{.*}}[[ABI_VALUE:%.+]])
#[no_mangle]
#[inline(never)]
pub extern "C" fn receives_doubledouble(x: DoubleDouble) {
// CHECK: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
// CHECK: [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]]
// CHECK: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// CHECK: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 16, i1 false)
}
// CHECK-LABEL: @returns_doubledouble
// powerpc64-SAME: sret([16 x i8]) align [[RUST_ALIGN:8]] {{.*}}[[RET_PTR:%.*]])
#[no_mangle]
#[inline(never)]
pub extern "C" fn returns_doubledouble() -> DoubleDouble {
// powerpc returns this struct via sret pointer, it doesn't use the cast ABI.
// The other targets copy the cast ABI type to an alloca.
// aarch64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
// loongarch64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
// sparc64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
// x86_64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
// aarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x double\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// loongarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ double, double }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// sparc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ double, double }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// x86_64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ double, double }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// aarch64: ret [[ABI_TYPE]] [[ABI_VALUE]]
// loongarch64: ret [[ABI_TYPE]] [[ABI_VALUE]]
// sparc64: ret [[ABI_TYPE]] [[ABI_VALUE]]
// x86_64: ret [[ABI_TYPE]] [[ABI_VALUE]]
DoubleDouble { f: 0., g: 1. }
}
// CHECK-LABEL: @receives_three32s
// aarch64-SAME: ([[ABI_TYPE:\[2 x i64\]]] {{.*}}[[ABI_VALUE:%.+]])
// loongarch64-SAME: ([[ABI_TYPE:\[2 x i64\]]] {{.*}}[[ABI_VALUE:%.+]])
// powerpc64-SAME: ([[ABI_TYPE:\[2 x i64\]]] {{.*}}[[ABI_VALUE:%.+]])
// sparc64-SAME: ([[ABI_TYPE:\[2 x i64\]]] {{.*}}[[ABI_VALUE:%.+]])
// x86_64-SAME: ([[ABI_TYPE:{ i64, i32 }]] {{.*}}[[ABI_VALUE:%.+]])
#[no_mangle]
#[inline(never)]
pub extern "C" fn receives_three32s(x: Three32s) {
// CHECK: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
// CHECK: [[RUST_ALLOCA:%.+]] = alloca [12 x i8], align [[RUST_ALIGN:4]]
// CHECK: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// CHECK: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 12, i1 false)
}
// CHECK-LABEL: @returns_three32s
// powerpc64-SAME: sret([12 x i8]) align [[RUST_ALIGN:4]] {{.*}}[[RET_PTR:%.*]])
#[no_mangle]
#[inline(never)]
pub extern "C" fn returns_three32s() -> Three32s {
// powerpc returns this struct via sret pointer, it doesn't use the cast ABI.
// The other targets copy the cast ABI type to an alloca.
// aarch64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:4]]
// loongarch64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:4]]
// sparc64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:4]]
// x86_64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:4]]
// aarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// loongarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// sparc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// x86_64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ i64, i32 }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// aarch64: ret [[ABI_TYPE]] [[ABI_VALUE]]
// loongarch64: ret [[ABI_TYPE]] [[ABI_VALUE]]
// sparc64: ret [[ABI_TYPE]] [[ABI_VALUE]]
// x86_64: ret [[ABI_TYPE]] [[ABI_VALUE]]
Three32s { a: 0, b: 0, c: 0 }
}
// These functions cause an ICE in sparc64 ABI code (https://github.com/rust-lang/rust/issues/122620)
#[cfg(not(target_arch = "sparc64"))]
// aarch64-LABEL: @receives_doublefloat
// loongarch64-LABEL: @receives_doublefloat
// powerpc64-LABEL: @receives_doublefloat
// x86_64-LABEL: @receives_doublefloat
// aarch64-SAME: ([[ABI_TYPE:\[2 x i64\]]] {{.*}}[[ABI_VALUE:%.+]])
// loongarch64-SAME: ([[ABI_TYPE:{ double, float }]] {{.*}}[[ABI_VALUE:%.+]])
// powerpc64-SAME: ([[ABI_TYPE:\[2 x i64\]]] {{.*}}[[ABI_VALUE:%.+]])
// x86_64-SAME: ([[ABI_TYPE:{ double, double }]] {{.*}}[[ABI_VALUE:%.+]])
#[no_mangle]
#[inline(never)]
pub extern "C" fn receives_doublefloat(x: DoubleFloat) {
// aarch64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
// loongarch64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
// powerpc64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
// x86_64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
// aarch64: [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]]
// loongarch64: [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]]
// powerpc64: [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]]
// x86_64: [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]]
// aarch64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// loongarch64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// powerpc64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// x86_64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// aarch64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 16, i1 false)
// loongarch64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 12, i1 false)
// powerpc64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 16, i1 false)
// x86_64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 16, i1 false)
}
#[cfg(not(target_arch = "sparc64"))]
// aarch64-LABEL: @returns_doublefloat
// loongarch64-LABEL: @returns_doublefloat
// powerpc64-LABEL: @returns_doublefloat
// x86_64-LABEL: @returns_doublefloat
// powerpc64-SAME: sret([16 x i8]) align [[RUST_ALIGN:8]] {{.*}}[[RET_PTR:%.*]])
#[no_mangle]
#[inline(never)]
pub extern "C" fn returns_doublefloat() -> DoubleFloat {
// powerpc returns this struct via sret pointer, it doesn't use the cast ABI.
// The other targets copy the cast ABI type to an alloca.
// aarch64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
// loongarch64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
// x86_64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
// aarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// loongarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ double, float }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// x86_64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ double, double }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// aarch64: ret [[ABI_TYPE]] [[ABI_VALUE]]
// loongarch64: ret [[ABI_TYPE]] [[ABI_VALUE]]
// x86_64: ret [[ABI_TYPE]] [[ABI_VALUE]]
DoubleFloat { f: 0., g: 0. }
}
// CHECK-LABEL: @call_twou16s
#[no_mangle]
pub fn call_twou16s() {
// aarch64: [[ABI_ALLOCA:%.+]] = alloca [8 x i8], align [[ABI_ALIGN:8]]
// loongarch64: [[ABI_ALLOCA:%.+]] = alloca [8 x i8], align [[ABI_ALIGN:8]]
// powerpc64: [[ABI_ALLOCA:%.+]] = alloca [4 x i8], align [[ABI_ALIGN:4]]
// sparc64: [[ABI_ALLOCA:%.+]] = alloca [8 x i8], align [[ABI_ALIGN:8]]
// x86_64: [[ABI_ALLOCA:%.+]] = alloca [4 x i8], align [[ABI_ALIGN:4]]
// CHECK: [[RUST_ALLOCA:%.+]] = alloca [4 x i8], align [[RUST_ALIGN:2]]
@@ -93,6 +327,7 @@ pub unsafe fn call_twou16s() {
// loongarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:i64]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// powerpc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:i32]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// sparc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:i64]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// x86_64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:i32]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// CHECK: call void @receives_twou16s([[ABI_TYPE]] [[ABI_VALUE]])
let x = TwoU16s { a: 1, b: 2 };
@@ -101,7 +336,7 @@ pub unsafe fn call_twou16s() {
// CHECK-LABEL: @return_twou16s
#[no_mangle]
pub unsafe fn return_twou16s() -> TwoU16s {
pub fn return_twou16s() -> TwoU16s {
// powerpc returns this struct via sret pointer, it doesn't use the cast ABI.
// powerpc64: [[RETVAL:%.+]] = alloca [4 x i8], align 2
@@ -112,34 +347,45 @@ pub unsafe fn return_twou16s() -> TwoU16s {
// aarch64: [[ABI_ALLOCA:%.+]] = alloca [8 x i8], align [[ABI_ALIGN:8]]
// loongarch64: [[ABI_ALLOCA:%.+]] = alloca [8 x i8], align [[ABI_ALIGN:8]]
// sparc64: [[ABI_ALLOCA:%.+]] = alloca [8 x i8], align [[ABI_ALIGN:8]]
// x86_64: [[ABI_ALLOCA:%.+]] = alloca [4 x i8], align [[ABI_ALIGN:4]]
// aarch64: [[RUST_ALLOCA:%.+]] = alloca [4 x i8], align [[RUST_ALIGN:2]]
// loongarch64: [[RUST_ALLOCA:%.+]] = alloca [4 x i8], align [[RUST_ALIGN:2]]
// sparc64: [[RUST_ALLOCA:%.+]] = alloca [4 x i8], align [[RUST_ALIGN:2]]
// x86_64: [[RUST_ALLOCA:%.+]] = alloca [4 x i8], align [[RUST_ALIGN:2]]
// aarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:i64]] @returns_twou16s()
// loongarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:i64]] @returns_twou16s()
// sparc64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:i64]] @returns_twou16s()
// x86_64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:i32]] @returns_twou16s()
// aarch64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// loongarch64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// sparc64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// x86_64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// aarch64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 4, i1 false)
// loongarch64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 4, i1 false)
// sparc64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 4, i1 false)
// x86_64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 4, i1 false)
returns_twou16s()
}
// CHECK-LABEL: @call_fiveu16s
#[no_mangle]
pub unsafe fn call_fiveu16s() {
pub fn call_fiveu16s() {
// CHECK: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
// CHECK: [[RUST_ALLOCA:%.+]] = alloca [10 x i8], align 2
// CHECK: call void @llvm.memcpy.{{.+}}(ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], i64 10, i1 false)
// CHECK: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// aarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// loongarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// powerpc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// sparc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// x86_64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ i64, i16 }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// CHECK: call void @receives_fiveu16s([[ABI_TYPE]] [[ABI_VALUE]])
let x = FiveU16s { a: 1, b: 2, c: 3, d: 4, e: 5 };
receives_fiveu16s(x);
@@ -148,7 +394,7 @@ pub unsafe fn call_fiveu16s() {
// CHECK-LABEL: @return_fiveu16s
// CHECK-SAME: (ptr {{.+}} sret([10 x i8]) align [[RUST_ALIGN:2]] dereferenceable(10) [[RET_PTR:%.+]])
#[no_mangle]
pub unsafe fn return_fiveu16s() -> FiveU16s {
pub fn return_fiveu16s() -> FiveU16s {
// powerpc returns this struct via sret pointer, it doesn't use the cast ABI.
// powerpc64: call void @returns_fiveu16s(ptr {{.+}} [[RET_PTR]])
@@ -158,24 +404,28 @@ pub unsafe fn return_fiveu16s() -> FiveU16s {
// aarch64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
// loongarch64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
// sparc64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
// x86_64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
// aarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:\[2 x i64\]]] @returns_fiveu16s()
// loongarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:\[2 x i64\]]] @returns_fiveu16s()
// sparc64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:\[2 x i64\]]] @returns_fiveu16s()
// x86_64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:{ i64, i16 }]] @returns_fiveu16s()
// aarch64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// loongarch64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// sparc64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// x86_64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// aarch64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RET_PTR]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 10, i1 false)
// loongarch64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RET_PTR]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 10, i1 false)
// sparc64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RET_PTR]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 10, i1 false)
// x86_64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RET_PTR]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 10, i1 false)
returns_fiveu16s()
}
// CHECK-LABEL: @call_doubledouble
#[no_mangle]
pub unsafe fn call_doubledouble() {
pub fn call_doubledouble() {
// CHECK: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
// CHECK: [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]]
@@ -186,6 +436,7 @@ pub unsafe fn call_doubledouble() {
// loongarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ double, double }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// powerpc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// sparc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ double, double }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// x86_64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ double, double }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// CHECK: call void @receives_doubledouble([[ABI_TYPE]] [[ABI_VALUE]])
let x = DoubleDouble { f: 1., g: 2. };
@@ -194,7 +445,7 @@ pub unsafe fn call_doubledouble() {
// CHECK-LABEL: @return_doubledouble
#[no_mangle]
pub unsafe fn return_doubledouble() -> DoubleDouble {
pub fn return_doubledouble() -> DoubleDouble {
// powerpc returns this struct via sret pointer, it doesn't use the cast ABI.
// powerpc64: [[RETVAL:%.+]] = alloca [16 x i8], align 8
@@ -205,22 +456,27 @@ pub unsafe fn return_doubledouble() -> DoubleDouble {
// aarch64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
// loongarch64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
// sparc64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
// x86_64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
// aarch64: [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]]
// loongarch64: [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]]
// sparc64: [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]]
// x86_64: [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]]
// aarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:\[2 x double\]]] @returns_doubledouble()
// loongarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:{ double, double }]] @returns_doubledouble()
// sparc64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:{ double, double }]] @returns_doubledouble()
// x86_64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:{ double, double }]] @returns_doubledouble()
// aarch64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// loongarch64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// sparc64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// x86_64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// aarch64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 16, i1 false)
// loongarch64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 16, i1 false)
// sparc64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 16, i1 false)
// x86_64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 16, i1 false)
returns_doubledouble()
}
@@ -229,27 +485,33 @@ pub unsafe fn return_doubledouble() -> DoubleDouble {
// aarch64-LABEL: @call_doublefloat
// loongarch64-LABEL: @call_doublefloat
// powerpc64-LABEL: @call_doublefloat
// x86_64-LABEL: @call_doublefloat
#[no_mangle]
pub unsafe fn call_doublefloat() {
pub fn call_doublefloat() {
// aarch64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
// loongarch64: [[ABI_ALLOCA:%.+]] = alloca [12 x i8], align [[ABI_ALIGN:8]]
// loongarch64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
// powerpc64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
// x86_64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
// aarch64: [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]]
// loongarch64: [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]]
// powerpc64: [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]]
// x86_64: [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]]
// aarch64: call void @llvm.memcpy.{{.+}}(ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], i64 16, i1 false)
// loongarch64: call void @llvm.memcpy.{{.+}}(ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], i64 12, i1 false)
// powerpc64: call void @llvm.memcpy.{{.+}}(ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], i64 16, i1 false)
// x86_64: call void @llvm.memcpy.{{.+}}(ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], i64 16, i1 false)
// aarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// loongarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ double, float }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// powerpc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// x86_64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ double, double }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// aarch64: call void @receives_doublefloat([[ABI_TYPE]] {{(inreg )?}}[[ABI_VALUE]])
// loongarch64: call void @receives_doublefloat([[ABI_TYPE]] {{(inreg )?}}[[ABI_VALUE]])
// powerpc64: call void @receives_doublefloat([[ABI_TYPE]] {{(inreg )?}}[[ABI_VALUE]])
// x86_64: call void @receives_doublefloat([[ABI_TYPE]] {{(inreg )?}}[[ABI_VALUE]])
let x = DoubleFloat { f: 1., g: 2. };
receives_doublefloat(x);
}
@@ -259,8 +521,9 @@ pub unsafe fn call_doublefloat() {
// aarch64-LABEL: @return_doublefloat
// loongarch64-LABEL: @return_doublefloat
// powerpc64-LABEL: @return_doublefloat
// x86_64-LABEL: @return_doublefloat
#[no_mangle]
pub unsafe fn return_doublefloat() -> DoubleFloat {
pub fn return_doublefloat() -> DoubleFloat {
// powerpc returns this struct via sret pointer, it doesn't use the cast ABI.
// powerpc64: [[RETVAL:%.+]] = alloca [16 x i8], align 8
@@ -269,18 +532,72 @@ pub unsafe fn return_doublefloat() -> DoubleFloat {
// The other targets copy the cast ABI type to an alloca.
// aarch64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
// loongarch64: [[ABI_ALLOCA:%.+]] = alloca [12 x i8], align [[ABI_ALIGN:8]]
// loongarch64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
// x86_64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
// aarch64: [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]]
// loongarch64: [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]]
// x86_64: [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]]
// aarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:\[2 x i64\]]] @returns_doublefloat()
// loongarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:{ double, float }]] @returns_doublefloat()
// x86_64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:{ double, double }]] @returns_doublefloat()
// aarch64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// loongarch64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// x86_64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// aarch64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 16, i1 false)
// loongarch64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 12, i1 false)
// x86_64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 16, i1 false)
returns_doublefloat()
}
// CHECK-LABEL: @call_three32s
#[no_mangle]
pub fn call_three32s() {
// CHECK: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
// CHECK: [[RUST_ALLOCA:%.+]] = alloca [12 x i8], align [[RUST_ALIGN:4]]
// CHECK: call void @llvm.memcpy.{{.+}}(ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], i64 12, i1 false)
// aarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// loongarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// powerpc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// sparc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// x86_64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ i64, i32 }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// CHECK: call void @receives_three32s([[ABI_TYPE]] [[ABI_VALUE]])
let x = Three32s { a: 1, b: 2, c: 3 };
receives_three32s(x);
}
// Regression test for #75839
// CHECK-LABEL: @return_three32s(
// CHECK-SAME: sret([12 x i8]) align [[RUST_ALIGN:4]] {{.*}}[[RUST_RETVAL:%.*]])
#[no_mangle]
pub fn return_three32s() -> Three32s {
// powerpc returns this struct via sret pointer, it doesn't use the cast ABI.
// powerpc64: call void @returns_three32s(ptr {{.+}} [[RUST_RETVAL]])
// aarch64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
// loongarch64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
// sparc64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
// x86_64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
// aarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:\[2 x i64\]]] @returns_three32s()
// loongarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:\[2 x i64\]]] @returns_three32s()
// sparc64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:\[2 x i64\]]] @returns_three32s()
// x86_64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:{ i64, i32 }]] @returns_three32s()
// aarch64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// loongarch64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// sparc64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// x86_64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// aarch64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_RETVAL]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 12, i1 false)
// loongarch64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_RETVAL]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 12, i1 false)
// sparc64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_RETVAL]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 12, i1 false)
// x86_64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_RETVAL]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 12, i1 false)
returns_three32s()
}


@@ -1,5 +1,5 @@
//@ revisions: linux apple
//@ compile-flags: -C opt-level=0 -C no-prepopulate-passes
//@ compile-flags: -C opt-level=0 -C no-prepopulate-passes -C passes=lint
//@[linux] compile-flags: --target x86_64-unknown-linux-gnu
//@[linux] needs-llvm-components: x86
@@ -36,7 +36,7 @@ extern "C" {
pub fn test() {
let s = S { f1: 1, f2: 2, f3: 3 };
unsafe {
// CHECK: [[ALLOCA:%.+]] = alloca [12 x i8], align 8
// CHECK: [[ALLOCA:%.+]] = alloca [16 x i8], align 8
// CHECK: [[LOAD:%.+]] = load { i64, i32 }, ptr [[ALLOCA]], align 8
// CHECK: call void @foo({ i64, i32 } [[LOAD]])
foo(s);