mirror of https://github.com/rust-lang/rust.git
synced 2024-11-29 18:23:49 +00:00
ccd1bc2ad1
LLVM and Cranelift disagree about how to return values that don't fit in the registers designated for return values. LLVM will force the entire return value to be passed by return area pointer, while Cranelift will look at each IR-level return value independently and decide whether to pass it in a register, which can result in the return value being passed partially in registers and partially through a return area pointer. While Cranelift may need to be fixed, as the LLVM behavior is generally more correct with respect to the surface language, forcing this behavior in rustc itself makes it easier for other backends to conform to the Rust ABI, and for the C ABI rustc already handles this case anyway.

In addition, LLVM's decision to pass the return value in registers or through a return area pointer depends on how exactly the return type is lowered to an LLVM IR type. For example, `Option<u128>` can be lowered as `{ i128, i128 }`, in which case the x86_64 backend would use a return area pointer, or it could be lowered as `{ i32, i128 }`, in which case the x86_64 backend would pass it in registers by taking advantage of an LLVM ABI extension that allows using three registers for the x86_64 SysV calling convention rather than the officially specified two.

This adjustment is only necessary for the Rust ABI: for other ABIs, the calling convention implementations in rustc_target already ensure that any return value which doesn't fit in the available return registers is passed in the right way for the current target.
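As a minimal sketch of the case this change targets (the function name below is just an illustration, not something from the commit): on the x86_64 SysV calling convention only two integer registers are available for return values, so a 32-byte value such as `Option<u128>` does not fit, and with this change the Rust ABI always returns it through a return area pointer rather than partially in registers.

```rust
// Illustration only: `Option<u128>` is 32 bytes (u128 has no niche, so an
// explicit discriminant plus a 16-byte-aligned u128 payload), which exceeds
// the two 64-bit integer return registers of the x86_64 SysV calling
// convention. After this change rustc itself marks such a Rust-ABI return
// value as indirect, so every backend returns it via a return area (sret)
// pointer.
#[no_mangle]
pub fn checked_sum(a: u128, b: u128) -> Option<u128> {
    a.checked_add(b)
}
```

Forcing the indirect return in rustc makes the result independent of how the type happens to be lowered to LLVM IR and gives every backend the same Rust ABI to implement.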
99 lines
3.4 KiB
Rust
//@ only-x86_64
//@ compile-flags: -O -C no-prepopulate-passes --crate-type=lib

// On LLVM 17 and earlier LLVM's own data layout specifies that i128 has 8 byte alignment,
// while rustc wants it to have 16 byte alignment. This test checks that we handle this
// correctly.

// CHECK: %ScalarPair = type { i32, [3 x i32], i128 }

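// Note: the `[3 x i32]` in the checked type is padding. With `#[repr(C)]`, `a: i32`
// occupies bytes 0..4 and `b: i128` must start at a 16-byte-aligned offset, so 12 bytes
// of padding are inserted before it, making `ScalarPair` 32 bytes in total.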
#![feature(core_intrinsics)]

#[repr(C)]
#[derive(Clone, Copy)]
pub struct ScalarPair {
    a: i32,
    b: i128,
}

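// `ScalarPair` is 32 bytes, which does not fit in the two integer return registers of
// the x86_64 SysV calling convention, so the Rust ABI returns it indirectly through the
// `sret` return area pointer checked below.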
#[no_mangle]
pub fn load(x: &ScalarPair) -> ScalarPair {
    // CHECK-LABEL: @load(
    // CHECK-SAME: sret([32 x i8]) align 16 dereferenceable(32) %_0,
    // CHECK-SAME: align 16 dereferenceable(32) %x
    // CHECK: [[A:%.*]] = load i32, ptr %x, align 16
    // CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr %x, i64 16
    // CHECK-NEXT: [[B:%.*]] = load i128, ptr [[GEP]], align 16
    // CHECK-NEXT: store i32 [[A]], ptr %_0, align 16
    // CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr %_0, i64 16
    // CHECK-NEXT: store i128 [[B]], ptr [[GEP]], align 16
    // CHECK-NEXT: ret void
    *x
}

#[no_mangle]
pub fn store(x: &mut ScalarPair) {
    // CHECK-LABEL: @store(
    // CHECK-SAME: align 16 dereferenceable(32) %x
    // CHECK: store i32 1, ptr %x, align 16
    // CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr %x, i64 16
    // CHECK-NEXT: store i128 2, ptr [[GEP]], align 16
    *x = ScalarPair { a: 1, b: 2 };
}

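// A local of type `ScalarPair` gets a 32-byte stack slot with the full 16-byte
// alignment that rustc requires for `i128`, regardless of LLVM's own data layout.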
#[no_mangle]
pub fn alloca() {
    // CHECK-LABEL: @alloca(
    // CHECK: [[X:%.*]] = alloca [32 x i8], align 16
    // CHECK: store i32 1, ptr %x, align 16
    // CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr %x, i64 16
    // CHECK-NEXT: store i128 2, ptr [[GEP]], align 16
    let mut x = ScalarPair { a: 1, b: 2 };
    store(&mut x);
}

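// The volatile load must read the whole value as a single `%ScalarPair` operation
// rather than being split into separate per-field loads.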
#[no_mangle]
pub fn load_volatile(x: &ScalarPair) -> ScalarPair {
    // CHECK-LABEL: @load_volatile(
    // CHECK-SAME: sret([32 x i8]) align 16 dereferenceable(32) %_0,
    // CHECK-SAME: align 16 dereferenceable(32) %x
    // CHECK: [[LOAD:%.*]] = load volatile %ScalarPair, ptr %x, align 16
    // CHECK-NEXT: store %ScalarPair [[LOAD]], ptr %_0, align 16
    // CHECK-NEXT: ret void
    unsafe { std::intrinsics::volatile_load(x) }
}

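// `(MaybeUninit<i128>, i128)` has the same 32-byte size and 16-byte alignment as
// `ScalarPair`, so the transmute only reinterprets the value: the argument still
// arrives as separate `i32`/`i128` scalar components and the result is written
// through the `sret` return area pointer.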
#[no_mangle]
pub fn transmute(x: ScalarPair) -> (std::mem::MaybeUninit<i128>, i128) {
    // CHECK-LABEL: @transmute(
    // CHECK-SAME: sret([32 x i8]) align 16 dereferenceable(32) %_0,
    // CHECK-SAME: i32 noundef %x.0, i128 noundef %x.1
    // CHECK: store i32 %x.0, ptr %_0, align 16
    // CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr %_0, i64 16
    // CHECK-NEXT: store i128 %x.1, ptr [[GEP]], align 16
    // CHECK-NEXT: ret void
    unsafe { std::mem::transmute(x) }
}

#[repr(C)]
#[derive(Clone, Copy)]
pub struct Struct {
    a: i32,
    b: i32,
    c: i128,
}

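// `Struct` places `b: i32` at offset 4 and `c: i128` at the 16-byte-aligned offset 16.
// The constant is assembled in a 16-byte-aligned stack temporary and then copied into
// `*x` with a 32-byte memcpy, as checked below.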
#[no_mangle]
pub fn store_struct(x: &mut Struct) {
    // CHECK-LABEL: @store_struct(
    // CHECK-SAME: align 16 dereferenceable(32) %x
    // CHECK: [[TMP:%.*]] = alloca [32 x i8], align 16
    // CHECK: store i32 1, ptr [[TMP]], align 16
    // CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds i8, ptr [[TMP]], i64 4
    // CHECK-NEXT: store i32 2, ptr [[GEP1]], align 4
    // CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds i8, ptr [[TMP]], i64 16
    // CHECK-NEXT: store i128 3, ptr [[GEP2]], align 16
    // CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 %x, ptr align 16 [[TMP]], i64 32, i1 false)
    *x = Struct { a: 1, b: 2, c: 3 };
}