Rollup merge of #125311 - calebzulawski:repr-packed-simd-intrinsics, r=workingjubilee

Make repr(packed) vectors work with SIMD intrinsics

In #117116 I fixed `#[repr(packed, simd)]` by doing the expected thing and removing padding from the layout.  This should be the last step in providing a solution to rust-lang/portable-simd#319
This commit is contained in:
Jubilee 2024-06-02 05:06:47 -07:00 committed by GitHub
commit ca9dd62c05
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
3 changed files with 103 additions and 16 deletions

View File

@ -482,8 +482,60 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
}
_ if name.as_str().starts_with("simd_") => {
// Unpack non-power-of-2 #[repr(packed, simd)] arguments.
// This gives them the expected layout of a regular #[repr(simd)] vector.
let mut loaded_args = Vec::new();
for (ty, arg) in arg_tys.iter().zip(args) {
loaded_args.push(
// #[repr(packed, simd)] vectors are passed like arrays (as references,
// with reduced alignment and no padding) rather than as immediates.
// We can use a vector load to fix the layout and turn the argument
// into an immediate.
if ty.is_simd()
&& let OperandValue::Ref(place) = arg.val
{
let (size, elem_ty) = ty.simd_size_and_type(self.tcx());
let elem_ll_ty = match elem_ty.kind() {
ty::Float(f) => self.type_float_from_ty(*f),
ty::Int(i) => self.type_int_from_ty(*i),
ty::Uint(u) => self.type_uint_from_ty(*u),
ty::RawPtr(_, _) => self.type_ptr(),
_ => unreachable!(),
};
let loaded =
self.load_from_place(self.type_vector(elem_ll_ty, size), place);
OperandRef::from_immediate_or_packed_pair(self, loaded, arg.layout)
} else {
*arg
},
);
}
let llret_ty = if ret_ty.is_simd()
&& let abi::Abi::Aggregate { .. } = self.layout_of(ret_ty).layout.abi
{
let (size, elem_ty) = ret_ty.simd_size_and_type(self.tcx());
let elem_ll_ty = match elem_ty.kind() {
ty::Float(f) => self.type_float_from_ty(*f),
ty::Int(i) => self.type_int_from_ty(*i),
ty::Uint(u) => self.type_uint_from_ty(*u),
ty::RawPtr(_, _) => self.type_ptr(),
_ => unreachable!(),
};
self.type_vector(elem_ll_ty, size)
} else {
llret_ty
};
match generic_simd_intrinsic(
    self,
    name,
    callee_ty,
    fn_args,
    &loaded_args,
    ret_ty,
    llret_ty,
    span,
) {
    Ok(llval) => llval,
    Err(()) => return Ok(()),

View File

@ -0,0 +1,44 @@
//@ compile-flags: -Cno-prepopulate-passes
#![crate_type = "lib"]
#![feature(repr_simd, core_intrinsics)]
// make sure that codegen emits correctly-aligned loads and stores for repr(packed, simd) types
// the alignment of a load should be no less than T, and no more than the size of the vector type
use std::intrinsics::simd as intrinsics;
#[derive(Copy, Clone)]
#[repr(packed, simd)]
struct f32x3([f32; 3]);
#[derive(Copy, Clone)]
#[repr(packed, simd)]
struct f32x4([f32; 4]);
// CHECK-LABEL: load_f32x3
#[no_mangle]
pub fn load_f32x3(floats: &f32x3) -> f32x3 {
// FIXME: Is a memcpy really the best we can do?
// CHECK: @llvm.memcpy.{{.*}}ptr align 4 {{.*}}ptr align 4
*floats
}
// CHECK-LABEL: load_f32x4
#[no_mangle]
pub fn load_f32x4(floats: &f32x4) -> f32x4 {
// CHECK: load <4 x float>, ptr %{{[a-z0-9_]*}}, align {{4|8|16}}
*floats
}
// CHECK-LABEL: add_f32x3
#[no_mangle]
pub fn add_f32x3(x: f32x3, y: f32x3) -> f32x3 {
// CHECK: load <3 x float>, ptr %{{[a-z0-9_]*}}, align 4
unsafe { intrinsics::simd_add(x, y) }
}
// CHECK-LABEL: add_f32x4
#[no_mangle]
pub fn add_f32x4(x: f32x4, y: f32x4) -> f32x4 {
// CHECK: load <4 x float>, ptr %{{[a-z0-9_]*}}, align {{4|8|16}}
unsafe { intrinsics::simd_add(x, y) }
}

View File

@ -6,9 +6,6 @@
#[repr(simd, packed)]
struct Simd<T, const N: usize>([T; N]);
#[repr(simd)]
struct FullSimd<T, const N: usize>([T; N]);
fn check_size_align<T, const N: usize>() {
    use std::mem;
    assert_eq!(mem::size_of::<Simd<T, N>>(), mem::size_of::<[T; N]>());
@ -39,21 +36,15 @@ fn main() {
    check_ty::<f64>();
    unsafe {
        // powers-of-two have no padding and have the same layout as #[repr(simd)]
        let x: Simd<f64, 4> =
            simd_add(Simd::<f64, 4>([0., 1., 2., 3.]), Simd::<f64, 4>([2., 2., 2., 2.]));
        assert_eq!(std::mem::transmute::<_, [f64; 4]>(x), [2., 3., 4., 5.]);
        // non-powers-of-two should have padding (which is removed by #[repr(packed)]),
        // but the intrinsic handles it
        let x: Simd<f64, 3> = simd_add(Simd::<f64, 3>([0., 1., 2.]), Simd::<f64, 3>([2., 2., 2.]));
        let arr: [f64; 3] = x.0;
        assert_eq!(arr, [2., 3., 4.]);
tmp.assume_init()
}
}
let x: FullSimd<f64, 3> =
simd_add(load(Simd::<f64, 3>([0., 1., 2.])), load(Simd::<f64, 3>([2., 2., 2.])));
assert_eq!(x.0, [2., 3., 4.]);
    }
}