Make repr(packed) vectors work with SIMD intrinsics

This commit is contained in:
Caleb Zulawski 2024-05-20 01:09:29 -04:00
parent 959a67a7f2
commit 86158f581d
2 changed files with 52 additions and 15 deletions

View File

@@ -480,8 +480,55 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
} }
_ if name.as_str().starts_with("simd_") => {
// Unpack non-power-of-2 #[repr(packed)]
let mut loaded_args = Vec::new();
for (ty, arg) in arg_tys.iter().zip(args) {
loaded_args.push(
if ty.is_simd()
&& let OperandValue::Ref(place) = arg.val
{
let (size, elem_ty) = ty.simd_size_and_type(self.tcx());
let elem_ll_ty = match elem_ty.kind() {
ty::Float(f) => self.type_float_from_ty(*f),
ty::Int(i) => self.type_int_from_ty(*i),
ty::Uint(u) => self.type_uint_from_ty(*u),
ty::RawPtr(_, _) => self.type_ptr(),
_ => unreachable!(),
};
let loaded =
self.load_from_place(self.type_vector(elem_ll_ty, size), place);
OperandRef::from_immediate_or_packed_pair(self, loaded, arg.layout)
} else {
*arg
},
);
}
let llret_ty = if ret_ty.is_simd()
&& let abi::Abi::Aggregate { .. } = self.layout_of(ret_ty).layout.abi
{
let (size, elem_ty) = ret_ty.simd_size_and_type(self.tcx());
let elem_ll_ty = match elem_ty.kind() {
ty::Float(f) => self.type_float_from_ty(*f),
ty::Int(i) => self.type_int_from_ty(*i),
ty::Uint(u) => self.type_uint_from_ty(*u),
ty::RawPtr(_, _) => self.type_ptr(),
_ => unreachable!(),
};
self.type_vector(elem_ll_ty, size)
} else {
llret_ty
};
match generic_simd_intrinsic(
self, name, callee_ty, fn_args, args, ret_ty, llret_ty, span,
self,
name,
callee_ty,
fn_args,
&loaded_args,
ret_ty,
llret_ty,
span,
) {
Ok(llval) => llval,
Err(()) => return Ok(()),

View File

@@ -6,9 +6,6 @@
#[repr(simd, packed)]
struct Simd<T, const N: usize>([T; N]);
#[repr(simd)]
struct FullSimd<T, const N: usize>([T; N]);
fn check_size_align<T, const N: usize>() {
use std::mem;
assert_eq!(mem::size_of::<Simd<T, N>>(), mem::size_of::<[T; N]>());
@@ -44,16 +41,9 @@ fn main() {
simd_add(Simd::<f64, 4>([0., 1., 2., 3.]), Simd::<f64, 4>([2., 2., 2., 2.]));
assert_eq!(std::mem::transmute::<_, [f64; 4]>(x), [2., 3., 4., 5.]);
// non-powers-of-two have padding and need to be expanded to full vectors
// non-powers-of-two have padding and lesser alignment, but the intrinsic handles it
fn load<T, const N: usize>(v: Simd<T, N>) -> FullSimd<T, N> {
let x: Simd<f64, 3> = simd_add(Simd::<f64, 3>([0., 1., 2.]), Simd::<f64, 3>([2., 2., 2.]));
unsafe {
let arr: [f64; 3] = x.0;
let mut tmp = core::mem::MaybeUninit::<FullSimd<T, N>>::uninit();
assert_eq!(arr, [2., 3., 4.]);
std::ptr::copy_nonoverlapping(&v as *const _, tmp.as_mut_ptr().cast(), 1);
tmp.assume_init()
}
}
let x: FullSimd<f64, 3> =
simd_add(load(Simd::<f64, 3>([0., 1., 2.])), load(Simd::<f64, 3>([2., 2., 2.])));
assert_eq!(x.0, [2., 3., 4.]);
}
}