Mirror of https://github.com/rust-lang/rust.git, synced 2024-11-22 06:44:35 +00:00
Rollup merge of #128537 - Jamesbarford:118980-const-vector, r=RalfJung,nikic
const vector passed through to codegen

This allows constant vectors using a repr(simd) type to be propagated through to the backend by reusing the functionality used to do a similar thing for the simd_shuffle intrinsic in #118209.

r? RalfJung
This commit is contained in: commit aea5087964
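For orientation, here is the caller-side effect of the change, condensed from the codegen test added at the end of this diff (nightly-only features; the extern "unadjusted" declaration exists only so the SIMD value is passed by value). With this PR the const { ... } vector reaches the backend as an immediate <2 x i8> constant instead of being built up in memory:

#![crate_type = "lib"]
#![feature(repr_simd, simd_ffi, abi_unadjusted)]
#![allow(non_camel_case_types)]

#[repr(simd)]
#[derive(Clone)]
pub struct i8x2(i8, i8);

extern "unadjusted" {
    fn test_i8x2(a: i8x2);
}

pub fn do_call() {
    unsafe {
        // After this change the argument is emitted as `<2 x i8> <i8 32, i8 64>`.
        test_i8x2(const { i8x2(32, 64) });
    }
}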
@@ -160,6 +160,11 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
         self.context.new_struct_constructor(None, struct_type.as_type(), None, values)
     }
 
+    fn const_vector(&self, values: &[RValue<'gcc>]) -> RValue<'gcc> {
+        let typ = self.type_vector(values[0].get_type(), values.len() as u64);
+        self.context.new_rvalue_from_vector(None, typ, values)
+    }
+
     fn const_to_opt_uint(&self, _v: RValue<'gcc>) -> Option<u64> {
         // TODO(antoyo)
         None
@@ -97,11 +97,6 @@ impl<'ll> CodegenCx<'ll, '_> {
         unsafe { llvm::LLVMConstArray2(ty, elts.as_ptr(), len) }
     }
 
-    pub fn const_vector(&self, elts: &[&'ll Value]) -> &'ll Value {
-        let len = c_uint::try_from(elts.len()).expect("LLVMConstVector elements len overflow");
-        unsafe { llvm::LLVMConstVector(elts.as_ptr(), len) }
-    }
-
     pub fn const_bytes(&self, bytes: &[u8]) -> &'ll Value {
         bytes_in_context(self.llcx, bytes)
     }
@@ -221,6 +216,11 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         struct_in_context(self.llcx, elts, packed)
     }
 
+    fn const_vector(&self, elts: &[&'ll Value]) -> &'ll Value {
+        let len = c_uint::try_from(elts.len()).expect("LLVMConstVector elements len overflow");
+        unsafe { llvm::LLVMConstVector(elts.as_ptr(), len) }
+    }
+
     fn const_to_opt_uint(&self, v: &'ll Value) -> Option<u64> {
         try_as_const_integral(v).and_then(|v| unsafe {
             let mut i = 0u64;
@@ -923,8 +923,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 // third argument must be constant. This is
                 // checked by the type-checker.
                 if i == 2 && intrinsic.name == sym::simd_shuffle {
+                    // FIXME: the simd_shuffle argument is actually an array,
+                    // not a vector, so we need this special hack to make sure
+                    // it is passed as an immediate. We should pass the
+                    // shuffle indices as a vector instead to avoid this hack.
                     if let mir::Operand::Constant(constant) = &arg.node {
-                        let (llval, ty) = self.simd_shuffle_indices(bx, constant);
+                        let (llval, ty) = self.immediate_const_vector(bx, constant);
                         return OperandRef {
                             val: Immediate(llval),
                             layout: bx.layout_of(ty),
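For reference, the array hack the FIXME above describes comes from simd_shuffle's current shape: the index operand is an ordinary [u32; N] constant, not a SIMD vector. A rough, nightly-only sketch of such a call follows (assuming the core_intrinsics path std::intrinsics::simd::simd_shuffle; this intrinsic plumbing is an assumption for illustration, not part of this diff):

#![feature(repr_simd, core_intrinsics)]
#![allow(non_camel_case_types)]

use std::intrinsics::simd::simd_shuffle;

#[repr(simd)]
#[derive(Copy, Clone)]
struct i8x4([i8; 4]);

fn main() {
    let a = i8x4([1, 2, 3, 4]);
    let b = i8x4([5, 6, 7, 8]);
    // The index operand is a constant array, not a SIMD vector; this is what
    // forces the array-as-immediate (`const_struct`) path mentioned above.
    const IDX: [u32; 4] = [0, 2, 4, 6];
    let shuffled: i8x4 = unsafe { simd_shuffle(a, b, IDX) };
    let out: [i8; 4] = unsafe { std::mem::transmute(shuffled) };
    assert_eq!(out, [1, 3, 5, 7]);
}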
@@ -1,6 +1,6 @@
 use rustc_middle::mir::interpret::ErrorHandled;
 use rustc_middle::ty::layout::HasTyCtxt;
-use rustc_middle::ty::{self, Ty};
+use rustc_middle::ty::{self, Ty, ValTree};
 use rustc_middle::{bug, mir, span_bug};
 use rustc_target::abi::Abi;
 
@@ -28,7 +28,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             .expect("erroneous constant missed by mono item collection")
     }
 
-    /// This is a convenience helper for `simd_shuffle_indices`. It has the precondition
+    /// This is a convenience helper for `immediate_const_vector`. It has the precondition
     /// that the given `constant` is an `Const::Unevaluated` and must be convertible to
     /// a `ValTree`. If you want a more general version of this, talk to `wg-const-eval` on zulip.
     ///
@@ -59,23 +59,42 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         self.cx.tcx().const_eval_resolve_for_typeck(ty::ParamEnv::reveal_all(), uv, constant.span)
     }
 
-    /// process constant containing SIMD shuffle indices
-    pub fn simd_shuffle_indices(
+    /// process constant containing SIMD shuffle indices & constant vectors
+    pub fn immediate_const_vector(
         &mut self,
         bx: &Bx,
         constant: &mir::ConstOperand<'tcx>,
     ) -> (Bx::Value, Ty<'tcx>) {
         let ty = self.monomorphize(constant.ty());
+        let ty_is_simd = ty.is_simd();
+        // FIXME: ideally we'd assert that this is a SIMD type, but simd_shuffle
+        // in its current form relies on a regular array being passed as an
+        // immediate argument. This hack can be removed once that is fixed.
+        let field_ty = if ty_is_simd {
+            ty.simd_size_and_type(bx.tcx()).1
+        } else {
+            ty.builtin_index().unwrap()
+        };
+
         let val = self
             .eval_unevaluated_mir_constant_to_valtree(constant)
             .ok()
             .map(|x| x.ok())
             .flatten()
             .map(|val| {
-                let field_ty = ty.builtin_index().unwrap();
-                let values: Vec<_> = val
-                    .unwrap_branch()
-                    .iter()
+                // Depending on whether this is a SIMD type with an array field
+                // or a type with many fields (one for each elements), the valtree
+                // is either a single branch with N children, or a root node
+                // with exactly one child which then in turn has many children.
+                // So we look at the first child to determine whether it is a
+                // leaf or whether we have to go one more layer down.
+                let branch_or_leaf = val.unwrap_branch();
+                let first = branch_or_leaf.get(0).unwrap();
+                let field_iter = match first {
+                    ValTree::Branch(_) => first.unwrap_branch().iter(),
+                    ValTree::Leaf(_) => branch_or_leaf.iter(),
+                };
+                let values: Vec<_> = field_iter
                     .map(|field| {
                         if let Some(prim) = field.try_to_scalar() {
                             let layout = bx.layout_of(field_ty);
@@ -84,11 +103,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                             };
                             bx.scalar_to_backend(prim, scalar, bx.immediate_backend_type(layout))
                         } else {
-                            bug!("simd shuffle field {:?}", field)
+                            bug!("field is not a scalar {:?}", field)
                         }
                     })
                     .collect();
-                bx.const_struct(&values, false)
+                if ty_is_simd { bx.const_vector(&values) } else { bx.const_struct(&values, false) }
             })
             .unwrap_or_else(|| {
                 bx.tcx().dcx().emit_err(errors::ShuffleIndicesEvaluation { span: constant.span });
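The branch-or-leaf probing above is the subtle part of the renamed immediate_const_vector. The standalone sketch below uses a hypothetical Tree enum as a stand-in for rustc's ValTree (not the real API) to show why inspecting only the first child is enough to tell the two valtree shapes apart:

// `Tree` is a simplified stand-in for rustc's `ValTree`, for illustration only.
enum Tree {
    Leaf(u128),
    Branch(Vec<Tree>),
}

// Mirrors the "look at the first child" probing: if the first child is itself a
// branch, the element leaves live one level down (struct-wrapping-array form);
// otherwise the root's children are already the element leaves (one field per lane).
fn element_leaves(root: &Tree) -> &[Tree] {
    let children = match root {
        Tree::Branch(children) => children.as_slice(),
        Tree::Leaf(_) => panic!("a SIMD constant is a branch at the root"),
    };
    match children.first() {
        Some(Tree::Branch(inner)) => inner.as_slice(),
        _ => children,
    }
}

fn main() {
    // `struct i8x2(i8, i8)`: the root branch holds the two leaves directly.
    let per_lane = Tree::Branch(vec![Tree::Leaf(32), Tree::Leaf(64)]);
    // `struct i8x2_arr([i8; 2])`: the root has one child, the array branch.
    let wrapped_array = Tree::Branch(vec![Tree::Branch(vec![Tree::Leaf(32), Tree::Leaf(64)])]);

    assert_eq!(element_leaves(&per_lane).len(), 2);
    assert_eq!(element_leaves(&wrapped_array).len(), 2);
}

Either way the result is the same flat list of scalar leaves, which then feeds const_vector (for SIMD types) or const_struct (for the simd_shuffle array hack).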
@@ -635,7 +635,24 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 self.codegen_consume(bx, place.as_ref())
             }
 
-            mir::Operand::Constant(ref constant) => self.eval_mir_constant_to_operand(bx, constant),
+            mir::Operand::Constant(ref constant) => {
+                let constant_ty = self.monomorphize(constant.ty());
+                // Most SIMD vector constants should be passed as immediates.
+                // (In particular, some intrinsics really rely on this.)
+                if constant_ty.is_simd() {
+                    // However, some SIMD types do not actually use the vector ABI
+                    // (in particular, packed SIMD types do not). Ensure we exclude those.
+                    let layout = bx.layout_of(constant_ty);
+                    if let Abi::Vector { .. } = layout.abi {
+                        let (llval, ty) = self.immediate_const_vector(bx, constant);
+                        return OperandRef {
+                            val: OperandValue::Immediate(llval),
+                            layout: bx.layout_of(ty),
+                        };
+                    }
+                }
+                self.eval_mir_constant_to_operand(bx, constant)
+            }
         }
     }
 }
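Why the check is on Abi::Vector rather than just is_simd(): a #[repr(simd, packed)] type keeps element alignment and an unpadded size, so its layout is a plain aggregate rather than a backend vector. A small nightly sketch of that difference (the type names are illustrative, and the numbers in the comments are what a typical x86_64 target reports):

#![feature(repr_simd)]

use std::mem::{align_of, size_of};

#[repr(simd, packed)]
struct PackedSimd<T, const N: usize>([T; N]);

#[repr(simd)]
struct FullSimd<T, const N: usize>([T; N]);

fn main() {
    // Packed: 3 * 4 = 12 bytes with element (4-byte) alignment -> aggregate layout, not Abi::Vector.
    println!(
        "packed: size={} align={}",
        size_of::<PackedSimd<i32, 3>>(),
        align_of::<PackedSimd<i32, 3>>()
    );
    // Non-packed: vector layout (16 bytes, 16-byte alignment on typical x86_64 targets).
    println!(
        "full:   size={} align={}",
        size_of::<FullSimd<i32, 4>>(),
        align_of::<FullSimd<i32, 4>>()
    );
}

This is exactly the case the test_simd_unaligned(Simd<i32, 3>) function in the new codegen test pins down.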
@@ -30,6 +30,7 @@ pub trait ConstMethods<'tcx>: BackendTypes {
 
     fn const_str(&self, s: &str) -> (Self::Value, Self::Value);
     fn const_struct(&self, elts: &[Self::Value], packed: bool) -> Self::Value;
+    fn const_vector(&self, elts: &[Self::Value]) -> Self::Value;
 
     fn const_to_opt_uint(&self, v: Self::Value) -> Option<u64>;
     fn const_to_opt_u128(&self, v: Self::Value, sign_ext: bool) -> Option<u128>;
tests/codegen/const-vector.rs (new file, 107 lines)

//@ compile-flags: -C no-prepopulate-passes -Copt-level=0

// This test checks that constants of SIMD type are passed as immediate vectors.
// We ensure that both vector representations (struct with fields and struct wrapping array) work.
#![crate_type = "lib"]
#![feature(abi_unadjusted)]
#![feature(const_trait_impl)]
#![feature(repr_simd)]
#![feature(rustc_attrs)]
#![feature(simd_ffi)]
#![allow(non_camel_case_types)]

// Setting up structs that can be used as const vectors
#[repr(simd)]
#[derive(Clone)]
pub struct i8x2(i8, i8);

#[repr(simd)]
#[derive(Clone)]
pub struct i8x2_arr([i8; 2]);

#[repr(simd)]
#[derive(Clone)]
pub struct f32x2(f32, f32);

#[repr(simd)]
#[derive(Clone)]
pub struct f32x2_arr([f32; 2]);

#[repr(simd, packed)]
#[derive(Copy, Clone)]
pub struct Simd<T, const N: usize>([T; N]);

// The following functions are required for the tests to ensure
// that they are called with a const vector

extern "unadjusted" {
    #[no_mangle]
    fn test_i8x2(a: i8x2);
}

extern "unadjusted" {
    #[no_mangle]
    fn test_i8x2_two_args(a: i8x2, b: i8x2);
}

extern "unadjusted" {
    #[no_mangle]
    fn test_i8x2_mixed_args(a: i8x2, c: i32, b: i8x2);
}

extern "unadjusted" {
    #[no_mangle]
    fn test_i8x2_arr(a: i8x2_arr);
}

extern "unadjusted" {
    #[no_mangle]
    fn test_f32x2(a: f32x2);
}

extern "unadjusted" {
    #[no_mangle]
    fn test_f32x2_arr(a: f32x2_arr);
}

extern "unadjusted" {
    #[no_mangle]
    fn test_simd(a: Simd<i32, 4>);
}

extern "unadjusted" {
    #[no_mangle]
    fn test_simd_unaligned(a: Simd<i32, 3>);
}

// Ensure the packed variant of the simd struct does not become a const vector
// if the size is not a power of 2
// CHECK: %"Simd<i32, 3>" = type { [3 x i32] }

pub fn do_call() {
    unsafe {
        // CHECK: call void @test_i8x2(<2 x i8> <i8 32, i8 64>
        test_i8x2(const { i8x2(32, 64) });

        // CHECK: call void @test_i8x2_two_args(<2 x i8> <i8 32, i8 64>, <2 x i8> <i8 8, i8 16>
        test_i8x2_two_args(const { i8x2(32, 64) }, const { i8x2(8, 16) });

        // CHECK: call void @test_i8x2_mixed_args(<2 x i8> <i8 32, i8 64>, i32 43, <2 x i8> <i8 8, i8 16>
        test_i8x2_mixed_args(const { i8x2(32, 64) }, 43, const { i8x2(8, 16) });

        // CHECK: call void @test_i8x2_arr(<2 x i8> <i8 32, i8 64>
        test_i8x2_arr(const { i8x2_arr([32, 64]) });

        // CHECK: call void @test_f32x2(<2 x float> <float 0x3FD47AE140000000, float 0x3FE47AE140000000>
        test_f32x2(const { f32x2(0.32, 0.64) });

        // CHECK: void @test_f32x2_arr(<2 x float> <float 0x3FD47AE140000000, float 0x3FE47AE140000000>
        test_f32x2_arr(const { f32x2_arr([0.32, 0.64]) });

        // CHECK: call void @test_simd(<4 x i32> <i32 2, i32 4, i32 6, i32 8>
        test_simd(const { Simd::<i32, 4>([2, 4, 6, 8]) });

        // CHECK: call void @test_simd_unaligned(%"Simd<i32, 3>" %1
        test_simd_unaligned(const { Simd::<i32, 3>([2, 4, 6]) });
    }
}