rust/tests/assembly/simd-intrinsic-mask-reduce.rs
Jörn Horstmann e91f937779 Add tests for the generated assembly of mask-related SIMD instructions.
The tests show that the code generation currently uses the least
significant bits of <iX x N> vector masks when converting to <i1 x N>.
This leads to an additional left-shift operation in the x86 assembly,
since mask operations on x86 are based on the most significant bit. On
aarch64, the left shift is followed by a comparison against zero, which
repeats the sign bit across the whole lane.

The exception is simd_bitmask, which does not introduce an unneeded
shift because its code generation already shifts before truncating.
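
As a point of reference, the sketch below shows the same reductions
written against the portable-simd API. It is a minimal sketch, not part
of the no_core test in this file: the function names are made up, it
assumes a nightly compiler with portable_simd enabled, and Mask::all /
Mask::any are expected to bottom out in the same simd_reduce_all and
simd_reduce_any intrinsics that the test exercises directly.

    #![feature(portable_simd)]
    use core::simd::mask8x16;

    // Returns true only if every lane of the mask is set.
    pub fn all_lanes_set(m: mask8x16) -> bool {
        m.all()
    }

    // Returns true if at least one lane of the mask is set.
    pub fn any_lane_set(m: mask8x16) -> bool {
        m.any()
    }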

By using the "C" calling convention, the tests should be stable with
respect to changes in register allocation, but it is possible that
future LLVM updates will require updating some of the checks.

This additional instruction would be removed by the fix in #104693,
which uses the most significant bit for all mask operations.
2024-03-12 08:52:54 +01:00

// verify that simd mask reductions do not introduce additional bit shift operations
//@ revisions: x86 aarch64
//@ [x86] compile-flags: --target=x86_64-unknown-linux-gnu -C llvm-args=-x86-asm-syntax=intel
//@ [x86] needs-llvm-components: x86
//@ [aarch64] compile-flags: --target=aarch64-unknown-linux-gnu
//@ [aarch64] needs-llvm-components: aarch64
//@ [aarch64] min-llvm-version: 18.0
//@ assembly-output: emit-asm
//@ compile-flags: --crate-type=lib -O

#![feature(no_core, lang_items, repr_simd, intrinsics)]
#![no_core]
#![allow(non_camel_case_types)]

// Because we don't have core yet.
#[lang = "sized"]
pub trait Sized {}

#[lang = "copy"]
trait Copy {}

#[repr(simd)]
pub struct mask8x16([i8; 16]);

extern "rust-intrinsic" {
    fn simd_reduce_all<T>(x: T) -> bool;
    fn simd_reduce_any<T>(x: T) -> bool;
}

// CHECK-LABEL: mask_reduce_all:
#[no_mangle]
pub unsafe extern "C" fn mask_reduce_all(m: mask8x16) -> bool {
    // x86: psllw xmm0, 7
    // x86-NEXT: pmovmskb eax, xmm0
    // x86-NEXT: {{cmp ax, -1|xor eax, 65535}}
    // x86-NEXT: sete al
    //
    // aarch64: shl v0.16b, v0.16b, #7
    // aarch64-NEXT: cmlt v0.16b, v0.16b, #0
    // aarch64-NEXT: uminv b0, v0.16b
    // aarch64-NEXT: fmov [[REG:[a-z0-9]+]], s0
    // aarch64-NEXT: and w0, [[REG]], #0x1
    simd_reduce_all(m)
}

// CHECK-LABEL: mask_reduce_any:
#[no_mangle]
pub unsafe extern "C" fn mask_reduce_any(m: mask8x16) -> bool {
    // x86: psllw xmm0, 7
    // x86-NEXT: pmovmskb
    // x86-NEXT: test eax, eax
    // x86-NEXT: setne al
    //
    // aarch64: shl v0.16b, v0.16b, #7
    // aarch64-NEXT: cmlt v0.16b, v0.16b, #0
    // aarch64-NEXT: umaxv b0, v0.16b
    // aarch64-NEXT: fmov [[REG:[a-z0-9]+]], s0
    // aarch64-NEXT: and w0, [[REG]], #0x1
    simd_reduce_any(m)
}