rust/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs

//! Emulate LLVM intrinsics
use crate::intrinsics::*;
use crate::prelude::*;

pub(crate) fn codegen_llvm_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    intrinsic: &str,
    args: &[Spanned<mir::Operand<'tcx>>],
    ret: CPlace<'tcx>,
    target: Option<BasicBlock>,
    span: Span,
) {
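    // Target-specific intrinsics (`llvm.aarch64.*`, `llvm.x86.*`) are handled in dedicated modules.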
if intrinsic.starts_with("llvm.aarch64") {
return llvm_aarch64::codegen_aarch64_llvm_intrinsic_call(fx, intrinsic, args, ret, target);
}
if intrinsic.starts_with("llvm.x86") {
return llvm_x86::codegen_x86_llvm_intrinsic_call(fx, intrinsic, args, ret, target, span);
}
match intrinsic {
"llvm.prefetch" => {
// Nothing to do. This is merely a perf hint.
}
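        // Lane-wise count-leading-zeros: `llvm.ctlz.v*` maps directly to Cranelift's `clz`.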
_ if intrinsic.starts_with("llvm.ctlz.v") => {
intrinsic_args!(fx, args => (a); intrinsic);
simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| {
fx.bcx.ins().clz(lane)
            });
        }
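        // Lane-wise population count: `llvm.ctpop.v*` maps directly to Cranelift's `popcnt`.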
_ if intrinsic.starts_with("llvm.ctpop.v") => {
intrinsic_args!(fx, args => (a); intrinsic);
simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| {
fx.bcx.ins().popcnt(lane)
});
}
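        // Lane-wise fused multiply-add: `llvm.fma.v*` maps directly to Cranelift's `fma`.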
_ if intrinsic.starts_with("llvm.fma.v") => {
intrinsic_args!(fx, args => (x,y,z); intrinsic);
simd_trio_for_each_lane(
fx,
x,
y,
z,
ret,
&|fx, _lane_ty, _res_lane_ty, lane_x, lane_y, lane_z| {
fx.bcx.ins().fma(lane_x, lane_y, lane_z)
},
);
}
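        // Anything else is not fatal at compile time: emit a warning and lower the call to a
        // trap so the surrounding code still compiles.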
        _ => {
            fx.tcx
                .dcx()
                .warn(format!("unsupported llvm intrinsic {}; replacing with trap", intrinsic));
            crate::trap::trap_unimplemented(fx, intrinsic);
            return;
        }
    }
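    // All supported intrinsics return normally, so continue at the call's target block.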
    let dest = target.expect("all llvm intrinsics used by stdlib should return");
    let ret_block = fx.get_block(dest);
    fx.bcx.ins().jump(ret_block, &[]);
}