use std::borrow::{Borrow, Cow};
use std::ops::Deref;
use std::{iter, ptr};

pub(crate) mod autodiff;

use libc::{c_char, c_uint, size_t};
use rustc_abi as abi;
use rustc_abi::{Align, Size, WrappingRange};
use rustc_codegen_ssa::MemFlags;
use rustc_codegen_ssa::common::{IntPredicate, RealPredicate, SynchronizationScope, TypeKind};
use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::*;
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_hir::def_id::DefId;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
use rustc_middle::ty::layout::{
    FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasTypingEnv, LayoutError, LayoutOfHelpers,
    TyAndLayout,
};
use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
use rustc_sanitizers::{cfi, kcfi};
use rustc_session::config::OptLevel;
use rustc_span::Span;
use rustc_target::callconv::FnAbi;
use rustc_target::spec::{HasTargetSpec, SanitizerSet, Target};
use smallvec::SmallVec;
use tracing::{debug, instrument};

use crate::abi::FnAbiLlvmExt;
use crate::common::Funclet;
use crate::context::{CodegenCx, FullCx, GenericCx, SCx};
use crate::llvm::{
    self, AtomicOrdering, AtomicRmwBinOp, BasicBlock, False, GEPNoWrapFlags, Metadata, True,
};
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;
use crate::{attributes, llvm_util};

#[must_use]
pub(crate) struct GenericBuilder<'a, 'll, CX: Borrow<SCx<'ll>>> {
    pub llbuilder: &'ll mut llvm::Builder<'ll>,
    pub cx: &'a GenericCx<'ll, CX>,
}

pub(crate) type SBuilder<'a, 'll> = GenericBuilder<'a, 'll, SCx<'ll>>;
pub(crate) type Builder<'a, 'll, 'tcx> = GenericBuilder<'a, 'll, FullCx<'ll, 'tcx>>;
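
// Rough usage sketch (illustrative; `cx` and `llbb` are assumed to be a
// codegen context and a basic block obtained elsewhere):
//   let mut bx = Builder::build(cx, llbb); // position at the end of `llbb`
//   bx.ret_void();                         // emit `ret void`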

impl<'a, 'll, CX: Borrow<SCx<'ll>>> Drop for GenericBuilder<'a, 'll, CX> {
    fn drop(&mut self) {
        unsafe {
            llvm::LLVMDisposeBuilder(&mut *(self.llbuilder as *mut _));
        }
    }
}

impl<'a, 'll> SBuilder<'a, 'll> {
    fn call(
        &mut self,
        llty: &'ll Type,
        llfn: &'ll Value,
        args: &[&'ll Value],
        funclet: Option<&Funclet<'ll>>,
    ) -> &'ll Value {
        debug!("call {:?} with args ({:?})", llfn, args);

        let args = self.check_call("call", llty, llfn, args);
        let funclet_bundle = funclet.map(|funclet| funclet.bundle());
        let mut bundles: SmallVec<[_; 2]> = SmallVec::new();
        if let Some(funclet_bundle) = funclet_bundle {
            bundles.push(funclet_bundle);
        }

        let call = unsafe {
            llvm::LLVMBuildCallWithOperandBundles(
                self.llbuilder,
                llty,
                llfn,
                args.as_ptr() as *const &llvm::Value,
                args.len() as c_uint,
                bundles.as_ptr(),
                bundles.len() as c_uint,
                c"".as_ptr(),
            )
        };
        call
    }
}

impl<'a, 'll, CX: Borrow<SCx<'ll>>> GenericBuilder<'a, 'll, CX> {
    fn with_cx(scx: &'a GenericCx<'ll, CX>) -> Self {
        // Create a fresh builder from the simple context.
        let llbuilder = unsafe { llvm::LLVMCreateBuilderInContext(scx.deref().borrow().llcx) };
        GenericBuilder { llbuilder, cx: scx }
    }

    pub(crate) fn bitcast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn ret_void(&mut self) {
        llvm::LLVMBuildRetVoid(self.llbuilder);
    }

    fn ret(&mut self, v: &'ll Value) {
        unsafe {
            llvm::LLVMBuildRet(self.llbuilder, v);
        }
    }

    fn build(cx: &'a GenericCx<'ll, CX>, llbb: &'ll BasicBlock) -> Self {
        let bx = Self::with_cx(cx);
        unsafe {
            llvm::LLVMPositionBuilderAtEnd(bx.llbuilder, llbb);
        }
        bx
    }
}

impl<'a, 'll> SBuilder<'a, 'll> {
    fn check_call<'b>(
        &mut self,
        typ: &str,
        fn_ty: &'ll Type,
        llfn: &'ll Value,
        args: &'b [&'ll Value],
    ) -> Cow<'b, [&'ll Value]> {
        assert!(
            self.cx.type_kind(fn_ty) == TypeKind::Function,
            "builder::{typ} not passed a function, but {fn_ty:?}"
        );

        let param_tys = self.cx.func_params_types(fn_ty);

        let all_args_match = iter::zip(&param_tys, args.iter().map(|&v| self.cx.val_ty(v)))
            .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty);

        if all_args_match {
            return Cow::Borrowed(args);
        }

        let casted_args: Vec<_> = iter::zip(param_tys, args)
            .enumerate()
            .map(|(i, (expected_ty, &actual_val))| {
                let actual_ty = self.cx.val_ty(actual_val);
                if expected_ty != actual_ty {
                    debug!(
                        "type mismatch in function call of {:?}. \
                            Expected {:?} for param {}, got {:?}; injecting bitcast",
                        llfn, expected_ty, i, actual_ty
                    );
                    self.bitcast(actual_val, expected_ty)
                } else {
                    actual_val
                }
            })
            .collect();

        Cow::Owned(casted_args)
    }
}
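
// Sketch of `check_call`'s behavior (illustrative): given expected parameter
// types [T0, T1] and arguments [%x: T0, %y: U] with U != T1, the returned
// slice is [%x, bitcast %y to T1]; when every argument already matches, the
// original slice is borrowed back without allocating.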

/// Empty string, to be used where LLVM expects an instruction name, indicating
/// that the instruction is to be left unnamed (i.e. numbered, in textual IR).
// FIXME(eddyb) pass `&CStr` directly to FFI once it's a thin pointer.
const UNNAMED: *const c_char = c"".as_ptr();
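
// When no name is supplied, textual IR falls back to sequential numbering,
// e.g. (illustrative):
//   %0 = add i32 %a, %b
//   %1 = mul i32 %0, %c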

impl<'ll, 'tcx> BackendTypes for Builder<'_, 'll, 'tcx> {
    type Value = <CodegenCx<'ll, 'tcx> as BackendTypes>::Value;
    type Metadata = <CodegenCx<'ll, 'tcx> as BackendTypes>::Metadata;
    type Function = <CodegenCx<'ll, 'tcx> as BackendTypes>::Function;
    type BasicBlock = <CodegenCx<'ll, 'tcx> as BackendTypes>::BasicBlock;
    type Type = <CodegenCx<'ll, 'tcx> as BackendTypes>::Type;
    type Funclet = <CodegenCx<'ll, 'tcx> as BackendTypes>::Funclet;

    type DIScope = <CodegenCx<'ll, 'tcx> as BackendTypes>::DIScope;
    type DILocation = <CodegenCx<'ll, 'tcx> as BackendTypes>::DILocation;
    type DIVariable = <CodegenCx<'ll, 'tcx> as BackendTypes>::DIVariable;
}

impl abi::HasDataLayout for Builder<'_, '_, '_> {
    fn data_layout(&self) -> &abi::TargetDataLayout {
        self.cx.data_layout()
    }
}

impl<'tcx> ty::layout::HasTyCtxt<'tcx> for Builder<'_, '_, 'tcx> {
    #[inline]
    fn tcx(&self) -> TyCtxt<'tcx> {
        self.cx.tcx
    }
}

impl<'tcx> ty::layout::HasTypingEnv<'tcx> for Builder<'_, '_, 'tcx> {
    fn typing_env(&self) -> ty::TypingEnv<'tcx> {
        self.cx.typing_env()
    }
}

impl HasTargetSpec for Builder<'_, '_, '_> {
    #[inline]
    fn target_spec(&self) -> &Target {
        self.cx.target_spec()
    }
}

impl<'tcx> LayoutOfHelpers<'tcx> for Builder<'_, '_, 'tcx> {
    #[inline]
    fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
        self.cx.handle_layout_err(err, span, ty)
    }
}

impl<'tcx> FnAbiOfHelpers<'tcx> for Builder<'_, '_, 'tcx> {
    #[inline]
    fn handle_fn_abi_err(
        &self,
        err: FnAbiError<'tcx>,
        span: Span,
        fn_abi_request: FnAbiRequest<'tcx>,
    ) -> ! {
        self.cx.handle_fn_abi_err(err, span, fn_abi_request)
    }
}

impl<'ll, 'tcx> Deref for Builder<'_, 'll, 'tcx> {
    type Target = CodegenCx<'ll, 'tcx>;

    #[inline]
    fn deref(&self) -> &Self::Target {
        self.cx
    }
}

macro_rules! math_builder_methods {
    ($($name:ident($($arg:ident),*) => $llvm_capi:ident),+ $(,)?) => {
        $(fn $name(&mut self, $($arg: &'ll Value),*) -> &'ll Value {
            unsafe {
                llvm::$llvm_capi(self.llbuilder, $($arg,)* UNNAMED)
            }
        })+
    }
}
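
// As a concrete instance, `add(a, b) => LLVMBuildAdd` in the invocation
// further down expands to roughly:
//   fn add(&mut self, a: &'ll Value, b: &'ll Value) -> &'ll Value {
//       unsafe { llvm::LLVMBuildAdd(self.llbuilder, a, b, UNNAMED) }
//   }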

macro_rules! set_math_builder_methods {
    ($($name:ident($($arg:ident),*) => ($llvm_capi:ident, $llvm_set_math:ident)),+ $(,)?) => {
        $(fn $name(&mut self, $($arg: &'ll Value),*) -> &'ll Value {
            unsafe {
                let instr = llvm::$llvm_capi(self.llbuilder, $($arg,)* UNNAMED);
                llvm::$llvm_set_math(instr);
                instr
            }
        })+
    }
}

impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
    type CodegenCx = CodegenCx<'ll, 'tcx>;

    fn build(cx: &'a CodegenCx<'ll, 'tcx>, llbb: &'ll BasicBlock) -> Self {
        let bx = Builder::with_cx(cx);
        unsafe {
            llvm::LLVMPositionBuilderAtEnd(bx.llbuilder, llbb);
        }
        bx
    }

    fn cx(&self) -> &CodegenCx<'ll, 'tcx> {
        self.cx
    }

    fn llbb(&self) -> &'ll BasicBlock {
        unsafe { llvm::LLVMGetInsertBlock(self.llbuilder) }
    }

    fn set_span(&mut self, _span: Span) {}

    fn append_block(cx: &'a CodegenCx<'ll, 'tcx>, llfn: &'ll Value, name: &str) -> &'ll BasicBlock {
        unsafe {
            let name = SmallCStr::new(name);
            llvm::LLVMAppendBasicBlockInContext(cx.llcx, llfn, name.as_ptr())
        }
    }

    fn append_sibling_block(&mut self, name: &str) -> &'ll BasicBlock {
        Self::append_block(self.cx, self.llfn(), name)
    }

    fn switch_to_block(&mut self, llbb: Self::BasicBlock) {
        *self = Self::build(self.cx, llbb)
    }

    fn ret_void(&mut self) {
        llvm::LLVMBuildRetVoid(self.llbuilder);
    }

    fn ret(&mut self, v: &'ll Value) {
        unsafe {
            llvm::LLVMBuildRet(self.llbuilder, v);
        }
    }

    fn br(&mut self, dest: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMBuildBr(self.llbuilder, dest);
        }
    }

    fn cond_br(
        &mut self,
        cond: &'ll Value,
        then_llbb: &'ll BasicBlock,
        else_llbb: &'ll BasicBlock,
    ) {
        unsafe {
            llvm::LLVMBuildCondBr(self.llbuilder, cond, then_llbb, else_llbb);
        }
    }

    fn switch(
        &mut self,
        v: &'ll Value,
        else_llbb: &'ll BasicBlock,
        cases: impl ExactSizeIterator<Item = (u128, &'ll BasicBlock)>,
    ) {
        let switch =
            unsafe { llvm::LLVMBuildSwitch(self.llbuilder, v, else_llbb, cases.len() as c_uint) };
        for (on_val, dest) in cases {
            let on_val = self.const_uint_big(self.val_ty(v), on_val);
            unsafe { llvm::LLVMAddCase(switch, on_val, dest) }
        }
    }

    fn switch_with_weights(
        &mut self,
        v: Self::Value,
        else_llbb: Self::BasicBlock,
        else_is_cold: bool,
        cases: impl ExactSizeIterator<Item = (u128, Self::BasicBlock, bool)>,
    ) {
        if self.cx.sess().opts.optimize == rustc_session::config::OptLevel::No {
            self.switch(v, else_llbb, cases.map(|(val, dest, _)| (val, dest)));
            return;
        }

        let id_str = "branch_weights";
        let id = unsafe {
            llvm::LLVMMDStringInContext2(self.cx.llcx, id_str.as_ptr().cast(), id_str.len())
        };

        // For switch instructions with 2 targets, the `llvm.expect` intrinsic is used.
        // This function handles switch instructions with more than 2 targets and it needs to
        // emit branch weights metadata instead of using the intrinsic.
        // The values 1 and 2000 are the same as the values used by the `llvm.expect` intrinsic.
        let cold_weight = unsafe { llvm::LLVMValueAsMetadata(self.cx.const_u32(1)) };
        let hot_weight = unsafe { llvm::LLVMValueAsMetadata(self.cx.const_u32(2000)) };
        let weight =
            |is_cold: bool| -> &Metadata { if is_cold { cold_weight } else { hot_weight } };

        let mut md: SmallVec<[&Metadata; 16]> = SmallVec::with_capacity(cases.len() + 2);
        md.push(id);
        md.push(weight(else_is_cold));

        let switch =
            unsafe { llvm::LLVMBuildSwitch(self.llbuilder, v, else_llbb, cases.len() as c_uint) };
        for (on_val, dest, is_cold) in cases {
            let on_val = self.const_uint_big(self.val_ty(v), on_val);
            unsafe { llvm::LLVMAddCase(switch, on_val, dest) }
            md.push(weight(is_cold));
        }

        unsafe {
            let md_node =
                llvm::LLVMMDNodeInContext2(self.cx.llcx, md.as_ptr(), md.len() as size_t);
            self.cx.set_metadata(switch, llvm::MD_prof, md_node);
        }
    }
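
    // The switch built above carries `!prof` metadata shaped like the
    // following (illustrative: cold `else`, one hot case):
    //   switch i32 %v, label %otherwise [ i32 0, label %bb1 ], !prof !0
    //   !0 = !{!"branch_weights", i32 1, i32 2000}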

    fn invoke(
        &mut self,
        llty: &'ll Type,
        fn_attrs: Option<&CodegenFnAttrs>,
        fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
        llfn: &'ll Value,
        args: &[&'ll Value],
        then: &'ll BasicBlock,
        catch: &'ll BasicBlock,
        funclet: Option<&Funclet<'ll>>,
        instance: Option<Instance<'tcx>>,
    ) -> &'ll Value {
        debug!("invoke {:?} with args ({:?})", llfn, args);

        let args = self.check_call("invoke", llty, llfn, args);
        let funclet_bundle = funclet.map(|funclet| funclet.bundle());
        let mut bundles: SmallVec<[_; 2]> = SmallVec::new();
        if let Some(funclet_bundle) = funclet_bundle {
            bundles.push(funclet_bundle);
        }

        // Emit CFI pointer type membership test
        self.cfi_type_test(fn_attrs, fn_abi, instance, llfn);

        // Emit KCFI operand bundle
        let kcfi_bundle = self.kcfi_operand_bundle(fn_attrs, fn_abi, instance, llfn);
        if let Some(kcfi_bundle) = kcfi_bundle.as_deref() {
            bundles.push(kcfi_bundle);
        }

        let invoke = unsafe {
            llvm::LLVMBuildInvokeWithOperandBundles(
                self.llbuilder,
                llty,
                llfn,
                args.as_ptr(),
                args.len() as c_uint,
                then,
                catch,
                bundles.as_ptr(),
                bundles.len() as c_uint,
                UNNAMED,
            )
        };
        if let Some(fn_abi) = fn_abi {
            fn_abi.apply_attrs_callsite(self, invoke);
        }
        invoke
    }

    fn unreachable(&mut self) {
        unsafe {
            llvm::LLVMBuildUnreachable(self.llbuilder);
        }
    }

    math_builder_methods! {
        add(a, b) => LLVMBuildAdd,
        fadd(a, b) => LLVMBuildFAdd,
        sub(a, b) => LLVMBuildSub,
        fsub(a, b) => LLVMBuildFSub,
        mul(a, b) => LLVMBuildMul,
        fmul(a, b) => LLVMBuildFMul,
        udiv(a, b) => LLVMBuildUDiv,
        exactudiv(a, b) => LLVMBuildExactUDiv,
        sdiv(a, b) => LLVMBuildSDiv,
        exactsdiv(a, b) => LLVMBuildExactSDiv,
        fdiv(a, b) => LLVMBuildFDiv,
        urem(a, b) => LLVMBuildURem,
        srem(a, b) => LLVMBuildSRem,
        frem(a, b) => LLVMBuildFRem,
        shl(a, b) => LLVMBuildShl,
        lshr(a, b) => LLVMBuildLShr,
        ashr(a, b) => LLVMBuildAShr,
        and(a, b) => LLVMBuildAnd,
        or(a, b) => LLVMBuildOr,
        xor(a, b) => LLVMBuildXor,
        neg(x) => LLVMBuildNeg,
        fneg(x) => LLVMBuildFNeg,
        not(x) => LLVMBuildNot,
        unchecked_sadd(x, y) => LLVMBuildNSWAdd,
        unchecked_uadd(x, y) => LLVMBuildNUWAdd,
        unchecked_ssub(x, y) => LLVMBuildNSWSub,
        unchecked_usub(x, y) => LLVMBuildNUWSub,
        unchecked_smul(x, y) => LLVMBuildNSWMul,
        unchecked_umul(x, y) => LLVMBuildNUWMul,
    }

    fn unchecked_suadd(&mut self, a: &'ll Value, b: &'ll Value) -> &'ll Value {
        unsafe {
            let add = llvm::LLVMBuildAdd(self.llbuilder, a, b, UNNAMED);
            if llvm::LLVMIsAInstruction(add).is_some() {
                llvm::LLVMSetNUW(add, True);
                llvm::LLVMSetNSW(add, True);
            }
            add
        }
    }

    fn unchecked_susub(&mut self, a: &'ll Value, b: &'ll Value) -> &'ll Value {
        unsafe {
            let sub = llvm::LLVMBuildSub(self.llbuilder, a, b, UNNAMED);
            if llvm::LLVMIsAInstruction(sub).is_some() {
                llvm::LLVMSetNUW(sub, True);
                llvm::LLVMSetNSW(sub, True);
            }
            sub
        }
    }

    fn unchecked_sumul(&mut self, a: &'ll Value, b: &'ll Value) -> &'ll Value {
        unsafe {
            let mul = llvm::LLVMBuildMul(self.llbuilder, a, b, UNNAMED);
            if llvm::LLVMIsAInstruction(mul).is_some() {
                llvm::LLVMSetNUW(mul, True);
                llvm::LLVMSetNSW(mul, True);
            }
            mul
        }
    }

    fn or_disjoint(&mut self, a: &'ll Value, b: &'ll Value) -> &'ll Value {
        unsafe {
            let or = llvm::LLVMBuildOr(self.llbuilder, a, b, UNNAMED);

            // If a and b are both values, then `or` is a value, rather than
            // an instruction, so we need to check before setting the flag.
            // (See also `LLVMBuildNUWNeg` which also needs a check.)
            if llvm::LLVMIsAInstruction(or).is_some() {
                llvm::LLVMSetIsDisjoint(or, True);
            }
            or
        }
    }
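
    // Sketch of why the instruction check matters: with two constant
    // operands the builder constant-folds, so e.g. `or` of the constants
    // 4 and 1 yields the constant 5 directly, and only a genuine `or`
    // instruction can carry the `disjoint` flag.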

    set_math_builder_methods! {
        fadd_fast(x, y) => (LLVMBuildFAdd, LLVMRustSetFastMath),
        fsub_fast(x, y) => (LLVMBuildFSub, LLVMRustSetFastMath),
        fmul_fast(x, y) => (LLVMBuildFMul, LLVMRustSetFastMath),
        fdiv_fast(x, y) => (LLVMBuildFDiv, LLVMRustSetFastMath),
        frem_fast(x, y) => (LLVMBuildFRem, LLVMRustSetFastMath),
        fadd_algebraic(x, y) => (LLVMBuildFAdd, LLVMRustSetAlgebraicMath),
        fsub_algebraic(x, y) => (LLVMBuildFSub, LLVMRustSetAlgebraicMath),
        fmul_algebraic(x, y) => (LLVMBuildFMul, LLVMRustSetAlgebraicMath),
        fdiv_algebraic(x, y) => (LLVMBuildFDiv, LLVMRustSetAlgebraicMath),
        frem_algebraic(x, y) => (LLVMBuildFRem, LLVMRustSetAlgebraicMath),
    }

    fn checked_binop(
        &mut self,
        oop: OverflowOp,
        ty: Ty<'_>,
        lhs: Self::Value,
        rhs: Self::Value,
    ) -> (Self::Value, Self::Value) {
        use rustc_middle::ty::IntTy::*;
        use rustc_middle::ty::UintTy::*;
        use rustc_middle::ty::{Int, Uint};

        let new_kind = match ty.kind() {
            Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.pointer_width)),
            Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.pointer_width)),
            t @ (Uint(_) | Int(_)) => *t,
            _ => panic!("tried to get overflow intrinsic for op applied to non-int type"),
        };

        let name = match oop {
            OverflowOp::Add => match new_kind {
                Int(I8) => "llvm.sadd.with.overflow.i8",
                Int(I16) => "llvm.sadd.with.overflow.i16",
                Int(I32) => "llvm.sadd.with.overflow.i32",
                Int(I64) => "llvm.sadd.with.overflow.i64",
                Int(I128) => "llvm.sadd.with.overflow.i128",

                Uint(U8) => "llvm.uadd.with.overflow.i8",
                Uint(U16) => "llvm.uadd.with.overflow.i16",
                Uint(U32) => "llvm.uadd.with.overflow.i32",
                Uint(U64) => "llvm.uadd.with.overflow.i64",
                Uint(U128) => "llvm.uadd.with.overflow.i128",

                _ => unreachable!(),
            },
            OverflowOp::Sub => match new_kind {
                Int(I8) => "llvm.ssub.with.overflow.i8",
                Int(I16) => "llvm.ssub.with.overflow.i16",
                Int(I32) => "llvm.ssub.with.overflow.i32",
                Int(I64) => "llvm.ssub.with.overflow.i64",
                Int(I128) => "llvm.ssub.with.overflow.i128",

                Uint(_) => {
                    // Emit sub and icmp instead of llvm.usub.with.overflow. LLVM considers these
                    // to be the canonical form. It will attempt to reform llvm.usub.with.overflow
                    // in the backend if profitable.
                    let sub = self.sub(lhs, rhs);
                    let cmp = self.icmp(IntPredicate::IntULT, lhs, rhs);
                    return (sub, cmp);
                }

                _ => unreachable!(),
            },
            OverflowOp::Mul => match new_kind {
                Int(I8) => "llvm.smul.with.overflow.i8",
                Int(I16) => "llvm.smul.with.overflow.i16",
                Int(I32) => "llvm.smul.with.overflow.i32",
                Int(I64) => "llvm.smul.with.overflow.i64",
                Int(I128) => "llvm.smul.with.overflow.i128",

                Uint(U8) => "llvm.umul.with.overflow.i8",
                Uint(U16) => "llvm.umul.with.overflow.i16",
                Uint(U32) => "llvm.umul.with.overflow.i32",
                Uint(U64) => "llvm.umul.with.overflow.i64",
                Uint(U128) => "llvm.umul.with.overflow.i128",

                _ => unreachable!(),
            },
        };

        let res = self.call_intrinsic(name, &[lhs, rhs]);
        (self.extract_value(res, 0), self.extract_value(res, 1))
    }
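
    // The unsigned-sub special case above emits (illustrative IR for u32):
    //   %sub = sub i32 %lhs, %rhs
    //   %ov  = icmp ult i32 %lhs, %rhs   ; overflow occurred iff lhs < rhs
    // rather than calling `llvm.usub.with.overflow.i32`.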

    fn from_immediate(&mut self, val: Self::Value) -> Self::Value {
        if self.cx().val_ty(val) == self.cx().type_i1() {
            self.zext(val, self.cx().type_i8())
        } else {
            val
        }
    }
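
    // Example of the split this handles: a Rust `bool` is `i1` as an
    // immediate SSA value but `i8` in memory, so storing `true` becomes
    // (sketch) `%m = zext i1 true to i8` followed by `store i8 %m, ptr %p`.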

    fn to_immediate_scalar(&mut self, val: Self::Value, scalar: abi::Scalar) -> Self::Value {
        if scalar.is_bool() {
            return self.unchecked_utrunc(val, self.cx().type_i1());
        }
        val
    }

    fn alloca(&mut self, size: Size, align: Align) -> &'ll Value {
        let mut bx = Builder::with_cx(self.cx);
        bx.position_at_start(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn()) });
        let ty = self.cx().type_array(self.cx().type_i8(), size.bytes());
        unsafe {
            let alloca = llvm::LLVMBuildAlloca(bx.llbuilder, ty, UNNAMED);
            llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
            // Cast to default addrspace if necessary
            llvm::LLVMBuildPointerCast(bx.llbuilder, alloca, self.cx().type_ptr(), UNNAMED)
        }
    }

    fn dynamic_alloca(&mut self, size: &'ll Value, align: Align) -> &'ll Value {
        unsafe {
            let alloca =
                llvm::LLVMBuildArrayAlloca(self.llbuilder, self.cx().type_i8(), size, UNNAMED);
            llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
            // Cast to default addrspace if necessary
            llvm::LLVMBuildPointerCast(self.llbuilder, alloca, self.cx().type_ptr(), UNNAMED)
        }
    }

    fn load(&mut self, ty: &'ll Type, ptr: &'ll Value, align: Align) -> &'ll Value {
        unsafe {
            let load = llvm::LLVMBuildLoad2(self.llbuilder, ty, ptr, UNNAMED);
            llvm::LLVMSetAlignment(load, align.bytes() as c_uint);
            load
        }
    }

    fn volatile_load(&mut self, ty: &'ll Type, ptr: &'ll Value) -> &'ll Value {
        unsafe {
            let load = llvm::LLVMBuildLoad2(self.llbuilder, ty, ptr, UNNAMED);
            llvm::LLVMSetVolatile(load, llvm::True);
            load
        }
    }

    fn atomic_load(
        &mut self,
        ty: &'ll Type,
        ptr: &'ll Value,
        order: rustc_codegen_ssa::common::AtomicOrdering,
        size: Size,
    ) -> &'ll Value {
        unsafe {
            let load = llvm::LLVMRustBuildAtomicLoad(
                self.llbuilder,
                ty,
                ptr,
                UNNAMED,
                AtomicOrdering::from_generic(order),
            );
            // LLVM requires the alignment of atomic loads to be at least the size of the type.
            llvm::LLVMSetAlignment(load, size.bytes() as c_uint);
            load
        }
    }

    #[instrument(level = "trace", skip(self))]
    fn load_operand(&mut self, place: PlaceRef<'tcx, &'ll Value>) -> OperandRef<'tcx, &'ll Value> {
        if place.layout.is_unsized() {
            let tail = self.tcx.struct_tail_for_codegen(place.layout.ty, self.typing_env());
            if matches!(tail.kind(), ty::Foreign(..)) {
                // Unsized locals and, at least conceptually, even unsized arguments must be copied
                // around, which requires dynamically determining their size. Therefore, we cannot
                // allow `extern` types here. Consult t-opsem before removing this check.
                panic!("unsized locals must not be `extern` types");
            }
        }
        assert_eq!(place.val.llextra.is_some(), place.layout.is_unsized());

        if place.layout.is_zst() {
            return OperandRef::zero_sized(place.layout);
        }

        #[instrument(level = "trace", skip(bx))]
        fn scalar_load_metadata<'a, 'll, 'tcx>(
            bx: &mut Builder<'a, 'll, 'tcx>,
            load: &'ll Value,
            scalar: abi::Scalar,
            layout: TyAndLayout<'tcx>,
            offset: Size,
        ) {
            if bx.cx.sess().opts.optimize == OptLevel::No {
                // Don't emit metadata we're not going to use
                return;
            }

            if !scalar.is_uninit_valid() {
                bx.noundef_metadata(load);
            }

            match scalar.primitive() {
                abi::Primitive::Int(..) => {
                    if !scalar.is_always_valid(bx) {
                        bx.range_metadata(load, scalar.valid_range(bx));
                    }
                }
                abi::Primitive::Pointer(_) => {
                    if !scalar.valid_range(bx).contains(0) {
                        bx.nonnull_metadata(load);
                    }

                    if let Some(pointee) = layout.pointee_info_at(bx, offset) {
                        if let Some(_) = pointee.safe {
                            bx.align_metadata(load, pointee.align);
                        }
                    }
                }
                abi::Primitive::Float(_) => {}
            }
        }

        let val = if let Some(_) = place.val.llextra {
            // FIXME: Merge with the `else` below?
            OperandValue::Ref(place.val)
        } else if place.layout.is_llvm_immediate() {
            let mut const_llval = None;
            let llty = place.layout.llvm_type(self);
            unsafe {
                if let Some(global) = llvm::LLVMIsAGlobalVariable(place.val.llval) {
                    if llvm::LLVMIsGlobalConstant(global) == llvm::True {
                        if let Some(init) = llvm::LLVMGetInitializer(global) {
                            if self.val_ty(init) == llty {
                                const_llval = Some(init);
                            }
                        }
                    }
                }
            }
            let llval = const_llval.unwrap_or_else(|| {
                let load = self.load(llty, place.val.llval, place.val.align);
                if let abi::BackendRepr::Scalar(scalar) = place.layout.backend_repr {
                    scalar_load_metadata(self, load, scalar, place.layout, Size::ZERO);
                    self.to_immediate_scalar(load, scalar)
                } else {
                    load
                }
            });
            OperandValue::Immediate(llval)
        } else if let abi::BackendRepr::ScalarPair(a, b) = place.layout.backend_repr {
            let b_offset = a.size(self).align_to(b.align(self).abi);

            let mut load = |i, scalar: abi::Scalar, layout, align, offset| {
                let llptr = if i == 0 {
                    place.val.llval
                } else {
                    self.inbounds_ptradd(place.val.llval, self.const_usize(b_offset.bytes()))
                };
                let llty = place.layout.scalar_pair_element_llvm_type(self, i, false);
                let load = self.load(llty, llptr, align);
                scalar_load_metadata(self, load, scalar, layout, offset);
                self.to_immediate_scalar(load, scalar)
            };

            OperandValue::Pair(
                load(0, a, place.layout, place.val.align, Size::ZERO),
                load(1, b, place.layout, place.val.align.restrict_for_offset(b_offset), b_offset),
            )
        } else {
            OperandValue::Ref(place.val)
        };

        OperandRef { val, layout: place.layout }
    }

    fn write_operand_repeatedly(
        &mut self,
        cg_elem: OperandRef<'tcx, &'ll Value>,
        count: u64,
        dest: PlaceRef<'tcx, &'ll Value>,
    ) {
        let zero = self.const_usize(0);
        let count = self.const_usize(count);

        let header_bb = self.append_sibling_block("repeat_loop_header");
        let body_bb = self.append_sibling_block("repeat_loop_body");
        let next_bb = self.append_sibling_block("repeat_loop_next");

        self.br(header_bb);

        let mut header_bx = Self::build(self.cx, header_bb);
        let i = header_bx.phi(self.val_ty(zero), &[zero], &[self.llbb()]);

        let keep_going = header_bx.icmp(IntPredicate::IntULT, i, count);
        header_bx.cond_br(keep_going, body_bb, next_bb);

        let mut body_bx = Self::build(self.cx, body_bb);
        let dest_elem = dest.project_index(&mut body_bx, i);
        cg_elem.val.store(&mut body_bx, dest_elem);

        let next = body_bx.unchecked_uadd(i, self.const_usize(1));
        body_bx.br(header_bb);
        header_bx.add_incoming_to_phi(i, next, body_bb);

        *self = Self::build(self.cx, next_bb);
    }
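
    // Control-flow shape of the loop emitted above (sketch):
    //   repeat_loop_header:
    //       %i = phi [ 0, %entry ], [ %next, %repeat_loop_body ]
    //       br (%i < %count), %repeat_loop_body, %repeat_loop_next
    //   repeat_loop_body:
    //       store %cg_elem -> dest[%i]
    //       %next = add nuw %i, 1
    //       br %repeat_loop_header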

    fn range_metadata(&mut self, load: &'ll Value, range: WrappingRange) {
        if self.cx.sess().opts.optimize == OptLevel::No {
            // Don't emit metadata we're not going to use
            return;
        }

        unsafe {
            let llty = self.cx.val_ty(load);
            let md = [
                llvm::LLVMValueAsMetadata(self.cx.const_uint_big(llty, range.start)),
                llvm::LLVMValueAsMetadata(self.cx.const_uint_big(llty, range.end.wrapping_add(1))),
            ];
            let md = llvm::LLVMMDNodeInContext2(self.cx.llcx, md.as_ptr(), md.len());
            self.set_metadata(load, llvm::MD_range, md);
        }
    }
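
    // `!range` bounds are half-open, hence the `wrapping_add(1)` above. For
    // example, a load of a `bool` (valid range 0..=1) is annotated as:
    //   %b = load i8, ptr %p, align 1, !range !0
    //   !0 = !{i8 0, i8 2}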

    fn nonnull_metadata(&mut self, load: &'ll Value) {
        unsafe {
            let md = llvm::LLVMMDNodeInContext2(self.cx.llcx, ptr::null(), 0);
            self.set_metadata(load, llvm::MD_nonnull, md);
        }
    }

    fn store(&mut self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value {
        self.store_with_flags(val, ptr, align, MemFlags::empty())
    }

    fn store_with_flags(
        &mut self,
        val: &'ll Value,
        ptr: &'ll Value,
        align: Align,
        flags: MemFlags,
    ) -> &'ll Value {
        debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags);
        assert_eq!(self.cx.type_kind(self.cx.val_ty(ptr)), TypeKind::Pointer);
        unsafe {
            let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr);
            let align =
                if flags.contains(MemFlags::UNALIGNED) { 1 } else { align.bytes() as c_uint };
            llvm::LLVMSetAlignment(store, align);
            if flags.contains(MemFlags::VOLATILE) {
                llvm::LLVMSetVolatile(store, llvm::True);
            }
            if flags.contains(MemFlags::NONTEMPORAL) {
                // Make sure that the current target architecture supports "sane" non-temporal
                // stores, i.e., non-temporal stores that are equivalent to regular stores except
                // for performance. LLVM doesn't seem to care about this, and will happily treat
                // `!nontemporal` stores as-if they were normal stores (for reordering optimizations
                // etc) even on x86, despite later lowering them to MOVNT which do *not* behave like
                // regular stores but require special fences. So we keep a list of architectures
                // where `!nontemporal` is known to be truly just a hint, and use regular stores
                // everywhere else. (In the future, we could alternatively ensure that an sfence
                // gets emitted after a sequence of movnt before any kind of synchronizing
                // operation. But it's not clear how to do that with LLVM.)
                // For more context, see <https://github.com/rust-lang/rust/issues/114582> and
                // <https://github.com/llvm/llvm-project/issues/64521>.
                const WELL_BEHAVED_NONTEMPORAL_ARCHS: &[&str] =
                    &["aarch64", "arm", "riscv32", "riscv64"];

                let use_nontemporal =
                    WELL_BEHAVED_NONTEMPORAL_ARCHS.contains(&&*self.cx.tcx.sess.target.arch);
                if use_nontemporal {
                    // According to LLVM [1] building a nontemporal store must
                    // *always* point to a metadata value of the integer 1.
                    //
                    // [1]: https://llvm.org/docs/LangRef.html#store-instruction
                    let one = llvm::LLVMValueAsMetadata(self.cx.const_i32(1));
                    let md = llvm::LLVMMDNodeInContext2(self.cx.llcx, &one, 1);
                    self.set_metadata(store, llvm::MD_nontemporal, md);
                }
            }
            store
        }
    }
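
    // On the listed architectures a nontemporal store lowers to e.g.
    // (sketch):
    //   store i32 %val, ptr %p, align 4, !nontemporal !0
    //   !0 = !{i32 1}
    // while on every other target the hint is dropped and a plain store is
    // emitted instead.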

    fn atomic_store(
        &mut self,
        val: &'ll Value,
        ptr: &'ll Value,
        order: rustc_codegen_ssa::common::AtomicOrdering,
        size: Size,
    ) {
        debug!("Store {:?} -> {:?}", val, ptr);
        assert_eq!(self.cx.type_kind(self.cx.val_ty(ptr)), TypeKind::Pointer);
        unsafe {
            let store = llvm::LLVMRustBuildAtomicStore(
                self.llbuilder,
                val,
                ptr,
                AtomicOrdering::from_generic(order),
            );
            // LLVM requires the alignment of atomic stores to be at least the size of the type.
            llvm::LLVMSetAlignment(store, size.bytes() as c_uint);
        }
    }

    fn gep(&mut self, ty: &'ll Type, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
        unsafe {
            llvm::LLVMBuildGEPWithNoWrapFlags(
                self.llbuilder,
                ty,
                ptr,
                indices.as_ptr(),
                indices.len() as c_uint,
                UNNAMED,
                GEPNoWrapFlags::default(),
            )
        }
    }

    fn inbounds_gep(
        &mut self,
        ty: &'ll Type,
        ptr: &'ll Value,
        indices: &[&'ll Value],
    ) -> &'ll Value {
        unsafe {
            llvm::LLVMBuildGEPWithNoWrapFlags(
                self.llbuilder,
                ty,
                ptr,
                indices.as_ptr(),
                indices.len() as c_uint,
                UNNAMED,
                GEPNoWrapFlags::InBounds,
            )
        }
    }

    fn inbounds_nuw_gep(
        &mut self,
        ty: &'ll Type,
        ptr: &'ll Value,
        indices: &[&'ll Value],
    ) -> &'ll Value {
        unsafe {
            llvm::LLVMBuildGEPWithNoWrapFlags(
                self.llbuilder,
                ty,
                ptr,
                indices.as_ptr(),
                indices.len() as c_uint,
                UNNAMED,
                GEPNoWrapFlags::InBounds | GEPNoWrapFlags::NUW,
            )
        }
    }

    /* Casts */
    fn trunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildTrunc(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn unchecked_utrunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        debug_assert_ne!(self.val_ty(val), dest_ty);

        let trunc = self.trunc(val, dest_ty);
        if llvm_util::get_version() >= (19, 0, 0) {
            unsafe {
                if llvm::LLVMIsAInstruction(trunc).is_some() {
                    llvm::LLVMSetNUW(trunc, True);
                }
            }
        }
        trunc
    }

    fn unchecked_strunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        debug_assert_ne!(self.val_ty(val), dest_ty);

        let trunc = self.trunc(val, dest_ty);
        if llvm_util::get_version() >= (19, 0, 0) {
            unsafe {
                if llvm::LLVMIsAInstruction(trunc).is_some() {
                    llvm::LLVMSetNSW(trunc, True);
                }
            }
        }
        trunc
    }

    fn sext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildSExt(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn fptoui_sat(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        self.fptoint_sat(false, val, dest_ty)
    }

    fn fptosi_sat(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        self.fptoint_sat(true, val, dest_ty)
    }

    fn fptoui(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        // On WebAssembly the `fptoui` and `fptosi` instructions currently have
        // poor codegen. The reason for this is that the corresponding wasm
        // instructions, `i32.trunc_f32_s` for example, will trap when the float
        // is out-of-bounds, infinity, or nan. This means that LLVM
        // automatically inserts control flow around `fptoui` and `fptosi`
        // because the LLVM instruction `fptoui` is defined as producing a
        // poison value, not having UB on out-of-bounds values.
        //
        // This method, however, is only used with non-saturating casts that
        // have UB on out-of-bounds values. This means that it's ok if we use
        // the raw wasm instruction since out-of-bounds values can do whatever
        // we like. To ensure that LLVM picks the right instruction we choose
        // the raw wasm intrinsic functions which avoid LLVM inserting all the
        // other control flow automatically.
        if self.sess().target.is_like_wasm {
|
rustc: Improving safe wasm float->int casts
This commit improves code generation for WebAssembly targets when
translating floating to integer casts. This improvement is only relevant
when the `nontrapping-fptoint` feature is not enabled, but the feature
is not enabled by default right now. Additionally this improvement only
affects safe casts since unchecked casts were improved in #74659.
Some more background for this issue is present on #73591, but the
general gist of the issue is that in LLVM the `fptosi` and `fptoui`
instructions are defined to return an `undef` value if they execute on
out-of-bounds values; they notably do not trap. To implement these
instructions for WebAssembly the LLVM backend must therefore generate
quite a few instructions before executing `i32.trunc_f32_s` (for
example) because this WebAssembly instruction traps on out-of-bounds
values. This codegen into wasm instructions happens very late in the
code generator, so what ends up happening is that rustc inserts its own
codegen to implement Rust's saturating semantics, and then LLVM also
inserts its own codegen to make sure that the `fptosi` instruction
doesn't trap. Overall this means that a function like this:
#[no_mangle]
pub unsafe extern "C" fn cast(x: f64) -> u32 {
x as u32
}
will generate this WebAssembly today:
(func $cast (type 0) (param f64) (result i32)
(local i32 i32)
local.get 0
f64.const 0x1.fffffffep+31 (;=4.29497e+09;)
f64.gt
local.set 1
block ;; label = @1
block ;; label = @2
local.get 0
f64.const 0x0p+0 (;=0;)
local.get 0
f64.const 0x0p+0 (;=0;)
f64.gt
select
local.tee 0
f64.const 0x1p+32 (;=4.29497e+09;)
f64.lt
local.get 0
f64.const 0x0p+0 (;=0;)
f64.ge
i32.and
i32.eqz
br_if 0 (;@2;)
local.get 0
i32.trunc_f64_u
local.set 2
br 1 (;@1;)
end
i32.const 0
local.set 2
end
i32.const -1
local.get 2
local.get 1
select)
This PR improves the situation by updating the code generation for
float-to-int conversions in rustc, specifically only for WebAssembly
targets and only for some situations (float-to-u8 still has not great
codegen). The fix here is to use basic blocks and control flow to avoid
speculatively executing `fptosi`, and instead LLVM's raw intrinsic for
the WebAssembly instruction is used instead. This effectively extends
the support added in #74659 to checked casts. After this commit the
codegen for the above Rust function looks like:
(func $cast (type 0) (param f64) (result i32)
(local i32)
block ;; label = @1
local.get 0
f64.const 0x0p+0 (;=0;)
f64.ge
local.tee 1
i32.const 1
i32.xor
br_if 0 (;@1;)
local.get 0
f64.const 0x1.fffffffep+31 (;=4.29497e+09;)
f64.le
i32.eqz
br_if 0 (;@1;)
local.get 0
i32.trunc_f64_u
return
end
i32.const -1
i32.const 0
local.get 1
select)
For reference, in Rust 1.44, which did not have saturating
float-to-integer casts, the codegen LLVM would emit is:
(func $cast (type 0) (param f64) (result i32)
block ;; label = @1
local.get 0
f64.const 0x1p+32 (;=4.29497e+09;)
f64.lt
local.get 0
f64.const 0x0p+0 (;=0;)
f64.ge
i32.and
i32.eqz
br_if 0 (;@1;)
local.get 0
i32.trunc_f64_u
return
end
i32.const 0)
So we're relatively close to the original codegen, although it's
slightly different because the semantics of the function changed where
we're emulating the `i32.trunc_sat_f32_s` instruction rather than always
replacing out-of-bounds values with zero.
There is still work that could be done to improve casts such as `f32` to
`u8`. That form of cast still uses the `fptosi` instruction which
generates lots of branch-y code. This seems less important to tackle now
though. In the meantime this should take care of most use cases of
floating-point conversion and as a result I'm going to speculate that
this...
Closes #73591
2020-07-23 19:20:42 +00:00
|
|
|
let src_ty = self.cx.val_ty(val);
|
2020-08-09 01:09:40 +00:00
|
|
|
if self.cx.type_kind(src_ty) != TypeKind::Vector {
|
|
|
|
let float_width = self.cx.float_width(src_ty);
|
|
|
|
let int_width = self.cx.int_width(dest_ty);
|
|
|
|
let name = match (int_width, float_width) {
|
|
|
|
(32, 32) => Some("llvm.wasm.trunc.unsigned.i32.f32"),
|
|
|
|
(32, 64) => Some("llvm.wasm.trunc.unsigned.i32.f64"),
|
|
|
|
(64, 32) => Some("llvm.wasm.trunc.unsigned.i64.f32"),
|
|
|
|
(64, 64) => Some("llvm.wasm.trunc.unsigned.i64.f64"),
|
|
|
|
_ => None,
|
|
|
|
};
|
|
|
|
if let Some(name) = name {
|
2021-08-03 22:09:57 +00:00
|
|
|
return self.call_intrinsic(name, &[val]);
|
2020-08-09 01:09:40 +00:00
|
|
|
}
|
rustc: Improving safe wasm float->int casts
This commit improves code generation for WebAssembly targets when
translating floating to integer casts. This improvement is only relevant
when the `nontrapping-fptoint` feature is not enabled, but the feature
is not enabled by default right now. Additionally this improvement only
affects safe casts since unchecked casts were improved in #74659.
Some more background for this issue is present on #73591, but the
general gist of the issue is that in LLVM the `fptosi` and `fptoui`
instructions are defined to return an `undef` value if they execute on
out-of-bounds values; they notably do not trap. To implement these
instructions for WebAssembly the LLVM backend must therefore generate
quite a few instructions before executing `i32.trunc_f32_s` (for
example) because this WebAssembly instruction traps on out-of-bounds
values. This codegen into wasm instructions happens very late in the
code generator, so what ends up happening is that rustc inserts its own
codegen to implement Rust's saturating semantics, and then LLVM also
inserts its own codegen to make sure that the `fptosi` instruction
doesn't trap. Overall this means that a function like this:
#[no_mangle]
pub unsafe extern "C" fn cast(x: f64) -> u32 {
x as u32
}
will generate this WebAssembly today:
(func $cast (type 0) (param f64) (result i32)
(local i32 i32)
local.get 0
f64.const 0x1.fffffffep+31 (;=4.29497e+09;)
f64.gt
local.set 1
block ;; label = @1
block ;; label = @2
local.get 0
f64.const 0x0p+0 (;=0;)
local.get 0
f64.const 0x0p+0 (;=0;)
f64.gt
select
local.tee 0
f64.const 0x1p+32 (;=4.29497e+09;)
f64.lt
local.get 0
f64.const 0x0p+0 (;=0;)
f64.ge
i32.and
i32.eqz
br_if 0 (;@2;)
local.get 0
i32.trunc_f64_u
local.set 2
br 1 (;@1;)
end
i32.const 0
local.set 2
end
i32.const -1
local.get 2
local.get 1
select)
This PR improves the situation by updating the code generation for
float-to-int conversions in rustc, specifically only for WebAssembly
targets and only for some situations (float-to-u8 still has not great
codegen). The fix here is to use basic blocks and control flow to avoid
speculatively executing `fptosi`, and instead LLVM's raw intrinsic for
the WebAssembly instruction is used instead. This effectively extends
the support added in #74659 to checked casts. After this commit the
codegen for the above Rust function looks like:
(func $cast (type 0) (param f64) (result i32)
(local i32)
block ;; label = @1
local.get 0
f64.const 0x0p+0 (;=0;)
f64.ge
local.tee 1
i32.const 1
i32.xor
br_if 0 (;@1;)
local.get 0
f64.const 0x1.fffffffep+31 (;=4.29497e+09;)
f64.le
i32.eqz
br_if 0 (;@1;)
local.get 0
i32.trunc_f64_u
return
end
i32.const -1
i32.const 0
local.get 1
select)
For reference, in Rust 1.44, which did not have saturating
float-to-integer casts, the codegen LLVM would emit is:
(func $cast (type 0) (param f64) (result i32)
block ;; label = @1
local.get 0
f64.const 0x1p+32 (;=4.29497e+09;)
f64.lt
local.get 0
f64.const 0x0p+0 (;=0;)
f64.ge
i32.and
i32.eqz
br_if 0 (;@1;)
local.get 0
i32.trunc_f64_u
return
end
i32.const 0)
So we're relatively close to the original codegen, although it's
slightly different because the semantics of the function changed where
we're emulating the `i32.trunc_sat_f32_s` instruction rather than always
replacing out-of-bounds values with zero.
There is still work that could be done to improve casts such as `f32` to
`u8`. That form of cast still uses the `fptosi` instruction which
generates lots of branch-y code. This seems less important to tackle now
though. In the meantime this should take care of most use cases of
floating-point conversion and as a result I'm going to speculate that
this...
Closes #73591
2020-07-23 19:20:42 +00:00
|
|
|
}
|
|
|
|
}
|
2019-05-29 01:46:55 +00:00
|
|
|
unsafe { llvm::LLVMBuildFPToUI(self.llbuilder, val, dest_ty, UNNAMED) }
|
2013-07-21 13:33:40 +00:00
|
|
|
}

    fn fptosi(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        // see `fptoui` above for why wasm is different here
        if self.sess().target.is_like_wasm {
            let src_ty = self.cx.val_ty(val);
            if self.cx.type_kind(src_ty) != TypeKind::Vector {
                let float_width = self.cx.float_width(src_ty);
                let int_width = self.cx.int_width(dest_ty);
                let name = match (int_width, float_width) {
                    (32, 32) => Some("llvm.wasm.trunc.signed.i32.f32"),
                    (32, 64) => Some("llvm.wasm.trunc.signed.i32.f64"),
                    (64, 32) => Some("llvm.wasm.trunc.signed.i64.f32"),
                    (64, 64) => Some("llvm.wasm.trunc.signed.i64.f64"),
                    _ => None,
                };
                if let Some(name) = name {
                    return self.call_intrinsic(name, &[val]);
                }
            }
        }
        unsafe { llvm::LLVMBuildFPToSI(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn uitofp(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildUIToFP(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn sitofp(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildSIToFP(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn fptrunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildFPTrunc(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn fpext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildFPExt(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn ptrtoint(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildPtrToInt(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn inttoptr(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildIntToPtr(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn bitcast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn intcast(&mut self, val: &'ll Value, dest_ty: &'ll Type, is_signed: bool) -> &'ll Value {
        unsafe {
            llvm::LLVMBuildIntCast2(
                self.llbuilder,
                val,
                dest_ty,
                if is_signed { True } else { False },
                UNNAMED,
            )
        }
    }

    fn pointercast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildPointerCast(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    /* Comparisons */
    fn icmp(&mut self, op: IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        let op = llvm::IntPredicate::from_generic(op);
        unsafe { llvm::LLVMBuildICmp(self.llbuilder, op as c_uint, lhs, rhs, UNNAMED) }
    }

    fn fcmp(&mut self, op: RealPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        let op = llvm::RealPredicate::from_generic(op);
        unsafe { llvm::LLVMBuildFCmp(self.llbuilder, op as c_uint, lhs, rhs, UNNAMED) }
    }

    /* Miscellaneous instructions */
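    // The block memory operations below lower to LLVM's memcpy, memmove, and
    // memset intrinsics. `memcpy` and `memmove` first normalize the byte count
    // to the target's `isize` width; all three forward only the VOLATILE flag
    // and assert that NONTEMPORAL was not requested, as these intrinsics have
    // no non-temporal variant.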
    fn memcpy(
        &mut self,
        dst: &'ll Value,
        dst_align: Align,
        src: &'ll Value,
        src_align: Align,
        size: &'ll Value,
        flags: MemFlags,
    ) {
        assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memcpy not supported");
        let size = self.intcast(size, self.type_isize(), false);
        let is_volatile = flags.contains(MemFlags::VOLATILE);
        unsafe {
            llvm::LLVMRustBuildMemCpy(
                self.llbuilder,
                dst,
                dst_align.bytes() as c_uint,
                src,
                src_align.bytes() as c_uint,
                size,
                is_volatile,
            );
        }
    }

    fn memmove(
        &mut self,
        dst: &'ll Value,
        dst_align: Align,
        src: &'ll Value,
        src_align: Align,
        size: &'ll Value,
        flags: MemFlags,
    ) {
        assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memmove not supported");
        let size = self.intcast(size, self.type_isize(), false);
        let is_volatile = flags.contains(MemFlags::VOLATILE);
        unsafe {
            llvm::LLVMRustBuildMemMove(
                self.llbuilder,
                dst,
                dst_align.bytes() as c_uint,
                src,
                src_align.bytes() as c_uint,
                size,
                is_volatile,
            );
        }
    }

    fn memset(
        &mut self,
        ptr: &'ll Value,
        fill_byte: &'ll Value,
        size: &'ll Value,
        align: Align,
        flags: MemFlags,
    ) {
        assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memset not supported");
        let is_volatile = flags.contains(MemFlags::VOLATILE);
        unsafe {
            llvm::LLVMRustBuildMemSet(
                self.llbuilder,
                ptr,
                align.bytes() as c_uint,
                fill_byte,
                size,
                is_volatile,
            );
        }
    }

    fn select(
        &mut self,
        cond: &'ll Value,
        then_val: &'ll Value,
        else_val: &'ll Value,
    ) -> &'ll Value {
        unsafe { llvm::LLVMBuildSelect(self.llbuilder, cond, then_val, else_val, UNNAMED) }
    }

    fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, UNNAMED) }
    }

    fn extract_element(&mut self, vec: &'ll Value, idx: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMBuildExtractElement(self.llbuilder, vec, idx, UNNAMED) }
    }
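    // Splats a scalar into every lane of a vector: `elt` is inserted into lane
    // 0 of an undef vector, which is then shuffled with an all-zeros i32 mask
    // (the `const_null` vector) so that lane 0 is broadcast to all `num_elts`
    // lanes.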
    fn vector_splat(&mut self, num_elts: usize, elt: &'ll Value) -> &'ll Value {
        unsafe {
            let elt_ty = self.cx.val_ty(elt);
            let undef = llvm::LLVMGetUndef(self.type_vector(elt_ty, num_elts as u64));
            let vec = self.insert_element(undef, elt, self.cx.const_i32(0));
            let vec_i32_ty = self.type_vector(self.type_i32(), num_elts as u64);
            self.shuffle_vector(vec, undef, self.const_null(vec_i32_ty))
        }
    }

    fn extract_value(&mut self, agg_val: &'ll Value, idx: u64) -> &'ll Value {
        assert_eq!(idx as c_uint as u64, idx);
        unsafe { llvm::LLVMBuildExtractValue(self.llbuilder, agg_val, idx as c_uint, UNNAMED) }
    }

    fn insert_value(&mut self, agg_val: &'ll Value, elt: &'ll Value, idx: u64) -> &'ll Value {
        assert_eq!(idx as c_uint as u64, idx);
        unsafe { llvm::LLVMBuildInsertValue(self.llbuilder, agg_val, elt, idx as c_uint, UNNAMED) }
    }

    fn set_personality_fn(&mut self, personality: &'ll Value) {
        unsafe {
            llvm::LLVMSetPersonalityFn(self.llfn(), personality);
        }
    }

    fn cleanup_landing_pad(&mut self, pers_fn: &'ll Value) -> (&'ll Value, &'ll Value) {
        let ty = self.type_struct(&[self.type_ptr(), self.type_i32()], false);
        let landing_pad = self.landing_pad(ty, pers_fn, 0);
        unsafe {
            llvm::LLVMSetCleanup(landing_pad, llvm::True);
        }
        (self.extract_value(landing_pad, 0), self.extract_value(landing_pad, 1))
    }

    fn filter_landing_pad(&mut self, pers_fn: &'ll Value) -> (&'ll Value, &'ll Value) {
        let ty = self.type_struct(&[self.type_ptr(), self.type_i32()], false);
        let landing_pad = self.landing_pad(ty, pers_fn, 1);
        self.add_clause(landing_pad, self.const_array(self.type_ptr(), &[]));
        (self.extract_value(landing_pad, 0), self.extract_value(landing_pad, 1))
    }

    fn resume(&mut self, exn0: &'ll Value, exn1: &'ll Value) {
        let ty = self.type_struct(&[self.type_ptr(), self.type_i32()], false);
        let mut exn = self.const_poison(ty);
        exn = self.insert_value(exn, exn0, 0);
        exn = self.insert_value(exn, exn1, 1);
        unsafe {
            llvm::LLVMBuildResume(self.llbuilder, exn);
        }
    }

    fn cleanup_pad(&mut self, parent: Option<&'ll Value>, args: &[&'ll Value]) -> Funclet<'ll> {
        let ret = unsafe {
            llvm::LLVMBuildCleanupPad(
                self.llbuilder,
                parent,
                args.as_ptr(),
                args.len() as c_uint,
                c"cleanuppad".as_ptr(),
            )
        };
        Funclet::new(ret.expect("LLVM does not have support for cleanuppad"))
    }

    fn cleanup_ret(&mut self, funclet: &Funclet<'ll>, unwind: Option<&'ll BasicBlock>) {
        unsafe {
            llvm::LLVMBuildCleanupRet(self.llbuilder, funclet.cleanuppad(), unwind)
                .expect("LLVM does not have support for cleanupret");
        }
    }

    fn catch_pad(&mut self, parent: &'ll Value, args: &[&'ll Value]) -> Funclet<'ll> {
        let ret = unsafe {
            llvm::LLVMBuildCatchPad(
                self.llbuilder,
                parent,
                args.as_ptr(),
                args.len() as c_uint,
                c"catchpad".as_ptr(),
            )
        };
        Funclet::new(ret.expect("LLVM does not have support for catchpad"))
    }

    fn catch_switch(
        &mut self,
        parent: Option<&'ll Value>,
        unwind: Option<&'ll BasicBlock>,
        handlers: &[&'ll BasicBlock],
    ) -> &'ll Value {
        let ret = unsafe {
            llvm::LLVMBuildCatchSwitch(
                self.llbuilder,
                parent,
                unwind,
                handlers.len() as c_uint,
                c"catchswitch".as_ptr(),
            )
        };
        let ret = ret.expect("LLVM does not have support for catchswitch");
        for handler in handlers {
            unsafe {
                llvm::LLVMAddHandler(ret, handler);
            }
        }
        ret
    }

    // Atomic Operations
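    // LLVM's cmpxchg instruction yields a `{ T, i1 }` pair: the value that was
    // loaded and a success flag. The two extractvalues below split that pair so
    // callers receive `(value, success)` directly; `LLVMSetWeak` marks the
    // instruction as allowed to fail spuriously.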
    fn atomic_cmpxchg(
        &mut self,
        dst: &'ll Value,
        cmp: &'ll Value,
        src: &'ll Value,
        order: rustc_codegen_ssa::common::AtomicOrdering,
        failure_order: rustc_codegen_ssa::common::AtomicOrdering,
        weak: bool,
    ) -> (&'ll Value, &'ll Value) {
        let weak = if weak { llvm::True } else { llvm::False };
        unsafe {
            let value = llvm::LLVMBuildAtomicCmpXchg(
                self.llbuilder,
                dst,
                cmp,
                src,
                AtomicOrdering::from_generic(order),
                AtomicOrdering::from_generic(failure_order),
                llvm::False, // SingleThreaded
            );
            llvm::LLVMSetWeak(value, weak);
            let val = self.extract_value(value, 0);
            let success = self.extract_value(value, 1);
            (val, success)
        }
    }

    fn atomic_rmw(
        &mut self,
        op: rustc_codegen_ssa::common::AtomicRmwBinOp,
        dst: &'ll Value,
        mut src: &'ll Value,
        order: rustc_codegen_ssa::common::AtomicOrdering,
    ) -> &'ll Value {
        // The only RMW operation that LLVM supports on pointers is compare-exchange.
        let requires_cast_to_int = self.val_ty(src) == self.type_ptr()
            && op != rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicXchg;
        if requires_cast_to_int {
            src = self.ptrtoint(src, self.type_isize());
        }
        let mut res = unsafe {
            llvm::LLVMBuildAtomicRMW(
                self.llbuilder,
                AtomicRmwBinOp::from_generic(op),
                dst,
                src,
                AtomicOrdering::from_generic(order),
                llvm::False, // SingleThreaded
            )
        };
        if requires_cast_to_int {
            res = self.inttoptr(res, self.type_ptr());
        }
        res
    }

    fn atomic_fence(
        &mut self,
        order: rustc_codegen_ssa::common::AtomicOrdering,
        scope: SynchronizationScope,
    ) {
        let single_threaded = match scope {
            SynchronizationScope::SingleThread => llvm::True,
            SynchronizationScope::CrossThread => llvm::False,
        };
        unsafe {
            llvm::LLVMBuildFence(
                self.llbuilder,
                AtomicOrdering::from_generic(order),
                single_threaded,
                UNNAMED,
            );
        }
    }

    fn set_invariant_load(&mut self, load: &'ll Value) {
        unsafe {
            let md = llvm::LLVMMDNodeInContext2(self.cx.llcx, ptr::null(), 0);
            self.set_metadata(load, llvm::MD_invariant_load, md);
        }
    }

    fn lifetime_start(&mut self, ptr: &'ll Value, size: Size) {
        self.call_lifetime_intrinsic("llvm.lifetime.start.p0i8", ptr, size);
    }

    fn lifetime_end(&mut self, ptr: &'ll Value, size: Size) {
        self.call_lifetime_intrinsic("llvm.lifetime.end.p0i8", ptr, size);
    }

    fn call(
        &mut self,
        llty: &'ll Type,
        fn_attrs: Option<&CodegenFnAttrs>,
        fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
        llfn: &'ll Value,
        args: &[&'ll Value],
        funclet: Option<&Funclet<'ll>>,
        instance: Option<Instance<'tcx>>,
    ) -> &'ll Value {
        debug!("call {:?} with args ({:?})", llfn, args);

        let args = self.check_call("call", llty, llfn, args);
        let funclet_bundle = funclet.map(|funclet| funclet.bundle());
        let mut bundles: SmallVec<[_; 2]> = SmallVec::new();
        if let Some(funclet_bundle) = funclet_bundle {
            bundles.push(funclet_bundle);
        }

        // Emit CFI pointer type membership test
        self.cfi_type_test(fn_attrs, fn_abi, instance, llfn);

        // Emit KCFI operand bundle
        let kcfi_bundle = self.kcfi_operand_bundle(fn_attrs, fn_abi, instance, llfn);
        if let Some(kcfi_bundle) = kcfi_bundle.as_deref() {
            bundles.push(kcfi_bundle);
        }

        let call = unsafe {
            llvm::LLVMBuildCallWithOperandBundles(
                self.llbuilder,
                llty,
                llfn,
                args.as_ptr() as *const &llvm::Value,
                args.len() as c_uint,
                bundles.as_ptr(),
                bundles.len() as c_uint,
                c"".as_ptr(),
            )
        };
        if let Some(fn_abi) = fn_abi {
            fn_abi.apply_attrs_callsite(self, call);
        }
        call
    }

    fn zext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn apply_attrs_to_cleanup_callsite(&mut self, llret: &'ll Value) {
        // Cleanup is always the cold path.
        let cold_inline = llvm::AttributeKind::Cold.create_attr(self.llcx);
        attributes::apply_to_callsite(llret, llvm::AttributePlace::Function, &[cold_inline]);
    }
}

impl<'ll> StaticBuilderMethods for Builder<'_, 'll, '_> {
    fn get_static(&mut self, def_id: DefId) -> &'ll Value {
        // Forward to the `get_static` method of `CodegenCx`
        let s = self.cx().get_static(def_id);
        // Cast to default address space if globals are in a different addrspace
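        // (Pointers to globals all need to be in the same address space for
        // correct compilation; on targets such as amdgpu, where globals do not
        // live in the default address space, this emits a constant
        // `addrspacecast` before the value is used.)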
        self.cx().const_pointercast(s, self.type_ptr())
    }
}

impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
    pub(crate) fn llfn(&self) -> &'ll Value {
        unsafe { llvm::LLVMGetBasicBlockParent(self.llbb()) }
    }
}

impl<'a, 'll, CX: Borrow<SCx<'ll>>> GenericBuilder<'a, 'll, CX> {
    fn position_at_start(&mut self, llbb: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMRustPositionBuilderAtStart(self.llbuilder, llbb);
        }
    }
}

impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
    fn align_metadata(&mut self, load: &'ll Value, align: Align) {
        unsafe {
            let md = [llvm::LLVMValueAsMetadata(self.cx.const_u64(align.bytes()))];
            let md = llvm::LLVMMDNodeInContext2(self.cx.llcx, md.as_ptr(), md.len());
            self.set_metadata(load, llvm::MD_align, md);
        }
    }

    fn noundef_metadata(&mut self, load: &'ll Value) {
        unsafe {
            let md = llvm::LLVMMDNodeInContext2(self.cx.llcx, ptr::null(), 0);
            self.set_metadata(load, llvm::MD_noundef, md);
        }
    }

    pub(crate) fn set_unpredictable(&mut self, inst: &'ll Value) {
        unsafe {
            let md = llvm::LLVMMDNodeInContext2(self.cx.llcx, ptr::null(), 0);
            self.set_metadata(inst, llvm::MD_unpredictable, md);
        }
    }
}

impl<'a, 'll, CX: Borrow<SCx<'ll>>> GenericBuilder<'a, 'll, CX> {
    pub(crate) fn minnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildMinNum(self.llbuilder, lhs, rhs) }
    }

    pub(crate) fn maxnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildMaxNum(self.llbuilder, lhs, rhs) }
    }

    pub(crate) fn insert_element(
        &mut self,
        vec: &'ll Value,
        elt: &'ll Value,
        idx: &'ll Value,
    ) -> &'ll Value {
        unsafe { llvm::LLVMBuildInsertElement(self.llbuilder, vec, elt, idx, UNNAMED) }
    }

    pub(crate) fn shuffle_vector(
        &mut self,
        v1: &'ll Value,
        v2: &'ll Value,
        mask: &'ll Value,
    ) -> &'ll Value {
        unsafe { llvm::LLVMBuildShuffleVector(self.llbuilder, v1, v2, mask, UNNAMED) }
    }

    pub(crate) fn vector_reduce_fadd(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src) }
    }
    pub(crate) fn vector_reduce_fmul(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src) }
    }
    pub(crate) fn vector_reduce_fadd_reassoc(
        &mut self,
        acc: &'ll Value,
        src: &'ll Value,
    ) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src);
            llvm::LLVMRustSetAllowReassoc(instr);
            instr
        }
    }
    pub(crate) fn vector_reduce_fmul_reassoc(
        &mut self,
        acc: &'ll Value,
        src: &'ll Value,
    ) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src);
            llvm::LLVMRustSetAllowReassoc(instr);
            instr
        }
    }
    pub(crate) fn vector_reduce_add(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceAdd(self.llbuilder, src) }
    }
    pub(crate) fn vector_reduce_mul(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceMul(self.llbuilder, src) }
    }
    pub(crate) fn vector_reduce_and(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceAnd(self.llbuilder, src) }
    }
    pub(crate) fn vector_reduce_or(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceOr(self.llbuilder, src) }
    }
    pub(crate) fn vector_reduce_xor(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceXor(self.llbuilder, src) }
    }
    pub(crate) fn vector_reduce_fmin(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe {
            llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ false)
        }
    }
    pub(crate) fn vector_reduce_fmax(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe {
            llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ false)
        }
    }
    pub(crate) fn vector_reduce_min(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceMin(self.llbuilder, src, is_signed) }
    }
    pub(crate) fn vector_reduce_max(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceMax(self.llbuilder, src, is_signed) }
    }

    pub(crate) fn add_clause(&mut self, landing_pad: &'ll Value, clause: &'ll Value) {
        unsafe {
            llvm::LLVMAddClause(landing_pad, clause);
        }
    }

    pub(crate) fn catch_ret(
        &mut self,
        funclet: &Funclet<'ll>,
        unwind: &'ll BasicBlock,
    ) -> &'ll Value {
        let ret = unsafe { llvm::LLVMBuildCatchRet(self.llbuilder, funclet.cleanuppad(), unwind) };
        ret.expect("LLVM does not have support for catchret")
    }
}

impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
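    // Verifies that each argument's LLVM type matches the callee's parameter
    // types, injecting a bitcast for any mismatch. Returns the arguments
    // unchanged (borrowed) when everything already lines up.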
    fn check_call<'b>(
        &mut self,
        typ: &str,
        fn_ty: &'ll Type,
        llfn: &'ll Value,
        args: &'b [&'ll Value],
    ) -> Cow<'b, [&'ll Value]> {
        assert!(
            self.cx.type_kind(fn_ty) == TypeKind::Function,
            "builder::{typ} not passed a function, but {fn_ty:?}"
        );

        let param_tys = self.cx.func_params_types(fn_ty);

        let all_args_match = iter::zip(&param_tys, args.iter().map(|&v| self.val_ty(v)))
            .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty);

        if all_args_match {
            return Cow::Borrowed(args);
        }

        let casted_args: Vec<_> = iter::zip(param_tys, args)
            .enumerate()
            .map(|(i, (expected_ty, &actual_val))| {
                let actual_ty = self.val_ty(actual_val);
                if expected_ty != actual_ty {
                    debug!(
                        "type mismatch in function call of {:?}. \
                            Expected {:?} for param {}, got {:?}; injecting bitcast",
                        llfn, expected_ty, i, actual_ty
                    );
                    self.bitcast(actual_val, expected_ty)
                } else {
                    actual_val
                }
            })
            .collect();

        Cow::Owned(casted_args)
    }
}

impl<'a, 'll, CX: Borrow<SCx<'ll>>> GenericBuilder<'a, 'll, CX> {
    pub(crate) fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, UNNAMED) }
    }
}

impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
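    // Convenience wrapper for calling one of the intrinsics declared on the
    // codegen context: no fn attrs, fn ABI, funclet, or instance are attached
    // to the call site.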
    pub(crate) fn call_intrinsic(&mut self, intrinsic: &str, args: &[&'ll Value]) -> &'ll Value {
        let (ty, f) = self.cx.get_intrinsic(intrinsic);
        self.call(ty, None, None, f, args, None, None)
    }

    fn call_lifetime_intrinsic(&mut self, intrinsic: &str, ptr: &'ll Value, size: Size) {
        let size = size.bytes();
        if size == 0 {
            return;
        }

        if !self.cx().sess().emit_lifetime_markers() {
            return;
        }

        self.call_intrinsic(intrinsic, &[self.cx.const_u64(size), ptr]);
    }
}

impl<'a, 'll, CX: Borrow<SCx<'ll>>> GenericBuilder<'a, 'll, CX> {
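    // Builds a phi node from parallel slices of incoming values and their
    // predecessor basic blocks; the two slices must have the same length.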
    pub(crate) fn phi(
        &mut self,
        ty: &'ll Type,
        vals: &[&'ll Value],
        bbs: &[&'ll BasicBlock],
    ) -> &'ll Value {
        assert_eq!(vals.len(), bbs.len());
        let phi = unsafe { llvm::LLVMBuildPhi(self.llbuilder, ty, UNNAMED) };
        unsafe {
            llvm::LLVMAddIncoming(phi, vals.as_ptr(), bbs.as_ptr(), vals.len() as c_uint);
            phi
        }
    }

    fn add_incoming_to_phi(&mut self, phi: &'ll Value, val: &'ll Value, bb: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint);
        }
    }
}

impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
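    // Emits a saturating float-to-int cast via LLVM's `llvm.fpto{s,u}i.sat.*`
    // intrinsics, constructing the overloaded name from the operand widths,
    // e.g. `llvm.fptosi.sat.i32.f64`, or `llvm.fptoui.sat.v4i32.v4f32` for the
    // vector form.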
    fn fptoint_sat(&mut self, signed: bool, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        let src_ty = self.cx.val_ty(val);
        let (float_ty, int_ty, vector_length) = if self.cx.type_kind(src_ty) == TypeKind::Vector {
            assert_eq!(self.cx.vector_length(src_ty), self.cx.vector_length(dest_ty));
            (
                self.cx.element_type(src_ty),
                self.cx.element_type(dest_ty),
                Some(self.cx.vector_length(src_ty)),
            )
        } else {
            (src_ty, dest_ty, None)
        };
        let float_width = self.cx.float_width(float_ty);
        let int_width = self.cx.int_width(int_ty);

        let instr = if signed { "fptosi" } else { "fptoui" };
        let name = if let Some(vector_length) = vector_length {
            format!("llvm.{instr}.sat.v{vector_length}i{int_width}.v{vector_length}f{float_width}")
        } else {
            format!("llvm.{instr}.sat.i{int_width}.f{float_width}")
        };
        let f = self.declare_cfn(&name, llvm::UnnamedAddr::No, self.type_func(&[src_ty], dest_ty));
        self.call(self.type_func(&[src_ty], dest_ty), None, None, f, &[val], None, None)
    }

    pub(crate) fn landing_pad(
        &mut self,
        ty: &'ll Type,
        pers_fn: &'ll Value,
        num_clauses: usize,
    ) -> &'ll Value {
        // Use LLVMSetPersonalityFn to set the personality. It supports arbitrary Consts,
        // while LLVMBuildLandingPad requires the argument to be a Function (as of LLVM 12).
        // The personality lives on the parent function anyway.
        self.set_personality_fn(pers_fn);
        unsafe {
            llvm::LLVMBuildLandingPad(self.llbuilder, ty, None, num_clauses as c_uint, UNNAMED)
        }
    }

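    // Like `call`, but emits LLVM's `callbr` instruction, which may transfer
    // control to the default destination or to one of the listed indirect
    // destinations (used, for example, for inline assembly with `goto` labels).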
    pub(crate) fn callbr(
        &mut self,
        llty: &'ll Type,
        fn_attrs: Option<&CodegenFnAttrs>,
        fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
        llfn: &'ll Value,
        args: &[&'ll Value],
        default_dest: &'ll BasicBlock,
        indirect_dest: &[&'ll BasicBlock],
        funclet: Option<&Funclet<'ll>>,
        instance: Option<Instance<'tcx>>,
    ) -> &'ll Value {
        debug!("callbr {:?} with args ({:?})", llfn, args);

        let args = self.check_call("callbr", llty, llfn, args);
        let funclet_bundle = funclet.map(|funclet| funclet.bundle());
        let mut bundles: SmallVec<[_; 2]> = SmallVec::new();
        if let Some(funclet_bundle) = funclet_bundle {
            bundles.push(funclet_bundle);
        }

        // Emit CFI pointer type membership test
        self.cfi_type_test(fn_attrs, fn_abi, instance, llfn);

        // Emit KCFI operand bundle
        let kcfi_bundle = self.kcfi_operand_bundle(fn_attrs, fn_abi, instance, llfn);
        if let Some(kcfi_bundle) = kcfi_bundle.as_deref() {
            bundles.push(kcfi_bundle);
        }

        let callbr = unsafe {
            llvm::LLVMBuildCallBr(
                self.llbuilder,
                llty,
                llfn,
                default_dest,
                indirect_dest.as_ptr(),
                indirect_dest.len() as c_uint,
                args.as_ptr(),
                args.len() as c_uint,
                bundles.as_ptr(),
                bundles.len() as c_uint,
                UNNAMED,
            )
        };
        if let Some(fn_abi) = fn_abi {
            fn_abi.apply_attrs_callsite(self, callbr);
        }
        callbr
    }

    // Emits CFI pointer type membership tests.
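    // The emitted check branches on the type membership test: the
    // "type_test.pass" block continues normal control flow, while the
    // "type_test.fail" block aborts, so an indirect call through a pointer of
    // the wrong type traps at runtime instead of proceeding.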
fn cfi_type_test(
|
|
|
|
&mut self,
|
|
|
|
fn_attrs: Option<&CodegenFnAttrs>,
|
|
|
|
fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
|
2024-03-15 19:45:46 +00:00
|
|
|
instance: Option<Instance<'tcx>>,
|
2022-12-13 06:42:44 +00:00
|
|
|
llfn: &'ll Value,
|
|
|
|
) {
|
2023-07-11 23:19:42 +00:00
|
|
|
let is_indirect_call = unsafe { llvm::LLVMRustIsNonGVFunctionPointerTy(llfn) };
|
|
|
|
if self.tcx.sess.is_sanitizer_cfi_enabled()
|
|
|
|
&& let Some(fn_abi) = fn_abi
|
|
|
|
&& is_indirect_call
|
|
|
|
{
|
|
|
|
if let Some(fn_attrs) = fn_attrs
|
|
|
|
&& fn_attrs.no_sanitize.contains(SanitizerSet::CFI)
|
2023-10-13 08:58:33 +00:00
|
|
|
{
|
2022-12-13 06:42:44 +00:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2024-04-08 02:35:49 +00:00
|
|
|
let mut options = cfi::TypeIdOptions::empty();
|
2022-12-13 06:42:44 +00:00
|
|
|
if self.tcx.sess.is_sanitizer_cfi_generalize_pointers_enabled() {
|
2024-04-08 02:35:49 +00:00
|
|
|
options.insert(cfi::TypeIdOptions::GENERALIZE_POINTERS);
|
2022-12-13 06:42:44 +00:00
|
|
|
}
|
|
|
|
if self.tcx.sess.is_sanitizer_cfi_normalize_integers_enabled() {
|
2024-04-08 02:35:49 +00:00
|
|
|
options.insert(cfi::TypeIdOptions::NORMALIZE_INTEGERS);
|
2022-12-13 06:42:44 +00:00
|
|
|
}
|
|
|
|
|
2024-03-15 19:45:46 +00:00
|
|
|
let typeid = if let Some(instance) = instance {
|
2024-04-08 02:35:49 +00:00
|
|
|
cfi::typeid_for_instance(self.tcx, instance, options)
|
2024-03-15 19:45:46 +00:00
|
|
|
} else {
|
2024-04-08 02:35:49 +00:00
|
|
|
cfi::typeid_for_fnabi(self.tcx, fn_abi, options)
|
2024-03-15 19:45:46 +00:00
|
|
|
};
|
2022-12-13 06:42:44 +00:00
|
|
|
let typeid_metadata = self.cx.typeid_metadata(typeid).unwrap();
|
2024-11-08 08:51:19 +00:00
|
|
|
let dbg_loc = self.get_dbg_loc();
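
            // The membership test expands to a branch diamond roughly like the
            // following (an illustrative sketch; the exact IR depends on the
            // LLVM version and options):
            //
            //   %cond = call i1 @llvm.type.test(ptr %llfn, metadata !"<typeid>")
            //   br i1 %cond, label %type_test.pass, label %type_test.fail
            // type_test.fail:
            //   call void @llvm.trap()
            //   unreachable
            // type_test.pass:
            //   ; the indirect call proceeds here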
            // Test whether the function pointer is associated with the type identifier.
            let cond = self.type_test(llfn, typeid_metadata);
            let bb_pass = self.append_sibling_block("type_test.pass");
            let bb_fail = self.append_sibling_block("type_test.fail");
            self.cond_br(cond, bb_pass, bb_fail);

            self.switch_to_block(bb_fail);
            if let Some(dbg_loc) = dbg_loc {
                self.set_dbg_loc(dbg_loc);
            }
            self.abort();
            self.unreachable();

            self.switch_to_block(bb_pass);
            if let Some(dbg_loc) = dbg_loc {
                self.set_dbg_loc(dbg_loc);
            }
        }
    }

    // Emits KCFI operand bundles.
    fn kcfi_operand_bundle(
        &mut self,
        fn_attrs: Option<&CodegenFnAttrs>,
        fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
        instance: Option<Instance<'tcx>>,
        llfn: &'ll Value,
    ) -> Option<llvm::OperandBundleOwned<'ll>> {
        let is_indirect_call = unsafe { llvm::LLVMRustIsNonGVFunctionPointerTy(llfn) };
        let kcfi_bundle = if self.tcx.sess.is_sanitizer_kcfi_enabled()
            && let Some(fn_abi) = fn_abi
            && is_indirect_call
        {
            if let Some(fn_attrs) = fn_attrs
                && fn_attrs.no_sanitize.contains(SanitizerSet::KCFI)
            {
                return None;
            }

            let mut options = kcfi::TypeIdOptions::empty();
            if self.tcx.sess.is_sanitizer_cfi_generalize_pointers_enabled() {
                options.insert(kcfi::TypeIdOptions::GENERALIZE_POINTERS);
            }
            if self.tcx.sess.is_sanitizer_cfi_normalize_integers_enabled() {
                options.insert(kcfi::TypeIdOptions::NORMALIZE_INTEGERS);
            }

            let kcfi_typeid = if let Some(instance) = instance {
                kcfi::typeid_for_instance(self.tcx, instance, options)
            } else {
                kcfi::typeid_for_fnabi(self.tcx, fn_abi, options)
            };
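
            // The operand bundle attaches the expected type hash to the call
            // site; LLVM's KCFI lowering turns it into a check against the
            // hash emitted before the callee's entry, e.g. (illustrative):
            //
            //   call void %fnptr(...) [ "kcfi"(i32 <kcfi_typeid>) ]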
            Some(llvm::OperandBundleOwned::new("kcfi", &[self.const_u32(kcfi_typeid)]))
        } else {
            None
        };
        kcfi_bundle
    }

    /// Emits a call to `llvm.instrprof.increment`. Used by coverage instrumentation.
    #[instrument(level = "debug", skip(self))]
    pub(crate) fn instrprof_increment(
        &mut self,
        fn_name: &'ll Value,
        hash: &'ll Value,
        num_counters: &'ll Value,
        index: &'ll Value,
    ) {
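        // Emits (roughly):
        //   call void @llvm.instrprof.increment(ptr <fn_name>, i64 <hash>, i32 <num_counters>, i32 <index>)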
        self.call_intrinsic("llvm.instrprof.increment", &[fn_name, hash, num_counters, index]);
    }

    /// Emits a call to `llvm.instrprof.mcdc.parameters`.
    ///
    /// This doesn't produce any code directly, but is used as input by
    /// the LLVM pass that handles coverage instrumentation.
    ///
    /// (See clang's [`CodeGenPGO::emitMCDCParameters`] for comparison.)
    ///
    /// [`CodeGenPGO::emitMCDCParameters`]:
    /// https://github.com/rust-lang/llvm-project/blob/5399a24/clang/lib/CodeGen/CodeGenPGO.cpp#L1124
    #[instrument(level = "debug", skip(self))]
    pub(crate) fn mcdc_parameters(
        &mut self,
        fn_name: &'ll Value,
        hash: &'ll Value,
        bitmap_bits: &'ll Value,
    ) {
        assert!(
            crate::llvm_util::get_version() >= (19, 0, 0),
            "MCDC intrinsics require LLVM 19 or later"
        );
        self.call_intrinsic("llvm.instrprof.mcdc.parameters", &[fn_name, hash, bitmap_bits]);
    }
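
    /// Emits a call to `llvm.instrprof.mcdc.tvbitmap.update`, recording the
    /// test vector accumulated in `mcdc_temp` into the global test-vector
    /// bitmap for the decision whose region starts at `bitmap_index`.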
    #[instrument(level = "debug", skip(self))]
    pub(crate) fn mcdc_tvbitmap_update(
        &mut self,
        fn_name: &'ll Value,
        hash: &'ll Value,
        bitmap_index: &'ll Value,
        mcdc_temp: &'ll Value,
    ) {
        assert!(
            crate::llvm_util::get_version() >= (19, 0, 0),
            "MCDC intrinsics require LLVM 19 or later"
        );
        let args = &[fn_name, hash, bitmap_index, mcdc_temp];
        self.call_intrinsic("llvm.instrprof.mcdc.tvbitmap.update", args);
    }
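
    /// Resets the MC/DC temporary (the running test-vector index for the
    /// current decision) to zero.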
    #[instrument(level = "debug", skip(self))]
    pub(crate) fn mcdc_condbitmap_reset(&mut self, mcdc_temp: &'ll Value) {
        self.store(self.const_i32(0), mcdc_temp, self.tcx.data_layout.i32_align.abi);
    }
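
    /// Adds `cond_index` to the running test-vector index in `mcdc_temp`,
    /// folding this condition's contribution into the decision's test vector.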
    #[instrument(level = "debug", skip(self))]
    pub(crate) fn mcdc_condbitmap_update(&mut self, cond_index: &'ll Value, mcdc_temp: &'ll Value) {
        assert!(
            crate::llvm_util::get_version() >= (19, 0, 0),
            "MCDC intrinsics require LLVM 19 or later"
        );
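        // tv_index += cond_index: the temporary is an i32 slot, so this is a
        // plain load/add/store.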
        let align = self.tcx.data_layout.i32_align.abi;
        let current_tv_index = self.load(self.cx.type_i32(), mcdc_temp, align);
        let new_tv_index = self.add(current_tv_index, cond_index);
        self.store(new_tv_index, mcdc_temp, align);
    }
}