use crate::common::Funclet;
use crate::context::CodegenCx;
use crate::llvm::{self, BasicBlock, False};
use crate::llvm::{AtomicOrdering, AtomicRmwBinOp, SynchronizationScope};
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;
use libc::{c_char, c_uint};
use rustc_codegen_ssa::common::{IntPredicate, RealPredicate, TypeKind};
use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::MemFlags;
use rustc_data_structures::const_cstr;
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_hir::def_id::DefId;
use rustc_middle::ty::layout::TyAndLayout;
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_span::{sym, Span};
use rustc_target::abi::{self, Align, Size};
use rustc_target::spec::{HasTargetSpec, Target};
use std::borrow::Cow;
use std::ffi::CStr;
use std::ops::{Deref, Range};
use std::ptr;
use tracing::debug;
// All Builders must have an llfn associated with them
#[must_use]
pub struct Builder<'a, 'll, 'tcx> {
    pub llbuilder: &'ll mut llvm::Builder<'ll>,
    pub cx: &'a CodegenCx<'ll, 'tcx>,
}

impl Drop for Builder<'a, 'll, 'tcx> {
    fn drop(&mut self) {
        unsafe {
            llvm::LLVMDisposeBuilder(&mut *(self.llbuilder as *mut _));
        }
    }
}

// FIXME(eddyb) use a checked constructor when they become `const fn`.
const EMPTY_C_STR: &CStr = unsafe { CStr::from_bytes_with_nul_unchecked(b"\0") };

/// Empty string, to be used where LLVM expects an instruction name, indicating
/// that the instruction is to be left unnamed (i.e. numbered, in textual IR).
// FIXME(eddyb) pass `&CStr` directly to FFI once it's a thin pointer.
const UNNAMED: *const c_char = EMPTY_C_STR.as_ptr();

impl BackendTypes for Builder<'_, 'll, 'tcx> {
    type Value = <CodegenCx<'ll, 'tcx> as BackendTypes>::Value;
    type Function = <CodegenCx<'ll, 'tcx> as BackendTypes>::Function;
    type BasicBlock = <CodegenCx<'ll, 'tcx> as BackendTypes>::BasicBlock;
    type Type = <CodegenCx<'ll, 'tcx> as BackendTypes>::Type;
    type Funclet = <CodegenCx<'ll, 'tcx> as BackendTypes>::Funclet;

    type DIScope = <CodegenCx<'ll, 'tcx> as BackendTypes>::DIScope;
    type DIVariable = <CodegenCx<'ll, 'tcx> as BackendTypes>::DIVariable;
}
impl abi::HasDataLayout for Builder<'_, '_, '_> {
    fn data_layout(&self) -> &abi::TargetDataLayout {
        self.cx.data_layout()
    }
}

impl ty::layout::HasTyCtxt<'tcx> for Builder<'_, '_, 'tcx> {
    fn tcx(&self) -> TyCtxt<'tcx> {
        self.cx.tcx
    }
}

impl ty::layout::HasParamEnv<'tcx> for Builder<'_, '_, 'tcx> {
    fn param_env(&self) -> ty::ParamEnv<'tcx> {
        self.cx.param_env()
    }
}

impl HasTargetSpec for Builder<'_, '_, 'tcx> {
    fn target_spec(&self) -> &Target {
        &self.cx.target_spec()
    }
}

impl abi::LayoutOf for Builder<'_, '_, 'tcx> {
    type Ty = Ty<'tcx>;
    type TyAndLayout = TyAndLayout<'tcx>;

    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
        self.cx.layout_of(ty)
    }
}

impl Deref for Builder<'_, 'll, 'tcx> {
    type Target = CodegenCx<'ll, 'tcx>;

    fn deref(&self) -> &Self::Target {
        self.cx
    }
}

impl HasCodegen<'tcx> for Builder<'_, 'll, 'tcx> {
    type CodegenCx = CodegenCx<'ll, 'tcx>;
}
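// Generates a trivial forwarding method for each `name(args) => LLVMBuildFoo`
// pair listed in the invocation below: every generated method simply emits the
// corresponding unnamed LLVM instruction at the builder's current position.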
macro_rules! builder_methods_for_value_instructions {
    ($($name:ident($($arg:ident),*) => $llvm_capi:ident),+ $(,)?) => {
        $(fn $name(&mut self, $($arg: &'ll Value),*) -> &'ll Value {
            unsafe {
                llvm::$llvm_capi(self.llbuilder, $($arg,)* UNNAMED)
            }
        })+
    }
}
impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
    fn new_block<'b>(cx: &'a CodegenCx<'ll, 'tcx>, llfn: &'ll Value, name: &'b str) -> Self {
        let mut bx = Builder::with_cx(cx);
        let llbb = unsafe {
            let name = SmallCStr::new(name);
            llvm::LLVMAppendBasicBlockInContext(cx.llcx, llfn, name.as_ptr())
        };
        bx.position_at_end(llbb);
        bx
    }

    fn with_cx(cx: &'a CodegenCx<'ll, 'tcx>) -> Self {
        // Create a fresh builder from the crate context.
        let llbuilder = unsafe { llvm::LLVMCreateBuilderInContext(cx.llcx) };
        Builder { llbuilder, cx }
    }

    fn build_sibling_block(&self, name: &str) -> Self {
        Builder::new_block(self.cx, self.llfn(), name)
    }

    fn llbb(&self) -> &'ll BasicBlock {
        unsafe { llvm::LLVMGetInsertBlock(self.llbuilder) }
    }

    fn set_span(&mut self, _span: Span) {}

    fn position_at_end(&mut self, llbb: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMPositionBuilderAtEnd(self.llbuilder, llbb);
        }
    }

    fn ret_void(&mut self) {
        unsafe {
            llvm::LLVMBuildRetVoid(self.llbuilder);
        }
    }

    fn ret(&mut self, v: &'ll Value) {
        unsafe {
            llvm::LLVMBuildRet(self.llbuilder, v);
        }
    }

    fn br(&mut self, dest: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMBuildBr(self.llbuilder, dest);
        }
    }

    fn cond_br(
        &mut self,
        cond: &'ll Value,
        then_llbb: &'ll BasicBlock,
        else_llbb: &'ll BasicBlock,
    ) {
        unsafe {
            llvm::LLVMBuildCondBr(self.llbuilder, cond, then_llbb, else_llbb);
        }
    }

    fn switch(
        &mut self,
        v: &'ll Value,
        else_llbb: &'ll BasicBlock,
        cases: impl ExactSizeIterator<Item = (u128, &'ll BasicBlock)>,
    ) {
        let switch =
            unsafe { llvm::LLVMBuildSwitch(self.llbuilder, v, else_llbb, cases.len() as c_uint) };
        for (on_val, dest) in cases {
            let on_val = self.const_uint_big(self.val_ty(v), on_val);
            unsafe { llvm::LLVMAddCase(switch, on_val, dest) }
        }
    }
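    // Emits an LLVM `invoke`: control transfers to `then` on a normal return
    // and to `catch` when the callee unwinds. Arguments are first validated
    // against the callee's signature via `check_call`.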
    fn invoke(
        &mut self,
        llfn: &'ll Value,
        args: &[&'ll Value],
        then: &'ll BasicBlock,
        catch: &'ll BasicBlock,
        funclet: Option<&Funclet<'ll>>,
    ) -> &'ll Value {
        debug!("invoke {:?} with args ({:?})", llfn, args);

        let args = self.check_call("invoke", llfn, args);
        let bundle = funclet.map(|funclet| funclet.bundle());
        let bundle = bundle.as_ref().map(|b| &*b.raw);

        unsafe {
            llvm::LLVMRustBuildInvoke(
                self.llbuilder,
                llfn,
                args.as_ptr(),
                args.len() as c_uint,
                then,
                catch,
                bundle,
                UNNAMED,
            )
        }
    }

    fn unreachable(&mut self) {
        unsafe {
            llvm::LLVMBuildUnreachable(self.llbuilder);
        }
    }

    builder_methods_for_value_instructions! {
        add(a, b) => LLVMBuildAdd,
        fadd(a, b) => LLVMBuildFAdd,
        sub(a, b) => LLVMBuildSub,
        fsub(a, b) => LLVMBuildFSub,
        mul(a, b) => LLVMBuildMul,
        fmul(a, b) => LLVMBuildFMul,
        udiv(a, b) => LLVMBuildUDiv,
        exactudiv(a, b) => LLVMBuildExactUDiv,
        sdiv(a, b) => LLVMBuildSDiv,
        exactsdiv(a, b) => LLVMBuildExactSDiv,
        fdiv(a, b) => LLVMBuildFDiv,
        urem(a, b) => LLVMBuildURem,
        srem(a, b) => LLVMBuildSRem,
        frem(a, b) => LLVMBuildFRem,
        shl(a, b) => LLVMBuildShl,
        lshr(a, b) => LLVMBuildLShr,
        ashr(a, b) => LLVMBuildAShr,
        and(a, b) => LLVMBuildAnd,
        or(a, b) => LLVMBuildOr,
        xor(a, b) => LLVMBuildXor,
        neg(x) => LLVMBuildNeg,
        fneg(x) => LLVMBuildFNeg,
        not(x) => LLVMBuildNot,
        unchecked_sadd(x, y) => LLVMBuildNSWAdd,
        unchecked_uadd(x, y) => LLVMBuildNUWAdd,
        unchecked_ssub(x, y) => LLVMBuildNSWSub,
        unchecked_usub(x, y) => LLVMBuildNUWSub,
        unchecked_smul(x, y) => LLVMBuildNSWMul,
        unchecked_umul(x, y) => LLVMBuildNUWMul,
    }
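    // The `*_fast` variants below additionally set LLVM's "unsafe algebra"
    // (fast-math) flags on the emitted floating-point instruction.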
    fn fadd_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, UNNAMED);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }

    fn fsub_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, UNNAMED);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }

    fn fmul_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, UNNAMED);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }

    fn fdiv_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, UNNAMED);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }

    fn frem_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, UNNAMED);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }
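    // Checked integer arithmetic is lowered to the
    // `llvm.{s,u}{add,sub,mul}.with.overflow.*` intrinsics, which return an
    // aggregate of the wrapped result and an overflow flag; the two fields are
    // unpacked with `extract_value` below.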
    fn checked_binop(
        &mut self,
        oop: OverflowOp,
        ty: Ty<'_>,
        lhs: Self::Value,
        rhs: Self::Value,
    ) -> (Self::Value, Self::Value) {
        use rustc_ast::IntTy::*;
        use rustc_ast::UintTy::*;
        use rustc_middle::ty::{Int, Uint};

        let new_kind = match ty.kind() {
            Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.pointer_width)),
            Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.pointer_width)),
            t @ (Uint(_) | Int(_)) => t.clone(),
            _ => panic!("tried to get overflow intrinsic for op applied to non-int type"),
        };

        let name = match oop {
            OverflowOp::Add => match new_kind {
                Int(I8) => "llvm.sadd.with.overflow.i8",
                Int(I16) => "llvm.sadd.with.overflow.i16",
                Int(I32) => "llvm.sadd.with.overflow.i32",
                Int(I64) => "llvm.sadd.with.overflow.i64",
                Int(I128) => "llvm.sadd.with.overflow.i128",

                Uint(U8) => "llvm.uadd.with.overflow.i8",
                Uint(U16) => "llvm.uadd.with.overflow.i16",
                Uint(U32) => "llvm.uadd.with.overflow.i32",
                Uint(U64) => "llvm.uadd.with.overflow.i64",
                Uint(U128) => "llvm.uadd.with.overflow.i128",

                _ => unreachable!(),
            },
            OverflowOp::Sub => match new_kind {
                Int(I8) => "llvm.ssub.with.overflow.i8",
                Int(I16) => "llvm.ssub.with.overflow.i16",
                Int(I32) => "llvm.ssub.with.overflow.i32",
                Int(I64) => "llvm.ssub.with.overflow.i64",
                Int(I128) => "llvm.ssub.with.overflow.i128",

                Uint(U8) => "llvm.usub.with.overflow.i8",
                Uint(U16) => "llvm.usub.with.overflow.i16",
                Uint(U32) => "llvm.usub.with.overflow.i32",
                Uint(U64) => "llvm.usub.with.overflow.i64",
                Uint(U128) => "llvm.usub.with.overflow.i128",

                _ => unreachable!(),
            },
            OverflowOp::Mul => match new_kind {
                Int(I8) => "llvm.smul.with.overflow.i8",
                Int(I16) => "llvm.smul.with.overflow.i16",
                Int(I32) => "llvm.smul.with.overflow.i32",
                Int(I64) => "llvm.smul.with.overflow.i64",
                Int(I128) => "llvm.smul.with.overflow.i128",

                Uint(U8) => "llvm.umul.with.overflow.i8",
                Uint(U16) => "llvm.umul.with.overflow.i16",
                Uint(U32) => "llvm.umul.with.overflow.i32",
                Uint(U64) => "llvm.umul.with.overflow.i64",
                Uint(U128) => "llvm.umul.with.overflow.i128",

                _ => unreachable!(),
            },
        };

        let intrinsic = self.get_intrinsic(&name);
        let res = self.call(intrinsic, &[lhs, rhs], None);
        (self.extract_value(res, 0), self.extract_value(res, 1))
    }

    fn from_immediate(&mut self, val: Self::Value) -> Self::Value {
        if self.cx().val_ty(val) == self.cx().type_i1() {
            self.zext(val, self.cx().type_i8())
        } else {
            val
        }
    }

    fn to_immediate_scalar(&mut self, val: Self::Value, scalar: &abi::Scalar) -> Self::Value {
        if scalar.is_bool() {
            return self.trunc(val, self.cx().type_i1());
        }
        val
    }
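    // `alloca` positions a fresh builder at the start of the function's entry
    // block so that all allocas are emitted there, ahead of any other code.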
    fn alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value {
        let mut bx = Builder::with_cx(self.cx);
        bx.position_at_start(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn()) });
        bx.dynamic_alloca(ty, align)
    }

    fn dynamic_alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value {
        unsafe {
            let alloca = llvm::LLVMBuildAlloca(self.llbuilder, ty, UNNAMED);
            llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
            alloca
        }
    }

    fn array_alloca(&mut self, ty: &'ll Type, len: &'ll Value, align: Align) -> &'ll Value {
        unsafe {
            let alloca = llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, UNNAMED);
            llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
            alloca
        }
    }

    fn load(&mut self, ptr: &'ll Value, align: Align) -> &'ll Value {
        unsafe {
            let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, UNNAMED);
            llvm::LLVMSetAlignment(load, align.bytes() as c_uint);
            load
        }
    }

    fn volatile_load(&mut self, ptr: &'ll Value) -> &'ll Value {
        unsafe {
            let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, UNNAMED);
            llvm::LLVMSetVolatile(load, llvm::True);
            load
        }
    }

    fn atomic_load(
        &mut self,
        ptr: &'ll Value,
        order: rustc_codegen_ssa::common::AtomicOrdering,
        size: Size,
    ) -> &'ll Value {
        unsafe {
            let load = llvm::LLVMRustBuildAtomicLoad(
                self.llbuilder,
                ptr,
                UNNAMED,
                AtomicOrdering::from_generic(order),
            );
            // LLVM requires the alignment of atomic loads to be at least the size of the type.
            llvm::LLVMSetAlignment(load, size.bytes() as c_uint);
            load
        }
    }
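    // Loads a `PlaceRef` into an `OperandRef`, choosing the operand
    // representation from the place's layout: `Ref` for unsized or non-immediate
    // data, `Immediate` for scalars, and `Pair` for scalar pairs.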
    fn load_operand(&mut self, place: PlaceRef<'tcx, &'ll Value>) -> OperandRef<'tcx, &'ll Value> {
        debug!("PlaceRef::load: {:?}", place);

        assert_eq!(place.llextra.is_some(), place.layout.is_unsized());

        if place.layout.is_zst() {
            return OperandRef::new_zst(self, place.layout);
        }

        fn scalar_load_metadata<'a, 'll, 'tcx>(
            bx: &mut Builder<'a, 'll, 'tcx>,
            load: &'ll Value,
            scalar: &abi::Scalar,
        ) {
            let vr = scalar.valid_range.clone();
            match scalar.value {
                abi::Int(..) => {
                    let range = scalar.valid_range_exclusive(bx);
                    if range.start != range.end {
                        bx.range_metadata(load, range);
                    }
                }
                abi::Pointer if vr.start() < vr.end() && !vr.contains(&0) => {
                    bx.nonnull_metadata(load);
                }
                _ => {}
            }
        }

        let val = if let Some(llextra) = place.llextra {
            OperandValue::Ref(place.llval, Some(llextra), place.align)
        } else if place.layout.is_llvm_immediate() {
            let mut const_llval = None;
            unsafe {
                if let Some(global) = llvm::LLVMIsAGlobalVariable(place.llval) {
                    if llvm::LLVMIsGlobalConstant(global) == llvm::True {
                        const_llval = llvm::LLVMGetInitializer(global);
                    }
                }
            }
            let llval = const_llval.unwrap_or_else(|| {
                let load = self.load(place.llval, place.align);
                if let abi::Abi::Scalar(ref scalar) = place.layout.abi {
                    scalar_load_metadata(self, load, scalar);
                }
                load
            });
            OperandValue::Immediate(self.to_immediate(llval, place.layout))
        } else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi {
            let b_offset = a.value.size(self).align_to(b.value.align(self).abi);

            let mut load = |i, scalar: &abi::Scalar, align| {
                let llptr = self.struct_gep(place.llval, i as u64);
                let load = self.load(llptr, align);
                scalar_load_metadata(self, load, scalar);
                self.to_immediate_scalar(load, scalar)
            };

            OperandValue::Pair(
                load(0, a, place.align),
                load(1, b, place.align.restrict_for_offset(b_offset)),
            )
        } else {
            OperandValue::Ref(place.llval, None, place.align)
        };

        OperandRef { val, layout: place.layout }
    }
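    // Stores `cg_elem` into each of `count` consecutive array slots of `dest`
    // by emitting an explicit header/body/next loop over element pointers, and
    // returns the builder positioned in the loop-exit block.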
    fn write_operand_repeatedly(
        mut self,
        cg_elem: OperandRef<'tcx, &'ll Value>,
        count: u64,
        dest: PlaceRef<'tcx, &'ll Value>,
    ) -> Self {
        let zero = self.const_usize(0);
        let count = self.const_usize(count);
        let start = dest.project_index(&mut self, zero).llval;
        let end = dest.project_index(&mut self, count).llval;

        let mut header_bx = self.build_sibling_block("repeat_loop_header");
        let mut body_bx = self.build_sibling_block("repeat_loop_body");
        let next_bx = self.build_sibling_block("repeat_loop_next");

        self.br(header_bx.llbb());
        let current = header_bx.phi(self.val_ty(start), &[start], &[self.llbb()]);

        let keep_going = header_bx.icmp(IntPredicate::IntNE, current, end);
        header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb());

        let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
        cg_elem
            .val
            .store(&mut body_bx, PlaceRef::new_sized_aligned(current, cg_elem.layout, align));

        let next = body_bx.inbounds_gep(current, &[self.const_usize(1)]);
        body_bx.br(header_bx.llbb());
        header_bx.add_incoming_to_phi(current, next, body_bx.llbb());

        next_bx
    }

    fn range_metadata(&mut self, load: &'ll Value, range: Range<u128>) {
        if self.sess().target.arch == "amdgpu" {
            // amdgpu/LLVM does something weird and thinks an i64 value is
            // split into a v2i32, halving the bitwidth LLVM expects,
            // tripping an assertion. So, for now, just disable this
            // optimization.
            return;
        }

        unsafe {
            let llty = self.cx.val_ty(load);
            let v = [
                self.cx.const_uint_big(llty, range.start),
                self.cx.const_uint_big(llty, range.end),
            ];

            llvm::LLVMSetMetadata(
                load,
                llvm::MD_range as c_uint,
                llvm::LLVMMDNodeInContext(self.cx.llcx, v.as_ptr(), v.len() as c_uint),
            );
        }
    }

    fn nonnull_metadata(&mut self, load: &'ll Value) {
        unsafe {
            llvm::LLVMSetMetadata(
                load,
                llvm::MD_nonnull as c_uint,
                llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0),
            );
        }
    }

    fn store(&mut self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value {
        self.store_with_flags(val, ptr, align, MemFlags::empty())
    }
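    // `store_with_flags` applies the requested `MemFlags`: `UNALIGNED` forces
    // an alignment of 1, `VOLATILE` marks the store volatile, and `NONTEMPORAL`
    // attaches the `!nontemporal` metadata node LLVM expects.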
    fn store_with_flags(
        &mut self,
        val: &'ll Value,
        ptr: &'ll Value,
        align: Align,
        flags: MemFlags,
    ) -> &'ll Value {
        debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags);
        let ptr = self.check_store(val, ptr);
        unsafe {
            let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr);
            let align =
                if flags.contains(MemFlags::UNALIGNED) { 1 } else { align.bytes() as c_uint };
            llvm::LLVMSetAlignment(store, align);
            if flags.contains(MemFlags::VOLATILE) {
                llvm::LLVMSetVolatile(store, llvm::True);
            }
            if flags.contains(MemFlags::NONTEMPORAL) {
                // According to LLVM [1] building a nontemporal store must
                // *always* point to a metadata value of the integer 1.
                //
                // [1]: http://llvm.org/docs/LangRef.html#store-instruction
                let one = self.cx.const_i32(1);
                let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1);
                llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node);
            }
            store
        }
    }

    fn atomic_store(
        &mut self,
        val: &'ll Value,
        ptr: &'ll Value,
        order: rustc_codegen_ssa::common::AtomicOrdering,
        size: Size,
    ) {
        debug!("Store {:?} -> {:?}", val, ptr);
        let ptr = self.check_store(val, ptr);
        unsafe {
            let store = llvm::LLVMRustBuildAtomicStore(
                self.llbuilder,
                val,
                ptr,
                AtomicOrdering::from_generic(order),
            );
            // LLVM requires the alignment of atomic stores to be at least the size of the type.
            llvm::LLVMSetAlignment(store, size.bytes() as c_uint);
        }
    }

    fn gep(&mut self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
        unsafe {
            llvm::LLVMBuildGEP(
                self.llbuilder,
                ptr,
                indices.as_ptr(),
                indices.len() as c_uint,
                UNNAMED,
            )
        }
    }

    fn inbounds_gep(&mut self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
        unsafe {
            llvm::LLVMBuildInBoundsGEP(
                self.llbuilder,
                ptr,
                indices.as_ptr(),
                indices.len() as c_uint,
                UNNAMED,
            )
        }
    }

    fn struct_gep(&mut self, ptr: &'ll Value, idx: u64) -> &'ll Value {
        assert_eq!(idx as c_uint as u64, idx);
        unsafe { llvm::LLVMBuildStructGEP(self.llbuilder, ptr, idx as c_uint, UNNAMED) }
    }

    /* Casts */
    fn trunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildTrunc(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn sext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildSExt(self.llbuilder, val, dest_ty, UNNAMED) }
    }
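    // The two `*_sat` methods return `Some` only when the target provides a
    // native saturating conversion (currently wasm32 with the
    // `nontrapping-fptoint` feature); returning `None` lets the caller fall
    // back to the generic saturating lowering.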
    fn fptoui_sat(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> Option<&'ll Value> {
        // WebAssembly has saturating floating point to integer casts if the
        // `nontrapping-fptoint` target feature is activated. We'll use those if
        // they are available.
        if self.sess().target.arch == "wasm32"
            && self.sess().target_features.contains(&sym::nontrapping_dash_fptoint)
        {
            let src_ty = self.cx.val_ty(val);
            let float_width = self.cx.float_width(src_ty);
            let int_width = self.cx.int_width(dest_ty);
            let name = match (int_width, float_width) {
                (32, 32) => Some("llvm.wasm.trunc.saturate.unsigned.i32.f32"),
                (32, 64) => Some("llvm.wasm.trunc.saturate.unsigned.i32.f64"),
                (64, 32) => Some("llvm.wasm.trunc.saturate.unsigned.i64.f32"),
                (64, 64) => Some("llvm.wasm.trunc.saturate.unsigned.i64.f64"),
                _ => None,
            };
            if let Some(name) = name {
                let intrinsic = self.get_intrinsic(name);
                return Some(self.call(intrinsic, &[val], None));
            }
        }
        None
    }

    fn fptosi_sat(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> Option<&'ll Value> {
        // WebAssembly has saturating floating point to integer casts if the
        // `nontrapping-fptoint` target feature is activated. We'll use those if
        // they are available.
        if self.sess().target.arch == "wasm32"
            && self.sess().target_features.contains(&sym::nontrapping_dash_fptoint)
        {
            let src_ty = self.cx.val_ty(val);
            let float_width = self.cx.float_width(src_ty);
            let int_width = self.cx.int_width(dest_ty);
            let name = match (int_width, float_width) {
                (32, 32) => Some("llvm.wasm.trunc.saturate.signed.i32.f32"),
                (32, 64) => Some("llvm.wasm.trunc.saturate.signed.i32.f64"),
                (64, 32) => Some("llvm.wasm.trunc.saturate.signed.i64.f32"),
                (64, 64) => Some("llvm.wasm.trunc.saturate.signed.i64.f64"),
                _ => None,
            };
            if let Some(name) = name {
                let intrinsic = self.get_intrinsic(name);
                return Some(self.call(intrinsic, &[val], None));
            }
        }
        None
    }
    fn fptosui_may_trap(&self, val: &'ll Value, dest_ty: &'ll Type) -> bool {
        // Most of the time we'll be generating the `fptosi` or `fptoui`
        // instruction for floating-point-to-integer conversions. These
        // instructions by definition in LLVM do not trap. For the WebAssembly
        // target, however, we'll lower in some cases to intrinsic calls instead
        // which may trap. If we detect that this is a situation where we'll be
        // using the intrinsics then we report that the call may trap, which
        // callers might need to handle.
        if !self.wasm_and_missing_nontrapping_fptoint() {
            return false;
        }
        let src_ty = self.cx.val_ty(val);
        let float_width = self.cx.float_width(src_ty);
        let int_width = self.cx.int_width(dest_ty);
        match (int_width, float_width) {
            (32, 32) | (32, 64) | (64, 32) | (64, 64) => true,
            _ => false,
        }
    }

    fn fptoui(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        // When we can, use the native wasm intrinsics which have tighter
        // codegen. Note that this has a semantic difference in that the
        // intrinsic can trap whereas `fptoui` never traps. That difference,
        // however, is handled by `fptosui_may_trap` above.
        //
        // Note that we skip the wasm intrinsics for vector types where `fptoui`
        // must be used instead.
        if self.wasm_and_missing_nontrapping_fptoint() {
            let src_ty = self.cx.val_ty(val);
            if self.cx.type_kind(src_ty) != TypeKind::Vector {
                let float_width = self.cx.float_width(src_ty);
                let int_width = self.cx.int_width(dest_ty);
                let name = match (int_width, float_width) {
                    (32, 32) => Some("llvm.wasm.trunc.unsigned.i32.f32"),
                    (32, 64) => Some("llvm.wasm.trunc.unsigned.i32.f64"),
                    (64, 32) => Some("llvm.wasm.trunc.unsigned.i64.f32"),
                    (64, 64) => Some("llvm.wasm.trunc.unsigned.i64.f64"),
                    _ => None,
                };
                if let Some(name) = name {
                    let intrinsic = self.get_intrinsic(name);
                    return self.call(intrinsic, &[val], None);
                }
            }
        }
        unsafe { llvm::LLVMBuildFPToUI(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn fptosi(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        if self.wasm_and_missing_nontrapping_fptoint() {
            let src_ty = self.cx.val_ty(val);
            if self.cx.type_kind(src_ty) != TypeKind::Vector {
                let float_width = self.cx.float_width(src_ty);
                let int_width = self.cx.int_width(dest_ty);
                let name = match (int_width, float_width) {
                    (32, 32) => Some("llvm.wasm.trunc.signed.i32.f32"),
                    (32, 64) => Some("llvm.wasm.trunc.signed.i32.f64"),
                    (64, 32) => Some("llvm.wasm.trunc.signed.i64.f32"),
                    (64, 64) => Some("llvm.wasm.trunc.signed.i64.f64"),
                    _ => None,
                };
                if let Some(name) = name {
                    let intrinsic = self.get_intrinsic(name);
                    return self.call(intrinsic, &[val], None);
                }
            }
        }
        unsafe { llvm::LLVMBuildFPToSI(self.llbuilder, val, dest_ty, UNNAMED) }
    }
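
    // For example (taken from the commit message above), a safe cast like
    //
    //     pub unsafe extern "C" fn cast(x: f64) -> u32 { x as u32 }
    //
    // compiles down to a single guarded `i32.trunc_f64_u` on such targets instead of
    // the doubly-guarded codegen rustc used to emit.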

    fn uitofp(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildUIToFP(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn sitofp(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildSIToFP(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn fptrunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildFPTrunc(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn fpext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildFPExt(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn ptrtoint(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildPtrToInt(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn inttoptr(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildIntToPtr(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn bitcast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn intcast(&mut self, val: &'ll Value, dest_ty: &'ll Type, is_signed: bool) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildIntCast(self.llbuilder, val, dest_ty, is_signed) }
    }

    fn pointercast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildPointerCast(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    /* Comparisons */
    fn icmp(&mut self, op: IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        let op = llvm::IntPredicate::from_generic(op);
        unsafe { llvm::LLVMBuildICmp(self.llbuilder, op as c_uint, lhs, rhs, UNNAMED) }
    }

    fn fcmp(&mut self, op: RealPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMBuildFCmp(self.llbuilder, op as c_uint, lhs, rhs, UNNAMED) }
    }

    /* Miscellaneous instructions */
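
    // The block-memory helpers below lower to `LLVMRustBuildMemCpy`/`MemMove`/`MemSet`.
    // When `MemFlags::NONTEMPORAL` is requested, memcpy/memmove instead fall back to a
    // plain load followed by a nontemporal store, since LLVM has no nontemporal memcpy
    // (see the HACK(nox) comments).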

    fn memcpy(
        &mut self,
        dst: &'ll Value,
        dst_align: Align,
        src: &'ll Value,
        src_align: Align,
        size: &'ll Value,
        flags: MemFlags,
    ) {
        if flags.contains(MemFlags::NONTEMPORAL) {
            // HACK(nox): This is inefficient but there is no nontemporal memcpy.
            let val = self.load(src, src_align);
            let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
            self.store_with_flags(val, ptr, dst_align, flags);
            return;
        }
        let size = self.intcast(size, self.type_isize(), false);
        let is_volatile = flags.contains(MemFlags::VOLATILE);
        let dst = self.pointercast(dst, self.type_i8p());
        let src = self.pointercast(src, self.type_i8p());
        unsafe {
            llvm::LLVMRustBuildMemCpy(
                self.llbuilder,
                dst,
                dst_align.bytes() as c_uint,
                src,
                src_align.bytes() as c_uint,
                size,
                is_volatile,
            );
        }
    }

    fn memmove(
        &mut self,
        dst: &'ll Value,
        dst_align: Align,
        src: &'ll Value,
        src_align: Align,
        size: &'ll Value,
        flags: MemFlags,
    ) {
        if flags.contains(MemFlags::NONTEMPORAL) {
            // HACK(nox): This is inefficient but there is no nontemporal memmove.
            let val = self.load(src, src_align);
            let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
            self.store_with_flags(val, ptr, dst_align, flags);
            return;
        }
        let size = self.intcast(size, self.type_isize(), false);
        let is_volatile = flags.contains(MemFlags::VOLATILE);
        let dst = self.pointercast(dst, self.type_i8p());
        let src = self.pointercast(src, self.type_i8p());
        unsafe {
            llvm::LLVMRustBuildMemMove(
                self.llbuilder,
                dst,
                dst_align.bytes() as c_uint,
                src,
                src_align.bytes() as c_uint,
                size,
                is_volatile,
            );
        }
    }

    fn memset(
        &mut self,
        ptr: &'ll Value,
        fill_byte: &'ll Value,
        size: &'ll Value,
        align: Align,
        flags: MemFlags,
    ) {
        let is_volatile = flags.contains(MemFlags::VOLATILE);
        let ptr = self.pointercast(ptr, self.type_i8p());
        unsafe {
            llvm::LLVMRustBuildMemSet(
                self.llbuilder,
                ptr,
                align.bytes() as c_uint,
                fill_byte,
                size,
                is_volatile,
            );
        }
    }

    fn select(
        &mut self,
        cond: &'ll Value,
        then_val: &'ll Value,
        else_val: &'ll Value,
    ) -> &'ll Value {
        unsafe { llvm::LLVMBuildSelect(self.llbuilder, cond, then_val, else_val, UNNAMED) }
    }

    fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, UNNAMED) }
    }

    fn extract_element(&mut self, vec: &'ll Value, idx: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMBuildExtractElement(self.llbuilder, vec, idx, UNNAMED) }
    }

    fn vector_splat(&mut self, num_elts: usize, elt: &'ll Value) -> &'ll Value {
        unsafe {
            let elt_ty = self.cx.val_ty(elt);
            let undef = llvm::LLVMGetUndef(self.type_vector(elt_ty, num_elts as u64));
            let vec = self.insert_element(undef, elt, self.cx.const_i32(0));
            let vec_i32_ty = self.type_vector(self.type_i32(), num_elts as u64);
            self.shuffle_vector(vec, undef, self.const_null(vec_i32_ty))
        }
    }

    fn extract_value(&mut self, agg_val: &'ll Value, idx: u64) -> &'ll Value {
        assert_eq!(idx as c_uint as u64, idx);
        unsafe { llvm::LLVMBuildExtractValue(self.llbuilder, agg_val, idx as c_uint, UNNAMED) }
    }

    fn insert_value(&mut self, agg_val: &'ll Value, elt: &'ll Value, idx: u64) -> &'ll Value {
        assert_eq!(idx as c_uint as u64, idx);
        unsafe { llvm::LLVMBuildInsertValue(self.llbuilder, agg_val, elt, idx as c_uint, UNNAMED) }
    }
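
    // `vector_splat` uses the standard LLVM splat idiom: insert the scalar into lane 0
    // of an `undef` vector, then `shufflevector` with an all-zero mask (`const_null` of
    // an i32 vector) so every result lane reads lane 0.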

    fn landing_pad(
        &mut self,
        ty: &'ll Type,
        pers_fn: &'ll Value,
        num_clauses: usize,
    ) -> &'ll Value {
        unsafe {
            llvm::LLVMBuildLandingPad(self.llbuilder, ty, pers_fn, num_clauses as c_uint, UNNAMED)
        }
    }

    fn set_cleanup(&mut self, landing_pad: &'ll Value) {
        unsafe {
            llvm::LLVMSetCleanup(landing_pad, llvm::True);
        }
    }

    fn resume(&mut self, exn: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMBuildResume(self.llbuilder, exn) }
    }

    fn cleanup_pad(&mut self, parent: Option<&'ll Value>, args: &[&'ll Value]) -> Funclet<'ll> {
        let name = const_cstr!("cleanuppad");
        let ret = unsafe {
            llvm::LLVMRustBuildCleanupPad(
                self.llbuilder,
                parent,
                args.len() as c_uint,
                args.as_ptr(),
                name.as_ptr(),
            )
        };
        Funclet::new(ret.expect("LLVM does not have support for cleanuppad"))
    }

    fn cleanup_ret(
        &mut self,
        funclet: &Funclet<'ll>,
        unwind: Option<&'ll BasicBlock>,
    ) -> &'ll Value {
        let ret =
            unsafe { llvm::LLVMRustBuildCleanupRet(self.llbuilder, funclet.cleanuppad(), unwind) };
        ret.expect("LLVM does not have support for cleanupret")
    }

    fn catch_pad(&mut self, parent: &'ll Value, args: &[&'ll Value]) -> Funclet<'ll> {
        let name = const_cstr!("catchpad");
        let ret = unsafe {
            llvm::LLVMRustBuildCatchPad(
                self.llbuilder,
                parent,
                args.len() as c_uint,
                args.as_ptr(),
                name.as_ptr(),
            )
        };
        Funclet::new(ret.expect("LLVM does not have support for catchpad"))
    }

    fn catch_switch(
        &mut self,
        parent: Option<&'ll Value>,
        unwind: Option<&'ll BasicBlock>,
        num_handlers: usize,
    ) -> &'ll Value {
        let name = const_cstr!("catchswitch");
        let ret = unsafe {
            llvm::LLVMRustBuildCatchSwitch(
                self.llbuilder,
                parent,
                unwind,
                num_handlers as c_uint,
                name.as_ptr(),
            )
        };
        ret.expect("LLVM does not have support for catchswitch")
    }

    fn add_handler(&mut self, catch_switch: &'ll Value, handler: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMRustAddHandler(catch_switch, handler);
        }
    }

    fn set_personality_fn(&mut self, personality: &'ll Value) {
        unsafe {
            llvm::LLVMSetPersonalityFn(self.llfn(), personality);
        }
    }
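
    // The cleanuppad/catchpad/catchswitch builders return `Funclet`s, which bundle the
    // pad instruction with the operand bundle that calls made inside the funclet must
    // carry (see `call` below). The `LLVMRust*` wrappers return `Option` so that an LLVM
    // build without funclet support surfaces as the `expect` message rather than a crash.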

    // Atomic Operations
    fn atomic_cmpxchg(
        &mut self,
        dst: &'ll Value,
        cmp: &'ll Value,
        src: &'ll Value,
        order: rustc_codegen_ssa::common::AtomicOrdering,
        failure_order: rustc_codegen_ssa::common::AtomicOrdering,
        weak: bool,
    ) -> &'ll Value {
        let weak = if weak { llvm::True } else { llvm::False };
        unsafe {
            llvm::LLVMRustBuildAtomicCmpXchg(
                self.llbuilder,
                dst,
                cmp,
                src,
                AtomicOrdering::from_generic(order),
                AtomicOrdering::from_generic(failure_order),
                weak,
            )
        }
    }

    fn atomic_rmw(
        &mut self,
        op: rustc_codegen_ssa::common::AtomicRmwBinOp,
        dst: &'ll Value,
        src: &'ll Value,
        order: rustc_codegen_ssa::common::AtomicOrdering,
    ) -> &'ll Value {
        unsafe {
            llvm::LLVMBuildAtomicRMW(
                self.llbuilder,
                AtomicRmwBinOp::from_generic(op),
                dst,
                src,
                AtomicOrdering::from_generic(order),
                False,
            )
        }
    }

    fn atomic_fence(
        &mut self,
        order: rustc_codegen_ssa::common::AtomicOrdering,
        scope: rustc_codegen_ssa::common::SynchronizationScope,
    ) {
        unsafe {
            llvm::LLVMRustBuildAtomicFence(
                self.llbuilder,
                AtomicOrdering::from_generic(order),
                SynchronizationScope::from_generic(scope),
            );
        }
    }
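
    // The atomic helpers translate the backend-agnostic enums from
    // `rustc_codegen_ssa::common` (orderings, RMW ops, synchronization scopes) into
    // LLVM's own variants via `from_generic` before crossing the FFI boundary.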

    fn set_invariant_load(&mut self, load: &'ll Value) {
        unsafe {
            llvm::LLVMSetMetadata(
                load,
                llvm::MD_invariant_load as c_uint,
                llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0),
            );
        }
    }

    fn lifetime_start(&mut self, ptr: &'ll Value, size: Size) {
        self.call_lifetime_intrinsic("llvm.lifetime.start.p0i8", ptr, size);
    }

    fn lifetime_end(&mut self, ptr: &'ll Value, size: Size) {
        self.call_lifetime_intrinsic("llvm.lifetime.end.p0i8", ptr, size);
    }
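
    // `set_invariant_load` attaches empty `!invariant.load` metadata to a load, letting
    // LLVM assume the loaded location does not change while it is dereferenceable.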

    fn instrprof_increment(
        &mut self,
        fn_name: &'ll Value,
        hash: &'ll Value,
        num_counters: &'ll Value,
        index: &'ll Value,
    ) {
        debug!(
            "instrprof_increment() with args ({:?}, {:?}, {:?}, {:?})",
            fn_name, hash, num_counters, index
        );

        let llfn = unsafe { llvm::LLVMRustGetInstrProfIncrementIntrinsic(self.cx().llmod) };
        let args = &[fn_name, hash, num_counters, index];
        let args = self.check_call("call", llfn, args);

        unsafe {
            let _ = llvm::LLVMRustBuildCall(
                self.llbuilder,
                llfn,
                args.as_ptr() as *const &llvm::Value,
                args.len() as c_uint,
                None,
            );
        }
    }
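
    // `instrprof_increment` emits a call to the `llvm.instrprof.increment` intrinsic;
    // later instrumentation passes lower it into the actual counter update. The call's
    // result is intentionally discarded (`let _ = ...`) since the intrinsic returns void.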

    fn call(
        &mut self,
        llfn: &'ll Value,
        args: &[&'ll Value],
        funclet: Option<&Funclet<'ll>>,
    ) -> &'ll Value {
        debug!("call {:?} with args ({:?})", llfn, args);

        let args = self.check_call("call", llfn, args);
        let bundle = funclet.map(|funclet| funclet.bundle());
        let bundle = bundle.as_ref().map(|b| &*b.raw);

        unsafe {
            llvm::LLVMRustBuildCall(
                self.llbuilder,
                llfn,
                args.as_ptr() as *const &llvm::Value,
                args.len() as c_uint,
                bundle,
            )
        }
    }

    fn zext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn cx(&self) -> &CodegenCx<'ll, 'tcx> {
        self.cx
    }

    unsafe fn delete_basic_block(&mut self, bb: &'ll BasicBlock) {
        llvm::LLVMDeleteBasicBlock(bb);
    }

    fn do_not_inline(&mut self, llret: &'ll Value) {
        llvm::Attribute::NoInline.apply_callsite(llvm::AttributePlace::Function, llret);
    }
}

impl StaticBuilderMethods for Builder<'a, 'll, 'tcx> {
    fn get_static(&mut self, def_id: DefId) -> &'ll Value {
        // Forward to the `get_static` method of `CodegenCx`
        self.cx().get_static(def_id)
    }
}

impl Builder<'a, 'll, 'tcx> {
    pub fn llfn(&self) -> &'ll Value {
        unsafe { llvm::LLVMGetBasicBlockParent(self.llbb()) }
    }

    fn position_at_start(&mut self, llbb: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMRustPositionBuilderAtStart(self.llbuilder, llbb);
        }
    }

    pub fn minnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildMinNum(self.llbuilder, lhs, rhs) }
    }

    pub fn maxnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildMaxNum(self.llbuilder, lhs, rhs) }
    }

    pub fn insert_element(
        &mut self,
        vec: &'ll Value,
        elt: &'ll Value,
        idx: &'ll Value,
    ) -> &'ll Value {
        unsafe { llvm::LLVMBuildInsertElement(self.llbuilder, vec, elt, idx, UNNAMED) }
    }

    pub fn shuffle_vector(
        &mut self,
        v1: &'ll Value,
        v2: &'ll Value,
        mask: &'ll Value,
    ) -> &'ll Value {
        unsafe { llvm::LLVMBuildShuffleVector(self.llbuilder, v1, v2, mask, UNNAMED) }
    }

    pub fn vector_reduce_fadd(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src) }
    }
    pub fn vector_reduce_fmul(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src) }
    }
    pub fn vector_reduce_fadd_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }
    pub fn vector_reduce_fmul_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }
    pub fn vector_reduce_add(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceAdd(self.llbuilder, src) }
    }
    pub fn vector_reduce_mul(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceMul(self.llbuilder, src) }
    }
    pub fn vector_reduce_and(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceAnd(self.llbuilder, src) }
    }
    pub fn vector_reduce_or(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceOr(self.llbuilder, src) }
    }
    pub fn vector_reduce_xor(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceXor(self.llbuilder, src) }
    }
    pub fn vector_reduce_fmin(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe {
            llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ false)
        }
    }
    pub fn vector_reduce_fmax(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe {
            llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ false)
        }
    }
    pub fn vector_reduce_fmin_fast(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe {
            let instr =
                llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ true);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }
    pub fn vector_reduce_fmax_fast(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe {
            let instr =
                llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ true);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }
    pub fn vector_reduce_min(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceMin(self.llbuilder, src, is_signed) }
    }
    pub fn vector_reduce_max(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceMax(self.llbuilder, src, is_signed) }
    }
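
    // The `_fast` reduction variants set LLVM's "unsafe algebra" (fast-math) flags on
    // the emitted instruction via `LLVMRustSetHasUnsafeAlgebra`, and the fast fmin/fmax
    // forms additionally pass `NoNaNs: true`, so callers opt in to reassociation and
    // NaN-ignoring behaviour explicitly.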

    pub fn add_clause(&mut self, landing_pad: &'ll Value, clause: &'ll Value) {
        unsafe {
            llvm::LLVMAddClause(landing_pad, clause);
        }
    }

    pub fn catch_ret(&mut self, funclet: &Funclet<'ll>, unwind: &'ll BasicBlock) -> &'ll Value {
        let ret =
            unsafe { llvm::LLVMRustBuildCatchRet(self.llbuilder, funclet.cleanuppad(), unwind) };
        ret.expect("LLVM does not have support for catchret")
    }

    fn check_store(&mut self, val: &'ll Value, ptr: &'ll Value) -> &'ll Value {
        let dest_ptr_ty = self.cx.val_ty(ptr);
        let stored_ty = self.cx.val_ty(val);
        let stored_ptr_ty = self.cx.type_ptr_to(stored_ty);

        assert_eq!(self.cx.type_kind(dest_ptr_ty), TypeKind::Pointer);

        if dest_ptr_ty == stored_ptr_ty {
            ptr
        } else {
            debug!(
                "type mismatch in store. \
                    Expected {:?}, got {:?}; inserting bitcast",
                dest_ptr_ty, stored_ptr_ty
            );
            self.bitcast(ptr, stored_ptr_ty)
        }
    }

    fn check_call<'b>(
        &mut self,
        typ: &str,
        llfn: &'ll Value,
        args: &'b [&'ll Value],
    ) -> Cow<'b, [&'ll Value]> {
        let mut fn_ty = self.cx.val_ty(llfn);
        // Strip off pointers
        while self.cx.type_kind(fn_ty) == TypeKind::Pointer {
            fn_ty = self.cx.element_type(fn_ty);
        }

        assert!(
            self.cx.type_kind(fn_ty) == TypeKind::Function,
            "builder::{} not passed a function, but {:?}",
            typ,
            fn_ty
        );

        let param_tys = self.cx.func_params_types(fn_ty);

        let all_args_match = param_tys
            .iter()
            .zip(args.iter().map(|&v| self.val_ty(v)))
            .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty);

        if all_args_match {
            return Cow::Borrowed(args);
        }

        let casted_args: Vec<_> = param_tys
            .into_iter()
            .zip(args.iter())
            .enumerate()
            .map(|(i, (expected_ty, &actual_val))| {
                let actual_ty = self.val_ty(actual_val);
                if expected_ty != actual_ty {
                    debug!(
                        "type mismatch in function call of {:?}. \
                            Expected {:?} for param {}, got {:?}; injecting bitcast",
                        llfn, expected_ty, i, actual_ty
                    );
                    self.bitcast(actual_val, expected_ty)
                } else {
                    actual_val
                }
            })
            .collect();

        Cow::Owned(casted_args)
    }

    pub fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, UNNAMED) }
    }
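
    // `check_store` and `check_call` are defensive helpers: when value or argument types
    // do not match what the LLVM declaration expects, they log a debug message and insert
    // a bitcast instead of emitting ill-typed IR.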

    fn call_lifetime_intrinsic(&mut self, intrinsic: &str, ptr: &'ll Value, size: Size) {
        let size = size.bytes();
        if size == 0 {
            return;
        }

        if !self.cx().sess().emit_lifetime_markers() {
            return;
        }

        let lifetime_intrinsic = self.cx.get_intrinsic(intrinsic);

        let ptr = self.pointercast(ptr, self.cx.type_i8p());
        self.call(lifetime_intrinsic, &[self.cx.const_u64(size), ptr], None);
    }
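
    // Lifetime markers are skipped for zero-sized values and whenever the session
    // disables them (`emit_lifetime_markers()` returns false); otherwise the pointer is
    // cast to `i8*` and passed to the `llvm.lifetime.start.p0i8`/`llvm.lifetime.end.p0i8`
    // intrinsics along with the size in bytes.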

    pub(crate) fn phi(
        &mut self,
        ty: &'ll Type,
        vals: &[&'ll Value],
        bbs: &[&'ll BasicBlock],
    ) -> &'ll Value {
        assert_eq!(vals.len(), bbs.len());
        let phi = unsafe { llvm::LLVMBuildPhi(self.llbuilder, ty, UNNAMED) };
        unsafe {
            llvm::LLVMAddIncoming(phi, vals.as_ptr(), bbs.as_ptr(), vals.len() as c_uint);
            phi
        }
    }

    fn add_incoming_to_phi(&mut self, phi: &'ll Value, val: &'ll Value, bb: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint);
        }
    }
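
    // `phi` expects one incoming value per predecessor block (hence the
    // `vals.len() == bbs.len()` assertion), while `add_incoming_to_phi` appends a single
    // incoming edge to an existing phi node.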

    fn wasm_and_missing_nontrapping_fptoint(&self) -> bool {
        self.sess().target.arch == "wasm32"
            && !self.sess().target_features.contains(&sym::nontrapping_dash_fptoint)
    }
}