use super::abi::AbiBuilderMethods;
use super::asm::AsmBuilderMethods;
use super::consts::ConstMethods;
use super::coverageinfo::CoverageInfoBuilderMethods;
use super::debuginfo::DebugInfoBuilderMethods;
use super::intrinsic::IntrinsicCallMethods;
use super::misc::MiscMethods;
use super::type_::{ArgAbiMethods, BaseTypeMethods};
use super::{HasCodegen, StaticBuilderMethods};

use crate::common::{
    AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope, TypeKind,
};
use crate::mir::operand::OperandRef;
use crate::mir::place::PlaceRef;
use crate::MemFlags;

use rustc_apfloat::{ieee, Float, Round, Status};
use rustc_middle::ty::layout::{HasParamEnv, TyAndLayout};
use rustc_middle::ty::Ty;
use rustc_span::Span;
use rustc_target::abi::{Abi, Align, Scalar, Size, WrappingRange};
use rustc_target::spec::HasTargetSpec;
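
/// The binary integer operations for which `checked_binop` can emit an
/// overflow check alongside the result.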
#[derive(Copy, Clone)]
pub enum OverflowOp {
    Add,
    Sub,
    Mul,
}
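
/// The "builder" interface that codegen backends implement for emitting
/// instructions: a value of the implementing type is an instruction builder
/// positioned at the end of one basic block (in the LLVM backend it wraps an
/// `IRBuilder`). A sketch of typical use when lowering a two-way branch;
/// `bx`, `lhs`, and `rhs` are illustrative names, not part of this trait:
///
/// ```ignore (illustrative)
/// let then_bb = bx.append_sibling_block("then");
/// let else_bb = bx.append_sibling_block("else");
/// let cond = bx.icmp(IntPredicate::IntEQ, lhs, rhs);
/// bx.cond_br(cond, then_bb, else_bb);
/// ```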
pub trait BuilderMethods<'a, 'tcx>:
    HasCodegen<'tcx>
    + CoverageInfoBuilderMethods<'tcx>
    + DebugInfoBuilderMethods
    + ArgAbiMethods<'tcx>
    + AbiBuilderMethods<'tcx>
    + IntrinsicCallMethods<'tcx>
    + AsmBuilderMethods<'tcx>
    + StaticBuilderMethods
    + HasParamEnv<'tcx>
    + HasTargetSpec
{
    fn build(cx: &'a Self::CodegenCx, llbb: Self::BasicBlock) -> Self;

    fn cx(&self) -> &Self::CodegenCx;
    fn llbb(&self) -> Self::BasicBlock;

    fn set_span(&mut self, span: Span);

    // FIXME(eddyb) replace uses of this with `append_sibling_block`.
    fn append_block(cx: &'a Self::CodegenCx, llfn: Self::Function, name: &str) -> Self::BasicBlock;

    fn append_sibling_block(&mut self, name: &str) -> Self::BasicBlock;

    // FIXME(eddyb) replace with callers using `append_sibling_block`.
    fn build_sibling_block(&mut self, name: &str) -> Self;

    fn ret_void(&mut self);
    fn ret(&mut self, v: Self::Value);
    fn br(&mut self, dest: Self::BasicBlock);
    fn cond_br(
        &mut self,
        cond: Self::Value,
        then_llbb: Self::BasicBlock,
        else_llbb: Self::BasicBlock,
    );
    fn switch(
        &mut self,
        v: Self::Value,
        else_llbb: Self::BasicBlock,
        cases: impl ExactSizeIterator<Item = (u128, Self::BasicBlock)>,
    );
    fn invoke(
        &mut self,
        llty: Self::Type,
        llfn: Self::Value,
        args: &[Self::Value],
        then: Self::BasicBlock,
        catch: Self::BasicBlock,
        funclet: Option<&Self::Funclet>,
    ) -> Self::Value;
    fn unreachable(&mut self);
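
    // Arithmetic and bitwise operations. As in LLVM, the `f*_fast` variants
    // permit fast-math-style optimizations, and `exactudiv`/`exactsdiv` assume
    // the division has no remainder.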
    fn add(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fadd(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fadd_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn sub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fsub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fsub_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn mul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fmul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fmul_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn udiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn exactudiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn sdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn exactsdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fdiv_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn urem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn srem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn frem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn frem_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn shl(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn lshr(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn ashr(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn unchecked_sadd(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn unchecked_uadd(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn unchecked_ssub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn unchecked_usub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn unchecked_smul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn unchecked_umul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn and(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn or(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn xor(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn neg(&mut self, v: Self::Value) -> Self::Value;
    fn fneg(&mut self, v: Self::Value) -> Self::Value;
    fn not(&mut self, v: Self::Value) -> Self::Value;
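
    /// Emits `lhs <op> rhs` together with a boolean-like `Value` that is true
    /// iff the operation overflowed. A sketch of use (`bx`, `a`, `b`, and
    /// `int_ty` are illustrative names):
    ///
    /// ```ignore (illustrative)
    /// let (val, overflowed) = bx.checked_binop(OverflowOp::Add, int_ty, a, b);
    /// ```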
    fn checked_binop(
        &mut self,
        oop: OverflowOp,
        ty: Ty<'_>,
        lhs: Self::Value,
        rhs: Self::Value,
    ) -> (Self::Value, Self::Value);
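
    // `from_immediate` and `to_immediate` convert between a value's in-memory
    // representation and its immediate (SSA operand) representation, e.g. in
    // the LLVM backend a `bool` is an `i8` in memory but an `i1` as an operand.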
    fn from_immediate(&mut self, val: Self::Value) -> Self::Value;
    fn to_immediate(&mut self, val: Self::Value, layout: TyAndLayout<'_>) -> Self::Value {
        if let Abi::Scalar(scalar) = layout.abi {
            self.to_immediate_scalar(val, scalar)
        } else {
            val
        }
    }
    fn to_immediate_scalar(&mut self, val: Self::Value, scalar: Scalar) -> Self::Value;

    fn alloca(&mut self, ty: Self::Type, align: Align) -> Self::Value;
    fn dynamic_alloca(&mut self, ty: Self::Type, align: Align) -> Self::Value;
    fn array_alloca(&mut self, ty: Self::Type, len: Self::Value, align: Align) -> Self::Value;

    fn load(&mut self, ty: Self::Type, ptr: Self::Value, align: Align) -> Self::Value;
    fn volatile_load(&mut self, ty: Self::Type, ptr: Self::Value) -> Self::Value;
    fn atomic_load(
        &mut self,
        ty: Self::Type,
        ptr: Self::Value,
        order: AtomicOrdering,
        size: Size,
    ) -> Self::Value;
    fn load_operand(&mut self, place: PlaceRef<'tcx, Self::Value>)
        -> OperandRef<'tcx, Self::Value>;

    /// Called for `Rvalue::Repeat` when the element is neither a ZST nor
    /// optimizable using `memset`.
    fn write_operand_repeatedly(
        self,
        elem: OperandRef<'tcx, Self::Value>,
        count: u64,
        dest: PlaceRef<'tcx, Self::Value>,
    ) -> Self;

    fn range_metadata(&mut self, load: Self::Value, range: WrappingRange);
    fn nonnull_metadata(&mut self, load: Self::Value);
    fn type_metadata(&mut self, function: Self::Function, typeid: String);
    fn typeid_metadata(&mut self, typeid: String) -> Self::Value;

    fn store(&mut self, val: Self::Value, ptr: Self::Value, align: Align) -> Self::Value;
    fn store_with_flags(
        &mut self,
        val: Self::Value,
        ptr: Self::Value,
        align: Align,
        flags: MemFlags,
    ) -> Self::Value;
    fn atomic_store(
        &mut self,
        val: Self::Value,
        ptr: Self::Value,
        order: AtomicOrdering,
        size: Size,
    );
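
    // Pointer arithmetic, mirroring LLVM's `getelementptr`: the `inbounds`
    // variant additionally promises the optimizer that the computed address
    // stays within the same allocation, and `struct_gep` selects field `idx`
    // of an aggregate.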
    fn gep(&mut self, ty: Self::Type, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value;
    fn inbounds_gep(
        &mut self,
        ty: Self::Type,
        ptr: Self::Value,
        indices: &[Self::Value],
    ) -> Self::Value;
    fn struct_gep(&mut self, ty: Self::Type, ptr: Self::Value, idx: u64) -> Self::Value;
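
    // Casts, named after the corresponding LLVM cast instructions. The `_sat`
    // methods return `None` if the backend has no native saturating
    // float-to-int cast; `cast_float_to_int` below then open-codes the
    // saturation.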
    fn trunc(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn sext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn fptoui_sat(&mut self, val: Self::Value, dest_ty: Self::Type) -> Option<Self::Value>;
    fn fptosi_sat(&mut self, val: Self::Value, dest_ty: Self::Type) -> Option<Self::Value>;
    fn fptoui(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn fptosi(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn uitofp(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn sitofp(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn fptrunc(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn fpext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn ptrtoint(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn inttoptr(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn bitcast(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn intcast(&mut self, val: Self::Value, dest_ty: Self::Type, is_signed: bool) -> Self::Value;
    fn pointercast(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;

    fn cast_float_to_int(
        &mut self,
        signed: bool,
        x: Self::Value,
        dest_ty: Self::Type,
    ) -> Self::Value {
        let in_ty = self.cx().val_ty(x);
        let (float_ty, int_ty) = if self.cx().type_kind(dest_ty) == TypeKind::Vector
            && self.cx().type_kind(in_ty) == TypeKind::Vector
        {
            (self.cx().element_type(in_ty), self.cx().element_type(dest_ty))
        } else {
            (in_ty, dest_ty)
        };
        assert!(matches!(self.cx().type_kind(float_ty), TypeKind::Float | TypeKind::Double));
        assert_eq!(self.cx().type_kind(int_ty), TypeKind::Integer);

        if let Some(false) = self.cx().sess().opts.debugging_opts.saturating_float_casts {
            return if signed { self.fptosi(x, dest_ty) } else { self.fptoui(x, dest_ty) };
        }

        let try_sat_result =
            if signed { self.fptosi_sat(x, dest_ty) } else { self.fptoui_sat(x, dest_ty) };
        if let Some(try_sat_result) = try_sat_result {
            return try_sat_result;
        }

        let int_width = self.cx().int_width(int_ty);
        let float_width = self.cx().float_width(float_ty);
        // LLVM's fpto[su]i returns undef when the input x is infinite, NaN, or does not fit into the
        // destination integer type after rounding towards zero. This `undef` value can cause UB in
        // safe code (see issue #10184), so we implement a saturating conversion on top of it:
        // Semantically, the mathematical value of the input is rounded towards zero to the next
        // mathematical integer, and then the result is clamped into the range of the destination
        // integer type. Positive and negative infinity are mapped to the maximum and minimum value of
        // the destination integer type. NaN is mapped to 0.
        //
        // Define f_min and f_max as the smallest and largest (finite) floats that are exactly equal to
        // a value representable in int_ty.
        // They are exactly equal to int_ty::{MIN,MAX} if float_ty has enough significand bits.
        // Otherwise, int_ty::MAX must be rounded towards zero, as it is one less than a power of two.
        // int_ty::MIN, however, is either zero or a negative power of two and is thus exactly
        // representable. Note that this only works if float_ty's exponent range is sufficiently large.
        // f16 or 256-bit integers would break this property. Right now the smallest float type is f32
        // with exponents ranging up to 127, which is barely enough for i128::MIN = -2^127.
        // On the other hand, f_max works even if int_ty::MAX is greater than float_ty::MAX. Because
        // we're rounding towards zero, we just get float_ty::MAX (which is always an integer).
        // This already happens today with u128::MAX = 2^128 - 1 > f32::MAX.
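        // For example (i32/f32): i32::MIN = -2^31 is a power of two and therefore exactly
        // representable, so f_min = -2147483648.0; i32::MAX = 2^31 - 1 needs 31 significand bits
        // but f32 only has 24, so rounding towards zero yields f_max = 2^31 - 2^7 = 2147483520.0.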
        let int_max = |signed: bool, int_width: u64| -> u128 {
            let shift_amount = 128 - int_width;
            if signed { i128::MAX as u128 >> shift_amount } else { u128::MAX >> shift_amount }
        };
        let int_min = |signed: bool, int_width: u64| -> i128 {
            if signed { i128::MIN >> (128 - int_width) } else { 0 }
        };

        let compute_clamp_bounds_single = |signed: bool, int_width: u64| -> (u128, u128) {
            let rounded_min =
                ieee::Single::from_i128_r(int_min(signed, int_width), Round::TowardZero);
            assert_eq!(rounded_min.status, Status::OK);
            let rounded_max =
                ieee::Single::from_u128_r(int_max(signed, int_width), Round::TowardZero);
            assert!(rounded_max.value.is_finite());
            (rounded_min.value.to_bits(), rounded_max.value.to_bits())
        };
        let compute_clamp_bounds_double = |signed: bool, int_width: u64| -> (u128, u128) {
            let rounded_min =
                ieee::Double::from_i128_r(int_min(signed, int_width), Round::TowardZero);
            assert_eq!(rounded_min.status, Status::OK);
            let rounded_max =
                ieee::Double::from_u128_r(int_max(signed, int_width), Round::TowardZero);
            assert!(rounded_max.value.is_finite());
            (rounded_min.value.to_bits(), rounded_max.value.to_bits())
        };

        // To implement saturation, we perform the following steps:
        //
        // 1. Cast x to an integer with fpto[su]i. This may result in undef.
        // 2. Compare x to f_min and f_max, and use the comparison results to select:
        //  a) int_ty::MIN if x < f_min or x is NaN
        //  b) int_ty::MAX if x > f_max
        //  c) the result of fpto[su]i otherwise
        // 3. If x is NaN, return 0, otherwise return the result of step 2.
        //
        // This avoids resulting in undef because values in range [f_min, f_max] by definition
        // fit into the destination type. It creates an undef temporary, but *producing* undef is
        // not UB. Our use of undef does not introduce any non-determinism either.
        // More importantly, the above procedure correctly implements saturating conversion.
        // Proof (sketch):
        // If x is NaN, 0 is returned by definition.
        // Otherwise, x is finite or infinite and thus can be compared with f_min and f_max.
        // This yields three cases to consider:
        // (1) if x in [f_min, f_max], the result of fpto[su]i is returned, which agrees with
        //     saturating conversion for inputs in that range.
        // (2) if x > f_max, then x is larger than int_ty::MAX. This holds even if f_max is rounded
        //     (i.e., if f_max < int_ty::MAX) because in those cases, nextUp(f_max) is already larger
        //     than int_ty::MAX. Because x is larger than int_ty::MAX, the return value of int_ty::MAX
        //     is correct.
        // (3) if x < f_min, then x is smaller than int_ty::MIN. As shown earlier, f_min exactly equals
        //     int_ty::MIN and therefore the return value of int_ty::MIN is correct.
        // QED.

        let float_bits_to_llval = |bx: &mut Self, bits| {
            let bits_llval = match float_width {
                32 => bx.cx().const_u32(bits as u32),
                64 => bx.cx().const_u64(bits as u64),
                n => bug!("unsupported float width {}", n),
            };
            bx.bitcast(bits_llval, float_ty)
        };
        let (f_min, f_max) = match float_width {
            32 => compute_clamp_bounds_single(signed, int_width),
            64 => compute_clamp_bounds_double(signed, int_width),
            n => bug!("unsupported float width {}", n),
        };
        let f_min = float_bits_to_llval(self, f_min);
        let f_max = float_bits_to_llval(self, f_max);
        let int_max = self.cx().const_uint_big(int_ty, int_max(signed, int_width));
        let int_min = self.cx().const_uint_big(int_ty, int_min(signed, int_width) as u128);
        let zero = self.cx().const_uint(int_ty, 0);

        // If we're working with vectors, constants must be "splatted": the constant is duplicated
        // into each lane of the vector. The algorithm stays the same, we are just using the
        // same constant across all lanes.
        let maybe_splat = |bx: &mut Self, val| {
            if bx.cx().type_kind(dest_ty) == TypeKind::Vector {
                bx.vector_splat(bx.vector_length(dest_ty), val)
            } else {
                val
            }
        };
        let f_min = maybe_splat(self, f_min);
        let f_max = maybe_splat(self, f_max);
        let int_max = maybe_splat(self, int_max);
        let int_min = maybe_splat(self, int_min);
        let zero = maybe_splat(self, zero);

        // Step 1 ...
        let fptosui_result = if signed { self.fptosi(x, dest_ty) } else { self.fptoui(x, dest_ty) };
        let less_or_nan = self.fcmp(RealPredicate::RealULT, x, f_min);
        let greater = self.fcmp(RealPredicate::RealOGT, x, f_max);

        // Step 2: We use two comparisons and two selects, with %s1 being the
        // result:
        //     %less_or_nan = fcmp ult %x, %f_min
        //     %greater = fcmp ogt %x, %f_max
        //     %s0 = select %less_or_nan, int_ty::MIN, %fptosi_result
        //     %s1 = select %greater, int_ty::MAX, %s0
        // Note that %less_or_nan uses an *unordered* comparison. This
        // comparison is true if the operands are not comparable (i.e., if x is
        // NaN). The unordered comparison ensures that s1 becomes int_ty::MIN if
        // x is NaN.
        //
        // Performance note: Unordered comparison can be lowered to a "flipped"
        // comparison and a negation, and the negation can be merged into the
        // select. Therefore, it is not necessarily any more expensive than an
        // ordered ("normal") comparison. Whether these optimizations will be
        // performed is ultimately up to the backend, but at least x86 does
        // perform them.
        let s0 = self.select(less_or_nan, int_min, fptosui_result);
        let s1 = self.select(greater, int_max, s0);

        // Step 3: NaN replacement.
        // For unsigned types, the above step already yielded int_ty::MIN == 0 if x is NaN.
        // Therefore we only need to execute this step for signed integer types.
        if signed {
            // LLVM has no isNaN predicate, so we use (x == x) instead.
            let cmp = self.fcmp(RealPredicate::RealOEQ, x, x);
            self.select(cmp, s1, zero)
        } else {
            s1
        }
    }

    fn icmp(&mut self, op: IntPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fcmp(&mut self, op: RealPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
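
    // Bulk memory operations, corresponding to the backend's
    // `memcpy`/`memmove`/`memset` intrinsics; `MemFlags` carries modifiers
    // such as volatility.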
    fn memcpy(
        &mut self,
        dst: Self::Value,
        dst_align: Align,
        src: Self::Value,
        src_align: Align,
        size: Self::Value,
        flags: MemFlags,
    );
    fn memmove(
        &mut self,
        dst: Self::Value,
        dst_align: Align,
        src: Self::Value,
        src_align: Align,
        size: Self::Value,
        flags: MemFlags,
    );
    fn memset(
        &mut self,
        ptr: Self::Value,
        fill_byte: Self::Value,
        size: Self::Value,
        align: Align,
        flags: MemFlags,
    );

    fn select(
        &mut self,
        cond: Self::Value,
        then_val: Self::Value,
        else_val: Self::Value,
    ) -> Self::Value;

    fn va_arg(&mut self, list: Self::Value, ty: Self::Type) -> Self::Value;
    fn extract_element(&mut self, vec: Self::Value, idx: Self::Value) -> Self::Value;
    fn vector_splat(&mut self, num_elts: usize, elt: Self::Value) -> Self::Value;
    fn extract_value(&mut self, agg_val: Self::Value, idx: u64) -> Self::Value;
    fn insert_value(&mut self, agg_val: Self::Value, elt: Self::Value, idx: u64) -> Self::Value;

    fn set_personality_fn(&mut self, personality: Self::Value);

    // These are used by everyone except msvc
    fn landing_pad(
        &mut self,
        ty: Self::Type,
        pers_fn: Self::Value,
        num_clauses: usize,
    ) -> Self::Value;
    fn set_cleanup(&mut self, landing_pad: Self::Value);
    fn resume(&mut self, exn: Self::Value);

    // These are used only by msvc
    fn cleanup_pad(&mut self, parent: Option<Self::Value>, args: &[Self::Value]) -> Self::Funclet;
    fn cleanup_ret(&mut self, funclet: &Self::Funclet, unwind: Option<Self::BasicBlock>);
    fn catch_pad(&mut self, parent: Self::Value, args: &[Self::Value]) -> Self::Funclet;
    fn catch_switch(
        &mut self,
        parent: Option<Self::Value>,
        unwind: Option<Self::BasicBlock>,
        handlers: &[Self::BasicBlock],
    ) -> Self::Value;
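
    // Atomics, mirroring LLVM's `cmpxchg`, `atomicrmw`, and `fence`
    // instructions.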
    fn atomic_cmpxchg(
        &mut self,
        dst: Self::Value,
        cmp: Self::Value,
        src: Self::Value,
        order: AtomicOrdering,
        failure_order: AtomicOrdering,
        weak: bool,
    ) -> Self::Value;
    fn atomic_rmw(
        &mut self,
        op: AtomicRmwBinOp,
        dst: Self::Value,
        src: Self::Value,
        order: AtomicOrdering,
    ) -> Self::Value;
    fn atomic_fence(&mut self, order: AtomicOrdering, scope: SynchronizationScope);
    fn set_invariant_load(&mut self, load: Self::Value);

    /// Called for `StorageLive`
    fn lifetime_start(&mut self, ptr: Self::Value, size: Size);

    /// Called for `StorageDead`
    fn lifetime_end(&mut self, ptr: Self::Value, size: Size);
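
    /// Emits a counter increment for instrumentation-based code coverage
    /// (in the LLVM backend this lowers to the `llvm.instrprof.increment`
    /// intrinsic).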
    fn instrprof_increment(
        &mut self,
        fn_name: Self::Value,
        hash: Self::Value,
        num_counters: Self::Value,
        index: Self::Value,
    );
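
    /// `llty` is the callee's function type; it is passed explicitly so that
    /// backends do not have to recover the signature from the pointer type of
    /// `llfn` (which is not possible once pointers are opaque).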
    fn call(
        &mut self,
        llty: Self::Type,
        llfn: Self::Value,
        args: &[Self::Value],
        funclet: Option<&Self::Funclet>,
    ) -> Self::Value;
    fn zext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;

    fn apply_attrs_to_cleanup_callsite(&mut self, llret: Self::Value);
}