Rollup merge of #136457 - calder:master, r=tgross35

Expose algebraic floating point intrinsics

# Problem

A stable Rust implementation of a simple dot product is 8x slower than C++ on modern x86-64 CPUs. The root cause is that stable Rust provides no way to let the compiler reorder floating point operations for better vectorization.

See https://github.com/calder/dot-bench for benchmarks. Measurements below were performed on an i7-10875H.

### C++: 10us 

With Clang 18.1.3 and `-O2 -march=haswell`:
<table>
<tr>
    <th>C++</th>
    <th>Assembly</th>
</tr>
<tr>
<td>
<pre lang="cc">
#include &lt;cstddef&gt;

float dot(float *a, float *b, size_t len) {
    #pragma clang fp reassociate(on)
    float sum = 0.0;
    for (size_t i = 0; i < len; ++i) {
        sum += a[i] * b[i];
    }
    return sum;
}
</pre>
</td>
<td>
<img src="https://github.com/user-attachments/assets/739573c0-380a-4d84-9fd9-141343ce7e68" />
</td>
</tr>
</table>

### Nightly Rust: 10us 

With rustc 1.86.0-nightly (8239a37f9) and `-C opt-level=3 -C target-feature=+avx2,+fma`:
<table>
<tr>
    <th>Rust</th>
    <th>Assembly</th>
</tr>
<tr>
<td>
<pre lang="rust">
#![feature(core_intrinsics)]
use std::intrinsics::{fadd_algebraic, fmul_algebraic};

fn dot(a: &[f32], b: &[f32]) -> f32 {
    let mut sum = 0.0;
    for i in 0..a.len() {
        sum = fadd_algebraic(sum, fmul_algebraic(a[i], b[i]));
    }
    sum
}
</pre>
</td>
<td>
<img src="https://github.com/user-attachments/assets/9dcf953a-2cd7-42f3-bc34-7117de4c5fb9" />
</td>
</tr>
</table>

### Stable Rust: 84us 

With rustc 1.84.1 (e71f9a9a9) and `-C opt-level=3 -C target-feature=+avx2,+fma`:
<table>
<tr>
    <th>Rust</th>
    <th>Assembly</th>
</tr>
<tr>
<td>
<pre lang="rust">
fn dot(a: &[f32], b: &[f32]) -> f32 {
    let mut sum = 0.0;
    for i in 0..a.len() {
        sum += a[i] * b[i];
    }
    sum
}
</pre>
</td>
<td>
<img src="https://github.com/user-attachments/assets/936a1f7e-33e4-4ff8-a732-c3cdfe068dca" />
</td>
</tr>
</table>

# Proposed Change

Expose the `core::intrinsics::f*_algebraic` intrinsics as `algebraic_*` methods on `f16`, `f32`, `f64`, and `f128`, gated on a new `float_algebraic` feature.
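
For illustration, here is a minimal sketch (nightly only, with the `float_algebraic` feature added by this change) of the benchmark's dot product written against the proposed methods; only the new `algebraic_add`/`algebraic_mul` methods are assumed, and the tolerance in the assert simply reflects that algebraic operations are allowed to round differently:

```rust
#![feature(float_algebraic)]

/// Dot product written with the proposed `algebraic_*` methods, which let the
/// compiler reassociate the reduction and vectorize it.
fn dot(a: &[f32], b: &[f32]) -> f32 {
    let mut sum = 0.0f32;
    for (&x, &y) in a.iter().zip(b) {
        sum = sum.algebraic_add(x.algebraic_mul(y));
    }
    sum
}

fn main() {
    let a = [1.0f32, 2.0, 3.0, 4.0];
    let b = [5.0f32, 6.0, 7.0, 8.0];
    // 1*5 + 2*6 + 3*7 + 4*8 = 70; compare with a tolerance because
    // algebraic operations may be reordered and round differently.
    assert!((dot(&a, &b) - 70.0).abs() < 1e-3);
}
```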

# Alternatives Considered

https://github.com/rust-lang/rust/issues/21690 has a lot of good discussion of the various options for supporting fast math in Rust, but it is still open a decade later because any approach that opts in at a coarser granularity than individual operations is ultimately contrary to Rust's design principles.

In the meantime, processors have evolved and we're leaving major performance on the table by not supporting vectorization. We shouldn't make users choose between an unstable compiler and an 8x performance hit.

# References

* https://github.com/rust-lang/rust/issues/21690
* https://github.com/rust-lang/libs-team/issues/532
* https://github.com/rust-lang/rust/issues/136469
* https://github.com/calder/dot-bench
* https://www.felixcloutier.com/x86/vfmadd132ps:vfmadd213ps:vfmadd231ps

try-job: x86_64-gnu-nopt
try-job: x86_64-gnu-aux
Stuart Cook 2025-04-05 13:18:12 +11:00 committed by GitHub
commit 2e4e196a5b
15 changed files with 544 additions and 24 deletions

@@ -2475,35 +2475,35 @@ pub unsafe fn float_to_int_unchecked<Float: Copy, Int: Copy>(value: Float) -> In
/// Float addition that allows optimizations based on algebraic rules.
///
-/// This intrinsic does not have a stable counterpart.
+/// Stabilized as [`f16::algebraic_add`], [`f32::algebraic_add`], [`f64::algebraic_add`] and [`f128::algebraic_add`].
#[rustc_nounwind]
#[rustc_intrinsic]
pub fn fadd_algebraic<T: Copy>(a: T, b: T) -> T;

/// Float subtraction that allows optimizations based on algebraic rules.
///
-/// This intrinsic does not have a stable counterpart.
+/// Stabilized as [`f16::algebraic_sub`], [`f32::algebraic_sub`], [`f64::algebraic_sub`] and [`f128::algebraic_sub`].
#[rustc_nounwind]
#[rustc_intrinsic]
pub fn fsub_algebraic<T: Copy>(a: T, b: T) -> T;

/// Float multiplication that allows optimizations based on algebraic rules.
///
-/// This intrinsic does not have a stable counterpart.
+/// Stabilized as [`f16::algebraic_mul`], [`f32::algebraic_mul`], [`f64::algebraic_mul`] and [`f128::algebraic_mul`].
#[rustc_nounwind]
#[rustc_intrinsic]
pub fn fmul_algebraic<T: Copy>(a: T, b: T) -> T;

/// Float division that allows optimizations based on algebraic rules.
///
-/// This intrinsic does not have a stable counterpart.
+/// Stabilized as [`f16::algebraic_div`], [`f32::algebraic_div`], [`f64::algebraic_div`] and [`f128::algebraic_div`].
#[rustc_nounwind]
#[rustc_intrinsic]
pub fn fdiv_algebraic<T: Copy>(a: T, b: T) -> T;

/// Float remainder that allows optimizations based on algebraic rules.
///
-/// This intrinsic does not have a stable counterpart.
+/// Stabilized as [`f16::algebraic_rem`], [`f32::algebraic_rem`], [`f64::algebraic_rem`] and [`f128::algebraic_rem`].
#[rustc_nounwind]
#[rustc_intrinsic]
pub fn frem_algebraic<T: Copy>(a: T, b: T) -> T;
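
For reviewers' reference, a minimal sketch of calling these intrinsics directly on nightly via the perma-unstable `core_intrinsics` feature (which the new `algebraic_*` methods wrap); nothing beyond the intrinsics declared above is assumed:

```rust
#![feature(core_intrinsics)]
#![allow(internal_features)]

use std::intrinsics::{fadd_algebraic, fmul_algebraic};

fn main() {
    let (a, b, c) = (0.1f32, 0.2f32, 10.0f32);
    // Written as a*c + b*c; the algebraic flags let LLVM rewrite this,
    // e.g. into (a + b) * c, trading bit-exact results for speed.
    let x = fadd_algebraic(fmul_algebraic(a, c), fmul_algebraic(b, c));
    println!("{x}");
}
```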

@@ -1362,4 +1362,54 @@ impl f128 {
// SAFETY: this is actually a safe intrinsic
unsafe { intrinsics::copysignf128(self, sign) }
}
/// Float addition that allows optimizations based on algebraic rules.
///
/// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
#[must_use = "method returns a new number and does not mutate the original value"]
#[unstable(feature = "float_algebraic", issue = "136469")]
#[inline]
pub fn algebraic_add(self, rhs: f128) -> f128 {
intrinsics::fadd_algebraic(self, rhs)
}
/// Float subtraction that allows optimizations based on algebraic rules.
///
/// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
#[must_use = "method returns a new number and does not mutate the original value"]
#[unstable(feature = "float_algebraic", issue = "136469")]
#[inline]
pub fn algebraic_sub(self, rhs: f128) -> f128 {
intrinsics::fsub_algebraic(self, rhs)
}
/// Float multiplication that allows optimizations based on algebraic rules.
///
/// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
#[must_use = "method returns a new number and does not mutate the original value"]
#[unstable(feature = "float_algebraic", issue = "136469")]
#[inline]
pub fn algebraic_mul(self, rhs: f128) -> f128 {
intrinsics::fmul_algebraic(self, rhs)
}
/// Float division that allows optimizations based on algebraic rules.
///
/// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
#[must_use = "method returns a new number and does not mutate the original value"]
#[unstable(feature = "float_algebraic", issue = "136469")]
#[inline]
pub fn algebraic_div(self, rhs: f128) -> f128 {
intrinsics::fdiv_algebraic(self, rhs)
}
/// Float remainder that allows optimizations based on algebraic rules.
///
/// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
#[must_use = "method returns a new number and does not mutate the original value"]
#[unstable(feature = "float_algebraic", issue = "136469")]
#[inline]
pub fn algebraic_rem(self, rhs: f128) -> f128 {
intrinsics::frem_algebraic(self, rhs)
}
}

@@ -1338,4 +1338,54 @@ impl f16 {
// SAFETY: this is actually a safe intrinsic
unsafe { intrinsics::copysignf16(self, sign) }
}
/// Float addition that allows optimizations based on algebraic rules.
///
/// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
#[must_use = "method returns a new number and does not mutate the original value"]
#[unstable(feature = "float_algebraic", issue = "136469")]
#[inline]
pub fn algebraic_add(self, rhs: f16) -> f16 {
intrinsics::fadd_algebraic(self, rhs)
}
/// Float subtraction that allows optimizations based on algebraic rules.
///
/// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
#[must_use = "method returns a new number and does not mutate the original value"]
#[unstable(feature = "float_algebraic", issue = "136469")]
#[inline]
pub fn algebraic_sub(self, rhs: f16) -> f16 {
intrinsics::fsub_algebraic(self, rhs)
}
/// Float multiplication that allows optimizations based on algebraic rules.
///
/// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
#[must_use = "method returns a new number and does not mutate the original value"]
#[unstable(feature = "float_algebraic", issue = "136469")]
#[inline]
pub fn algebraic_mul(self, rhs: f16) -> f16 {
intrinsics::fmul_algebraic(self, rhs)
}
/// Float division that allows optimizations based on algebraic rules.
///
/// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
#[must_use = "method returns a new number and does not mutate the original value"]
#[unstable(feature = "float_algebraic", issue = "136469")]
#[inline]
pub fn algebraic_div(self, rhs: f16) -> f16 {
intrinsics::fdiv_algebraic(self, rhs)
}
/// Float remainder that allows optimizations based on algebraic rules.
///
/// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
#[must_use = "method returns a new number and does not mutate the original value"]
#[unstable(feature = "float_algebraic", issue = "136469")]
#[inline]
pub fn algebraic_rem(self, rhs: f16) -> f16 {
intrinsics::frem_algebraic(self, rhs)
}
}

@@ -1504,4 +1504,54 @@ impl f32 {
// SAFETY: this is actually a safe intrinsic
unsafe { intrinsics::copysignf32(self, sign) }
}
/// Float addition that allows optimizations based on algebraic rules.
///
/// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
#[must_use = "method returns a new number and does not mutate the original value"]
#[unstable(feature = "float_algebraic", issue = "136469")]
#[inline]
pub fn algebraic_add(self, rhs: f32) -> f32 {
intrinsics::fadd_algebraic(self, rhs)
}
/// Float subtraction that allows optimizations based on algebraic rules.
///
/// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
#[must_use = "method returns a new number and does not mutate the original value"]
#[unstable(feature = "float_algebraic", issue = "136469")]
#[inline]
pub fn algebraic_sub(self, rhs: f32) -> f32 {
intrinsics::fsub_algebraic(self, rhs)
}
/// Float multiplication that allows optimizations based on algebraic rules.
///
/// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
#[must_use = "method returns a new number and does not mutate the original value"]
#[unstable(feature = "float_algebraic", issue = "136469")]
#[inline]
pub fn algebraic_mul(self, rhs: f32) -> f32 {
intrinsics::fmul_algebraic(self, rhs)
}
/// Float division that allows optimizations based on algebraic rules.
///
/// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
#[must_use = "method returns a new number and does not mutate the original value"]
#[unstable(feature = "float_algebraic", issue = "136469")]
#[inline]
pub fn algebraic_div(self, rhs: f32) -> f32 {
intrinsics::fdiv_algebraic(self, rhs)
}
/// Float remainder that allows optimizations based on algebraic rules.
///
/// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
#[must_use = "method returns a new number and does not mutate the original value"]
#[unstable(feature = "float_algebraic", issue = "136469")]
#[inline]
pub fn algebraic_rem(self, rhs: f32) -> f32 {
intrinsics::frem_algebraic(self, rhs)
}
}

@@ -1503,4 +1503,54 @@ impl f64 {
// SAFETY: this is actually a safe intrinsic
unsafe { intrinsics::copysignf64(self, sign) }
}
/// Float addition that allows optimizations based on algebraic rules.
///
/// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
#[must_use = "method returns a new number and does not mutate the original value"]
#[unstable(feature = "float_algebraic", issue = "136469")]
#[inline]
pub fn algebraic_add(self, rhs: f64) -> f64 {
intrinsics::fadd_algebraic(self, rhs)
}
/// Float subtraction that allows optimizations based on algebraic rules.
///
/// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
#[must_use = "method returns a new number and does not mutate the original value"]
#[unstable(feature = "float_algebraic", issue = "136469")]
#[inline]
pub fn algebraic_sub(self, rhs: f64) -> f64 {
intrinsics::fsub_algebraic(self, rhs)
}
/// Float multiplication that allows optimizations based on algebraic rules.
///
/// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
#[must_use = "method returns a new number and does not mutate the original value"]
#[unstable(feature = "float_algebraic", issue = "136469")]
#[inline]
pub fn algebraic_mul(self, rhs: f64) -> f64 {
intrinsics::fmul_algebraic(self, rhs)
}
/// Float division that allows optimizations based on algebraic rules.
///
/// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
#[must_use = "method returns a new number and does not mutate the original value"]
#[unstable(feature = "float_algebraic", issue = "136469")]
#[inline]
pub fn algebraic_div(self, rhs: f64) -> f64 {
intrinsics::fdiv_algebraic(self, rhs)
}
/// Float remainder that allows optimizations based on algebraic rules.
///
/// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
#[must_use = "method returns a new number and does not mutate the original value"]
#[unstable(feature = "float_algebraic", issue = "136469")]
#[inline]
pub fn algebraic_rem(self, rhs: f64) -> f64 {
intrinsics::frem_algebraic(self, rhs)
}
}

@@ -1313,6 +1313,51 @@ mod prim_f16 {}
/// | `wasm32`, `wasm64` | If all input NaNs are quiet with all-zero payload: None.<br> Otherwise: all possible payloads. |
///
/// For targets not in this table, all payloads are possible.
///
/// # Algebraic operators
///
/// Algebraic operators of the form `a.algebraic_*(b)` allow the compiler to optimize
/// floating point operations using all the usual algebraic properties of real numbers --
/// despite the fact that those properties do *not* hold on floating point numbers.
/// This can give a great performance boost since it may unlock vectorization.
///
/// The exact set of optimizations is unspecified but typically allows combining operations,
/// rearranging series of operations based on mathematical properties, converting between division
/// and reciprocal multiplication, and disregarding the sign of zero. This means that the results of
/// elementary operations may have undefined precision, and "non-mathematical" values
/// such as NaN, +/-Inf, or -0.0 may behave in unexpected ways, but these operations
/// will never cause undefined behavior.
///
/// Because of the unpredictable nature of compiler optimizations, the same inputs may produce
/// different results even within a single program run. **Unsafe code must not rely on any property
/// of the return value for soundness.** However, implementations will generally do their best to
/// pick a reasonable tradeoff between performance and accuracy of the result.
///
/// For example:
///
/// ```
/// # #![feature(float_algebraic)]
/// # #![allow(unused_assignments)]
/// # let mut x: f32 = 0.0;
/// # let a: f32 = 1.0;
/// # let b: f32 = 2.0;
/// # let c: f32 = 3.0;
/// # let d: f32 = 4.0;
/// x = a.algebraic_add(b).algebraic_add(c).algebraic_add(d);
/// ```
///
/// May be rewritten as:
///
/// ```
/// # #![allow(unused_assignments)]
/// # let mut x: f32 = 0.0;
/// # let a: f32 = 1.0;
/// # let b: f32 = 2.0;
/// # let c: f32 = 3.0;
/// # let d: f32 = 4.0;
/// x = a + b + c + d; // As written
/// x = (a + c) + (b + d); // Reordered to shorten critical path and enable vectorization
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
mod prim_f32 {}

@@ -341,6 +341,7 @@
#![feature(exact_size_is_empty)]
#![feature(exclusive_wrapper)]
#![feature(extend_one)]
#![feature(float_algebraic)]
#![feature(float_gamma)]
#![feature(float_minimum_maximum)]
#![feature(fmt_internals)]

@@ -984,6 +984,25 @@ fn test_total_cmp() {
assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&s_nan()));
}
#[test]
fn test_algebraic() {
let a: f128 = 123.0;
let b: f128 = 456.0;
// Check that individual operations match their primitive counterparts.
//
// This is a check of current implementations and does NOT imply any form of
// guarantee about future behavior. The compiler reserves the right to make
// these operations inexact matches in the future.
let eps = if cfg!(miri) { 1e-6 } else { 0.0 };
assert_approx_eq!(a.algebraic_add(b), a + b, eps);
assert_approx_eq!(a.algebraic_sub(b), a - b, eps);
assert_approx_eq!(a.algebraic_mul(b), a * b, eps);
assert_approx_eq!(a.algebraic_div(b), a / b, eps);
assert_approx_eq!(a.algebraic_rem(b), a % b, eps);
}
#[test]
fn test_from() {
assert_eq!(f128::from(false), 0.0);

@@ -954,6 +954,27 @@ fn test_total_cmp() {
assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&s_nan()));
}
#[test]
fn test_algebraic() {
let a: f16 = 123.0;
let b: f16 = 456.0;
// Check that individual operations match their primitive counterparts.
//
// This is a check of current implementations and does NOT imply any form of
// guarantee about future behavior. The compiler reserves the right to make
// these operations inexact matches in the future.
let eps_add = if cfg!(miri) { 1e1 } else { 0.0 };
let eps_mul = if cfg!(miri) { 1e3 } else { 0.0 };
let eps_div = if cfg!(miri) { 1e0 } else { 0.0 };
assert_approx_eq!(a.algebraic_add(b), a + b, eps_add);
assert_approx_eq!(a.algebraic_sub(b), a - b, eps_add);
assert_approx_eq!(a.algebraic_mul(b), a * b, eps_mul);
assert_approx_eq!(a.algebraic_div(b), a / b, eps_div);
assert_approx_eq!(a.algebraic_rem(b), a % b, eps_div);
}
#[test]
fn test_from() {
assert_eq!(f16::from(false), 0.0);

@@ -915,3 +915,24 @@ fn test_total_cmp() {
assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&f32::INFINITY));
assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&s_nan()));
}
#[test]
fn test_algebraic() {
let a: f32 = 123.0;
let b: f32 = 456.0;
// Check that individual operations match their primitive counterparts.
//
// This is a check of current implementations and does NOT imply any form of
// guarantee about future behavior. The compiler reserves the right to make
// these operations inexact matches in the future.
let eps_add = if cfg!(miri) { 1e-3 } else { 0.0 };
let eps_mul = if cfg!(miri) { 1e-1 } else { 0.0 };
let eps_div = if cfg!(miri) { 1e-4 } else { 0.0 };
assert_approx_eq!(a.algebraic_add(b), a + b, eps_add);
assert_approx_eq!(a.algebraic_sub(b), a - b, eps_add);
assert_approx_eq!(a.algebraic_mul(b), a * b, eps_mul);
assert_approx_eq!(a.algebraic_div(b), a / b, eps_div);
assert_approx_eq!(a.algebraic_rem(b), a % b, eps_div);
}

@@ -894,3 +894,22 @@ fn test_total_cmp() {
assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&f64::INFINITY));
assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&s_nan()));
}
#[test]
fn test_algebraic() {
let a: f64 = 123.0;
let b: f64 = 456.0;
// Check that individual operations match their primitive counterparts.
//
// This is a check of current implementations and does NOT imply any form of
// guarantee about future behavior. The compiler reserves the right to make
// these operations inexact matches in the future.
let eps = if cfg!(miri) { 1e-6 } else { 0.0 };
assert_approx_eq!(a.algebraic_add(b), a + b, eps);
assert_approx_eq!(a.algebraic_sub(b), a - b, eps);
assert_approx_eq!(a.algebraic_mul(b), a * b, eps);
assert_approx_eq!(a.algebraic_div(b), a / b, eps);
assert_approx_eq!(a.algebraic_rem(b), a % b, eps);
}

@@ -1,4 +1,4 @@
-#![feature(f16, f128, float_gamma, float_minimum_maximum)]
+#![feature(f16, f128, float_algebraic, float_gamma, float_minimum_maximum)]

use std::fmt;
use std::ops::{Add, Div, Mul, Rem, Sub};

@@ -10,7 +10,7 @@ macro_rules! assert_approx_eq {
let (a, b) = (&$a, &$b);
let diff = (*a - *b).abs();
assert!(
-diff < $lim,
+diff <= $lim,
"{a:?} is not approximately equal to {b:?} (threshold {lim:?}, difference {diff:?})",
lim = $lim
);

@@ -411,9 +411,9 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
};
let res = this.binary_op(op, &a, &b)?;
// `binary_op` already called `generate_nan` if needed.
-// Apply a relative error of 16ULP to simulate non-deterministic precision loss
+// Apply a relative error of 4ULP to simulate non-deterministic precision loss
// due to optimizations.
-let res = apply_random_float_error_to_imm(this, res, 4 /* log2(16) */)?;
+let res = apply_random_float_error_to_imm(this, res, 2 /* log2(4) */)?;
this.write_immediate(*res, dest)?;
}

@@ -464,9 +464,9 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
if !float_finite(&res)? {
throw_ub_format!("`{intrinsic_name}` intrinsic produced non-finite value as result");
}
-// Apply a relative error of 16ULP to simulate non-deterministic precision loss
+// Apply a relative error of 4ULP to simulate non-deterministic precision loss
// due to optimizations.
-let res = apply_random_float_error_to_imm(this, res, 4 /* log2(16) */)?;
+let res = apply_random_float_error_to_imm(this, res, 2 /* log2(4) */)?;
this.write_immediate(*res, dest)?;
}
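
To make the "relative error of 4 ULP" comment concrete, here is a hypothetical, standalone illustration (not Miri's actual `apply_random_float_error_to_imm`) that approximates one ULP of `x` as `f64::EPSILON * |x|`:

```rust
/// Perturb `x` by a relative error of at most `max_ulps` units in the last
/// place (approximated via EPSILON); `unit_rand` is assumed to lie in [-1, 1].
fn perturb(x: f64, max_ulps: u32, unit_rand: f64) -> f64 {
    let max_rel_err = max_ulps as f64 * f64::EPSILON;
    x * (1.0 + unit_rand * max_rel_err)
}

fn main() {
    let exact = 123.0_f64 * 456.0_f64;
    // With a 4 ULP bound, the observed result stays within
    // exact * (1 ± 4 * EPSILON) of the exactly rounded result.
    let observed = perturb(exact, 4, 0.37);
    assert!((observed - exact).abs() <= exact.abs() * 4.0 * f64::EPSILON);
}
```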

@@ -0,0 +1,149 @@
// Verify that algebraic intrinsics generate the correct LLVM calls
// Ensure operations get inlined
//@ compile-flags: -Copt-level=1
#![crate_type = "lib"]
#![feature(f16)]
#![feature(f128)]
#![feature(float_algebraic)]
// CHECK-LABEL: @f16_algebraic_add
#[no_mangle]
pub fn f16_algebraic_add(a: f16, b: f16) -> f16 {
// CHECK: fadd reassoc nsz arcp contract half %{{.+}}, %{{.+}}
a.algebraic_add(b)
}
// CHECK-LABEL: @f16_algebraic_sub
#[no_mangle]
pub fn f16_algebraic_sub(a: f16, b: f16) -> f16 {
// CHECK: fsub reassoc nsz arcp contract half %{{.+}}, %{{.+}}
a.algebraic_sub(b)
}
// CHECK-LABEL: @f16_algebraic_mul
#[no_mangle]
pub fn f16_algebraic_mul(a: f16, b: f16) -> f16 {
// CHECK: fmul reassoc nsz arcp contract half %{{.+}}, %{{.+}}
a.algebraic_mul(b)
}
// CHECK-LABEL: @f16_algebraic_div
#[no_mangle]
pub fn f16_algebraic_div(a: f16, b: f16) -> f16 {
// CHECK: fdiv reassoc nsz arcp contract half %{{.+}}, %{{.+}}
a.algebraic_div(b)
}
// CHECK-LABEL: @f16_algebraic_rem
#[no_mangle]
pub fn f16_algebraic_rem(a: f16, b: f16) -> f16 {
// CHECK: frem reassoc nsz arcp contract half %{{.+}}, %{{.+}}
a.algebraic_rem(b)
}
// CHECK-LABEL: @f32_algebraic_add
#[no_mangle]
pub fn f32_algebraic_add(a: f32, b: f32) -> f32 {
// CHECK: fadd reassoc nsz arcp contract float %{{.+}}, %{{.+}}
a.algebraic_add(b)
}
// CHECK-LABEL: @f32_algebraic_sub
#[no_mangle]
pub fn f32_algebraic_sub(a: f32, b: f32) -> f32 {
// CHECK: fsub reassoc nsz arcp contract float %{{.+}}, %{{.+}}
a.algebraic_sub(b)
}
// CHECK-LABEL: @f32_algebraic_mul
#[no_mangle]
pub fn f32_algebraic_mul(a: f32, b: f32) -> f32 {
// CHECK: fmul reassoc nsz arcp contract float %{{.+}}, %{{.+}}
a.algebraic_mul(b)
}
// CHECK-LABEL: @f32_algebraic_div
#[no_mangle]
pub fn f32_algebraic_div(a: f32, b: f32) -> f32 {
// CHECK: fdiv reassoc nsz arcp contract float %{{.+}}, %{{.+}}
a.algebraic_div(b)
}
// CHECK-LABEL: @f32_algebraic_rem
#[no_mangle]
pub fn f32_algebraic_rem(a: f32, b: f32) -> f32 {
// CHECK: frem reassoc nsz arcp contract float %{{.+}}, %{{.+}}
a.algebraic_rem(b)
}
// CHECK-LABEL: @f64_algebraic_add
#[no_mangle]
pub fn f64_algebraic_add(a: f64, b: f64) -> f64 {
// CHECK: fadd reassoc nsz arcp contract double %{{.+}}, %{{.+}}
a.algebraic_add(b)
}
// CHECK-LABEL: @f64_algebraic_sub
#[no_mangle]
pub fn f64_algebraic_sub(a: f64, b: f64) -> f64 {
// CHECK: fsub reassoc nsz arcp contract double %{{.+}}, %{{.+}}
a.algebraic_sub(b)
}
// CHECK-LABEL: @f64_algebraic_mul
#[no_mangle]
pub fn f64_algebraic_mul(a: f64, b: f64) -> f64 {
// CHECK: fmul reassoc nsz arcp contract double %{{.+}}, %{{.+}}
a.algebraic_mul(b)
}
// CHECK-LABEL: @f64_algebraic_div
#[no_mangle]
pub fn f64_algebraic_div(a: f64, b: f64) -> f64 {
// CHECK: fdiv reassoc nsz arcp contract double %{{.+}}, %{{.+}}
a.algebraic_div(b)
}
// CHECK-LABEL: @f64_algebraic_rem
#[no_mangle]
pub fn f64_algebraic_rem(a: f64, b: f64) -> f64 {
// CHECK: frem reassoc nsz arcp contract double %{{.+}}, %{{.+}}
a.algebraic_rem(b)
}
// CHECK-LABEL: @f128_algebraic_add
#[no_mangle]
pub fn f128_algebraic_add(a: f128, b: f128) -> f128 {
// CHECK: fadd reassoc nsz arcp contract fp128 %{{.+}}, %{{.+}}
a.algebraic_add(b)
}
// CHECK-LABEL: @f128_algebraic_sub
#[no_mangle]
pub fn f128_algebraic_sub(a: f128, b: f128) -> f128 {
// CHECK: fsub reassoc nsz arcp contract fp128 %{{.+}}, %{{.+}}
a.algebraic_sub(b)
}
// CHECK-LABEL: @f128_algebraic_mul
#[no_mangle]
pub fn f128_algebraic_mul(a: f128, b: f128) -> f128 {
// CHECK: fmul reassoc nsz arcp contract fp128 %{{.+}}, %{{.+}}
a.algebraic_mul(b)
}
// CHECK-LABEL: @f128_algebraic_div
#[no_mangle]
pub fn f128_algebraic_div(a: f128, b: f128) -> f128 {
// CHECK: fdiv reassoc nsz arcp contract fp128 %{{.+}}, %{{.+}}
a.algebraic_div(b)
}
// CHECK-LABEL: @f128_algebraic_rem
#[no_mangle]
pub fn f128_algebraic_rem(a: f128, b: f128) -> f128 {
// CHECK: frem reassoc nsz arcp contract fp128 %{{.+}}, %{{.+}}
a.algebraic_rem(b)
}

@@ -3,7 +3,10 @@
#![crate_type = "lib"]
#![feature(core_intrinsics)]

-use std::intrinsics::{fadd_fast, fdiv_fast, fmul_fast, frem_fast, fsub_fast};
+use std::intrinsics::{
+    fadd_algebraic, fadd_fast, fdiv_algebraic, fdiv_fast, fmul_algebraic, fmul_fast,
+    frem_algebraic, frem_fast, fsub_algebraic, fsub_fast,
+};

// CHECK-LABEL: @add
#[no_mangle]
@@ -13,30 +16,72 @@ pub fn add(x: f32, y: f32) -> f32 {
    x + y
}

-// CHECK-LABEL: @addition
+// CHECK-LABEL: @test_fadd_algebraic
#[no_mangle]
-pub fn addition(x: f32, y: f32) -> f32 {
-    // CHECK: fadd fast float
+pub fn test_fadd_algebraic(x: f32, y: f32) -> f32 {
+    // CHECK: fadd reassoc nsz arcp contract float %x, %y
+    fadd_algebraic(x, y)
}

+// CHECK-LABEL: @test_fsub_algebraic
+#[no_mangle]
+pub fn test_fsub_algebraic(x: f32, y: f32) -> f32 {
+    // CHECK: fsub reassoc nsz arcp contract float %x, %y
+    fsub_algebraic(x, y)
+}

+// CHECK-LABEL: @test_fmul_algebraic
+#[no_mangle]
+pub fn test_fmul_algebraic(x: f32, y: f32) -> f32 {
+    // CHECK: fmul reassoc nsz arcp contract float %x, %y
+    fmul_algebraic(x, y)
+}

+// CHECK-LABEL: @test_fdiv_algebraic
+#[no_mangle]
+pub fn test_fdiv_algebraic(x: f32, y: f32) -> f32 {
+    // CHECK: fdiv reassoc nsz arcp contract float %x, %y
+    fdiv_algebraic(x, y)
+}

+// CHECK-LABEL: @test_frem_algebraic
+#[no_mangle]
+pub fn test_frem_algebraic(x: f32, y: f32) -> f32 {
+    // CHECK: frem reassoc nsz arcp contract float %x, %y
+    frem_algebraic(x, y)
+}

+// CHECK-LABEL: @test_fadd_fast
+#[no_mangle]
+pub fn test_fadd_fast(x: f32, y: f32) -> f32 {
+    // CHECK: fadd fast float %x, %y
    unsafe { fadd_fast(x, y) }
}

-// CHECK-LABEL: @subtraction
+// CHECK-LABEL: @test_fsub_fast
#[no_mangle]
-pub fn subtraction(x: f32, y: f32) -> f32 {
-    // CHECK: fsub fast float
+pub fn test_fsub_fast(x: f32, y: f32) -> f32 {
+    // CHECK: fsub fast float %x, %y
    unsafe { fsub_fast(x, y) }
}

-// CHECK-LABEL: @multiplication
+// CHECK-LABEL: @test_fmul_fast
#[no_mangle]
-pub fn multiplication(x: f32, y: f32) -> f32 {
-    // CHECK: fmul fast float
+pub fn test_fmul_fast(x: f32, y: f32) -> f32 {
+    // CHECK: fmul fast float %x, %y
    unsafe { fmul_fast(x, y) }
}

-// CHECK-LABEL: @division
+// CHECK-LABEL: @test_fdiv_fast
#[no_mangle]
-pub fn division(x: f32, y: f32) -> f32 {
-    // CHECK: fdiv fast float
+pub fn test_fdiv_fast(x: f32, y: f32) -> f32 {
+    // CHECK: fdiv fast float %x, %y
    unsafe { fdiv_fast(x, y) }
}

+// CHECK-LABEL: @test_frem_fast
+#[no_mangle]
+pub fn test_frem_fast(x: f32, y: f32) -> f32 {
+    // CHECK: frem fast float %x, %y
+    unsafe { frem_fast(x, y) }
+}