Auto merge of #126518 - matthiaskrgr:rollup-wb70rzq, r=matthiaskrgr

Rollup of 9 pull requests

Successful merges:

 - #125829 (rustc_span: Add conveniences for working with span formats)
 - #126361 (Unify intrinsics body handling in StableMIR)
 - #126417 (Add `f16` and `f128` inline ASM support for `x86` and `x86-64`)
 - #126424 (Also sort `crt-static` in `--print target-features` output)
 - #126428 (Polish `std::path::absolute` documentation.)
 - #126429 (Add `f16` and `f128` const eval for binary and unary operations)
 - #126448 (End support for Python 3.8 in tidy)
 - #126488 (Use `std::path::absolute` in bootstrap)
 - #126511 (.mailmap: Associate both my work and my private email with me)

r? `@ghost`
`@rustbot` modify labels: rollup
Merged by bors on 2024-06-15 14:51:12 +00:00, commit 92af831290.
30 changed files with 719 additions and 512 deletions

View File

@ -379,6 +379,7 @@ Markus Westerlind <marwes91@gmail.com> Markus <marwes91@gmail.com>
Martin Carton <cartonmartin+git@gmail.com>
Martin Habovštiak <martin.habovstiak@gmail.com>
Martin Hafskjold Thoresen <martinhath@gmail.com>
Martin Nordholts <martin.nordholts@codetale.se> <enselic@gmail.com>
Matej Lach <matej.lach@gmail.com> Matej Ľach <matej.lach@gmail.com>
Mateusz Mikuła <mati865@gmail.com>
Mateusz Mikuła <mati865@gmail.com> <mati865@users.noreply.github.com>

View File

@ -959,6 +959,43 @@ fn llvm_fixup_input<'ll, 'tcx>(
InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
Abi::Vector { .. },
) if layout.size.bytes() == 64 => bx.bitcast(value, bx.cx.type_vector(bx.cx.type_f64(), 8)),
(
InlineAsmRegClass::X86(
X86InlineAsmRegClass::xmm_reg
| X86InlineAsmRegClass::ymm_reg
| X86InlineAsmRegClass::zmm_reg,
),
Abi::Scalar(s),
) if bx.sess().asm_arch == Some(InlineAsmArch::X86)
&& s.primitive() == Primitive::Float(Float::F128) =>
{
bx.bitcast(value, bx.type_vector(bx.type_i32(), 4))
}
(
InlineAsmRegClass::X86(
X86InlineAsmRegClass::xmm_reg
| X86InlineAsmRegClass::ymm_reg
| X86InlineAsmRegClass::zmm_reg,
),
Abi::Scalar(s),
) if s.primitive() == Primitive::Float(Float::F16) => {
let value = bx.insert_element(
bx.const_undef(bx.type_vector(bx.type_f16(), 8)),
value,
bx.const_usize(0),
);
bx.bitcast(value, bx.type_vector(bx.type_i16(), 8))
}
(
InlineAsmRegClass::X86(
X86InlineAsmRegClass::xmm_reg
| X86InlineAsmRegClass::ymm_reg
| X86InlineAsmRegClass::zmm_reg,
),
Abi::Vector { element, count: count @ (8 | 16) },
) if element.primitive() == Primitive::Float(Float::F16) => {
bx.bitcast(value, bx.type_vector(bx.type_i16(), count))
}
(
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
Abi::Scalar(s),
@ -1036,6 +1073,39 @@ fn llvm_fixup_output<'ll, 'tcx>(
InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
Abi::Vector { .. },
) if layout.size.bytes() == 64 => bx.bitcast(value, layout.llvm_type(bx.cx)),
(
InlineAsmRegClass::X86(
X86InlineAsmRegClass::xmm_reg
| X86InlineAsmRegClass::ymm_reg
| X86InlineAsmRegClass::zmm_reg,
),
Abi::Scalar(s),
) if bx.sess().asm_arch == Some(InlineAsmArch::X86)
&& s.primitive() == Primitive::Float(Float::F128) =>
{
bx.bitcast(value, bx.type_f128())
}
(
InlineAsmRegClass::X86(
X86InlineAsmRegClass::xmm_reg
| X86InlineAsmRegClass::ymm_reg
| X86InlineAsmRegClass::zmm_reg,
),
Abi::Scalar(s),
) if s.primitive() == Primitive::Float(Float::F16) => {
let value = bx.bitcast(value, bx.type_vector(bx.type_f16(), 8));
bx.extract_element(value, bx.const_usize(0))
}
(
InlineAsmRegClass::X86(
X86InlineAsmRegClass::xmm_reg
| X86InlineAsmRegClass::ymm_reg
| X86InlineAsmRegClass::zmm_reg,
),
Abi::Vector { element, count: count @ (8 | 16) },
) if element.primitive() == Primitive::Float(Float::F16) => {
bx.bitcast(value, bx.type_vector(bx.type_f16(), count))
}
(
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
Abi::Scalar(s),
@ -1109,6 +1179,36 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
Abi::Vector { .. },
) if layout.size.bytes() == 64 => cx.type_vector(cx.type_f64(), 8),
(
InlineAsmRegClass::X86(
X86InlineAsmRegClass::xmm_reg
| X86InlineAsmRegClass::ymm_reg
| X86InlineAsmRegClass::zmm_reg,
),
Abi::Scalar(s),
) if cx.sess().asm_arch == Some(InlineAsmArch::X86)
&& s.primitive() == Primitive::Float(Float::F128) =>
{
cx.type_vector(cx.type_i32(), 4)
}
(
InlineAsmRegClass::X86(
X86InlineAsmRegClass::xmm_reg
| X86InlineAsmRegClass::ymm_reg
| X86InlineAsmRegClass::zmm_reg,
),
Abi::Scalar(s),
) if s.primitive() == Primitive::Float(Float::F16) => cx.type_vector(cx.type_i16(), 8),
(
InlineAsmRegClass::X86(
X86InlineAsmRegClass::xmm_reg
| X86InlineAsmRegClass::ymm_reg
| X86InlineAsmRegClass::zmm_reg,
),
Abi::Vector { element, count: count @ (8 | 16) },
) if element.primitive() == Primitive::Float(Float::F16) => {
cx.type_vector(cx.type_i16(), count)
}
(
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
Abi::Scalar(s),
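
These fixups are what let `f16` and `f128` scalars travel through SSE registers (bitcast to `<8 x i16>` / `<4 x i32>` on the way in and out). A hedged, user-level sketch of what that enables on an x86-64 nightly with the `f16`/`f128` feature gates:

```rust
#![feature(f16, f128)]
use std::arch::asm;

// Sketch only: pass the new float types through SSE registers with `asm!`.
// The compiler performs the vector bitcasts shown in the hunks above.
fn roundtrip(x: f16, y: f128) -> (f16, f128) {
    let a: f16;
    let b: f128;
    unsafe {
        // Intel syntax (the default): `movaps dest, src`.
        asm!("movaps {}, {}", out(xmm_reg) a, in(xmm_reg) x);
        asm!("movaps {}, {}", out(xmm_reg) b, in(xmm_reg) y);
    }
    (a, b)
}
```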

View File

@ -394,10 +394,15 @@ fn print_target_features(out: &mut dyn PrintBackendInfo, sess: &Session, tm: &ll
(*feature, desc)
})
.collect::<Vec<_>>();
// Since we add this at the end ...
rustc_target_features.extend_from_slice(&[(
"crt-static",
"Enables C Run-time Libraries to be statically linked",
)]);
// ... we need to sort the list again.
rustc_target_features.sort();
llvm_target_features.retain(|(f, _d)| !known_llvm_target_features.contains(f));
let max_feature_len = llvm_target_features
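
A standalone illustration of why the second `sort` is needed (toy data, not rustc's actual feature list):

```rust
// Appending after the initial sort leaves the list out of order, so the
// `--print target-features` output must be sorted again.
fn main() {
    let mut features = vec![("sse2", "..."), ("xsave", "...")];
    features.sort();
    // rustc adds its own `crt-static` entry after the LLVM-derived list ...
    features.push(("crt-static", "Enables C Run-time Libraries to be statically linked"));
    // ... so sort once more to keep the printed table ordered.
    features.sort();
    assert_eq!(features[0].0, "crt-static");
}
```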

View File

@ -357,14 +357,18 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
let left = left.to_scalar();
let right = right.to_scalar();
Ok(match fty {
FloatTy::F16 => unimplemented!("f16_f128"),
FloatTy::F16 => {
self.binary_float_op(bin_op, layout, left.to_f16()?, right.to_f16()?)
}
FloatTy::F32 => {
self.binary_float_op(bin_op, layout, left.to_f32()?, right.to_f32()?)
}
FloatTy::F64 => {
self.binary_float_op(bin_op, layout, left.to_f64()?, right.to_f64()?)
}
FloatTy::F128 => unimplemented!("f16_f128"),
FloatTy::F128 => {
self.binary_float_op(bin_op, layout, left.to_f128()?, right.to_f128()?)
}
})
}
_ if left.layout.ty.is_integral() => {
@ -424,11 +428,16 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
}
ty::Float(fty) => {
let val = val.to_scalar();
if un_op != Neg {
span_bug!(self.cur_span(), "Invalid float op {:?}", un_op);
}
// No NaN adjustment here, `-` is a bitwise operation!
let res = match (un_op, fty) {
(Neg, FloatTy::F32) => Scalar::from_f32(-val.to_f32()?),
(Neg, FloatTy::F64) => Scalar::from_f64(-val.to_f64()?),
_ => span_bug!(self.cur_span(), "Invalid float op {:?}", un_op),
let res = match fty {
FloatTy::F16 => Scalar::from_f16(-val.to_f16()?),
FloatTy::F32 => Scalar::from_f32(-val.to_f32()?),
FloatTy::F64 => Scalar::from_f64(-val.to_f64()?),
FloatTy::F128 => Scalar::from_f128(-val.to_f128()?),
};
Ok(ImmTy::from_scalar(res, layout))
}
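
With the interpreter arms filled in, binary and unary operations on the new float types can be evaluated at compile time. A hedged sketch (nightly with the `f16`/`f128` feature gates):

```rust
#![feature(f16, f128)]

// These constants exercise exactly the paths added above: a binary op and a
// unary negation on `f16`/`f128`, evaluated by the const interpreter.
const SUM: f16 = 1.5 + 2.25;
const NEG: f128 = -(2.0 * 8.0);

fn main() {
    // Compare with `==` rather than `assert_eq!` since Debug impls for the
    // new types were still limited at the time of this change.
    assert!(SUM == 3.75);
    assert!(NEG == -16.0);
}
```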

View File

@ -31,7 +31,7 @@ impl MutVisitor for Marker {
// it's some advanced case with macro-generated macros. So if we cache the marked version
// of that context once, we'll typically have a 100% cache hit rate after that.
let Marker(expn_id, transparency, ref mut cache) = *self;
span.update_ctxt(|ctxt| {
*span = span.map_ctxt(|ctxt| {
*cache
.entry(ctxt)
.or_insert_with(|| ctxt.apply_mark(expn_id.to_expn_id(), transparency))

View File

@ -62,8 +62,10 @@ impl<'a, 'tcx> InlineAsmCtxt<'a, 'tcx> {
ty::Int(IntTy::I64) | ty::Uint(UintTy::U64) => Some(InlineAsmType::I64),
ty::Int(IntTy::I128) | ty::Uint(UintTy::U128) => Some(InlineAsmType::I128),
ty::Int(IntTy::Isize) | ty::Uint(UintTy::Usize) => Some(asm_ty_isize),
ty::Float(FloatTy::F16) => Some(InlineAsmType::F16),
ty::Float(FloatTy::F32) => Some(InlineAsmType::F32),
ty::Float(FloatTy::F64) => Some(InlineAsmType::F64),
ty::Float(FloatTy::F128) => Some(InlineAsmType::F128),
ty::FnPtr(_) => Some(asm_ty_isize),
ty::RawPtr(ty, _) if self.is_thin_ptr_ty(ty) => Some(asm_ty_isize),
ty::Adt(adt, args) if adt.repr().simd() => {
@ -105,8 +107,10 @@ impl<'a, 'tcx> InlineAsmCtxt<'a, 'tcx> {
width => bug!("unsupported pointer width: {width}"),
})
}
ty::Float(FloatTy::F16) => Some(InlineAsmType::VecF16(size)),
ty::Float(FloatTy::F32) => Some(InlineAsmType::VecF32(size)),
ty::Float(FloatTy::F64) => Some(InlineAsmType::VecF64(size)),
ty::Float(FloatTy::F128) => Some(InlineAsmType::VecF128(size)),
_ => None,
}
}

View File

@ -69,6 +69,13 @@ impl<Prov: Provenance> fmt::LowerHex for Scalar<Prov> {
}
}
impl<Prov> From<Half> for Scalar<Prov> {
#[inline(always)]
fn from(f: Half) -> Self {
Scalar::from_f16(f)
}
}
impl<Prov> From<Single> for Scalar<Prov> {
#[inline(always)]
fn from(f: Single) -> Self {
@ -83,6 +90,13 @@ impl<Prov> From<Double> for Scalar<Prov> {
}
}
impl<Prov> From<Quad> for Scalar<Prov> {
#[inline(always)]
fn from(f: Quad) -> Self {
Scalar::from_f128(f)
}
}
impl<Prov> From<ScalarInt> for Scalar<Prov> {
#[inline(always)]
fn from(ptr: ScalarInt) -> Self {

View File

@ -64,9 +64,10 @@ impl<'tcx> Context for TablesWrapper<'tcx> {
}
fn has_body(&self, def: DefId) -> bool {
let tables = self.0.borrow();
let def_id = tables[def];
tables.tcx.is_mir_available(def_id)
let mut tables = self.0.borrow_mut();
let tcx = tables.tcx;
let def_id = def.internal(&mut *tables, tcx);
tables.item_has_body(def_id)
}
fn foreign_modules(&self, crate_num: CrateNum) -> Vec<stable_mir::ty::ForeignModuleDef> {
@ -323,13 +324,6 @@ impl<'tcx> Context for TablesWrapper<'tcx> {
tcx.intrinsic(def_id).unwrap().name.to_string()
}
fn intrinsic_must_be_overridden(&self, def: IntrinsicDef) -> bool {
let mut tables = self.0.borrow_mut();
let tcx = tables.tcx;
let def_id = def.0.internal(&mut *tables, tcx);
tcx.intrinsic_raw(def_id).unwrap().must_be_overridden
}
fn closure_sig(&self, args: &GenericArgs) -> PolyFnSig {
let mut tables = self.0.borrow_mut();
let tcx = tables.tcx;
@ -516,7 +510,7 @@ impl<'tcx> Context for TablesWrapper<'tcx> {
let mut tables = self.0.borrow_mut();
let instance = tables.instances[def];
tables
.has_body(instance)
.instance_has_body(instance)
.then(|| BodyBuilder::new(tables.tcx, instance).build(&mut *tables))
}

View File

@ -51,9 +51,13 @@ impl<'tcx> Tables<'tcx> {
self.mir_consts.create_or_fetch(constant)
}
pub(crate) fn has_body(&self, instance: Instance<'tcx>) -> bool {
/// Return whether the instance has a body available.
///
/// Items and intrinsics may have a body available from their definition.
/// Shim bodies may be generated depending on their type.
pub(crate) fn instance_has_body(&self, instance: Instance<'tcx>) -> bool {
let def_id = instance.def_id();
self.tcx.is_mir_available(def_id)
self.item_has_body(def_id)
|| !matches!(
instance.def,
ty::InstanceDef::Virtual(..)
@ -61,6 +65,19 @@ impl<'tcx> Tables<'tcx> {
| ty::InstanceDef::Item(..)
)
}
/// Return whether the item has a body defined by the user.
///
/// Note that intrinsics may have a placeholder body that shouldn't be used in practice.
/// In StableMIR, we handle this case as if the body is not available.
pub(crate) fn item_has_body(&self, def_id: DefId) -> bool {
let must_override = if let Some(intrinsic) = self.tcx.intrinsic(def_id) {
intrinsic.must_be_overridden
} else {
false
};
!must_override && self.tcx.is_mir_available(def_id)
}
}
/// Build a stable mir crate from a given crate number.
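
A hedged restatement of the rule `item_has_body` encodes, as a tiny standalone function (the booleans stand in for the `tcx` queries used above):

```rust
// MIR must be available *and* the item must not be an intrinsic whose
// placeholder body should never be used.
fn item_has_body(is_mir_available: bool, must_be_overridden: bool) -> bool {
    !must_be_overridden && is_mir_available
}

fn main() {
    assert!(item_has_body(true, false));  // ordinary item with MIR
    assert!(!item_has_body(true, true));  // e.g. `size_of_val` after this change
}
```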

View File

@ -520,6 +520,7 @@ impl SpanData {
pub fn with_hi(&self, hi: BytePos) -> Span {
Span::new(self.lo, hi, self.ctxt, self.parent)
}
/// Avoid if possible, `Span::map_ctxt` should be preferred.
#[inline]
fn with_ctxt(&self, ctxt: SyntaxContext) -> Span {
Span::new(self.lo, self.hi, ctxt, self.parent)
@ -576,9 +577,8 @@ impl Span {
self.data().with_hi(hi)
}
#[inline]
pub fn with_ctxt(mut self, ctxt: SyntaxContext) -> Span {
self.update_ctxt(|_| ctxt);
self
pub fn with_ctxt(self, ctxt: SyntaxContext) -> Span {
self.map_ctxt(|_| ctxt)
}
#[inline]
pub fn parent(self) -> Option<LocalDefId> {
@ -1059,9 +1059,8 @@ impl Span {
}
#[inline]
pub fn apply_mark(mut self, expn_id: ExpnId, transparency: Transparency) -> Span {
self.update_ctxt(|ctxt| ctxt.apply_mark(expn_id, transparency));
self
pub fn apply_mark(self, expn_id: ExpnId, transparency: Transparency) -> Span {
self.map_ctxt(|ctxt| ctxt.apply_mark(expn_id, transparency))
}
#[inline]
@ -1109,15 +1108,13 @@ impl Span {
}
#[inline]
pub fn normalize_to_macros_2_0(mut self) -> Span {
self.update_ctxt(|ctxt| ctxt.normalize_to_macros_2_0());
self
pub fn normalize_to_macros_2_0(self) -> Span {
self.map_ctxt(|ctxt| ctxt.normalize_to_macros_2_0())
}
#[inline]
pub fn normalize_to_macro_rules(mut self) -> Span {
self.update_ctxt(|ctxt| ctxt.normalize_to_macro_rules());
self
pub fn normalize_to_macro_rules(self) -> Span {
self.map_ctxt(|ctxt| ctxt.normalize_to_macro_rules())
}
}
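
To see the shape of the API change: the in-place `update_ctxt(&mut self, f)` becomes a by-value `map_ctxt(self, f) -> Span`, so combinators like `with_ctxt` no longer need `mut self` and a trailing `self` return. An illustrative stand-in (`MySpan` is hypothetical, not rustc's `Span`):

```rust
#[derive(Clone, Copy, PartialEq, Debug)]
struct MySpan { ctxt: u32 }

impl MySpan {
    // By-value mapping: returns the updated span instead of mutating in place.
    fn map_ctxt(self, update: impl FnOnce(u32) -> u32) -> MySpan {
        MySpan { ctxt: update(self.ctxt) }
    }
    fn with_ctxt(self, ctxt: u32) -> MySpan {
        self.map_ctxt(|_| ctxt)
    }
}

fn main() {
    let s = MySpan { ctxt: 3 };
    assert_eq!(s.with_ctxt(7), MySpan { ctxt: 7 });
}
```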

View File

@ -87,43 +87,150 @@ pub struct Span {
ctxt_or_parent_or_marker: u16,
}
impl Span {
// Convenience structures for all span formats.
#[derive(Clone, Copy)]
struct InlineCtxt {
lo: u32,
len: u16,
ctxt: u16,
}
#[derive(Clone, Copy)]
struct InlineParent {
lo: u32,
len_with_tag: u16,
parent: u16,
}
#[derive(Clone, Copy)]
struct PartiallyInterned {
index: u32,
ctxt: u16,
}
#[derive(Clone, Copy)]
struct Interned {
index: u32,
}
impl InlineCtxt {
#[inline]
fn data_inline_ctxt(self) -> SpanData {
let len = self.len_with_tag_or_marker as u32;
fn data(self) -> SpanData {
let len = self.len as u32;
debug_assert!(len <= MAX_LEN);
SpanData {
lo: BytePos(self.lo_or_index),
hi: BytePos(self.lo_or_index.debug_strict_add(len)),
ctxt: SyntaxContext::from_u32(self.ctxt_or_parent_or_marker as u32),
lo: BytePos(self.lo),
hi: BytePos(self.lo.debug_strict_add(len)),
ctxt: SyntaxContext::from_u32(self.ctxt as u32),
parent: None,
}
}
#[inline]
fn data_inline_parent(self) -> SpanData {
let len = (self.len_with_tag_or_marker & !PARENT_TAG) as u32;
fn span(lo: u32, len: u16, ctxt: u16) -> Span {
Span { lo_or_index: lo, len_with_tag_or_marker: len, ctxt_or_parent_or_marker: ctxt }
}
#[inline]
fn from_span(span: Span) -> InlineCtxt {
let (lo, len, ctxt) =
(span.lo_or_index, span.len_with_tag_or_marker, span.ctxt_or_parent_or_marker);
InlineCtxt { lo, len, ctxt }
}
}
impl InlineParent {
#[inline]
fn data(self) -> SpanData {
let len = (self.len_with_tag & !PARENT_TAG) as u32;
debug_assert!(len <= MAX_LEN);
let parent = LocalDefId {
local_def_index: DefIndex::from_u32(self.ctxt_or_parent_or_marker as u32),
};
SpanData {
lo: BytePos(self.lo_or_index),
hi: BytePos(self.lo_or_index.debug_strict_add(len)),
lo: BytePos(self.lo),
hi: BytePos(self.lo.debug_strict_add(len)),
ctxt: SyntaxContext::root(),
parent: Some(parent),
parent: Some(LocalDefId { local_def_index: DefIndex::from_u32(self.parent as u32) }),
}
}
#[inline]
fn data_partially_interned(self) -> SpanData {
fn span(lo: u32, len: u16, parent: u16) -> Span {
let (lo_or_index, len_with_tag_or_marker, ctxt_or_parent_or_marker) =
(lo, PARENT_TAG | len, parent);
Span { lo_or_index, len_with_tag_or_marker, ctxt_or_parent_or_marker }
}
#[inline]
fn from_span(span: Span) -> InlineParent {
let (lo, len_with_tag, parent) =
(span.lo_or_index, span.len_with_tag_or_marker, span.ctxt_or_parent_or_marker);
InlineParent { lo, len_with_tag, parent }
}
}
impl PartiallyInterned {
#[inline]
fn data(self) -> SpanData {
SpanData {
ctxt: SyntaxContext::from_u32(self.ctxt_or_parent_or_marker as u32),
..with_span_interner(|interner| interner.spans[self.lo_or_index as usize])
ctxt: SyntaxContext::from_u32(self.ctxt as u32),
..with_span_interner(|interner| interner.spans[self.index as usize])
}
}
#[inline]
fn data_interned(self) -> SpanData {
with_span_interner(|interner| interner.spans[self.lo_or_index as usize])
fn span(index: u32, ctxt: u16) -> Span {
let (lo_or_index, len_with_tag_or_marker, ctxt_or_parent_or_marker) =
(index, BASE_LEN_INTERNED_MARKER, ctxt);
Span { lo_or_index, len_with_tag_or_marker, ctxt_or_parent_or_marker }
}
#[inline]
fn from_span(span: Span) -> PartiallyInterned {
PartiallyInterned { index: span.lo_or_index, ctxt: span.ctxt_or_parent_or_marker }
}
}
impl Interned {
#[inline]
fn data(self) -> SpanData {
with_span_interner(|interner| interner.spans[self.index as usize])
}
#[inline]
fn span(index: u32) -> Span {
let (lo_or_index, len_with_tag_or_marker, ctxt_or_parent_or_marker) =
(index, BASE_LEN_INTERNED_MARKER, CTXT_INTERNED_MARKER);
Span { lo_or_index, len_with_tag_or_marker, ctxt_or_parent_or_marker }
}
#[inline]
fn from_span(span: Span) -> Interned {
Interned { index: span.lo_or_index }
}
}
// This code is very hot, and converting span to an enum and matching on it doesn't optimize away
// properly. So we are using a macro emulating such a match, but expanding it directly to an if-else
// chain.
macro_rules! match_span_kind {
(
$span:expr,
InlineCtxt($span1:ident) => $arm1:expr,
InlineParent($span2:ident) => $arm2:expr,
PartiallyInterned($span3:ident) => $arm3:expr,
Interned($span4:ident) => $arm4:expr,
) => {
if $span.len_with_tag_or_marker != BASE_LEN_INTERNED_MARKER {
if $span.len_with_tag_or_marker & PARENT_TAG == 0 {
// Inline-context format.
let $span1 = InlineCtxt::from_span($span);
$arm1
} else {
// Inline-parent format.
let $span2 = InlineParent::from_span($span);
$arm2
}
} else if $span.ctxt_or_parent_or_marker != CTXT_INTERNED_MARKER {
// Partially-interned format.
let $span3 = PartiallyInterned::from_span($span);
$arm3
} else {
// Interned format.
let $span4 = Interned::from_span($span);
$arm4
}
};
}
// `MAX_LEN` is chosen so that `PARENT_TAG | MAX_LEN` is distinct from
@ -154,23 +261,13 @@ impl Span {
let (len, ctxt32) = (hi.0 - lo.0, ctxt.as_u32());
if len <= MAX_LEN {
if ctxt32 <= MAX_CTXT && parent.is_none() {
// Inline-context format.
return Span {
lo_or_index: lo.0,
len_with_tag_or_marker: len as u16,
ctxt_or_parent_or_marker: ctxt32 as u16,
};
return InlineCtxt::span(lo.0, len as u16, ctxt32 as u16);
} else if ctxt32 == 0
&& let Some(parent) = parent
&& let parent32 = parent.local_def_index.as_u32()
&& parent32 <= MAX_CTXT
{
// Inline-parent format.
return Span {
lo_or_index: lo.0,
len_with_tag_or_marker: PARENT_TAG | len as u16,
ctxt_or_parent_or_marker: parent32 as u16,
};
return InlineParent::span(lo.0, len as u16, parent32 as u16);
}
}
@ -179,20 +276,10 @@ impl Span {
with_span_interner(|interner| interner.intern(&SpanData { lo, hi, ctxt, parent }))
};
if ctxt32 <= MAX_CTXT {
// Partially-interned format.
Span {
// Interned ctxt should never be read, so it can use any value.
lo_or_index: index(SyntaxContext::from_u32(u32::MAX)),
len_with_tag_or_marker: BASE_LEN_INTERNED_MARKER,
ctxt_or_parent_or_marker: ctxt32 as u16,
}
// Interned ctxt should never be read, so it can use any value.
PartiallyInterned::span(index(SyntaxContext::from_u32(u32::MAX)), ctxt32 as u16)
} else {
// Interned format.
Span {
lo_or_index: index(ctxt),
len_with_tag_or_marker: BASE_LEN_INTERNED_MARKER,
ctxt_or_parent_or_marker: CTXT_INTERNED_MARKER,
}
Interned::span(index(ctxt))
}
}
@ -209,20 +296,12 @@ impl Span {
/// This function must not be used outside the incremental engine.
#[inline]
pub fn data_untracked(self) -> SpanData {
if self.len_with_tag_or_marker != BASE_LEN_INTERNED_MARKER {
if self.len_with_tag_or_marker & PARENT_TAG == 0 {
// Inline-context format.
self.data_inline_ctxt()
} else {
// Inline-parent format.
self.data_inline_parent()
}
} else if self.ctxt_or_parent_or_marker != CTXT_INTERNED_MARKER {
// Partially-interned format.
self.data_partially_interned()
} else {
// Interned format.
self.data_interned()
match_span_kind! {
self,
InlineCtxt(span) => span.data(),
InlineParent(span) => span.data(),
PartiallyInterned(span) => span.data(),
Interned(span) => span.data(),
}
}
@ -247,68 +326,57 @@ impl Span {
// update doesn't change format. All non-inline or format changing scenarios require accessing
// interner and can fall back to `Span::new`.
#[inline]
pub fn update_ctxt(&mut self, update: impl FnOnce(SyntaxContext) -> SyntaxContext) {
pub fn map_ctxt(self, update: impl FnOnce(SyntaxContext) -> SyntaxContext) -> Span {
let (updated_ctxt32, data);
if self.len_with_tag_or_marker != BASE_LEN_INTERNED_MARKER {
if self.len_with_tag_or_marker & PARENT_TAG == 0 {
// Inline-context format.
match_span_kind! {
self,
InlineCtxt(span) => {
updated_ctxt32 =
update(SyntaxContext::from_u32(self.ctxt_or_parent_or_marker as u32)).as_u32();
update(SyntaxContext::from_u32(span.ctxt as u32)).as_u32();
// Any small new context including zero will preserve the format.
if updated_ctxt32 <= MAX_CTXT {
self.ctxt_or_parent_or_marker = updated_ctxt32 as u16;
return;
return InlineCtxt::span(span.lo, span.len, updated_ctxt32 as u16);
}
data = self.data_inline_ctxt();
} else {
// Inline-parent format.
data = span.data();
},
InlineParent(span) => {
updated_ctxt32 = update(SyntaxContext::root()).as_u32();
// Only if the new context is zero the format will be preserved.
if updated_ctxt32 == 0 {
// Do nothing.
return;
return self;
}
data = self.data_inline_parent();
}
} else if self.ctxt_or_parent_or_marker != CTXT_INTERNED_MARKER {
// Partially-interned format.
updated_ctxt32 =
update(SyntaxContext::from_u32(self.ctxt_or_parent_or_marker as u32)).as_u32();
// Any small new context excluding zero will preserve the format.
// Zero may change the format to `InlineParent` if parent and len are small enough.
if updated_ctxt32 <= MAX_CTXT && updated_ctxt32 != 0 {
self.ctxt_or_parent_or_marker = updated_ctxt32 as u16;
return;
}
data = self.data_partially_interned();
} else {
// Interned format.
data = self.data_interned();
updated_ctxt32 = update(data.ctxt).as_u32();
data = span.data();
},
PartiallyInterned(span) => {
updated_ctxt32 = update(SyntaxContext::from_u32(span.ctxt as u32)).as_u32();
// Any small new context excluding zero will preserve the format.
// Zero may change the format to `InlineParent` if parent and len are small enough.
if updated_ctxt32 <= MAX_CTXT && updated_ctxt32 != 0 {
return PartiallyInterned::span(span.index, updated_ctxt32 as u16);
}
data = span.data();
},
Interned(span) => {
data = span.data();
updated_ctxt32 = update(data.ctxt).as_u32();
},
}
// We could not keep the span in the same inline format, fall back to the complete logic.
*self = data.with_ctxt(SyntaxContext::from_u32(updated_ctxt32));
data.with_ctxt(SyntaxContext::from_u32(updated_ctxt32))
}
// Returns either syntactic context, if it can be retrieved without taking the interner lock,
// or an index into the interner if it cannot.
#[inline]
fn inline_ctxt(self) -> Result<SyntaxContext, usize> {
if self.len_with_tag_or_marker != BASE_LEN_INTERNED_MARKER {
if self.len_with_tag_or_marker & PARENT_TAG == 0 {
// Inline-context format.
Ok(SyntaxContext::from_u32(self.ctxt_or_parent_or_marker as u32))
} else {
// Inline-parent format.
Ok(SyntaxContext::root())
}
} else if self.ctxt_or_parent_or_marker != CTXT_INTERNED_MARKER {
// Partially-interned format.
Ok(SyntaxContext::from_u32(self.ctxt_or_parent_or_marker as u32))
} else {
// Interned format.
Err(self.lo_or_index as usize)
match_span_kind! {
self,
InlineCtxt(span) => Ok(SyntaxContext::from_u32(span.ctxt as u32)),
InlineParent(_span) => Ok(SyntaxContext::root()),
PartiallyInterned(span) => Ok(SyntaxContext::from_u32(span.ctxt as u32)),
Interned(span) => Err(span.index as usize),
}
}
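
A self-contained sketch of the `match_span_kind!` idea, with made-up tag bits and names, showing how a macro can emulate a match over encodings while expanding to a plain if/else chain:

```rust
#[derive(Clone, Copy)]
struct Packed(u32); // illustrative tagged encoding, not rustc's layout

macro_rules! match_packed {
    ($p:expr, Small($v1:ident) => $arm1:expr, Big($v2:ident) => $arm2:expr,) => {
        if $p.0 & 0x8000_0000 == 0 {
            let $v1 = $p.0; // small payload stored inline
            $arm1
        } else {
            let $v2 = $p.0 & 0x7FFF_FFFF; // index into a side table
            $arm2
        }
    };
}

fn describe(p: Packed) -> String {
    match_packed!(
        p,
        Small(v) => format!("inline value {v}"),
        Big(i) => format!("interned at index {i}"),
    )
}

fn main() {
    println!("{}", describe(Packed(42)));
    println!("{}", describe(Packed(0x8000_0005)));
}
```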

View File

@ -707,15 +707,19 @@ pub enum InlineAsmType {
I32,
I64,
I128,
F16,
F32,
F64,
F128,
VecI8(u64),
VecI16(u64),
VecI32(u64),
VecI64(u64),
VecI128(u64),
VecF16(u64),
VecF32(u64),
VecF64(u64),
VecF128(u64),
}
impl InlineAsmType {
@ -730,15 +734,19 @@ impl InlineAsmType {
Self::I32 => 4,
Self::I64 => 8,
Self::I128 => 16,
Self::F16 => 2,
Self::F32 => 4,
Self::F64 => 8,
Self::F128 => 16,
Self::VecI8(n) => n * 1,
Self::VecI16(n) => n * 2,
Self::VecI32(n) => n * 4,
Self::VecI64(n) => n * 8,
Self::VecI128(n) => n * 16,
Self::VecF16(n) => n * 2,
Self::VecF32(n) => n * 4,
Self::VecF64(n) => n * 8,
Self::VecF128(n) => n * 16,
})
}
}
@ -751,15 +759,19 @@ impl fmt::Display for InlineAsmType {
Self::I32 => f.write_str("i32"),
Self::I64 => f.write_str("i64"),
Self::I128 => f.write_str("i128"),
Self::F16 => f.write_str("f16"),
Self::F32 => f.write_str("f32"),
Self::F64 => f.write_str("f64"),
Self::F128 => f.write_str("f128"),
Self::VecI8(n) => write!(f, "i8x{n}"),
Self::VecI16(n) => write!(f, "i16x{n}"),
Self::VecI32(n) => write!(f, "i32x{n}"),
Self::VecI64(n) => write!(f, "i64x{n}"),
Self::VecI128(n) => write!(f, "i128x{n}"),
Self::VecF16(n) => write!(f, "f16x{n}"),
Self::VecF32(n) => write!(f, "f32x{n}"),
Self::VecF64(n) => write!(f, "f64x{n}"),
Self::VecF128(n) => write!(f, "f128x{n}"),
}
}
}
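
A standalone mirror of the size rule being added (the enum below is illustrative and trimmed to the float variants):

```rust
// Scalar F16 is 2 bytes, F128 is 16, and VecF16(n)/VecF128(n) scale linearly
// with the lane count.
#[derive(Clone, Copy)]
enum AsmTy { F16, F32, F64, F128, VecF16(u64), VecF128(u64) }

fn size_bytes(t: AsmTy) -> u64 {
    match t {
        AsmTy::F16 => 2,
        AsmTy::F32 => 4,
        AsmTy::F64 => 8,
        AsmTy::F128 => 16,
        AsmTy::VecF16(n) => n * 2,
        AsmTy::VecF128(n) => n * 16,
    }
}

fn main() {
    assert_eq!(size_bytes(AsmTy::VecF16(8)), 16);  // fits an xmm register
    assert_eq!(size_bytes(AsmTy::VecF16(32)), 64); // fits a zmm register
    assert_eq!(size_bytes(AsmTy::F128), 16);
}
```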

View File

@ -107,26 +107,26 @@ impl X86InlineAsmRegClass {
match self {
Self::reg | Self::reg_abcd => {
if arch == InlineAsmArch::X86_64 {
types! { _: I16, I32, I64, F32, F64; }
types! { _: I16, I32, I64, F16, F32, F64; }
} else {
types! { _: I16, I32, F32; }
types! { _: I16, I32, F16, F32; }
}
}
Self::reg_byte => types! { _: I8; },
Self::xmm_reg => types! {
sse: I32, I64, F32, F64,
VecI8(16), VecI16(8), VecI32(4), VecI64(2), VecF32(4), VecF64(2);
sse: I32, I64, F16, F32, F64, F128,
VecI8(16), VecI16(8), VecI32(4), VecI64(2), VecF16(8), VecF32(4), VecF64(2);
},
Self::ymm_reg => types! {
avx: I32, I64, F32, F64,
VecI8(16), VecI16(8), VecI32(4), VecI64(2), VecF32(4), VecF64(2),
VecI8(32), VecI16(16), VecI32(8), VecI64(4), VecF32(8), VecF64(4);
avx: I32, I64, F16, F32, F64, F128,
VecI8(16), VecI16(8), VecI32(4), VecI64(2), VecF16(8), VecF32(4), VecF64(2),
VecI8(32), VecI16(16), VecI32(8), VecI64(4), VecF16(16), VecF32(8), VecF64(4);
},
Self::zmm_reg => types! {
avx512f: I32, I64, F32, F64,
VecI8(16), VecI16(8), VecI32(4), VecI64(2), VecF32(4), VecF64(2),
VecI8(32), VecI16(16), VecI32(8), VecI64(4), VecF32(8), VecF64(4),
VecI8(64), VecI16(32), VecI32(16), VecI64(8), VecF32(16), VecF64(8);
avx512f: I32, I64, F16, F32, F64, F128,
VecI8(16), VecI16(8), VecI32(4), VecI64(2), VecF16(8), VecF32(4), VecF64(2),
VecI8(32), VecI16(16), VecI32(8), VecI64(4), VecF16(16), VecF32(8), VecF64(4),
VecI8(64), VecI16(32), VecI32(16), VecI64(8), VecF16(32), VecF32(16), VecF64(8);
},
Self::kreg => types! {
avx512f: I8, I16;
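
On the user side, the updated `reg` row above means general-purpose registers now accept `f16`. A hedged x86-64 sketch (the default template substitution is the full 64-bit register name, so a plain `mov` works):

```rust
#![feature(f16)]
use std::arch::asm;

// Sketch only: move an f16 through a general-purpose register.
fn gpr_roundtrip(x: f16) -> f16 {
    let y: f16;
    unsafe { asm!("mov {}, {}", out(reg) y, in(reg) x) };
    y
}
```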

View File

@ -94,10 +94,6 @@ pub trait Context {
/// Retrieve the plain function name of an intrinsic.
fn intrinsic_name(&self, def: IntrinsicDef) -> Symbol;
/// Returns whether the intrinsic has no meaningful body and all backends
/// need to shim all calls to it.
fn intrinsic_must_be_overridden(&self, def: IntrinsicDef) -> bool;
/// Retrieve the closure signature for the given generic arguments.
fn closure_sig(&self, args: &GenericArgs) -> PolyFnSig;

View File

@ -668,6 +668,11 @@ impl FnDef {
with(|ctx| ctx.has_body(self.0).then(|| ctx.mir_body(self.0)))
}
// Check if the function body is available.
pub fn has_body(&self) -> bool {
with(|ctx| ctx.has_body(self.0))
}
/// Get the information of the intrinsic if this function is a definition of one.
pub fn as_intrinsic(&self) -> Option<IntrinsicDef> {
with(|cx| cx.intrinsic(self.def_id()))
@ -700,7 +705,7 @@ impl IntrinsicDef {
/// Returns whether the intrinsic has no meaningful body and all backends
/// need to shim all calls to it.
pub fn must_be_overridden(&self) -> bool {
with(|cx| cx.intrinsic_must_be_overridden(*self))
with(|cx| !cx.has_body(self.0))
}
}
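
A hedged sketch of how a StableMIR consumer sees the reworked API (code like this runs inside e.g. a `rustc_internal::run` callback, not as a standalone program); it relies only on the methods shown in this diff:

```rust
// After this change, `must_be_overridden` is derived from the same
// body-availability check as `has_body`, so the two queries agree.
fn inspect(def: stable_mir::ty::FnDef) {
    if let Some(intrinsic) = def.as_intrinsic() {
        // An intrinsic that must be overridden is exactly one without an exposed body.
        assert_eq!(intrinsic.must_be_overridden(), !def.has_body());
    }
    if def.has_body() {
        let body = def.body().expect("has_body() implies body() is Some");
        println!("{} basic blocks", body.blocks.len());
    }
}
```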

View File

@ -2302,7 +2302,7 @@ pub fn read_link<P: AsRef<Path>>(path: P) -> io::Result<PathBuf> {
///
/// This function currently corresponds to the `realpath` function on Unix
/// and the `CreateFile` and `GetFinalPathNameByHandle` functions on Windows.
/// Note that, this [may change in the future][changes].
/// Note that this [may change in the future][changes].
///
/// On Windows, this converts the path to use [extended length path][path]
/// syntax, which allows your program to use longer path names, but means you

View File

@ -3345,14 +3345,33 @@ impl Error for StripPrefixError {
/// Makes the path absolute without accessing the filesystem.
///
/// If the path is relative, the current directory is used as the base directory.
/// All intermediate components will be resolved according to platforms-specific
/// rules but unlike [`canonicalize`][crate::fs::canonicalize] this does not
/// All intermediate components will be resolved according to platform-specific
/// rules, but unlike [`canonicalize`][crate::fs::canonicalize], this does not
/// resolve symlinks and may succeed even if the path does not exist.
///
/// If the `path` is empty or getting the
/// [current directory][crate::env::current_dir] fails then an error will be
/// [current directory][crate::env::current_dir] fails, then an error will be
/// returned.
///
/// # Platform-specific behavior
///
/// On POSIX platforms, the path is resolved using [POSIX semantics][posix-semantics],
/// except that it stops short of resolving symlinks. This means it will keep `..`
/// components and trailing slashes.
///
/// On Windows, for verbatim paths, this will simply return the path as given. For other
/// paths, this is currently equivalent to calling
/// [`GetFullPathNameW`][windows-path].
///
/// Note that these [may change in the future][changes].
///
/// # Errors
///
/// This function may return an error in the following situations:
///
/// * If `path` is syntactically invalid; in particular, if it is empty.
/// * If getting the [current directory][crate::env::current_dir] fails.
///
/// # Examples
///
/// ## POSIX paths
@ -3360,50 +3379,42 @@ impl Error for StripPrefixError {
/// ```
/// # #[cfg(unix)]
/// fn main() -> std::io::Result<()> {
/// use std::path::{self, Path};
/// use std::path::{self, Path};
///
/// // Relative to absolute
/// let absolute = path::absolute("foo/./bar")?;
/// assert!(absolute.ends_with("foo/bar"));
/// // Relative to absolute
/// let absolute = path::absolute("foo/./bar")?;
/// assert!(absolute.ends_with("foo/bar"));
///
/// // Absolute to absolute
/// let absolute = path::absolute("/foo//test/.././bar.rs")?;
/// assert_eq!(absolute, Path::new("/foo/test/../bar.rs"));
/// Ok(())
/// // Absolute to absolute
/// let absolute = path::absolute("/foo//test/.././bar.rs")?;
/// assert_eq!(absolute, Path::new("/foo/test/../bar.rs"));
/// Ok(())
/// }
/// # #[cfg(not(unix))]
/// # fn main() {}
/// ```
///
/// The path is resolved using [POSIX semantics][posix-semantics] except that
/// it stops short of resolving symlinks. This means it will keep `..`
/// components and trailing slashes.
///
/// ## Windows paths
///
/// ```
/// # #[cfg(windows)]
/// fn main() -> std::io::Result<()> {
/// use std::path::{self, Path};
/// use std::path::{self, Path};
///
/// // Relative to absolute
/// let absolute = path::absolute("foo/./bar")?;
/// assert!(absolute.ends_with(r"foo\bar"));
/// // Relative to absolute
/// let absolute = path::absolute("foo/./bar")?;
/// assert!(absolute.ends_with(r"foo\bar"));
///
/// // Absolute to absolute
/// let absolute = path::absolute(r"C:\foo//test\..\./bar.rs")?;
/// // Absolute to absolute
/// let absolute = path::absolute(r"C:\foo//test\..\./bar.rs")?;
///
/// assert_eq!(absolute, Path::new(r"C:\foo\bar.rs"));
/// Ok(())
/// assert_eq!(absolute, Path::new(r"C:\foo\bar.rs"));
/// Ok(())
/// }
/// # #[cfg(not(windows))]
/// # fn main() {}
/// ```
///
/// For verbatim paths this will simply return the path as given. For other
/// paths this is currently equivalent to calling
/// [`GetFullPathNameW`][windows-path].
///
/// Note that this [may change in the future][changes].
///
/// [changes]: io#platform-specific-behavior

View File

@ -10,7 +10,7 @@ use std::env;
use std::fmt::{self, Display};
use std::fs;
use std::io::IsTerminal;
use std::path::{Path, PathBuf};
use std::path::{absolute, Path, PathBuf};
use std::process::Command;
use std::str::FromStr;
use std::sync::OnceLock;
@ -1437,7 +1437,7 @@ impl Config {
// To avoid writing to random places on the file system, `config.out` needs to be an absolute path.
if !config.out.is_absolute() {
// `canonicalize` requires the path to already exist. Use our vendored copy of `absolute` instead.
config.out = crate::utils::helpers::absolute(&config.out);
config.out = absolute(&config.out).expect("can't make empty path absolute");
}
config.initial_rustc = if let Some(rustc) = rustc {
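
A minimal sketch of the same replacement outside bootstrap, assuming Rust 1.79+ where `std::path::absolute` is stable (it returns an error for an empty path instead of the vendored helper's panic):

```rust
use std::path::{absolute, Path, PathBuf};

fn ensure_absolute(out: &Path) -> PathBuf {
    if out.is_absolute() {
        out.to_path_buf()
    } else {
        absolute(out).expect("can't make empty path absolute")
    }
}

fn main() {
    let p = ensure_absolute(Path::new("build/host"));
    assert!(p.is_absolute());
}
```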

View File

@ -331,115 +331,6 @@ fn dir_up_to_date(src: &Path, threshold: SystemTime) -> bool {
})
}
/// Copied from `std::path::absolute` until it stabilizes.
///
/// FIXME: this shouldn't exist.
pub(crate) fn absolute(path: &Path) -> PathBuf {
if path.as_os_str().is_empty() {
panic!("can't make empty path absolute");
}
#[cfg(unix)]
{
t!(absolute_unix(path), format!("could not make path absolute: {}", path.display()))
}
#[cfg(windows)]
{
t!(absolute_windows(path), format!("could not make path absolute: {}", path.display()))
}
#[cfg(not(any(unix, windows)))]
{
println!("WARNING: bootstrap is not supported on non-unix platforms");
t!(std::fs::canonicalize(t!(std::env::current_dir()))).join(path)
}
}
#[cfg(unix)]
/// Make a POSIX path absolute without changing its semantics.
fn absolute_unix(path: &Path) -> io::Result<PathBuf> {
// This is mostly a wrapper around collecting `Path::components`, with
// exceptions made where this conflicts with the POSIX specification.
// See 4.13 Pathname Resolution, IEEE Std 1003.1-2017
// https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap04.html#tag_04_13
use std::os::unix::prelude::OsStrExt;
let mut components = path.components();
let path_os = path.as_os_str().as_bytes();
let mut normalized = if path.is_absolute() {
// "If a pathname begins with two successive <slash> characters, the
// first component following the leading <slash> characters may be
// interpreted in an implementation-defined manner, although more than
// two leading <slash> characters shall be treated as a single <slash>
// character."
if path_os.starts_with(b"//") && !path_os.starts_with(b"///") {
components.next();
PathBuf::from("//")
} else {
PathBuf::new()
}
} else {
env::current_dir()?
};
normalized.extend(components);
// "Interfaces using pathname resolution may specify additional constraints
// when a pathname that does not name an existing directory contains at
// least one non- <slash> character and contains one or more trailing
// <slash> characters".
// A trailing <slash> is also meaningful if "a symbolic link is
// encountered during pathname resolution".
if path_os.ends_with(b"/") {
normalized.push("");
}
Ok(normalized)
}
#[cfg(windows)]
fn absolute_windows(path: &std::path::Path) -> std::io::Result<std::path::PathBuf> {
use std::ffi::OsString;
use std::io::Error;
use std::os::windows::ffi::{OsStrExt, OsStringExt};
use std::ptr::null_mut;
#[link(name = "kernel32")]
extern "system" {
fn GetFullPathNameW(
lpFileName: *const u16,
nBufferLength: u32,
lpBuffer: *mut u16,
lpFilePart: *mut *const u16,
) -> u32;
}
unsafe {
// encode the path as UTF-16
let path: Vec<u16> = path.as_os_str().encode_wide().chain([0]).collect();
let mut buffer = Vec::new();
// Loop until either success or failure.
loop {
// Try to get the absolute path
let len = GetFullPathNameW(
path.as_ptr(),
buffer.len().try_into().unwrap(),
buffer.as_mut_ptr(),
null_mut(),
);
match len as usize {
// Failure
0 => return Err(Error::last_os_error()),
// Buffer is too small, resize.
len if len > buffer.len() => buffer.resize(len, 0),
// Success!
len => {
buffer.truncate(len);
return Ok(OsString::from_wide(&buffer).into());
}
}
}
}
}
/// Adapted from <https://github.com/llvm/llvm-project/blob/782e91224601e461c019e0a4573bbccc6094fbcd/llvm/cmake/modules/HandleLLVMOptions.cmake#L1058-L1079>
///
/// When `clang-cl` is used with instrumentation, we need to add clang's runtime library resource

View File

@ -25,27 +25,6 @@ fn test_make() {
}
}
#[cfg(unix)]
#[test]
fn test_absolute_unix() {
use crate::utils::helpers::absolute_unix;
// Test an absolute path
let path = PathBuf::from("/home/user/file.txt");
assert_eq!(absolute_unix(&path).unwrap(), PathBuf::from("/home/user/file.txt"));
// Test an absolute path with double leading slashes
let path = PathBuf::from("//root//file.txt");
assert_eq!(absolute_unix(&path).unwrap(), PathBuf::from("//root/file.txt"));
// Test a relative path
let path = PathBuf::from("relative/path");
assert_eq!(
absolute_unix(&path).unwrap(),
std::env::current_dir().unwrap().join("relative/path")
);
}
#[test]
fn test_beta_rev_parsing() {
// single digit revision

View File

@ -7,7 +7,7 @@ use std::time::Duration;
use rand::RngCore;
use rustc_apfloat::ieee::{Double, Single};
use rustc_apfloat::ieee::{Double, Half, Quad, Single};
use rustc_apfloat::Float;
use rustc_hir::{
def::{DefKind, Namespace},
@ -1201,12 +1201,14 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
};
let (val, status) = match fty {
FloatTy::F16 => unimplemented!("f16_f128"),
FloatTy::F16 =>
float_to_int_inner::<Half>(this, src.to_scalar().to_f16()?, cast_to, round),
FloatTy::F32 =>
float_to_int_inner::<Single>(this, src.to_scalar().to_f32()?, cast_to, round),
FloatTy::F64 =>
float_to_int_inner::<Double>(this, src.to_scalar().to_f64()?, cast_to, round),
FloatTy::F128 => unimplemented!("f16_f128"),
FloatTy::F128 =>
float_to_int_inner::<Quad>(this, src.to_scalar().to_f128()?, cast_to, round),
};
if status.intersects(
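
A hedged sketch of casts this lets Miri evaluate via rustc_apfloat's `Half` and `Quad` (nightly with the `f16`/`f128` feature gates):

```rust
#![feature(f16, f128)]

// Float-to-int `as` casts on the new types saturate like f32/f64 do.
fn main() {
    assert_eq!(300.0f16 as u8, 255);   // saturates at the integer's max
    assert_eq!((-1.5f128) as i32, -1); // truncates toward zero
}
```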

View File

@ -1,6 +1,8 @@
#![feature(stmt_expr_attributes)]
#![feature(float_gamma)]
#![feature(core_intrinsics)]
#![feature(f128)]
#![feature(f16)]
#![allow(arithmetic_overflow)]
use std::fmt::Debug;
@ -41,103 +43,23 @@ trait FloatToInt<Int>: Copy {
unsafe fn cast_unchecked(self) -> Int;
}
impl FloatToInt<i8> for f32 {
fn cast(self) -> i8 {
self as _
}
unsafe fn cast_unchecked(self) -> i8 {
self.to_int_unchecked()
}
}
impl FloatToInt<i32> for f32 {
fn cast(self) -> i32 {
self as _
}
unsafe fn cast_unchecked(self) -> i32 {
self.to_int_unchecked()
}
}
impl FloatToInt<u32> for f32 {
fn cast(self) -> u32 {
self as _
}
unsafe fn cast_unchecked(self) -> u32 {
self.to_int_unchecked()
}
}
impl FloatToInt<i64> for f32 {
fn cast(self) -> i64 {
self as _
}
unsafe fn cast_unchecked(self) -> i64 {
self.to_int_unchecked()
}
}
impl FloatToInt<u64> for f32 {
fn cast(self) -> u64 {
self as _
}
unsafe fn cast_unchecked(self) -> u64 {
self.to_int_unchecked()
}
macro_rules! float_to_int {
($fty:ty => $($ity:ty),+ $(,)?) => {
$(
impl FloatToInt<$ity> for $fty {
fn cast(self) -> $ity {
self as _
}
unsafe fn cast_unchecked(self) -> $ity {
self.to_int_unchecked()
}
}
)*
};
}
impl FloatToInt<i8> for f64 {
fn cast(self) -> i8 {
self as _
}
unsafe fn cast_unchecked(self) -> i8 {
self.to_int_unchecked()
}
}
impl FloatToInt<i32> for f64 {
fn cast(self) -> i32 {
self as _
}
unsafe fn cast_unchecked(self) -> i32 {
self.to_int_unchecked()
}
}
impl FloatToInt<u32> for f64 {
fn cast(self) -> u32 {
self as _
}
unsafe fn cast_unchecked(self) -> u32 {
self.to_int_unchecked()
}
}
impl FloatToInt<i64> for f64 {
fn cast(self) -> i64 {
self as _
}
unsafe fn cast_unchecked(self) -> i64 {
self.to_int_unchecked()
}
}
impl FloatToInt<u64> for f64 {
fn cast(self) -> u64 {
self as _
}
unsafe fn cast_unchecked(self) -> u64 {
self.to_int_unchecked()
}
}
impl FloatToInt<i128> for f64 {
fn cast(self) -> i128 {
self as _
}
unsafe fn cast_unchecked(self) -> i128 {
self.to_int_unchecked()
}
}
impl FloatToInt<u128> for f64 {
fn cast(self) -> u128 {
self as _
}
unsafe fn cast_unchecked(self) -> u128 {
self.to_int_unchecked()
}
}
float_to_int!(f32 => i8, u8, i16, u16, i32, u32, i64, u64, i128, u128);
float_to_int!(f64 => i8, u8, i16, u16, i32, u32, i64, u64, i128, u128);
/// Test this cast both via `as` and via `approx_unchecked` (i.e., it must not saturate).
#[track_caller]
@ -153,18 +75,29 @@ where
fn basic() {
// basic arithmetic
assert_eq(6.0_f16 * 6.0_f16, 36.0_f16);
assert_eq(6.0_f32 * 6.0_f32, 36.0_f32);
assert_eq(6.0_f64 * 6.0_f64, 36.0_f64);
assert_eq(6.0_f128 * 6.0_f128, 36.0_f128);
assert_eq(-{ 5.0_f16 }, -5.0_f16);
assert_eq(-{ 5.0_f32 }, -5.0_f32);
assert_eq(-{ 5.0_f64 }, -5.0_f64);
assert_eq(-{ 5.0_f128 }, -5.0_f128);
// infinities, NaN
// FIXME(f16_f128): add when constants and `is_infinite` are available
assert!((5.0_f32 / 0.0).is_infinite());
assert_ne!({ 5.0_f32 / 0.0 }, { -5.0_f32 / 0.0 });
assert!((5.0_f64 / 0.0).is_infinite());
assert_ne!({ 5.0_f64 / 0.0 }, { 5.0_f64 / -0.0 });
assert_ne!(f32::NAN, f32::NAN);
assert_ne!(f64::NAN, f64::NAN);
// negative zero
let posz = 0.0f16;
let negz = -0.0f16;
assert_eq(posz, negz);
assert_ne!(posz.to_bits(), negz.to_bits());
let posz = 0.0f32;
let negz = -0.0f32;
assert_eq(posz, negz);
@ -173,15 +106,30 @@ fn basic() {
let negz = -0.0f64;
assert_eq(posz, negz);
assert_ne!(posz.to_bits(), negz.to_bits());
let posz = 0.0f128;
let negz = -0.0f128;
assert_eq(posz, negz);
assert_ne!(posz.to_bits(), negz.to_bits());
// byte-level transmute
let x: u64 = unsafe { std::mem::transmute(42.0_f64) };
let y: f64 = unsafe { std::mem::transmute(x) };
assert_eq(y, 42.0_f64);
let x: u16 = unsafe { std::mem::transmute(42.0_f16) };
let y: f16 = unsafe { std::mem::transmute(x) };
assert_eq(y, 42.0_f16);
let x: u32 = unsafe { std::mem::transmute(42.0_f32) };
let y: f32 = unsafe { std::mem::transmute(x) };
assert_eq(y, 42.0_f32);
let x: u64 = unsafe { std::mem::transmute(42.0_f64) };
let y: f64 = unsafe { std::mem::transmute(x) };
assert_eq(y, 42.0_f64);
let x: u128 = unsafe { std::mem::transmute(42.0_f128) };
let y: f128 = unsafe { std::mem::transmute(x) };
assert_eq(y, 42.0_f128);
// `%` sign behavior, some of this used to be buggy
assert!((black_box(1.0f16) % 1.0).is_sign_positive());
assert!((black_box(1.0f16) % -1.0).is_sign_positive());
assert!((black_box(-1.0f16) % 1.0).is_sign_negative());
assert!((black_box(-1.0f16) % -1.0).is_sign_negative());
assert!((black_box(1.0f32) % 1.0).is_sign_positive());
assert!((black_box(1.0f32) % -1.0).is_sign_positive());
assert!((black_box(-1.0f32) % 1.0).is_sign_negative());
@ -190,7 +138,12 @@ fn basic() {
assert!((black_box(1.0f64) % -1.0).is_sign_positive());
assert!((black_box(-1.0f64) % 1.0).is_sign_negative());
assert!((black_box(-1.0f64) % -1.0).is_sign_negative());
assert!((black_box(1.0f128) % 1.0).is_sign_positive());
assert!((black_box(1.0f128) % -1.0).is_sign_positive());
assert!((black_box(-1.0f128) % 1.0).is_sign_negative());
assert!((black_box(-1.0f128) % -1.0).is_sign_negative());
// FIXME(f16_f128): add when `abs` is available
assert_eq!((-1.0f32).abs(), 1.0f32);
assert_eq!(34.2f64.abs(), 34.2f64);
}

View File

@ -1,10 +1,10 @@
# requirements.in This is the source file for our pinned version requirements
# file "requirements.txt" To regenerate that file, pip-tools is required
# (`python -m pip install pip-tools`). Once installed, run: `pip-compile
# --generate-hashes src/tools/tidy/config/requirements.in`
# (`python -m pip install pip-tools==7.4.1`). Once installed, run: `pip-compile
# --generate-hashes --strip-extras src/tools/tidy/config/requirements.in`
#
# Note: this generation step should be run with the oldest supported python
# version (currently 3.7) to ensure backward compatibility
# version (currently 3.9) to ensure backward compatibility
black==23.3.0
ruff==0.0.272

View File

@ -1,8 +1,8 @@
#
# This file is autogenerated by pip-compile with Python 3.11
# This file is autogenerated by pip-compile with Python 3.9
# by the following command:
#
# pip-compile --generate-hashes src/tools/tidy/config/requirements.in
# pip-compile --generate-hashes --strip-extras src/tools/tidy/config/requirements.in
#
black==23.3.0 \
--hash=sha256:064101748afa12ad2291c2b91c960be28b817c0c7eaa35bec09cc63aa56493c5 \
@ -35,10 +35,6 @@ click==8.1.3 \
--hash=sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e \
--hash=sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48
# via black
importlib-metadata==6.7.0 \
--hash=sha256:1aaf550d4f73e5d6783e7acb77aec43d49da8017410afae93822cc9cca98c4d4 \
--hash=sha256:cb52082e659e97afc5dac71e79de97d8681de3aa07ff18578330904a9d18e5b5
# via click
mypy-extensions==1.0.0 \
--hash=sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d \
--hash=sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782
@ -78,40 +74,7 @@ tomli==2.0.1 \
--hash=sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc \
--hash=sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f
# via black
typed-ast==1.5.4 \
--hash=sha256:0261195c2062caf107831e92a76764c81227dae162c4f75192c0d489faf751a2 \
--hash=sha256:0fdbcf2fef0ca421a3f5912555804296f0b0960f0418c440f5d6d3abb549f3e1 \
--hash=sha256:183afdf0ec5b1b211724dfef3d2cad2d767cbefac291f24d69b00546c1837fb6 \
--hash=sha256:211260621ab1cd7324e0798d6be953d00b74e0428382991adfddb352252f1d62 \
--hash=sha256:267e3f78697a6c00c689c03db4876dd1efdfea2f251a5ad6555e82a26847b4ac \
--hash=sha256:2efae9db7a8c05ad5547d522e7dbe62c83d838d3906a3716d1478b6c1d61388d \
--hash=sha256:370788a63915e82fd6f212865a596a0fefcbb7d408bbbb13dea723d971ed8bdc \
--hash=sha256:39e21ceb7388e4bb37f4c679d72707ed46c2fbf2a5609b8b8ebc4b067d977df2 \
--hash=sha256:3e123d878ba170397916557d31c8f589951e353cc95fb7f24f6bb69adc1a8a97 \
--hash=sha256:4879da6c9b73443f97e731b617184a596ac1235fe91f98d279a7af36c796da35 \
--hash=sha256:4e964b4ff86550a7a7d56345c7864b18f403f5bd7380edf44a3c1fb4ee7ac6c6 \
--hash=sha256:639c5f0b21776605dd6c9dbe592d5228f021404dafd377e2b7ac046b0349b1a1 \
--hash=sha256:669dd0c4167f6f2cd9f57041e03c3c2ebf9063d0757dc89f79ba1daa2bfca9d4 \
--hash=sha256:6778e1b2f81dfc7bc58e4b259363b83d2e509a65198e85d5700dfae4c6c8ff1c \
--hash=sha256:683407d92dc953c8a7347119596f0b0e6c55eb98ebebd9b23437501b28dcbb8e \
--hash=sha256:79b1e0869db7c830ba6a981d58711c88b6677506e648496b1f64ac7d15633aec \
--hash=sha256:7d5d014b7daa8b0bf2eaef684295acae12b036d79f54178b92a2b6a56f92278f \
--hash=sha256:98f80dee3c03455e92796b58b98ff6ca0b2a6f652120c263efdba4d6c5e58f72 \
--hash=sha256:a94d55d142c9265f4ea46fab70977a1944ecae359ae867397757d836ea5a3f47 \
--hash=sha256:a9916d2bb8865f973824fb47436fa45e1ebf2efd920f2b9f99342cb7fab93f72 \
--hash=sha256:c542eeda69212fa10a7ada75e668876fdec5f856cd3d06829e6aa64ad17c8dfe \
--hash=sha256:cf4afcfac006ece570e32d6fa90ab74a17245b83dfd6655a6f68568098345ff6 \
--hash=sha256:ebd9d7f80ccf7a82ac5f88c521115cc55d84e35bf8b446fcd7836eb6b98929a3 \
--hash=sha256:ed855bbe3eb3715fca349c80174cfcfd699c2f9de574d40527b8429acae23a66
typing-extensions==4.12.2 \
--hash=sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d \
--hash=sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8
# via black
typing-extensions==4.6.3 \
--hash=sha256:88a4153d8505aabbb4e13aacb7c486c2b4a33ca3b3f807914a9b4c844c471c26 \
--hash=sha256:d91d5919357fe7f681a9f2b5b4cb2a5f1ef0a1e9f59c4d8ff0d3491e05c0ffd5
# via
# black
# importlib-metadata
# platformdirs
zipp==3.15.0 \
--hash=sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b \
--hash=sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556
# via importlib-metadata

View File

@ -24,9 +24,8 @@ use std::io;
use std::path::{Path, PathBuf};
use std::process::Command;
/// Minimum python revision is 3.7 for ruff
const MIN_PY_REV: (u32, u32) = (3, 7);
const MIN_PY_REV_STR: &str = "≥3.7";
const MIN_PY_REV: (u32, u32) = (3, 9);
const MIN_PY_REV_STR: &str = "≥3.9";
/// Path to find the python executable within a virtual environment
#[cfg(target_os = "windows")]
@ -223,17 +222,8 @@ fn get_or_create_venv(venv_path: &Path, src_reqs_path: &Path) -> Result<PathBuf,
fn create_venv_at_path(path: &Path) -> Result<(), Error> {
/// Preferred python versions in order. Newest to oldest then current
/// development versions
const TRY_PY: &[&str] = &[
"python3.11",
"python3.10",
"python3.9",
"python3.8",
"python3.7",
"python3",
"python",
"python3.12",
"python3.13",
];
const TRY_PY: &[&str] =
&["python3.11", "python3.10", "python3.9", "python3", "python", "python3.12", "python3.13"];
let mut sys_py = None;
let mut found = Vec::new();

View File

@ -7,7 +7,7 @@
//@ compile-flags: -C llvm-args=--x86-asm-syntax=intel
//@ compile-flags: -C target-feature=+avx512bw
#![feature(no_core, lang_items, rustc_attrs, repr_simd)]
#![feature(no_core, lang_items, rustc_attrs, repr_simd, f16, f128)]
#![crate_type = "rlib"]
#![no_core]
#![allow(asm_sub_register, non_camel_case_types)]
@ -41,6 +41,8 @@ pub struct i32x4(i32, i32, i32, i32);
#[repr(simd)]
pub struct i64x2(i64, i64);
#[repr(simd)]
pub struct f16x8(f16, f16, f16, f16, f16, f16, f16, f16);
#[repr(simd)]
pub struct f32x4(f32, f32, f32, f32);
#[repr(simd)]
pub struct f64x2(f64, f64);
@ -87,6 +89,8 @@ pub struct i32x8(i32, i32, i32, i32, i32, i32, i32, i32);
#[repr(simd)]
pub struct i64x4(i64, i64, i64, i64);
#[repr(simd)]
pub struct f16x16(f16, f16, f16, f16, f16, f16, f16, f16, f16, f16, f16, f16, f16, f16, f16, f16);
#[repr(simd)]
pub struct f32x8(f32, f32, f32, f32, f32, f32, f32, f32);
#[repr(simd)]
pub struct f64x4(f64, f64, f64, f64);
@ -198,35 +202,59 @@ pub struct i32x16(i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i3
#[repr(simd)]
pub struct i64x8(i64, i64, i64, i64, i64, i64, i64, i64);
#[repr(simd)]
pub struct f16x32(
f16,
f16,
f16,
f16,
f16,
f16,
f16,
f16,
f16,
f16,
f16,
f16,
f16,
f16,
f16,
f16,
f16,
f16,
f16,
f16,
f16,
f16,
f16,
f16,
f16,
f16,
f16,
f16,
f16,
f16,
f16,
f16,
);
#[repr(simd)]
pub struct f32x16(f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32);
#[repr(simd)]
pub struct f64x8(f64, f64, f64, f64, f64, f64, f64, f64);
impl Copy for i8 {}
impl Copy for i16 {}
impl Copy for i32 {}
impl Copy for f32 {}
impl Copy for i64 {}
impl Copy for f64 {}
impl Copy for ptr {}
impl Copy for i8x16 {}
impl Copy for i16x8 {}
impl Copy for i32x4 {}
impl Copy for i64x2 {}
impl Copy for f32x4 {}
impl Copy for f64x2 {}
impl Copy for i8x32 {}
impl Copy for i16x16 {}
impl Copy for i32x8 {}
impl Copy for i64x4 {}
impl Copy for f32x8 {}
impl Copy for f64x4 {}
impl Copy for i8x64 {}
impl Copy for i16x32 {}
impl Copy for i32x16 {}
impl Copy for i64x8 {}
impl Copy for f32x16 {}
impl Copy for f64x8 {}
macro_rules! impl_copy {
($($ty:ident)*) => {
$(
impl Copy for $ty {}
)*
};
}
impl_copy!(
i8 i16 f16 i32 f32 i64 f64 f128 ptr
i8x16 i16x8 i32x4 i64x2 f16x8 f32x4 f64x2
i8x32 i16x16 i32x8 i64x4 f16x16 f32x8 f64x4
i8x64 i16x32 i32x16 i64x8 f16x32 f32x16 f64x8
);
extern "C" {
fn extern_func();
@ -292,6 +320,13 @@ macro_rules! check_reg {
// CHECK: #NO_APP
check!(reg_i16 i16 reg "mov");
// CHECK-LABEL: reg_f16:
// CHECK: #APP
// x86_64: mov r{{[a-z0-9]+}}, r{{[a-z0-9]+}}
// i686: mov e{{[a-z0-9]+}}, e{{[a-z0-9]+}}
// CHECK: #NO_APP
check!(reg_f16 f16 reg "mov");
// CHECK-LABEL: reg_i32:
// CHECK: #APP
// x86_64: mov r{{[a-z0-9]+}}, r{{[a-z0-9]+}}
@ -334,6 +369,13 @@ check!(reg_ptr ptr reg "mov");
// CHECK: #NO_APP
check!(reg_abcd_i16 i16 reg_abcd "mov");
// CHECK-LABEL: reg_abcd_f16:
// CHECK: #APP
// x86_64: mov r{{[a-z0-9]+}}, r{{[a-z0-9]+}}
// i686: mov e{{[a-z0-9]+}}, e{{[a-z0-9]+}}
// CHECK: #NO_APP
check!(reg_abcd_f16 f16 reg_abcd "mov");
// CHECK-LABEL: reg_abcd_i32:
// CHECK: #APP
// x86_64: mov r{{[a-z0-9]+}}, r{{[a-z0-9]+}}
@ -375,6 +417,12 @@ check!(reg_abcd_ptr ptr reg_abcd "mov");
// CHECK: #NO_APP
check!(reg_byte i8 reg_byte "mov");
// CHECK-LABEL: xmm_reg_f16:
// CHECK: #APP
// CHECK: movaps xmm{{[0-9]+}}, xmm{{[0-9]+}}
// CHECK: #NO_APP
check!(xmm_reg_f16 f16 xmm_reg "movaps");
// CHECK-LABEL: xmm_reg_i32:
// CHECK: #APP
// CHECK: movaps xmm{{[0-9]+}}, xmm{{[0-9]+}}
@ -399,6 +447,12 @@ check!(xmm_reg_i64 i64 xmm_reg "movaps");
// CHECK: #NO_APP
check!(xmm_reg_f64 f64 xmm_reg "movaps");
// CHECK-LABEL: xmm_reg_f128:
// CHECK: #APP
// CHECK: movaps xmm{{[0-9]+}}, xmm{{[0-9]+}}
// CHECK: #NO_APP
check!(xmm_reg_f128 f128 xmm_reg "movaps");
// CHECK-LABEL: xmm_reg_ptr:
// CHECK: #APP
// CHECK: movaps xmm{{[0-9]+}}, xmm{{[0-9]+}}
@ -429,6 +483,12 @@ check!(xmm_reg_i32x4 i32x4 xmm_reg "movaps");
// CHECK: #NO_APP
check!(xmm_reg_i64x2 i64x2 xmm_reg "movaps");
// CHECK-LABEL: xmm_reg_f16x8:
// CHECK: #APP
// CHECK: movaps xmm{{[0-9]+}}, xmm{{[0-9]+}}
// CHECK: #NO_APP
check!(xmm_reg_f16x8 f16x8 xmm_reg "movaps");
// CHECK-LABEL: xmm_reg_f32x4:
// CHECK: #APP
// CHECK: movaps xmm{{[0-9]+}}, xmm{{[0-9]+}}
@ -441,6 +501,12 @@ check!(xmm_reg_f32x4 f32x4 xmm_reg "movaps");
// CHECK: #NO_APP
check!(xmm_reg_f64x2 f64x2 xmm_reg "movaps");
// CHECK-LABEL: ymm_reg_f16:
// CHECK: #APP
// CHECK: vmovaps ymm{{[0-9]+}}, ymm{{[0-9]+}}
// CHECK: #NO_APP
check!(ymm_reg_f16 f16 ymm_reg "vmovaps");
// CHECK-LABEL: ymm_reg_i32:
// CHECK: #APP
// CHECK: vmovaps ymm{{[0-9]+}}, ymm{{[0-9]+}}
@ -465,6 +531,12 @@ check!(ymm_reg_i64 i64 ymm_reg "vmovaps");
// CHECK: #NO_APP
check!(ymm_reg_f64 f64 ymm_reg "vmovaps");
// CHECK-LABEL: ymm_reg_f128:
// CHECK: #APP
// CHECK: vmovaps ymm{{[0-9]+}}, ymm{{[0-9]+}}
// CHECK: #NO_APP
check!(ymm_reg_f128 f128 ymm_reg "vmovaps");
// CHECK-LABEL: ymm_reg_ptr:
// CHECK: #APP
// CHECK: vmovaps ymm{{[0-9]+}}, ymm{{[0-9]+}}
@ -495,6 +567,12 @@ check!(ymm_reg_i32x4 i32x4 ymm_reg "vmovaps");
// CHECK: #NO_APP
check!(ymm_reg_i64x2 i64x2 ymm_reg "vmovaps");
// CHECK-LABEL: ymm_reg_f16x8:
// CHECK: #APP
// CHECK: vmovaps ymm{{[0-9]+}}, ymm{{[0-9]+}}
// CHECK: #NO_APP
check!(ymm_reg_f16x8 f16x8 ymm_reg "vmovaps");
// CHECK-LABEL: ymm_reg_f32x4:
// CHECK: #APP
// CHECK: vmovaps ymm{{[0-9]+}}, ymm{{[0-9]+}}
@ -531,6 +609,12 @@ check!(ymm_reg_i32x8 i32x8 ymm_reg "vmovaps");
// CHECK: #NO_APP
check!(ymm_reg_i64x4 i64x4 ymm_reg "vmovaps");
// CHECK-LABEL: ymm_reg_f16x16:
// CHECK: #APP
// CHECK: vmovaps ymm{{[0-9]+}}, ymm{{[0-9]+}}
// CHECK: #NO_APP
check!(ymm_reg_f16x16 f16x16 ymm_reg "vmovaps");
// CHECK-LABEL: ymm_reg_f32x8:
// CHECK: #APP
// CHECK: vmovaps ymm{{[0-9]+}}, ymm{{[0-9]+}}
@ -543,6 +627,12 @@ check!(ymm_reg_f32x8 f32x8 ymm_reg "vmovaps");
// CHECK: #NO_APP
check!(ymm_reg_f64x4 f64x4 ymm_reg "vmovaps");
// CHECK-LABEL: zmm_reg_f16:
// CHECK: #APP
// CHECK: vmovaps zmm{{[0-9]+}}, zmm{{[0-9]+}}
// CHECK: #NO_APP
check!(zmm_reg_f16 f16 zmm_reg "vmovaps");
// CHECK-LABEL: zmm_reg_i32:
// CHECK: #APP
// CHECK: vmovaps zmm{{[0-9]+}}, zmm{{[0-9]+}}
@ -567,6 +657,12 @@ check!(zmm_reg_i64 i64 zmm_reg "vmovaps");
// CHECK: #NO_APP
check!(zmm_reg_f64 f64 zmm_reg "vmovaps");
// CHECK-LABEL: zmm_reg_f128:
// CHECK: #APP
// CHECK: vmovaps zmm{{[0-9]+}}, zmm{{[0-9]+}}
// CHECK: #NO_APP
check!(zmm_reg_f128 f128 zmm_reg "vmovaps");
// CHECK-LABEL: zmm_reg_ptr:
// CHECK: #APP
// CHECK: vmovaps zmm{{[0-9]+}}, zmm{{[0-9]+}}
@ -597,6 +693,12 @@ check!(zmm_reg_i32x4 i32x4 zmm_reg "vmovaps");
// CHECK: #NO_APP
check!(zmm_reg_i64x2 i64x2 zmm_reg "vmovaps");
// CHECK-LABEL: zmm_reg_f16x8:
// CHECK: #APP
// CHECK: vmovaps zmm{{[0-9]+}}, zmm{{[0-9]+}}
// CHECK: #NO_APP
check!(zmm_reg_f16x8 f16x8 zmm_reg "vmovaps");
// CHECK-LABEL: zmm_reg_f32x4:
// CHECK: #APP
// CHECK: vmovaps zmm{{[0-9]+}}, zmm{{[0-9]+}}
@ -633,6 +735,12 @@ check!(zmm_reg_i32x8 i32x8 zmm_reg "vmovaps");
// CHECK: #NO_APP
check!(zmm_reg_i64x4 i64x4 zmm_reg "vmovaps");
// CHECK-LABEL: zmm_reg_f16x16:
// CHECK: #APP
// CHECK: vmovaps zmm{{[0-9]+}}, zmm{{[0-9]+}}
// CHECK: #NO_APP
check!(zmm_reg_f16x16 f16x16 zmm_reg "vmovaps");
// CHECK-LABEL: zmm_reg_f32x8:
// CHECK: #APP
// CHECK: vmovaps zmm{{[0-9]+}}, zmm{{[0-9]+}}
@ -669,6 +777,12 @@ check!(zmm_reg_i32x16 i32x16 zmm_reg "vmovaps");
// CHECK: #NO_APP
check!(zmm_reg_i64x8 i64x8 zmm_reg "vmovaps");
// CHECK-LABEL: zmm_reg_f16x32:
// CHECK: #APP
// CHECK: vmovaps zmm{{[0-9]+}}, zmm{{[0-9]+}}
// CHECK: #NO_APP
check!(zmm_reg_f16x32 f16x32 zmm_reg "vmovaps");
// CHECK-LABEL: zmm_reg_f32x16:
// CHECK: #APP
// CHECK: vmovaps zmm{{[0-9]+}}, zmm{{[0-9]+}}
@ -717,6 +831,12 @@ check!(kreg_ptr ptr kreg "kmovq");
// CHECK: #NO_APP
check_reg!(eax_i16 i16 "eax" "mov");
// CHECK-LABEL: eax_f16:
// CHECK: #APP
// CHECK: mov eax, eax
// CHECK: #NO_APP
check_reg!(eax_f16 f16 "eax" "mov");
// CHECK-LABEL: eax_i32:
// CHECK: #APP
// CHECK: mov eax, eax
@ -756,6 +876,12 @@ check_reg!(eax_ptr ptr "eax" "mov");
#[cfg(i686)]
check_reg!(ah_byte i8 "ah" "mov");
// CHECK-LABEL: xmm0_f16:
// CHECK: #APP
// CHECK: movaps xmm0, xmm0
// CHECK: #NO_APP
check_reg!(xmm0_f16 f16 "xmm0" "movaps");
// CHECK-LABEL: xmm0_i32:
// CHECK: #APP
// CHECK: movaps xmm0, xmm0
@ -780,6 +906,12 @@ check_reg!(xmm0_i64 i64 "xmm0" "movaps");
// CHECK: #NO_APP
check_reg!(xmm0_f64 f64 "xmm0" "movaps");
// CHECK-LABEL: xmm0_f128:
// CHECK: #APP
// CHECK: movaps xmm0, xmm0
// CHECK: #NO_APP
check_reg!(xmm0_f128 f128 "xmm0" "movaps");
// CHECK-LABEL: xmm0_ptr:
// CHECK: #APP
// CHECK: movaps xmm0, xmm0
@ -810,6 +942,12 @@ check_reg!(xmm0_i32x4 i32x4 "xmm0" "movaps");
// CHECK: #NO_APP
check_reg!(xmm0_i64x2 i64x2 "xmm0" "movaps");
// CHECK-LABEL: xmm0_f16x8:
// CHECK: #APP
// CHECK: movaps xmm0, xmm0
// CHECK: #NO_APP
check_reg!(xmm0_f16x8 f16x8 "xmm0" "movaps");
// CHECK-LABEL: xmm0_f32x4:
// CHECK: #APP
// CHECK: movaps xmm0, xmm0
@ -822,6 +960,12 @@ check_reg!(xmm0_f32x4 f32x4 "xmm0" "movaps");
// CHECK: #NO_APP
check_reg!(xmm0_f64x2 f64x2 "xmm0" "movaps");
// CHECK-LABEL: ymm0_f16:
// CHECK: #APP
// CHECK: vmovaps ymm0, ymm0
// CHECK: #NO_APP
check_reg!(ymm0_f16 f16 "ymm0" "vmovaps");
// CHECK-LABEL: ymm0_i32:
// CHECK: #APP
// CHECK: vmovaps ymm0, ymm0
@ -846,6 +990,12 @@ check_reg!(ymm0_i64 i64 "ymm0" "vmovaps");
// CHECK: #NO_APP
check_reg!(ymm0_f64 f64 "ymm0" "vmovaps");
// CHECK-LABEL: ymm0_f128:
// CHECK: #APP
// CHECK: vmovaps ymm0, ymm0
// CHECK: #NO_APP
check_reg!(ymm0_f128 f128 "ymm0" "vmovaps");
// CHECK-LABEL: ymm0_ptr:
// CHECK: #APP
// CHECK: vmovaps ymm0, ymm0
@ -876,6 +1026,12 @@ check_reg!(ymm0_i32x4 i32x4 "ymm0" "vmovaps");
// CHECK: #NO_APP
check_reg!(ymm0_i64x2 i64x2 "ymm0" "vmovaps");
// CHECK-LABEL: ymm0_f16x8:
// CHECK: #APP
// CHECK: vmovaps ymm0, ymm0
// CHECK: #NO_APP
check_reg!(ymm0_f16x8 f16x8 "ymm0" "vmovaps");
// CHECK-LABEL: ymm0_f32x4:
// CHECK: #APP
// CHECK: vmovaps ymm0, ymm0
@ -912,6 +1068,12 @@ check_reg!(ymm0_i32x8 i32x8 "ymm0" "vmovaps");
// CHECK: #NO_APP
check_reg!(ymm0_i64x4 i64x4 "ymm0" "vmovaps");
// CHECK-LABEL: ymm0_f16x16:
// CHECK: #APP
// CHECK: vmovaps ymm0, ymm0
// CHECK: #NO_APP
check_reg!(ymm0_f16x16 f16x16 "ymm0" "vmovaps");
// CHECK-LABEL: ymm0_f32x8:
// CHECK: #APP
// CHECK: vmovaps ymm0, ymm0
@ -924,6 +1086,12 @@ check_reg!(ymm0_f32x8 f32x8 "ymm0" "vmovaps");
// CHECK: #NO_APP
check_reg!(ymm0_f64x4 f64x4 "ymm0" "vmovaps");
// CHECK-LABEL: zmm0_f16:
// CHECK: #APP
// CHECK: vmovaps zmm0, zmm0
// CHECK: #NO_APP
check_reg!(zmm0_f16 f16 "zmm0" "vmovaps");
// CHECK-LABEL: zmm0_i32:
// CHECK: #APP
// CHECK: vmovaps zmm0, zmm0
@ -948,6 +1116,12 @@ check_reg!(zmm0_i64 i64 "zmm0" "vmovaps");
// CHECK: #NO_APP
check_reg!(zmm0_f64 f64 "zmm0" "vmovaps");
// CHECK-LABEL: zmm0_f128:
// CHECK: #APP
// CHECK: vmovaps zmm0, zmm0
// CHECK: #NO_APP
check_reg!(zmm0_f128 f128 "zmm0" "vmovaps");
// CHECK-LABEL: zmm0_ptr:
// CHECK: #APP
// CHECK: vmovaps zmm0, zmm0
@ -978,6 +1152,12 @@ check_reg!(zmm0_i32x4 i32x4 "zmm0" "vmovaps");
// CHECK: #NO_APP
check_reg!(zmm0_i64x2 i64x2 "zmm0" "vmovaps");
// CHECK-LABEL: zmm0_f16x8:
// CHECK: #APP
// CHECK: vmovaps zmm0, zmm0
// CHECK: #NO_APP
check_reg!(zmm0_f16x8 f16x8 "zmm0" "vmovaps");
// CHECK-LABEL: zmm0_f32x4:
// CHECK: #APP
// CHECK: vmovaps zmm0, zmm0
@ -1014,6 +1194,12 @@ check_reg!(zmm0_i32x8 i32x8 "zmm0" "vmovaps");
// CHECK: #NO_APP
check_reg!(zmm0_i64x4 i64x4 "zmm0" "vmovaps");
// CHECK-LABEL: zmm0_f16x16:
// CHECK: #APP
// CHECK: vmovaps zmm0, zmm0
// CHECK: #NO_APP
check_reg!(zmm0_f16x16 f16x16 "zmm0" "vmovaps");
// CHECK-LABEL: zmm0_f32x8:
// CHECK: #APP
// CHECK: vmovaps zmm0, zmm0
@ -1050,6 +1236,12 @@ check_reg!(zmm0_i32x16 i32x16 "zmm0" "vmovaps");
// CHECK: #NO_APP
check_reg!(zmm0_i64x8 i64x8 "zmm0" "vmovaps");
// CHECK-LABEL: zmm0_f16x32:
// CHECK: #APP
// CHECK: vmovaps zmm0, zmm0
// CHECK: #NO_APP
check_reg!(zmm0_f16x32 f16x32 "zmm0" "vmovaps");
// CHECK-LABEL: zmm0_f32x16:
// CHECK: #APP
// CHECK: vmovaps zmm0, zmm0

View File

@ -1,5 +0,0 @@
//@ known-bug: rust-lang/rust#124583
fn main() {
let _ = -(-0.0f16);
}

View File

@ -55,16 +55,19 @@ fn test_intrinsics() -> ControlFlow<()> {
///
/// If by any chance this test breaks because you changed how an intrinsic is implemented, please
/// update the test to invoke a different intrinsic.
///
/// In StableMIR, we only expose an intrinsic's body if it is not marked with
/// `rustc_intrinsic_must_be_overridden`.
fn check_instance(instance: &Instance) {
assert_eq!(instance.kind, InstanceKind::Intrinsic);
let name = instance.intrinsic_name().unwrap();
if instance.has_body() {
let Some(body) = instance.body() else { unreachable!("Expected a body") };
assert!(!body.blocks.is_empty());
assert_matches!(name.as_str(), "likely" | "vtable_size");
assert_eq!(&name, "likely");
} else {
assert!(instance.body().is_none());
assert_eq!(&name, "size_of_val");
assert_matches!(name.as_str(), "size_of_val" | "vtable_size");
}
}
@ -75,11 +78,13 @@ fn check_def(fn_def: FnDef) {
let name = intrinsic.fn_name();
match name.as_str() {
"likely" | "size_of_val" => {
"likely" => {
assert!(!intrinsic.must_be_overridden());
assert!(fn_def.has_body());
}
"vtable_size" => {
"vtable_size" | "size_of_val" => {
assert!(intrinsic.must_be_overridden());
assert!(!fn_def.has_body());
}
_ => unreachable!("Unexpected intrinsic: {}", name),
}
@ -96,9 +101,9 @@ impl<'a> MirVisitor for CallsVisitor<'a> {
TerminatorKind::Call { func, .. } => {
let TyKind::RigidTy(RigidTy::FnDef(def, args)) =
func.ty(self.locals).unwrap().kind()
else {
return;
};
else {
return;
};
self.calls.push((def, args.clone()));
}
_ => {}

View File

@ -4,7 +4,7 @@ error: type `i128` cannot be used with this register class
LL | asm!("{}", in(reg) 0i128);
| ^^^^^
|
= note: register class `reg` supports these types: i16, i32, i64, f32, f64
= note: register class `reg` supports these types: i16, i32, i64, f16, f32, f64
error: type `__m128` cannot be used with this register class
--> $DIR/type-check-3.rs:16:28
@ -12,7 +12,7 @@ error: type `__m128` cannot be used with this register class
LL | asm!("{}", in(reg) _mm_setzero_ps());
| ^^^^^^^^^^^^^^^^
|
= note: register class `reg` supports these types: i16, i32, i64, f32, f64
= note: register class `reg` supports these types: i16, i32, i64, f16, f32, f64
error: type `__m256` cannot be used with this register class
--> $DIR/type-check-3.rs:18:28
@ -20,7 +20,7 @@ error: type `__m256` cannot be used with this register class
LL | asm!("{}", in(reg) _mm256_setzero_ps());
| ^^^^^^^^^^^^^^^^^^^
|
= note: register class `reg` supports these types: i16, i32, i64, f32, f64
= note: register class `reg` supports these types: i16, i32, i64, f16, f32, f64
error: type `u8` cannot be used with this register class
--> $DIR/type-check-3.rs:20:32
@ -28,7 +28,7 @@ error: type `u8` cannot be used with this register class
LL | asm!("{}", in(xmm_reg) 0u8);
| ^^^
|
= note: register class `xmm_reg` supports these types: i32, i64, f32, f64, i8x16, i16x8, i32x4, i64x2, f32x4, f64x2
= note: register class `xmm_reg` supports these types: i32, i64, f16, f32, f64, f128, i8x16, i16x8, i32x4, i64x2, f16x8, f32x4, f64x2
error: `avx512bw` target feature is not enabled
--> $DIR/type-check-3.rs:29:29
@ -81,7 +81,7 @@ error: type `i8` cannot be used with this register class
LL | asm!("{}", in(reg) 0i8);
| ^^^
|
= note: register class `reg` supports these types: i16, i32, i64, f32, f64
= note: register class `reg` supports these types: i16, i32, i64, f16, f32, f64
= help: consider using the `reg_byte` register class instead
error: incompatible types for asm inout argument

View File

@ -1,3 +1,5 @@
// Make sure negation happens correctly. Also included:
// issue: rust-lang/rust#124583
//@ run-pass
#![feature(f16)]
@ -8,9 +10,11 @@ fn main() {
assert_eq!((-0.0_f16).to_bits(), 0x8000);
assert_eq!(10.0_f16.to_bits(), 0x4900);
assert_eq!((-10.0_f16).to_bits(), 0xC900);
assert_eq!((-(-0.0f16)).to_bits(), 0x0000);
assert_eq!(0.0_f128.to_bits(), 0x0000_0000_0000_0000_0000_0000_0000_0000);
assert_eq!((-0.0_f128).to_bits(), 0x8000_0000_0000_0000_0000_0000_0000_0000);
assert_eq!(10.0_f128.to_bits(), 0x4002_4000_0000_0000_0000_0000_0000_0000);
assert_eq!((-10.0_f128).to_bits(), 0xC002_4000_0000_0000_0000_0000_0000_0000);
assert_eq!((-(-0.0f128)).to_bits(), 0x0000_0000_0000_0000_0000_0000_0000_0000);
}