Auto merge of #76912 - RalfJung:rollup-q9ur56h, r=RalfJung

Rollup of 14 pull requests

Successful merges:

 - #73963 (deny(unsafe_op_in_unsafe_fn) in libstd/path.rs)
 - #75099 (lint/ty: move fns to avoid abstraction violation)
 - #75502 (Use implicit (not explicit) rules for promotability by default in `const fn`)
 - #75580 (Add test for checking duplicated branch or-patterns)
 - #76310 (Add `[T; N]: TryFrom<Vec<T>>` (insta-stable))
 - #76400 (Clean up vec benches bench_in_place style)
 - #76434 (do not inline black_box when building for Miri)
 - #76492 (Add associated constant `BITS` to all integer types)
 - #76525 (Add as_str() to string::Drain.)
 - #76636 (assert ScalarMaybeUninit size)
 - #76749 (give *even better* suggestion when matching a const range)
 - #76757 (don't convert types to the same type with try_into (clippy::useless_conversion))
 - #76796 (Give a better error message when x.py uses the wrong stage for CI)
 - #76798 (Build fixes for RISC-V 32-bit Linux support)

Failed merges:

r? `@ghost`
bors 2020-09-19 11:29:00 +00:00
commit 8e9d5db839
46 changed files with 456 additions and 181 deletions
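Two of the library additions listed above — `[T; N]: TryFrom<Vec<T>>` (#76310) and `as_str()` on `string::Drain` (#76525) — can be exercised as in the minimal sketch below; it is illustrative only and not part of the diff that follows.

#![feature(string_drain_as_str)] // the Drain::as_str addition is unstable on this nightly
use std::convert::TryInto;

fn main() {
    // #76310: fallible Vec -> array conversion (insta-stable).
    let arr: [i32; 3] = vec![1, 2, 3].try_into().unwrap();
    assert_eq!(arr, [1, 2, 3]);

    // #76525: look at the not-yet-consumed part of a String drain.
    let mut s = String::from("abc");
    let mut drain = s.drain(..);
    let _ = drain.next();
    assert_eq!(drain.as_str(), "bc");
}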

View File

@ -14,6 +14,7 @@
#![feature(generators)]
#![feature(generator_trait)]
#![feature(fn_traits)]
#![feature(int_bits_const)]
#![feature(min_specialization)]
#![feature(optin_builtin_traits)]
#![feature(nll)]

View File

@ -48,7 +48,7 @@ where
P: Pointer,
T: Tag,
{
const TAG_BIT_SHIFT: usize = (8 * std::mem::size_of::<usize>()) - T::BITS;
const TAG_BIT_SHIFT: usize = usize::BITS as usize - T::BITS;
const ASSERTION: () = {
assert!(T::BITS <= P::BITS);
// Used for the transmute_copy's below
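As a quick illustration of what `TAG_BIT_SHIFT` is for, here is a hedged sketch of packing a small tag into the high bits of a pointer-sized word; `TAG_BITS` stands in for `T::BITS` and all names are hypothetical, not the crate's API.

const TAG_BITS: u32 = 2;                           // stands in for T::BITS
const TAG_BIT_SHIFT: u32 = usize::BITS - TAG_BITS; // same computation as above

fn pack(addr: usize, tag: usize) -> usize {
    debug_assert!(tag < (1usize << TAG_BITS));
    addr | (tag << TAG_BIT_SHIFT)
}

fn unpack_tag(packed: usize) -> usize {
    packed >> TAG_BIT_SHIFT
}

fn main() {
    let packed = pack(0x1000, 0b10);
    assert_eq!(unpack_tag(packed), 0b10);
}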

View File

@ -21,7 +21,8 @@
//! `late_lint_methods!` invocation in `lib.rs`.
use crate::{
types::CItemKind, EarlyContext, EarlyLintPass, LateContext, LateLintPass, LintContext,
types::{transparent_newtype_field, CItemKind},
EarlyContext, EarlyLintPass, LateContext, LateLintPass, LintContext,
};
use rustc_ast::attr::{self, HasAttrs};
use rustc_ast::tokenstream::{TokenStream, TokenTree};
@ -2688,8 +2689,7 @@ impl ClashingExternDeclarations {
if is_transparent && !is_non_null {
debug_assert!(def.variants.len() == 1);
let v = &def.variants[VariantIdx::new(0)];
ty = v
.transparent_newtype_field(tcx)
ty = transparent_newtype_field(tcx, v)
.expect(
"single-variant transparent structure with zero-sized field",
)

View File

@ -639,6 +639,26 @@ crate fn nonnull_optimization_guaranteed<'tcx>(tcx: TyCtxt<'tcx>, def: &ty::AdtD
.any(|a| tcx.sess.check_name(a, sym::rustc_nonnull_optimization_guaranteed))
}
/// `repr(transparent)` structs can have a single non-ZST field, this function returns that
/// field.
pub fn transparent_newtype_field<'a, 'tcx>(
tcx: TyCtxt<'tcx>,
variant: &'a ty::VariantDef,
) -> Option<&'a ty::FieldDef> {
let param_env = tcx.param_env(variant.def_id);
for field in &variant.fields {
let field_ty = tcx.type_of(field.did);
let is_zst =
tcx.layout_of(param_env.and(field_ty)).map(|layout| layout.is_zst()).unwrap_or(false);
if !is_zst {
return Some(field);
}
}
None
}
/// Is type known to be non-null?
crate fn ty_is_known_nonnull<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>, mode: CItemKind) -> bool {
let tcx = cx.tcx;
@ -654,7 +674,7 @@ crate fn ty_is_known_nonnull<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>, mode: C
}
for variant in &def.variants {
if let Some(field) = variant.transparent_newtype_field(tcx) {
if let Some(field) = transparent_newtype_field(cx.tcx, variant) {
if ty_is_known_nonnull(cx, field.ty(tcx, substs), mode) {
return true;
}
@ -675,7 +695,7 @@ fn get_nullable_type<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>) -> Option<Ty<'t
ty::Adt(field_def, field_substs) => {
let inner_field_ty = {
let first_non_zst_ty =
field_def.variants.iter().filter_map(|v| v.transparent_newtype_field(tcx));
field_def.variants.iter().filter_map(|v| transparent_newtype_field(cx.tcx, v));
debug_assert_eq!(
first_non_zst_ty.clone().count(),
1,
@ -816,7 +836,7 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> {
if def.repr.transparent() {
// Can assume that only one field is not a ZST, so only check
// that field's type for FFI-safety.
if let Some(field) = variant.transparent_newtype_field(self.cx.tcx) {
if let Some(field) = transparent_newtype_field(self.cx.tcx, variant) {
self.check_field_type_for_ffi(cache, field, substs)
} else {
bug!("malformed transparent type");
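For context, `transparent_newtype_field` picks out the single non-zero-sized field of a `repr(transparent)` type. A minimal sketch of such a type (illustrative only, using `PhantomData` as the ZST):

use std::marker::PhantomData;

#[repr(transparent)]
pub struct Wrapper<T> {
    pub raw: *mut T,             // the one non-ZST field the lint compares across declarations
    pub _marker: PhantomData<T>, // zero-sized, skipped by the helper
}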

View File

@ -578,6 +578,9 @@ pub enum ScalarMaybeUninit<Tag = ()> {
Uninit,
}
#[cfg(target_arch = "x86_64")]
static_assert_size!(ScalarMaybeUninit, 24);
impl<Tag> From<Scalar<Tag>> for ScalarMaybeUninit<Tag> {
#[inline(always)]
fn from(s: Scalar<Tag>) -> Self {
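`static_assert_size!` is a rustc-internal macro; a hypothetical stand-in with the same effect is sketched below — the constant only type-checks when the claimed size matches `size_of`.

// Hypothetical stand-in for the rustc-internal macro, not its real definition.
macro_rules! static_assert_size {
    ($ty:ty, $size:expr) => {
        const _: [(); $size] = [(); std::mem::size_of::<$ty>()];
    };
}

#[allow(dead_code)]
struct Example([u64; 3]);
static_assert_size!(Example, 24); // fails to compile if the size ever drifts

fn main() {}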

View File

@ -1999,7 +1999,7 @@ pub struct VariantDef {
flags: VariantFlags,
}
impl<'tcx> VariantDef {
impl VariantDef {
/// Creates a new `VariantDef`.
///
/// `variant_did` is the `DefId` that identifies the enum variant (if this `VariantDef`
@ -2065,19 +2065,6 @@ impl<'tcx> VariantDef {
pub fn is_recovered(&self) -> bool {
self.flags.intersects(VariantFlags::IS_RECOVERED)
}
/// `repr(transparent)` structs can have a single non-ZST field, this function returns that
/// field.
pub fn transparent_newtype_field(&self, tcx: TyCtxt<'tcx>) -> Option<&FieldDef> {
for field in &self.fields {
let field_ty = field.ty(tcx, InternalSubsts::identity_for_item(tcx, self.def_id));
if !field_ty.is_zst(tcx, self.def_id) {
return Some(field);
}
}
None
}
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, HashStable)]

View File

@ -2322,9 +2322,4 @@ impl<'tcx> TyS<'tcx> {
}
}
}
/// Is this a zero-sized type?
pub fn is_zst(&'tcx self, tcx: TyCtxt<'tcx>, did: DefId) -> bool {
tcx.layout_of(tcx.param_env(did).and(self)).map(|layout| layout.is_zst()).unwrap_or(false)
}
}

View File

@ -4,7 +4,6 @@ use rustc_middle::mir::*;
use rustc_middle::ty::{self, TyCtxt};
use smallvec::{smallvec, SmallVec};
use std::convert::TryInto;
use std::mem;
use super::abs_domain::Lift;
@ -481,12 +480,7 @@ impl<'b, 'a, 'tcx> Gatherer<'b, 'a, 'tcx> {
};
let base_ty = base_place.ty(self.builder.body, self.builder.tcx).ty;
let len: u64 = match base_ty.kind() {
ty::Array(_, size) => {
let length = size.eval_usize(self.builder.tcx, self.builder.param_env);
length
.try_into()
.expect("slice pattern of array with more than u32::MAX elements")
}
ty::Array(_, size) => size.eval_usize(self.builder.tcx, self.builder.param_env),
_ => bug!("from_end: false slice pattern of non-array type"),
};
for offset in from..to {

View File

@ -551,7 +551,7 @@ where
let n = base.len(self)?;
if n < min_length {
// This can only be reached in ConstProp and non-rustc-MIR.
throw_ub!(BoundsCheckFailed { len: min_length.into(), index: n });
throw_ub!(BoundsCheckFailed { len: min_length, index: n });
}
let index = if from_end {
@ -565,9 +565,7 @@ where
self.mplace_index(base, index)?
}
Subslice { from, to, from_end } => {
self.mplace_subslice(base, u64::from(from), u64::from(to), from_end)?
}
Subslice { from, to, from_end } => self.mplace_subslice(base, from, to, from_end)?,
})
}

View File

@ -734,7 +734,14 @@ impl<'tcx> Validator<'_, 'tcx> {
) -> Result<(), Unpromotable> {
let fn_ty = callee.ty(self.body, self.tcx);
if !self.explicit && self.const_kind.is_none() {
// `const` and `static` use the explicit rules for promotion regardless of the `Candidate`,
// meaning calls to `const fn` can be promoted.
let context_uses_explicit_promotion_rules = matches!(
self.const_kind,
Some(hir::ConstContext::Static(_) | hir::ConstContext::Const)
);
if !self.explicit && !context_uses_explicit_promotion_rules {
if let ty::FnDef(def_id, _) = *fn_ty.kind() {
// Never promote runtime `const fn` calls of
// functions without `#[rustc_promotable]`.

View File

@ -33,7 +33,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
let tcx = self.hir.tcx();
let (min_length, exact_size) = match place.ty(&self.local_decls, tcx).ty.kind() {
ty::Array(_, length) => {
(length.eval_usize(tcx, self.hir.param_env).try_into().unwrap(), true)
(length.eval_usize(tcx, self.hir.param_env), true)
}
_ => ((prefix.len() + suffix.len()).try_into().unwrap(), false),
};

View File

@ -1,5 +1,6 @@
use crate::check::FnCtxt;
use rustc_ast as ast;
use rustc_ast::util::lev_distance::find_best_match_for_name;
use rustc_data_structures::fx::FxHashMap;
use rustc_errors::{pluralize, struct_span_err, Applicability, DiagnosticBuilder};
@ -740,6 +741,40 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
pat_ty
}
fn maybe_suggest_range_literal(
&self,
e: &mut DiagnosticBuilder<'_>,
opt_def_id: Option<hir::def_id::DefId>,
ident: Ident,
) -> bool {
match opt_def_id {
Some(def_id) => match self.tcx.hir().get_if_local(def_id) {
Some(hir::Node::Item(hir::Item {
kind: hir::ItemKind::Const(_, body_id), ..
})) => match self.tcx.hir().get(body_id.hir_id) {
hir::Node::Expr(expr) => {
if hir::is_range_literal(expr) {
let span = self.tcx.hir().span(body_id.hir_id);
if let Ok(snip) = self.tcx.sess.source_map().span_to_snippet(span) {
e.span_suggestion_verbose(
ident.span,
"you may want to move the range into the match block",
snip,
Applicability::MachineApplicable,
);
return true;
}
}
}
_ => (),
},
_ => (),
},
_ => (),
}
false
}
fn emit_bad_pat_path(
&self,
mut e: DiagnosticBuilder<'_>,
@ -772,12 +807,12 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
);
}
_ => {
let const_def_id = match pat_ty.kind() {
let (type_def_id, item_def_id) = match pat_ty.kind() {
Adt(def, _) => match res {
Res::Def(DefKind::Const, _) => Some(def.did),
_ => None,
Res::Def(DefKind::Const, def_id) => (Some(def.did), Some(def_id)),
_ => (None, None),
},
_ => None,
_ => (None, None),
};
let ranges = &[
@ -788,11 +823,13 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
self.tcx.lang_items().range_inclusive_struct(),
self.tcx.lang_items().range_to_inclusive_struct(),
];
if const_def_id != None && ranges.contains(&const_def_id) {
let msg = "constants only support matching by type, \
if you meant to match against a range of values, \
consider using a range pattern like `min ..= max` in the match block";
e.note(msg);
if type_def_id != None && ranges.contains(&type_def_id) {
if !self.maybe_suggest_range_literal(&mut e, item_def_id, *ident) {
let msg = "constants only support matching by type, \
if you meant to match against a range of values, \
consider using a range pattern like `min ..= max` in the match block";
e.note(msg);
}
} else {
let msg = "introduce a new binding instead";
let sugg = format!("other_{}", ident.as_str().to_lowercase());
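The user-facing effect of `maybe_suggest_range_literal`: where a constant of a range type is used as a pattern, the compiler now suggests inlining the range literal instead of only emitting the old note. A sketch of the fixed-up code it points the user toward:

use std::ops::RangeInclusive;

#[allow(dead_code)]
const RANGE: RangeInclusive<i32> = 0..=255;

fn in_range(n: i32) -> bool {
    match n {
        // `RANGE => true` would be a type error; the new diagnostic suggests
        // moving the range into the match arm, as written here:
        0..=255 => true,
        _ => false,
    }
}

fn main() {
    assert!(in_range(7));
}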

View File

@ -457,9 +457,7 @@ fn bench_clone_from_10_1000_0100(b: &mut Bencher) {
}
macro_rules! bench_in_place {
(
$($fname:ident, $type:ty , $count:expr, $init: expr);*
) => {
($($fname:ident, $type:ty, $count:expr, $init:expr);*) => {
$(
#[bench]
fn $fname(b: &mut Bencher) {
@ -467,7 +465,8 @@ macro_rules! bench_in_place {
let src: Vec<$type> = black_box(vec![$init; $count]);
let mut sink = src.into_iter()
.enumerate()
.map(|(idx, e)| { (idx as $type) ^ e }).collect::<Vec<$type>>();
.map(|(idx, e)| idx as $type ^ e)
.collect::<Vec<$type>>();
black_box(sink.as_mut_ptr())
});
}
@ -476,24 +475,24 @@ macro_rules! bench_in_place {
}
bench_in_place![
bench_in_place_xxu8_i0_0010, u8, 10, 0;
bench_in_place_xxu8_i0_0100, u8, 100, 0;
bench_in_place_xxu8_i0_1000, u8, 1000, 0;
bench_in_place_xxu8_i1_0010, u8, 10, 1;
bench_in_place_xxu8_i1_0100, u8, 100, 1;
bench_in_place_xxu8_i1_1000, u8, 1000, 1;
bench_in_place_xu32_i0_0010, u32, 10, 0;
bench_in_place_xu32_i0_0100, u32, 100, 0;
bench_in_place_xu32_i0_1000, u32, 1000, 0;
bench_in_place_xu32_i1_0010, u32, 10, 1;
bench_in_place_xu32_i1_0100, u32, 100, 1;
bench_in_place_xu32_i1_1000, u32, 1000, 1;
bench_in_place_u128_i0_0010, u128, 10, 0;
bench_in_place_u128_i0_0100, u128, 100, 0;
bench_in_place_u128_i0_1000, u128, 1000, 0;
bench_in_place_u128_i1_0010, u128, 10, 1;
bench_in_place_u128_i1_0100, u128, 100, 1;
bench_in_place_u128_i1_1000, u128, 1000, 1
bench_in_place_xxu8_0010_i0, u8, 10, 0;
bench_in_place_xxu8_0100_i0, u8, 100, 0;
bench_in_place_xxu8_1000_i0, u8, 1000, 0;
bench_in_place_xxu8_0010_i1, u8, 10, 1;
bench_in_place_xxu8_0100_i1, u8, 100, 1;
bench_in_place_xxu8_1000_i1, u8, 1000, 1;
bench_in_place_xu32_0010_i0, u32, 10, 0;
bench_in_place_xu32_0100_i0, u32, 100, 0;
bench_in_place_xu32_1000_i0, u32, 1000, 0;
bench_in_place_xu32_0010_i1, u32, 10, 1;
bench_in_place_xu32_0100_i1, u32, 100, 1;
bench_in_place_xu32_1000_i1, u32, 1000, 1;
bench_in_place_u128_0010_i0, u128, 10, 0;
bench_in_place_u128_0100_i0, u128, 100, 0;
bench_in_place_u128_1000_i0, u128, 1000, 0;
bench_in_place_u128_0010_i1, u128, 10, 1;
bench_in_place_u128_0100_i1, u128, 100, 1;
bench_in_place_u128_1000_i1, u128, 1000, 1
];
#[bench]

View File

@ -146,7 +146,7 @@
use core::fmt;
use core::iter::{FromIterator, FusedIterator, InPlaceIterable, SourceIter, TrustedLen};
use core::mem::{self, size_of, swap, ManuallyDrop};
use core::mem::{self, swap, ManuallyDrop};
use core::ops::{Deref, DerefMut};
use core::ptr;
@ -617,7 +617,7 @@ impl<T: Ord> BinaryHeap<T> {
#[inline(always)]
fn log2_fast(x: usize) -> usize {
8 * size_of::<usize>() - (x.leading_zeros() as usize) - 1
(usize::BITS - x.leading_zeros() - 1) as usize
}
// `rebuild` takes O(len1 + len2) operations

View File

@ -101,6 +101,7 @@
#![feature(fn_traits)]
#![feature(fundamental)]
#![feature(inplace_iteration)]
#![feature(int_bits_const)]
#![feature(lang_items)]
#![feature(layout_for_ptr)]
#![feature(libc)]

View File

@ -528,7 +528,7 @@ unsafe impl<#[may_dangle] T, A: AllocRef> Drop for RawVec<T, A> {
#[inline]
fn alloc_guard(alloc_size: usize) -> Result<(), TryReserveError> {
if mem::size_of::<usize>() < 8 && alloc_size > isize::MAX as usize {
if usize::BITS < 64 && alloc_size > isize::MAX as usize {
Err(CapacityOverflow)
} else {
Ok(())

View File

@ -2440,7 +2440,7 @@ pub struct Drain<'a> {
#[stable(feature = "collection_debug", since = "1.17.0")]
impl fmt::Debug for Drain<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.pad("Drain { .. }")
f.debug_tuple("Drain").field(&self.as_str()).finish()
}
}
@ -2463,6 +2463,40 @@ impl Drop for Drain<'_> {
}
}
impl<'a> Drain<'a> {
/// Returns the remaining (sub)string of this iterator as a slice.
///
/// # Examples
///
/// ```
/// #![feature(string_drain_as_str)]
/// let mut s = String::from("abc");
/// let mut drain = s.drain(..);
/// assert_eq!(drain.as_str(), "abc");
/// let _ = drain.next().unwrap();
/// assert_eq!(drain.as_str(), "bc");
/// ```
#[unstable(feature = "string_drain_as_str", issue = "76905")] // Note: uncomment AsRef impls below when stabilizing.
pub fn as_str(&self) -> &str {
self.iter.as_str()
}
}
// Uncomment when stabilizing `string_drain_as_str`.
// #[unstable(feature = "string_drain_as_str", issue = "76905")]
// impl<'a> AsRef<str> for Drain<'a> {
// fn as_ref(&self) -> &str {
// self.as_str()
// }
// }
//
// #[unstable(feature = "string_drain_as_str", issue = "76905")]
// impl<'a> AsRef<[u8]> for Drain<'a> {
// fn as_ref(&self) -> &[u8] {
// self.as_str().as_bytes()
// }
// }
#[stable(feature = "drain", since = "1.6.0")]
impl Iterator for Drain<'_> {
type Item = char;

View File

@ -55,6 +55,7 @@
#![stable(feature = "rust1", since = "1.0.0")]
use core::cmp::{self, Ordering};
use core::convert::TryFrom;
use core::fmt;
use core::hash::{Hash, Hasher};
use core::intrinsics::{arith_offset, assume};
@ -2754,6 +2755,57 @@ impl From<&str> for Vec<u8> {
}
}
#[stable(feature = "array_try_from_vec", since = "1.48.0")]
impl<T, const N: usize> TryFrom<Vec<T>> for [T; N] {
type Error = Vec<T>;
/// Gets the entire contents of the `Vec<T>` as an array,
/// if its size exactly matches that of the requested array.
///
/// # Examples
///
/// ```
/// use std::convert::TryInto;
/// assert_eq!(vec![1, 2, 3].try_into(), Ok([1, 2, 3]));
/// assert_eq!(<Vec<i32>>::new().try_into(), Ok([]));
/// ```
///
/// If the length doesn't match, the input comes back in `Err`:
/// ```
/// use std::convert::TryInto;
/// let r: Result<[i32; 4], _> = (0..10).collect::<Vec<_>>().try_into();
/// assert_eq!(r, Err(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]));
/// ```
///
/// If you're fine with just getting a prefix of the `Vec<T>`,
/// you can call [`.truncate(N)`](Vec::truncate) first.
/// ```
/// use std::convert::TryInto;
/// let mut v = String::from("hello world").into_bytes();
/// v.sort();
/// v.truncate(2);
/// let [a, b]: [_; 2] = v.try_into().unwrap();
/// assert_eq!(a, b' ');
/// assert_eq!(b, b'd');
/// ```
fn try_from(mut vec: Vec<T>) -> Result<[T; N], Vec<T>> {
if vec.len() != N {
return Err(vec);
}
// SAFETY: `.set_len(0)` is always sound.
unsafe { vec.set_len(0) };
// SAFETY: A `Vec`'s pointer is always aligned properly, and
// the alignment the array needs is the same as the items.
// We checked earlier that we have sufficient items.
// The items will not double-drop as the `set_len`
// tells the `Vec` not to also drop them.
let array = unsafe { ptr::read(vec.as_ptr() as *const [T; N]) };
Ok(array)
}
}
////////////////////////////////////////////////////////////////////////////////
// Clone-on-write
////////////////////////////////////////////////////////////////////////////////

View File

@ -18,6 +18,7 @@
#![feature(deque_range)]
#![feature(inplace_iteration)]
#![feature(iter_map_while)]
#![feature(int_bits_const)]
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

View File

@ -1,6 +1,5 @@
use std::borrow::Cow;
use std::collections::TryReserveError::*;
use std::mem::size_of;
use std::ops::Bound::*;
pub trait IntoCow<'a, B: ?Sized>
@ -605,7 +604,7 @@ fn test_try_reserve() {
// on 64-bit, we assume the OS will give an OOM for such a ridiculous size.
// Any platform that succeeds for these requests is technically broken with
// ptr::offset because LLVM is the worst.
let guards_against_isize = size_of::<usize>() < 8;
let guards_against_isize = usize::BITS < 64;
{
// Note: basic stuff is checked by test_reserve
@ -686,7 +685,7 @@ fn test_try_reserve_exact() {
const MAX_CAP: usize = isize::MAX as usize;
const MAX_USIZE: usize = usize::MAX;
let guards_against_isize = size_of::<usize>() < 8;
let guards_against_isize = usize::BITS < 64;
{
let mut empty_string: String = String::new();

View File

@ -1341,7 +1341,7 @@ fn test_try_reserve() {
// on 64-bit, we assume the OS will give an OOM for such a ridiculous size.
// Any platform that succeeds for these requests is technically broken with
// ptr::offset because LLVM is the worst.
let guards_against_isize = size_of::<usize>() < 8;
let guards_against_isize = usize::BITS < 64;
{
// Note: basic stuff is checked by test_reserve

View File

@ -2086,7 +2086,7 @@ impl<T: ?Sized> Pointer for *const T {
f.flags |= 1 << (FlagV1::SignAwareZeroPad as u32);
if f.width.is_none() {
f.width = Some(((mem::size_of::<usize>() * 8) / 4) + 2);
f.width = Some((usize::BITS / 4) as usize + 2);
}
}
f.flags |= 1 << (FlagV1::Alternate as u32);

View File

@ -108,7 +108,8 @@ pub fn spin_loop() {
/// Note however, that `black_box` is only (and can only be) provided on a "best-effort" basis. The
/// extent to which it can block optimisations may vary depending upon the platform and code-gen
/// backend used. Programs cannot rely on `black_box` for *correctness* in any way.
#[inline]
#[cfg_attr(not(miri), inline)]
#[cfg_attr(miri, inline(never))]
#[unstable(feature = "test", issue = "50297")]
#[allow(unreachable_code)] // this makes #[cfg] a bit easier below.
pub fn black_box<T>(mut dummy: T) -> T {
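For reference, the usual benchmark-style use of `black_box` that this change keeps meaningful under Miri (the function is unstable behind `feature(test)` on this nightly); a sketch, not taken from the diff:

#![feature(test)]
use std::hint::black_box;

fn sum_to(n: u64) -> u64 {
    (0..=n).sum()
}

fn main() {
    // black_box hides the value from the optimizer so the work cannot be
    // constant-folded away; with this change Miri sees an actual call
    // instead of the hint being inlined to nothing.
    let n = black_box(1_000u64);
    black_box(sum_to(n));
}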

View File

@ -20,7 +20,6 @@
#![macro_use]
use crate::intrinsics;
use crate::mem;
/// Arithmetic operations required by bignums.
pub trait FullOps: Sized {
@ -58,25 +57,22 @@ macro_rules! impl_full_ops {
// This cannot overflow;
// the output is between `0` and `2^nbits * (2^nbits - 1)`.
// FIXME: will LLVM optimize this into ADC or similar?
let nbits = mem::size_of::<$ty>() * 8;
let v = (self as $bigty) * (other as $bigty) + (carry as $bigty);
((v >> nbits) as $ty, v as $ty)
((v >> <$ty>::BITS) as $ty, v as $ty)
}
fn full_mul_add(self, other: $ty, other2: $ty, carry: $ty) -> ($ty, $ty) {
// This cannot overflow;
// the output is between `0` and `2^nbits * (2^nbits - 1)`.
let nbits = mem::size_of::<$ty>() * 8;
let v = (self as $bigty) * (other as $bigty) + (other2 as $bigty) +
(carry as $bigty);
((v >> nbits) as $ty, v as $ty)
((v >> <$ty>::BITS) as $ty, v as $ty)
}
fn full_div_rem(self, other: $ty, borrow: $ty) -> ($ty, $ty) {
debug_assert!(borrow < other);
// This cannot overflow; the output is between `0` and `other * (2^nbits - 1)`.
let nbits = mem::size_of::<$ty>() * 8;
let lhs = ((borrow as $bigty) << nbits) | (self as $bigty);
let lhs = ((borrow as $bigty) << <$ty>::BITS) | (self as $bigty);
let rhs = other as $bigty;
((lhs / rhs) as $ty, (lhs % rhs) as $ty)
}
@ -128,13 +124,11 @@ macro_rules! define_bignum {
/// Makes a bignum from `u64` value.
pub fn from_u64(mut v: u64) -> $name {
use crate::mem;
let mut base = [0; $n];
let mut sz = 0;
while v > 0 {
base[sz] = v as $ty;
v >>= mem::size_of::<$ty>() * 8;
v >>= <$ty>::BITS;
sz += 1;
}
$name { size: sz, base: base }
@ -150,9 +144,7 @@ macro_rules! define_bignum {
/// Returns the `i`-th bit where bit 0 is the least significant one.
/// In other words, the bit with weight `2^i`.
pub fn get_bit(&self, i: usize) -> u8 {
use crate::mem;
let digitbits = mem::size_of::<$ty>() * 8;
let digitbits = <$ty>::BITS as usize;
let d = i / digitbits;
let b = i % digitbits;
((self.base[d] >> b) & 1) as u8
@ -166,8 +158,6 @@ macro_rules! define_bignum {
/// Returns the number of bits necessary to represent this value. Note that zero
/// is considered to need 0 bits.
pub fn bit_length(&self) -> usize {
use crate::mem;
// Skip over the most significant digits which are zero.
let digits = self.digits();
let zeros = digits.iter().rev().take_while(|&&x| x == 0).count();
@ -180,7 +170,7 @@ macro_rules! define_bignum {
}
// This could be optimized with leading_zeros() and bit shifts, but that's
// probably not worth the hassle.
let digitbits = mem::size_of::<$ty>() * 8;
let digitbits = <$ty>::BITS as usize;
let mut i = nonzero.len() * digitbits - 1;
while self.get_bit(i) == 0 {
i -= 1;
@ -265,9 +255,7 @@ macro_rules! define_bignum {
/// Multiplies itself by `2^bits` and returns its own mutable reference.
pub fn mul_pow2(&mut self, bits: usize) -> &mut $name {
use crate::mem;
let digitbits = mem::size_of::<$ty>() * 8;
let digitbits = <$ty>::BITS as usize;
let digits = bits / digitbits;
let bits = bits % digitbits;
@ -393,13 +381,11 @@ macro_rules! define_bignum {
/// Divide self by another bignum, overwriting `q` with the quotient and `r` with the
/// remainder.
pub fn div_rem(&self, d: &$name, q: &mut $name, r: &mut $name) {
use crate::mem;
// Stupid slow base-2 long division taken from
// https://en.wikipedia.org/wiki/Division_algorithm
// FIXME use a greater base ($ty) for the long division.
assert!(!d.is_zero());
let digitbits = mem::size_of::<$ty>() * 8;
let digitbits = <$ty>::BITS as usize;
for digit in &mut q.base[..] {
*digit = 0;
}
@ -462,10 +448,8 @@ macro_rules! define_bignum {
impl crate::fmt::Debug for $name {
fn fmt(&self, f: &mut crate::fmt::Formatter<'_>) -> crate::fmt::Result {
use crate::mem;
let sz = if self.size < 1 { 1 } else { self.size };
let digitlen = mem::size_of::<$ty>() * 2;
let digitlen = <$ty>::BITS as usize / 4;
write!(f, "{:#x}", self.base[sz - 1])?;
for &v in self.base[..sz - 1].iter().rev() {

View File

@ -348,6 +348,20 @@ $EndFeature, "
pub const MAX: Self = !Self::MIN;
}
doc_comment! {
concat!("The size of this integer type in bits.
# Examples
```
", $Feature, "#![feature(int_bits_const)]
assert_eq!(", stringify!($SelfT), "::BITS, ", stringify!($BITS), ");",
$EndFeature, "
```"),
#[unstable(feature = "int_bits_const", issue = "76904")]
pub const BITS: u32 = $BITS;
}
doc_comment! {
concat!("Converts a string slice in a given base to an integer.
@ -2601,6 +2615,20 @@ $EndFeature, "
pub const MAX: Self = !0;
}
doc_comment! {
concat!("The size of this integer type in bits.
# Examples
```
", $Feature, "#![feature(int_bits_const)]
assert_eq!(", stringify!($SelfT), "::BITS, ", stringify!($BITS), ");",
$EndFeature, "
```"),
#[unstable(feature = "int_bits_const", issue = "76904")]
pub const BITS: u32 = $BITS;
}
doc_comment! {
concat!("Converts a string slice in a given base to an integer.

View File

@ -565,7 +565,7 @@ fn break_patterns<T>(v: &mut [T]) {
random
};
let mut gen_usize = || {
if mem::size_of::<usize>() <= 4 {
if usize::BITS <= 32 {
gen_u32() as usize
} else {
(((gen_u32() as u64) << 32) | (gen_u32() as u64)) as usize
@ -667,7 +667,7 @@ where
///
/// `limit` is the number of allowed imbalanced partitions before switching to `heapsort`. If zero,
/// this function will immediately switch to heapsort.
fn recurse<'a, T, F>(mut v: &'a mut [T], is_less: &mut F, mut pred: Option<&'a T>, mut limit: usize)
fn recurse<'a, T, F>(mut v: &'a mut [T], is_less: &mut F, mut pred: Option<&'a T>, mut limit: u32)
where
F: FnMut(&T, &T) -> bool,
{
@ -763,7 +763,7 @@ where
}
// Limit the number of imbalanced partitions to `floor(log2(len)) + 1`.
let limit = mem::size_of::<usize>() * 8 - v.len().leading_zeros() as usize;
let limit = usize::BITS - v.len().leading_zeros();
recurse(v, &mut is_less, None, limit);
}
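The new expression computes the same bound as before: `usize::BITS - len.leading_zeros()` is the bit length of `len`, which equals `floor(log2(len)) + 1` for non-zero lengths. A small worked check (sketch only):

fn limit(len: usize) -> u32 {
    // bit length of `len`, i.e. floor(log2(len)) + 1 for len > 0
    usize::BITS - len.leading_zeros()
}

fn main() {
    assert_eq!(limit(1), 1);
    assert_eq!(limit(8), 4);
    assert_eq!(limit(1000), 10);
}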

View File

@ -474,7 +474,7 @@ fn test_iterator_step_by_nth_overflow() {
}
let mut it = Test(0);
let root = usize::MAX >> (::std::mem::size_of::<usize>() * 8 / 2);
let root = usize::MAX >> (usize::BITS / 2);
let n = root + 20;
(&mut it).step_by(n).nth(n);
assert_eq!(it.0, n as Bigger * n as Bigger);

View File

@ -52,6 +52,7 @@
#![feature(partition_point)]
#![feature(once_cell)]
#![feature(unsafe_block_in_unsafe_fn)]
#![feature(int_bits_const)]
#![deny(unsafe_op_in_unsafe_fn)]
extern crate test;

View File

@ -2,7 +2,6 @@ macro_rules! int_module {
($T:ident, $T_i:ident) => {
#[cfg(test)]
mod tests {
use core::mem;
use core::ops::{BitAnd, BitOr, BitXor, Not, Shl, Shr};
use core::$T_i::*;
@ -82,30 +81,27 @@ macro_rules! int_module {
#[test]
fn test_count_zeros() {
let bits = mem::size_of::<$T>() * 8;
assert_eq!(A.count_zeros(), bits as u32 - 3);
assert_eq!(B.count_zeros(), bits as u32 - 2);
assert_eq!(C.count_zeros(), bits as u32 - 5);
assert_eq!(A.count_zeros(), $T::BITS - 3);
assert_eq!(B.count_zeros(), $T::BITS - 2);
assert_eq!(C.count_zeros(), $T::BITS - 5);
}
#[test]
fn test_leading_trailing_ones() {
let bits = (mem::size_of::<$T>() * 8) as u32;
let a: $T = 0b0101_1111;
assert_eq!(a.trailing_ones(), 5);
assert_eq!((!a).leading_ones(), bits - 7);
assert_eq!((!a).leading_ones(), $T::BITS - 7);
assert_eq!(a.reverse_bits().leading_ones(), 5);
assert_eq!(_1.leading_ones(), bits);
assert_eq!(_1.trailing_ones(), bits);
assert_eq!(_1.leading_ones(), $T::BITS);
assert_eq!(_1.trailing_ones(), $T::BITS);
assert_eq!((_1 << 1).trailing_ones(), 0);
assert_eq!(MAX.leading_ones(), 0);
assert_eq!((_1 << 1).leading_ones(), bits - 1);
assert_eq!(MAX.trailing_ones(), bits - 1);
assert_eq!((_1 << 1).leading_ones(), $T::BITS - 1);
assert_eq!(MAX.trailing_ones(), $T::BITS - 1);
assert_eq!(_0.leading_ones(), 0);
assert_eq!(_0.trailing_ones(), 0);

View File

@ -4,7 +4,6 @@ macro_rules! uint_module {
mod tests {
use core::ops::{BitAnd, BitOr, BitXor, Not, Shl, Shr};
use core::$T_i::*;
use std::mem;
use std::str::FromStr;
use crate::num;
@ -47,30 +46,27 @@ macro_rules! uint_module {
#[test]
fn test_count_zeros() {
let bits = mem::size_of::<$T>() * 8;
assert!(A.count_zeros() == bits as u32 - 3);
assert!(B.count_zeros() == bits as u32 - 2);
assert!(C.count_zeros() == bits as u32 - 5);
assert!(A.count_zeros() == $T::BITS - 3);
assert!(B.count_zeros() == $T::BITS - 2);
assert!(C.count_zeros() == $T::BITS - 5);
}
#[test]
fn test_leading_trailing_ones() {
let bits = (mem::size_of::<$T>() * 8) as u32;
let a: $T = 0b0101_1111;
assert_eq!(a.trailing_ones(), 5);
assert_eq!((!a).leading_ones(), bits - 7);
assert_eq!((!a).leading_ones(), $T::BITS - 7);
assert_eq!(a.reverse_bits().leading_ones(), 5);
assert_eq!(_1.leading_ones(), bits);
assert_eq!(_1.trailing_ones(), bits);
assert_eq!(_1.leading_ones(), $T::BITS);
assert_eq!(_1.trailing_ones(), $T::BITS);
assert_eq!((_1 << 1).trailing_ones(), 0);
assert_eq!((_1 >> 1).leading_ones(), 0);
assert_eq!((_1 << 1).leading_ones(), bits - 1);
assert_eq!((_1 >> 1).trailing_ones(), bits - 1);
assert_eq!((_1 << 1).leading_ones(), $T::BITS - 1);
assert_eq!((_1 >> 1).trailing_ones(), $T::BITS - 1);
assert_eq!(_0.leading_ones(), 0);
assert_eq!(_0.trailing_ones(), 0);

View File

@ -53,7 +53,7 @@ impl DwarfReader {
}
pub unsafe fn read_sleb128(&mut self) -> i64 {
let mut shift: usize = 0;
let mut shift: u32 = 0;
let mut result: u64 = 0;
let mut byte: u8;
loop {
@ -65,7 +65,7 @@ impl DwarfReader {
}
}
// sign-extend
if shift < 8 * mem::size_of::<u64>() && (byte & 0x40) != 0 {
if shift < u64::BITS && (byte & 0x40) != 0 {
result |= (!0 as u64) << shift;
}
result as i64
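A self-contained sketch of the SLEB128 decoding logic above, including the `u64::BITS` sign-extension guard; `decode_sleb128` is a hypothetical free function, not the `DwarfReader` API.

fn decode_sleb128(bytes: &[u8]) -> i64 {
    let mut shift: u32 = 0;
    let mut result: u64 = 0;
    let mut byte: u8 = 0;
    for &b in bytes {
        byte = b;
        result |= u64::from(byte & 0x7f) << shift;
        shift += 7;
        if byte & 0x80 == 0 {
            break;
        }
    }
    // Sign-extend when the last byte's sign bit (0x40) is set and there are
    // still high bits left to fill.
    if shift < u64::BITS && (byte & 0x40) != 0 {
        result |= !0u64 << shift;
    }
    result as i64
}

fn main() {
    assert_eq!(decode_sleb128(&[0x7e]), -2);        // single-byte negative value
    assert_eq!(decode_sleb128(&[0x80, 0x01]), 128); // multi-byte positive value
}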

View File

@ -120,7 +120,7 @@ const UNWIND_DATA_REG: (i32, i32) = (24, 25); // I0, I1
#[cfg(target_arch = "hexagon")]
const UNWIND_DATA_REG: (i32, i32) = (0, 1); // R0, R1
#[cfg(target_arch = "riscv64")]
#[cfg(any(target_arch = "riscv64", target_arch = "riscv32"))]
const UNWIND_DATA_REG: (i32, i32) = (10, 11); // x10, x11
// The following code is based on GCC's C and C++ personality routines. For reference, see:

View File

@ -18,6 +18,7 @@
issue_tracker_base_url = "https://github.com/rust-lang/rust/issues/"
)]
#![feature(core_intrinsics)]
#![feature(int_bits_const)]
#![feature(lang_items)]
#![feature(libc)]
#![feature(nll)]

View File

@ -234,7 +234,8 @@ mod arch {
target_arch = "mips64",
target_arch = "s390x",
target_arch = "sparc64",
target_arch = "riscv64"
target_arch = "riscv64",
target_arch = "riscv32"
))]
mod arch {
pub use libc::{blkcnt_t, blksize_t, ino_t, nlink_t, off_t, stat, time_t};

View File

@ -22,7 +22,8 @@ mod tests;
target_arch = "powerpc",
target_arch = "powerpc64",
target_arch = "s390x",
target_arch = "riscv64"
target_arch = "riscv64",
target_arch = "riscv32"
)
),
all(target_os = "android", any(target_arch = "aarch64", target_arch = "arm")),
@ -65,7 +66,8 @@ pub type c_char = u8;
target_arch = "powerpc",
target_arch = "powerpc64",
target_arch = "s390x",
target_arch = "riscv64"
target_arch = "riscv64",
target_arch = "riscv32"
)
),
all(target_os = "android", any(target_arch = "aarch64", target_arch = "arm")),

View File

@ -58,6 +58,7 @@
//! [`push`]: PathBuf::push
#![stable(feature = "rust1", since = "1.0.0")]
#![deny(unsafe_op_in_unsafe_fn)]
#[cfg(test)]
mod tests;
@ -294,7 +295,8 @@ fn os_str_as_u8_slice(s: &OsStr) -> &[u8] {
unsafe { &*(s as *const OsStr as *const [u8]) }
}
unsafe fn u8_slice_as_os_str(s: &[u8]) -> &OsStr {
&*(s as *const [u8] as *const OsStr)
// SAFETY: see the comment of `os_str_as_u8_slice`
unsafe { &*(s as *const [u8] as *const OsStr) }
}
// Detect scheme on Redox
@ -314,24 +316,21 @@ fn has_physical_root(s: &[u8], prefix: Option<Prefix<'_>>) -> bool {
// basic workhorse for splitting stem and extension
fn split_file_at_dot(file: &OsStr) -> (Option<&OsStr>, Option<&OsStr>) {
unsafe {
if os_str_as_u8_slice(file) == b".." {
return (Some(file), None);
}
if os_str_as_u8_slice(file) == b".." {
return (Some(file), None);
}
// The unsafety here stems from converting between &OsStr and &[u8]
// and back. This is safe to do because (1) we only look at ASCII
// contents of the encoding and (2) new &OsStr values are produced
// only from ASCII-bounded slices of existing &OsStr values.
let mut iter = os_str_as_u8_slice(file).rsplitn(2, |b| *b == b'.');
let after = iter.next();
let before = iter.next();
if before == Some(b"") {
(Some(file), None)
} else {
(before.map(|s| u8_slice_as_os_str(s)), after.map(|s| u8_slice_as_os_str(s)))
}
// The unsafety here stems from converting between &OsStr and &[u8]
// and back. This is safe to do because (1) we only look at ASCII
// contents of the encoding and (2) new &OsStr values are produced
// only from ASCII-bounded slices of existing &OsStr values.
let mut iter = os_str_as_u8_slice(file).rsplitn(2, |b| *b == b'.');
let after = iter.next();
let before = iter.next();
if before == Some(b"") {
(Some(file), None)
} else {
unsafe { (before.map(|s| u8_slice_as_os_str(s)), after.map(|s| u8_slice_as_os_str(s))) }
}
}
@ -1702,7 +1701,7 @@ impl Path {
// The following (private!) function allows construction of a path from a u8
// slice, which is only safe when it is known to follow the OsStr encoding.
unsafe fn from_u8_slice(s: &[u8]) -> &Path {
Path::new(u8_slice_as_os_str(s))
unsafe { Path::new(u8_slice_as_os_str(s)) }
}
// The following (private!) function reveals the byte encoding used for OsStr.
fn as_u8_slice(&self) -> &[u8] {

View File

@ -14,7 +14,8 @@ use crate::ptr;
target_arch = "powerpc64",
target_arch = "asmjs",
target_arch = "wasm32",
target_arch = "hexagon"
target_arch = "hexagon",
target_arch = "riscv32"
)))]
pub const MIN_ALIGN: usize = 8;
#[cfg(all(any(

View File

@ -54,7 +54,7 @@ pub const unwinder_private_data_size: usize = 2;
#[cfg(target_arch = "sparc64")]
pub const unwinder_private_data_size: usize = 2;
#[cfg(target_arch = "riscv64")]
#[cfg(any(target_arch = "riscv64", target_arch = "riscv32"))]
pub const unwinder_private_data_size: usize = 2;
#[cfg(target_os = "emscripten")]

View File

@ -617,7 +617,13 @@ impl Config {
| Subcommand::Build { .. }
| Subcommand::Bench { .. }
| Subcommand::Dist { .. }
| Subcommand::Install { .. } => assert_eq!(config.stage, 2),
| Subcommand::Install { .. } => {
assert_eq!(
config.stage, 2,
"x.py should be run with `--stage 2` on CI, but was run with `--stage {}`",
config.stage,
);
}
Subcommand::Clean { .. }
| Subcommand::Check { .. }
| Subcommand::Clippy { .. }

View File

@ -0,0 +1,21 @@
// EMIT_MIR issue_75439.foo.MatchBranchSimplification.diff
#![feature(const_fn_transmute)]
#![feature(or_patterns)]
use std::mem::transmute;
pub fn foo(bytes: [u8; 16]) -> Option<[u8; 4]> {
// big endian `u32`s
let dwords: [u32; 4] = unsafe { transmute(bytes) };
const FF: u32 = 0x0000_ffff_u32.to_be();
if let [0, 0, 0 | FF, ip] = dwords {
Some(unsafe { transmute(ip) })
} else {
None
}
}
fn main() {
let _ = foo([0; 16]);
}

View File

@ -0,0 +1,87 @@
- // MIR for `foo` before MatchBranchSimplification
+ // MIR for `foo` after MatchBranchSimplification
fn foo(_1: [u8; 16]) -> Option<[u8; 4]> {
debug bytes => _1; // in scope 0 at $DIR/issue-75439.rs:8:12: 8:17
let mut _0: std::option::Option<[u8; 4]>; // return place in scope 0 at $DIR/issue-75439.rs:8:32: 8:47
let _2: [u32; 4]; // in scope 0 at $DIR/issue-75439.rs:10:9: 10:15
let mut _3: [u8; 16]; // in scope 0 at $DIR/issue-75439.rs:10:47: 10:52
let mut _5: [u8; 4]; // in scope 0 at $DIR/issue-75439.rs:13:14: 13:38
let mut _6: u32; // in scope 0 at $DIR/issue-75439.rs:13:33: 13:35
scope 1 {
debug dwords => _2; // in scope 1 at $DIR/issue-75439.rs:10:9: 10:15
let _4: u32; // in scope 1 at $DIR/issue-75439.rs:12:27: 12:29
scope 3 {
debug ip => _4; // in scope 3 at $DIR/issue-75439.rs:12:27: 12:29
scope 4 {
}
}
}
scope 2 {
}
bb0: {
StorageLive(_2); // scope 0 at $DIR/issue-75439.rs:10:9: 10:15
StorageLive(_3); // scope 2 at $DIR/issue-75439.rs:10:47: 10:52
_3 = _1; // scope 2 at $DIR/issue-75439.rs:10:47: 10:52
_2 = transmute::<[u8; 16], [u32; 4]>(move _3) -> bb1; // scope 2 at $DIR/issue-75439.rs:10:37: 10:53
// mir::Constant
// + span: $DIR/issue-75439.rs:10:37: 10:46
// + literal: Const { ty: unsafe extern "rust-intrinsic" fn([u8; 16]) -> [u32; 4] {std::intrinsics::transmute::<[u8; 16], [u32; 4]>}, val: Value(Scalar(<ZST>)) }
}
bb1: {
StorageDead(_3); // scope 2 at $DIR/issue-75439.rs:10:52: 10:53
switchInt(_2[0 of 4]) -> [0_u32: bb2, otherwise: bb4]; // scope 1 at $DIR/issue-75439.rs:12:13: 12:14
}
bb2: {
switchInt(_2[1 of 4]) -> [0_u32: bb3, otherwise: bb4]; // scope 1 at $DIR/issue-75439.rs:12:16: 12:17
}
bb3: {
switchInt(_2[2 of 4]) -> [0_u32: bb6, 4294901760_u32: bb7, otherwise: bb4]; // scope 1 at $DIR/issue-75439.rs:12:19: 12:20
}
bb4: {
discriminant(_0) = 0; // scope 1 at $DIR/issue-75439.rs:15:9: 15:13
goto -> bb9; // scope 1 at $DIR/issue-75439.rs:12:5: 16:6
}
bb5: {
StorageLive(_5); // scope 3 at $DIR/issue-75439.rs:13:14: 13:38
StorageLive(_6); // scope 4 at $DIR/issue-75439.rs:13:33: 13:35
_6 = _4; // scope 4 at $DIR/issue-75439.rs:13:33: 13:35
_5 = transmute::<u32, [u8; 4]>(move _6) -> bb8; // scope 4 at $DIR/issue-75439.rs:13:23: 13:36
// mir::Constant
// + span: $DIR/issue-75439.rs:13:23: 13:32
// + literal: Const { ty: unsafe extern "rust-intrinsic" fn(u32) -> [u8; 4] {std::intrinsics::transmute::<u32, [u8; 4]>}, val: Value(Scalar(<ZST>)) }
}
bb6: {
StorageLive(_4); // scope 1 at $DIR/issue-75439.rs:12:27: 12:29
_4 = _2[3 of 4]; // scope 1 at $DIR/issue-75439.rs:12:27: 12:29
goto -> bb5; // scope 1 at $DIR/issue-75439.rs:12:5: 16:6
}
bb7: {
StorageLive(_4); // scope 1 at $DIR/issue-75439.rs:12:27: 12:29
_4 = _2[3 of 4]; // scope 1 at $DIR/issue-75439.rs:12:27: 12:29
goto -> bb5; // scope 1 at $DIR/issue-75439.rs:12:5: 16:6
}
bb8: {
StorageDead(_6); // scope 4 at $DIR/issue-75439.rs:13:35: 13:36
((_0 as Some).0: [u8; 4]) = move _5; // scope 3 at $DIR/issue-75439.rs:13:9: 13:39
discriminant(_0) = 1; // scope 3 at $DIR/issue-75439.rs:13:9: 13:39
StorageDead(_5); // scope 3 at $DIR/issue-75439.rs:13:38: 13:39
StorageDead(_4); // scope 1 at $DIR/issue-75439.rs:14:5: 14:6
goto -> bb9; // scope 1 at $DIR/issue-75439.rs:12:5: 16:6
}
bb9: {
StorageDead(_2); // scope 0 at $DIR/issue-75439.rs:17:1: 17:2
return; // scope 0 at $DIR/issue-75439.rs:17:2: 17:2
}
}

View File

@ -1,5 +1,7 @@
// run-pass
// Test a ZST enum whose discriminant is ~0i128. This caused an ICE when casting to an i32.
#![feature(test)]
use std::hint::black_box;
#[derive(Copy, Clone)]
enum Nums {
@ -12,9 +14,6 @@ const NEG_ONE_I32: i32 = Nums::NegOne as i32;
const NEG_ONE_I64: i64 = Nums::NegOne as i64;
const NEG_ONE_I128: i128 = Nums::NegOne as i128;
#[inline(never)]
fn identity<T>(t: T) -> T { t }
fn test_as_arg(n: Nums) {
assert_eq!(-1i8, n as i8);
assert_eq!(-1i16, n as i16);
@ -31,11 +30,11 @@ fn main() {
assert_eq!(-1i64, kind as i64);
assert_eq!(-1i128, kind as i128);
assert_eq!(-1i8, identity(kind) as i8);
assert_eq!(-1i16, identity(kind) as i16);
assert_eq!(-1i32, identity(kind) as i32);
assert_eq!(-1i64, identity(kind) as i64);
assert_eq!(-1i128, identity(kind) as i128);
assert_eq!(-1i8, black_box(kind) as i8);
assert_eq!(-1i16, black_box(kind) as i16);
assert_eq!(-1i32, black_box(kind) as i32);
assert_eq!(-1i64, black_box(kind) as i64);
assert_eq!(-1i128, black_box(kind) as i128);
test_as_arg(Nums::NegOne);

View File

@ -1,14 +1,10 @@
// run-pass
#![feature(const_discriminant)]
#![feature(test)]
#![allow(dead_code)]
use std::mem::{discriminant, Discriminant};
// `discriminant(const_expr)` may get const-propagated.
// As we want to check that const-eval is equal to ordinary execution,
// we wrap `const_expr` with a function which is not const to prevent this.
#[inline(never)]
fn identity<T>(x: T) -> T { x }
use std::hint::black_box;
enum Test {
A(u8),
@ -31,10 +27,10 @@ const TEST_V: Discriminant<SingleVariant> = discriminant(&SingleVariant::V);
fn main() {
assert_eq!(TEST_A, TEST_A_OTHER);
assert_eq!(TEST_A, discriminant(identity(&Test::A(17))));
assert_eq!(TEST_B, discriminant(identity(&Test::B)));
assert_eq!(TEST_A, discriminant(black_box(&Test::A(17))));
assert_eq!(TEST_B, discriminant(black_box(&Test::B)));
assert_ne!(TEST_A, TEST_B);
assert_ne!(TEST_B, discriminant(identity(&Test::C { a: 42, b: 7 })));
assert_ne!(TEST_B, discriminant(black_box(&Test::C { a: 42, b: 7 })));
assert_eq!(TEST_V, discriminant(identity(&SingleVariant::V)));
assert_eq!(TEST_V, discriminant(black_box(&SingleVariant::V)));
}

View File

@ -2,13 +2,18 @@
#![allow(non_snake_case)]
use std::ops::RangeInclusive;
const RANGE: RangeInclusive<i32> = 0..=255;
const RANGE2: RangeInclusive<i32> = panic!();
fn main() {
let n: i32 = 1;
match n {
RANGE => {}
//~^ ERROR mismatched types
RANGE2 => {}
//~^ ERROR mismatched types
_ => {}
}
}

View File

@ -1,5 +1,5 @@
error[E0308]: mismatched types
--> $DIR/issue-76191.rs:10:9
--> $DIR/issue-76191.rs:13:9
|
LL | const RANGE: RangeInclusive<i32> = 0..=255;
| ------------------------------------------- constant defined here
@ -14,8 +14,30 @@ LL | RANGE => {}
|
= note: expected type `i32`
found struct `RangeInclusive<i32>`
help: you may want to move the range into the match block
|
LL | 0..=255 => {}
| ^^^^^^^
error[E0308]: mismatched types
--> $DIR/issue-76191.rs:15:9
|
LL | const RANGE2: RangeInclusive<i32> = panic!();
| --------------------------------------------- constant defined here
...
LL | match n {
| - this expression has type `i32`
...
LL | RANGE2 => {}
| ^^^^^^
| |
| expected `i32`, found struct `RangeInclusive`
| `RANGE2` is interpreted as a constant, not a new binding
|
= note: expected type `i32`
found struct `RangeInclusive<i32>`
= note: constants only support matching by type, if you meant to match against a range of values, consider using a range pattern like `min ..= max` in the match block
error: aborting due to previous error
error: aborting due to 2 previous errors
For more information about this error, try `rustc --explain E0308`.

View File

@ -111,6 +111,7 @@ static TARGETS: &[&str] = &[
"riscv32i-unknown-none-elf",
"riscv32imc-unknown-none-elf",
"riscv32imac-unknown-none-elf",
"riscv32gc-unknown-linux-gnu",
"riscv64imac-unknown-none-elf",
"riscv64gc-unknown-none-elf",
"riscv64gc-unknown-linux-gnu",