Replace `NonZero::<_>::new` with `NonZero::new`.
This commit is contained in:
parent 746a58d435
commit a90cc05233
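The change is purely mechanical: `NonZero::new` and `NonZero::new_unchecked` are generic over the integer type, so the type parameter can be inferred from the surrounding context and the turbofish is redundant. A minimal standalone sketch of the pattern (not taken from this diff; it assumes the generic `std::num::NonZero<T>` API available in recent stable Rust):

use std::num::NonZero;

fn main() {
    // The annotated binding fixes the type, so the compiler infers
    // `NonZero<u32>` and the old `NonZero::<u32>::new(42)` spelling is redundant.
    let inferred: Option<NonZero<u32>> = NonZero::new(42);
    assert_eq!(inferred.map(|n| n.get()), Some(42));

    // The turbofish remains available when you want to pin the integer type
    // explicitly rather than leave it to inference.
    let explicit = NonZero::<u64>::new(7).unwrap();
    assert_eq!(explicit.get(), 7);
}

The hunks below apply exactly this simplification throughout the compiler, the standard library, and Miri.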
@@ -143,15 +143,14 @@ where
 // `{non_zero} | packed_tag` can't make the value zero.

 let packed = (addr.get() >> T::BITS) | packed_tag;
-unsafe { NonZero::<usize>::new_unchecked(packed) }
+unsafe { NonZero::new_unchecked(packed) }
 })
 }

 /// Retrieves the original raw pointer from `self.packed`.
 #[inline]
 pub(super) fn pointer_raw(&self) -> NonNull<P::Target> {
-self.packed
-.map_addr(|addr| unsafe { NonZero::<usize>::new_unchecked(addr.get() << T::BITS) })
+self.packed.map_addr(|addr| unsafe { NonZero::new_unchecked(addr.get() << T::BITS) })
 }

 /// This provides a reference to the `P` pointer itself, rather than the
@@ -105,7 +105,7 @@ const fn to_nonzero(n: Option<u32>) -> Option<NonZero<u32>> {
 // in const context. Requires https://github.com/rust-lang/rfcs/pull/2632.
 match n {
 None => None,
-Some(n) => NonZero::<u32>::new(n),
+Some(n) => NonZero::new(n),
 }
 }

@@ -827,7 +827,7 @@ fn test_unstable_options_tracking_hash() {
 tracked!(tls_model, Some(TlsModel::GeneralDynamic));
 tracked!(translate_remapped_path_to_local_path, false);
 tracked!(trap_unreachable, Some(false));
-tracked!(treat_err_as_bug, NonZero::<usize>::new(1));
+tracked!(treat_err_as_bug, NonZero::new(1));
 tracked!(tune_cpu, Some(String::from("abc")));
 tracked!(uninit_const_chunk_threshold, 123);
 tracked!(unleash_the_miri_inside_of_you, true);
@@ -107,7 +107,7 @@ pub(crate) fn run_in_thread_pool_with_globals<F: FnOnce() -> R + Send, R: Send>(
 use rustc_query_impl::QueryCtxt;
 use rustc_query_system::query::{deadlock, QueryContext};

-let registry = sync::Registry::new(std::num::NonZero::<usize>::new(threads).unwrap());
+let registry = sync::Registry::new(std::num::NonZero::new(threads).unwrap());

 if !sync::is_dyn_thread_safe() {
 return run_in_thread_with_globals(edition, || {
@@ -338,7 +338,7 @@ impl<'a, 'tcx> DecodeContext<'a, 'tcx> {
 }
 LazyState::Previous(last_pos) => last_pos.get() + distance,
 };
-let position = NonZero::<usize>::new(position).unwrap();
+let position = NonZero::new(position).unwrap();
 self.lazy_state = LazyState::Previous(position);
 f(position)
 }
@@ -685,17 +685,15 @@ impl MetadataBlob {
 }

 pub(crate) fn get_rustc_version(&self) -> String {
-LazyValue::<String>::from_position(
-NonZero::<usize>::new(METADATA_HEADER.len() + 8).unwrap(),
-)
-.decode(self)
+LazyValue::<String>::from_position(NonZero::new(METADATA_HEADER.len() + 8).unwrap())
+.decode(self)
 }

 fn root_pos(&self) -> NonZero<usize> {
 let offset = METADATA_HEADER.len();
 let pos_bytes = self.blob()[offset..][..8].try_into().unwrap();
 let pos = u64::from_le_bytes(pos_bytes);
-NonZero::<usize>::new(pos as usize).unwrap()
+NonZero::new(pos as usize).unwrap()
 }

 pub(crate) fn get_header(&self) -> CrateHeader {
@@ -439,7 +439,7 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
 position.get() - last_pos.get()
 }
 };
-self.lazy_state = LazyState::Previous(NonZero::<usize>::new(pos).unwrap());
+self.lazy_state = LazyState::Previous(NonZero::new(pos).unwrap());
 self.emit_usize(distance);
 }

@@ -447,7 +447,7 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
 where
 T::Value<'tcx>: Encodable<EncodeContext<'a, 'tcx>>,
 {
-let pos = NonZero::<usize>::new(self.position()).unwrap();
+let pos = NonZero::new(self.position()).unwrap();

 assert_eq!(self.lazy_state, LazyState::NoNode);
 self.lazy_state = LazyState::NodeStart(pos);
@@ -466,7 +466,7 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
 where
 T::Value<'tcx>: Encodable<EncodeContext<'a, 'tcx>>,
 {
-let pos = NonZero::<usize>::new(self.position()).unwrap();
+let pos = NonZero::new(self.position()).unwrap();

 assert_eq!(self.lazy_state, LazyState::NoNode);
 self.lazy_state = LazyState::NodeStart(pos);
@@ -119,7 +119,7 @@ impl<T: ParameterizedOverTcx> ParameterizedOverTcx for LazyArray<T> {

 impl<T> Default for LazyArray<T> {
 fn default() -> LazyArray<T> {
-LazyArray::from_position_and_num_elems(NonZero::<usize>::new(1).unwrap(), 0)
+LazyArray::from_position_and_num_elems(NonZero::new(1).unwrap(), 0)
 }
 }

@@ -339,7 +339,7 @@ impl<T> FixedSizeEncoding for Option<LazyValue<T>> {

 #[inline]
 fn from_bytes(b: &[u8; 8]) -> Self {
-let position = NonZero::<usize>::new(u64::from_bytes(b) as usize)?;
+let position = NonZero::new(u64::from_bytes(b) as usize)?;
 Some(LazyValue::from_position(position))
 }

@@ -366,7 +366,7 @@ impl<T> LazyArray<T> {
 }

 fn from_bytes_impl(position: &[u8; 8], meta: &[u8; 8]) -> Option<LazyArray<T>> {
-let position = NonZero::<usize>::new(u64::from_bytes(position) as usize)?;
+let position = NonZero::new(u64::from_bytes(position) as usize)?;
 let len = u64::from_bytes(meta) as usize;
 Some(LazyArray::from_position_and_num_elems(position, len))
 }
@@ -497,7 +497,7 @@ impl<I: Idx, const N: usize, T: FixedSizeEncoding<ByteArray = [u8; N]>> TableBui
 }

 LazyTable::from_position_and_encoded_size(
-NonZero::<usize>::new(pos).unwrap(),
+NonZero::new(pos).unwrap(),
 width,
 self.blocks.len(),
 )
@@ -433,7 +433,7 @@ impl<'tcx> TyCtxt<'tcx> {
 // the `-Z force-unstable-if-unmarked` flag present (we're
 // compiling a compiler crate), then let this missing feature
 // annotation slide.
-if feature == sym::rustc_private && issue == NonZero::<u32>::new(27812) {
+if feature == sym::rustc_private && issue == NonZero::new(27812) {
 if self.sess.opts.unstable_opts.force_unstable_if_unmarked {
 return EvalResult::Allow;
 }
@@ -500,7 +500,7 @@ impl<'tcx> AllocMap<'tcx> {
 AllocMap {
 alloc_map: Default::default(),
 dedup: Default::default(),
-next_id: AllocId(NonZero::<u64>::new(1).unwrap()),
+next_id: AllocId(NonZero::new(1).unwrap()),
 }
 }
 fn reserve(&mut self) -> AllocId {
@@ -155,7 +155,7 @@ impl CtfeProvenance {
 /// Returns the `AllocId` of this provenance.
 #[inline(always)]
 pub fn alloc_id(self) -> AllocId {
-AllocId(NonZero::<u64>::new(self.0.get() & !IMMUTABLE_MASK).unwrap())
+AllocId(NonZero::new(self.0.get() & !IMMUTABLE_MASK).unwrap())
 }

 /// Returns whether this provenance is immutable.
@@ -161,14 +161,14 @@ impl<D: Decoder> Decodable<D> for ScalarInt {
 let mut data = [0u8; 16];
 let size = d.read_u8();
 data[..size as usize].copy_from_slice(d.read_raw_bytes(size as usize));
-ScalarInt { data: u128::from_le_bytes(data), size: NonZero::<u8>::new(size).unwrap() }
+ScalarInt { data: u128::from_le_bytes(data), size: NonZero::new(size).unwrap() }
 }
 }

 impl ScalarInt {
-pub const TRUE: ScalarInt = ScalarInt { data: 1_u128, size: NonZero::<u8>::new(1).unwrap() };
+pub const TRUE: ScalarInt = ScalarInt { data: 1_u128, size: NonZero::new(1).unwrap() };

-pub const FALSE: ScalarInt = ScalarInt { data: 0_u128, size: NonZero::<u8>::new(1).unwrap() };
+pub const FALSE: ScalarInt = ScalarInt { data: 0_u128, size: NonZero::new(1).unwrap() };

 #[inline]
 pub fn size(self) -> Size {
@@ -196,7 +196,7 @@ impl ScalarInt {

 #[inline]
 pub fn null(size: Size) -> Self {
-Self { data: 0, size: NonZero::<u8>::new(size.bytes() as u8).unwrap() }
+Self { data: 0, size: NonZero::new(size.bytes() as u8).unwrap() }
 }

 #[inline]
@@ -208,7 +208,7 @@ impl ScalarInt {
 pub fn try_from_uint(i: impl Into<u128>, size: Size) -> Option<Self> {
 let data = i.into();
 if size.truncate(data) == data {
-Some(Self { data, size: NonZero::<u8>::new(size.bytes() as u8).unwrap() })
+Some(Self { data, size: NonZero::new(size.bytes() as u8).unwrap() })
 } else {
 None
 }
@@ -220,7 +220,7 @@ impl ScalarInt {
 // `into` performed sign extension, we have to truncate
 let truncated = size.truncate(i as u128);
 if size.sign_extend(truncated) as i128 == i {
-Some(Self { data: truncated, size: NonZero::<u8>::new(size.bytes() as u8).unwrap() })
+Some(Self { data: truncated, size: NonZero::new(size.bytes() as u8).unwrap() })
 } else {
 None
 }
@@ -388,7 +388,7 @@ macro_rules! from {
 fn from(u: $ty) -> Self {
 Self {
 data: u128::from(u),
-size: NonZero::<u8>::new(std::mem::size_of::<$ty>() as u8).unwrap(),
+size: NonZero::new(std::mem::size_of::<$ty>() as u8).unwrap(),
 }
 }
 }
@@ -427,10 +427,7 @@ impl TryFrom<ScalarInt> for bool {
 impl From<char> for ScalarInt {
 #[inline]
 fn from(c: char) -> Self {
-Self {
-data: c as u128,
-size: NonZero::<u8>::new(std::mem::size_of::<char>() as u8).unwrap(),
-}
+Self { data: c as u128, size: NonZero::new(std::mem::size_of::<char>() as u8).unwrap() }
 }
 }

@@ -457,7 +454,7 @@ impl From<Single> for ScalarInt {
 #[inline]
 fn from(f: Single) -> Self {
 // We trust apfloat to give us properly truncated data.
-Self { data: f.to_bits(), size: NonZero::<u8>::new((Single::BITS / 8) as u8).unwrap() }
+Self { data: f.to_bits(), size: NonZero::new((Single::BITS / 8) as u8).unwrap() }
 }
 }

@@ -473,7 +470,7 @@ impl From<Double> for ScalarInt {
 #[inline]
 fn from(f: Double) -> Self {
 // We trust apfloat to give us properly truncated data.
-Self { data: f.to_bits(), size: NonZero::<u8>::new((Double::BITS / 8) as u8).unwrap() }
+Self { data: f.to_bits(), size: NonZero::new((Double::BITS / 8) as u8).unwrap() }
 }
 }
@@ -143,9 +143,8 @@ impl<'tcx> From<ty::Term<'tcx>> for GenericArg<'tcx> {
 impl<'tcx> GenericArg<'tcx> {
 #[inline]
 pub fn unpack(self) -> GenericArgKind<'tcx> {
-let ptr = unsafe {
-self.ptr.map_addr(|addr| NonZero::<usize>::new_unchecked(addr.get() & !TAG_MASK))
-};
+let ptr =
+unsafe { self.ptr.map_addr(|addr| NonZero::new_unchecked(addr.get() & !TAG_MASK)) };
 // SAFETY: use of `Interned::new_unchecked` here is ok because these
 // pointers were originally created from `Interned` types in `pack()`,
 // and this is just going in the other direction.
@@ -761,7 +761,7 @@ where
 };
 tcx.mk_layout(LayoutS {
 variants: Variants::Single { index: variant_index },
-fields: match NonZero::<usize>::new(fields) {
+fields: match NonZero::new(fields) {
 Some(fields) => FieldsShape::Union(fields),
 None => FieldsShape::Arbitrary { offsets: IndexVec::new(), memory_index: IndexVec::new() },
 },
@@ -617,9 +617,8 @@ impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> Decodable<D> for Term<'tcx> {
 impl<'tcx> Term<'tcx> {
 #[inline]
 pub fn unpack(self) -> TermKind<'tcx> {
-let ptr = unsafe {
-self.ptr.map_addr(|addr| NonZero::<usize>::new_unchecked(addr.get() & !TAG_MASK))
-};
+let ptr =
+unsafe { self.ptr.map_addr(|addr| NonZero::new_unchecked(addr.get() & !TAG_MASK)) };
 // SAFETY: use of `Interned::new_unchecked` here is ok because these
 // pointers were originally created from `Interned` types in `pack()`,
 // and this is just going in the other direction.
@@ -645,7 +645,7 @@ fn stability_index(tcx: TyCtxt<'_>, (): ()) -> Index {
 let stability = Stability {
 level: attr::StabilityLevel::Unstable {
 reason: UnstableReason::Default,
-issue: NonZero::<u32>::new(27812),
+issue: NonZero::new(27812),
 is_soft: false,
 implied_by: None,
 },
@@ -68,10 +68,8 @@ impl QueryContext for QueryCtxt<'_> {
 #[inline]
 fn next_job_id(self) -> QueryJobId {
 QueryJobId(
-NonZero::<u64>::new(
-self.query_system.jobs.fetch_add(1, std::sync::atomic::Ordering::Relaxed),
-)
-.unwrap(),
+NonZero::new(self.query_system.jobs.fetch_add(1, std::sync::atomic::Ordering::Relaxed))
+.unwrap(),
 )
 }
@@ -225,7 +225,7 @@ impl<S: Encoder> Encodable<S> for NonZero<u32> {

 impl<D: Decoder> Decodable<D> for NonZero<u32> {
 fn decode(d: &mut D) -> Self {
-NonZero::<u32>::new(d.read_u32()).unwrap()
+NonZero::new(d.read_u32()).unwrap()
 }
 }

@@ -1007,7 +1007,7 @@ mod parse {
 }
 },
 None => {
-*slot = NonZero::<usize>::new(1);
+*slot = NonZero::new(1);
 true
 }
 }
@@ -350,7 +350,7 @@ impl<T: Ord, A: Allocator> DerefMut for PeekMut<'_, T, A> {
 // the standard library as "leak amplification".
 unsafe {
 // SAFETY: len > 1 so len != 0.
-self.original_len = Some(NonZero::<usize>::new_unchecked(len));
+self.original_len = Some(NonZero::new_unchecked(len));
 // SAFETY: len > 1 so all this does for now is leak elements,
 // which is safe.
 self.heap.data.set_len(1);
@@ -1576,8 +1576,8 @@ unsafe impl<T, A: Allocator> SourceIter for IntoIter<T, A> {
 #[unstable(issue = "none", feature = "inplace_iteration")]
 #[doc(hidden)]
 unsafe impl<I, A: Allocator> InPlaceIterable for IntoIter<I, A> {
-const EXPAND_BY: Option<NonZero<usize>> = NonZero::<usize>::new(1);
-const MERGE_BY: Option<NonZero<usize>> = NonZero::<usize>::new(1);
+const EXPAND_BY: Option<NonZero<usize>> = NonZero::new(1);
+const MERGE_BY: Option<NonZero<usize>> = NonZero::new(1);
 }

 unsafe impl<I> AsVecIntoIter for IntoIter<I> {
@@ -63,7 +63,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
 self.inner.drain(..n);
 0
 };
-NonZero::<usize>::new(rem).map_or(Ok(()), Err)
+NonZero::new(rem).map_or(Ok(()), Err)
 }

 #[inline]
@@ -192,7 +192,7 @@ impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
 self.inner.truncate(len - n);
 0
 };
-NonZero::<usize>::new(rem).map_or(Ok(()), Err)
+NonZero::new(rem).map_or(Ok(()), Err)
 }

 fn try_rfold<B, F, R>(&mut self, mut init: B, mut f: F) -> R
@@ -248,7 +248,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
 unsafe {
 ptr::drop_in_place(to_drop);
 }
-NonZero::<usize>::new(n - step_size).map_or(Ok(()), Err)
+NonZero::new(n - step_size).map_or(Ok(()), Err)
 }

 #[inline]
@@ -350,7 +350,7 @@ impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
 unsafe {
 ptr::drop_in_place(to_drop);
 }
-NonZero::<usize>::new(n - step_size).map_or(Ok(()), Err)
+NonZero::new(n - step_size).map_or(Ok(()), Err)
 }
 }

@@ -457,8 +457,8 @@ unsafe impl<#[may_dangle] T, A: Allocator> Drop for IntoIter<T, A> {
 #[unstable(issue = "none", feature = "inplace_iteration")]
 #[doc(hidden)]
 unsafe impl<T, A: Allocator> InPlaceIterable for IntoIter<T, A> {
-const EXPAND_BY: Option<NonZero<usize>> = NonZero::<usize>::new(1);
-const MERGE_BY: Option<NonZero<usize>> = NonZero::<usize>::new(1);
+const EXPAND_BY: Option<NonZero<usize>> = NonZero::new(1);
+const MERGE_BY: Option<NonZero<usize>> = NonZero::new(1);
 }

 #[unstable(issue = "none", feature = "inplace_iteration")]
@@ -1089,9 +1089,9 @@ fn test_into_iter_advance_by() {
 assert_eq!(i.advance_back_by(1), Ok(()));
 assert_eq!(i.as_slice(), [2, 3, 4]);

-assert_eq!(i.advance_back_by(usize::MAX), Err(NonZero::<usize>::new(usize::MAX - 3).unwrap()));
+assert_eq!(i.advance_back_by(usize::MAX), Err(NonZero::new(usize::MAX - 3).unwrap()));

-assert_eq!(i.advance_by(usize::MAX), Err(NonZero::<usize>::new(usize::MAX).unwrap()));
+assert_eq!(i.advance_by(usize::MAX), Err(NonZero::new(usize::MAX).unwrap()));

 assert_eq!(i.advance_by(0), Ok(()));
 assert_eq!(i.advance_back_by(0), Ok(()));
@@ -1192,7 +1192,7 @@ fn test_from_iter_specialization_with_iterator_adapters() {
 .map(|(a, b)| a + b)
 .map_while(Option::Some)
 .skip(1)
-.map(|e| if e != usize::MAX { Ok(NonZero::<usize>::new(e)) } else { Err(()) });
+.map(|e| if e != usize::MAX { Ok(NonZero::new(e)) } else { Err(()) });
 assert_in_place_trait(&iter);
 let sink = iter.collect::<Result<Vec<_>, _>>().unwrap();
 let sinkptr = sink.as_ptr();
@@ -445,9 +445,9 @@ fn test_into_iter() {
 assert_eq!(it.next_back(), Some(3));

 let mut it = VecDeque::from(vec![1, 2, 3, 4, 5]).into_iter();
-assert_eq!(it.advance_by(10), Err(NonZero::<usize>::new(5).unwrap()));
+assert_eq!(it.advance_by(10), Err(NonZero::new(5).unwrap()));
 let mut it = VecDeque::from(vec![1, 2, 3, 4, 5]).into_iter();
-assert_eq!(it.advance_back_by(10), Err(NonZero::<usize>::new(5).unwrap()));
+assert_eq!(it.advance_back_by(10), Err(NonZero::new(5).unwrap()));
 }
 }

@@ -292,7 +292,7 @@ impl<T, const N: usize> Iterator for IntoIter<T, N> {
 ptr::drop_in_place(MaybeUninit::slice_assume_init_mut(slice));
 }

-NonZero::<usize>::new(remaining).map_or(Ok(()), Err)
+NonZero::new(remaining).map_or(Ok(()), Err)
 }

 #[inline]
@@ -347,7 +347,7 @@ impl<T, const N: usize> DoubleEndedIterator for IntoIter<T, N> {
 ptr::drop_in_place(MaybeUninit::slice_assume_init_mut(slice));
 }

-NonZero::<usize>::new(remaining).map_or(Ok(()), Err)
+NonZero::new(remaining).map_or(Ok(()), Err)
 }
 }

@@ -255,7 +255,7 @@ where
 unsafe impl<I: InPlaceIterable + Iterator, const N: usize> InPlaceIterable for ArrayChunks<I, N> {
 const EXPAND_BY: Option<NonZero<usize>> = I::EXPAND_BY;
 const MERGE_BY: Option<NonZero<usize>> = const {
-match (I::MERGE_BY, NonZero::<usize>::new(N)) {
+match (I::MERGE_BY, NonZero::new(N)) {
 (Some(m), Some(n)) => m.checked_mul(n),
 _ => None,
 }
@@ -110,7 +110,7 @@ where
 // we don't fuse the second iterator
 }

-NonZero::<usize>::new(n).map_or(Ok(()), Err)
+NonZero::new(n).map_or(Ok(()), Err)
 }

 #[inline]
@@ -196,7 +196,7 @@ where
 // we don't fuse the second iterator
 }

-NonZero::<usize>::new(n).map_or(Ok(()), Err)
+NonZero::new(n).map_or(Ok(()), Err)
 }

 #[inline]
@@ -97,7 +97,7 @@ where
 };
 }

-NonZero::<usize>::new(n).map_or(Ok(()), Err)
+NonZero::new(n).map_or(Ok(()), Err)
 }

 // No `fold` override, because `fold` doesn't make much sense for `Cycle`,
@@ -200,7 +200,7 @@ where
 #[rustc_specialization_trait]
 #[unstable(issue = "none", feature = "inplace_iteration")]
 unsafe trait BoundedSize {
-const UPPER_BOUND: Option<NonZero<usize>> = NonZero::<usize>::new(1);
+const UPPER_BOUND: Option<NonZero<usize>> = NonZero::new(1);
 }

 #[unstable(issue = "none", feature = "inplace_iteration")]
@@ -217,11 +217,11 @@ unsafe impl<T> BoundedSize for Once<T> {}
 unsafe impl<T> BoundedSize for OnceWith<T> {}
 #[unstable(issue = "none", feature = "inplace_iteration")]
 unsafe impl<T, const N: usize> BoundedSize for [T; N] {
-const UPPER_BOUND: Option<NonZero<usize>> = NonZero::<usize>::new(N);
+const UPPER_BOUND: Option<NonZero<usize>> = NonZero::new(N);
 }
 #[unstable(issue = "none", feature = "inplace_iteration")]
 unsafe impl<T, const N: usize> BoundedSize for array::IntoIter<T, N> {
-const UPPER_BOUND: Option<NonZero<usize>> = NonZero::<usize>::new(N);
+const UPPER_BOUND: Option<NonZero<usize>> = NonZero::new(N);
 }
 #[unstable(issue = "none", feature = "inplace_iteration")]
 unsafe impl<I: BoundedSize, P> BoundedSize for Filter<I, P> {
@@ -680,9 +680,7 @@ where
 }

 match self.iter_try_fold(n, advance) {
-ControlFlow::Continue(remaining) => {
-NonZero::<usize>::new(remaining).map_or(Ok(()), Err)
-}
+ControlFlow::Continue(remaining) => NonZero::new(remaining).map_or(Ok(()), Err),
 _ => Ok(()),
 }
 }
@@ -772,9 +770,7 @@ where
 }

 match self.iter_try_rfold(n, advance) {
-ControlFlow::Continue(remaining) => {
-NonZero::<usize>::new(remaining).map_or(Ok(()), Err)
-}
+ControlFlow::Continue(remaining) => NonZero::new(remaining).map_or(Ok(()), Err),
 _ => Ok(()),
 }
 }
@@ -154,7 +154,7 @@ where
 }
 }

-NonZero::<usize>::new(n).map_or(Ok(()), Err)
+NonZero::new(n).map_or(Ok(()), Err)
 }

 #[doc(hidden)]
@@ -238,7 +238,7 @@ where
 let min = crate::cmp::min(self.len(), n);
 let rem = self.iter.advance_back_by(min);
 assert!(rem.is_ok(), "ExactSizeIterator contract violation");
-NonZero::<usize>::new(n - min).map_or(Ok(()), Err)
+NonZero::new(n - min).map_or(Ok(()), Err)
 }
 }

@@ -125,7 +125,7 @@ where
 };
 let advanced = min - rem;
 self.n -= advanced;
-NonZero::<usize>::new(n - advanced).map_or(Ok(()), Err)
+NonZero::new(n - advanced).map_or(Ok(()), Err)
 }
 }

@@ -235,7 +235,7 @@ where
 let advanced_by_inner = advance_by - remainder;
 let advanced_by = advanced_by_inner - trim_inner;
 self.n -= advanced_by;
-NonZero::<usize>::new(n - advanced_by).map_or(Ok(()), Err)
+NonZero::new(n - advanced_by).map_or(Ok(()), Err)
 }
 }

@@ -678,7 +678,7 @@ impl<A: Step> RangeIteratorImpl for ops::Range<A> {
 self.start =
 Step::forward_checked(self.start.clone(), taken).expect("`Step` invariants not upheld");

-NonZero::<usize>::new(n - taken).map_or(Ok(()), Err)
+NonZero::new(n - taken).map_or(Ok(()), Err)
 }

 #[inline]
@@ -719,7 +719,7 @@ impl<A: Step> RangeIteratorImpl for ops::Range<A> {
 self.end =
 Step::backward_checked(self.end.clone(), taken).expect("`Step` invariants not upheld");

-NonZero::<usize>::new(n - taken).map_or(Ok(()), Err)
+NonZero::new(n - taken).map_or(Ok(()), Err)
 }
 }

@@ -766,7 +766,7 @@ impl<T: TrustedStep> RangeIteratorImpl for ops::Range<T> {
 // Otherwise 0 is returned which always safe to use.
 self.start = unsafe { Step::forward_unchecked(self.start, taken) };

-NonZero::<usize>::new(n - taken).map_or(Ok(()), Err)
+NonZero::new(n - taken).map_or(Ok(()), Err)
 }

 #[inline]
@@ -807,7 +807,7 @@ impl<T: TrustedStep> RangeIteratorImpl for ops::Range<T> {
 // SAFETY: same as the spec_advance_by() implementation
 self.end = unsafe { Step::backward_unchecked(self.end, taken) };

-NonZero::<usize>::new(n - taken).map_or(Ok(()), Err)
+NonZero::new(n - taken).map_or(Ok(()), Err)
 }
 }

@@ -145,7 +145,7 @@ impl<A: Clone> Iterator for RepeatN<A> {

 if skip > len {
 // SAFETY: we just checked that the difference is positive
-Err(unsafe { NonZero::<usize>::new_unchecked(skip - len) })
+Err(unsafe { NonZero::new_unchecked(skip - len) })
 } else {
 self.count = len - skip;
 Ok(())
@@ -138,7 +138,7 @@ pub trait DoubleEndedIterator: Iterator {
 for i in 0..n {
 if self.next_back().is_none() {
 // SAFETY: `i` is always less than `n`.
-return Err(unsafe { NonZero::<usize>::new_unchecked(n - i) });
+return Err(unsafe { NonZero::new_unchecked(n - i) });
 }
 }
 Ok(())
@@ -337,7 +337,7 @@ pub trait Iterator {
 for i in 0..n {
 if self.next().is_none() {
 // SAFETY: `i` is always less than `n`.
-return Err(unsafe { NonZero::<usize>::new_unchecked(n - i) });
+return Err(unsafe { NonZero::new_unchecked(n - i) });
 }
 }
 Ok(())
@@ -336,7 +336,7 @@ macro_rules! nonzero_integer {
 // SAFETY:
 // `self` is non-zero, which means it has at least one bit set, which means
 // that the result of `count_ones` is non-zero.
-unsafe { NonZero::<u32>::new_unchecked(self.get().count_ones()) }
+unsafe { NonZero::new_unchecked(self.get().count_ones()) }
 }

 nonzero_integer_signedness_dependent_methods! {
@@ -132,7 +132,7 @@ impl Iterator for IndexRange {
 #[inline]
 fn advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
 let taken = self.take_prefix(n);
-NonZero::<usize>::new(n - taken.len()).map_or(Ok(()), Err)
+NonZero::new(n - taken.len()).map_or(Ok(()), Err)
 }
 }

@@ -150,7 +150,7 @@ impl DoubleEndedIterator for IndexRange {
 #[inline]
 fn advance_back_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
 let taken = self.take_suffix(n);
-NonZero::<usize>::new(n - taken.len()).map_or(Ok(()), Err)
+NonZero::new(n - taken.len()).map_or(Ok(()), Err)
 }
 }

@@ -100,7 +100,7 @@ impl Alignment {
 #[inline]
 pub const fn as_nonzero(self) -> NonZeroUsize {
 // SAFETY: All the discriminants are non-zero.
-unsafe { NonZero::<usize>::new_unchecked(self.as_usize()) }
+unsafe { NonZero::new_unchecked(self.as_usize()) }
 }

 /// Returns the base-2 logarithm of the alignment.
@@ -295,7 +295,7 @@ impl<T: ?Sized> NonNull<T> {
 pub fn addr(self) -> NonZeroUsize {
 // SAFETY: The pointer is guaranteed by the type to be non-null,
 // meaning that the address will be non-zero.
-unsafe { NonZero::<usize>::new_unchecked(self.pointer.addr()) }
+unsafe { NonZero::new_unchecked(self.pointer.addr()) }
 }

 /// Creates a new pointer with the given address.
@@ -200,7 +200,7 @@ macro_rules! iterator {
 let advance = cmp::min(len!(self), n);
 // SAFETY: By construction, `advance` does not exceed `self.len()`.
 unsafe { self.post_inc_start(advance) };
-NonZero::<usize>::new(n - advance).map_or(Ok(()), Err)
+NonZero::new(n - advance).map_or(Ok(()), Err)
 }

 #[inline]
@@ -425,7 +425,7 @@ macro_rules! iterator {
 let advance = cmp::min(len!(self), n);
 // SAFETY: By construction, `advance` does not exceed `self.len()`.
 unsafe { self.pre_dec_end(advance) };
-NonZero::<usize>::new(n - advance).map_or(Ok(()), Err)
+NonZero::new(n - advance).map_or(Ok(()), Err)
 }
 }
@@ -1086,7 +1086,7 @@ impl<T> [T] {
 #[inline]
 #[track_caller]
 pub fn windows(&self, size: usize) -> Windows<'_, T> {
-let size = NonZero::<usize>::new(size).expect("window size must be non-zero");
+let size = NonZero::new(size).expect("window size must be non-zero");
 Windows::new(self, size)
 }

@@ -96,7 +96,7 @@ impl<'a> Iterator for Chars<'a> {
 unsafe { self.iter.advance_by(slurp).unwrap_unchecked() };
 }

-NonZero::<usize>::new(remainder).map_or(Ok(()), Err)
+NonZero::new(remainder).map_or(Ok(()), Err)
 }

 #[inline]
@@ -548,7 +548,7 @@ fn array_intoiter_advance_by() {
 assert_eq!(counter.get(), 13);

 let r = it.advance_by(123456);
-assert_eq!(r, Err(NonZero::<usize>::new(123456 - 87).unwrap()));
+assert_eq!(r, Err(NonZero::new(123456 - 87).unwrap()));
 assert_eq!(it.len(), 0);
 assert_eq!(counter.get(), 100);

@@ -558,7 +558,7 @@ fn array_intoiter_advance_by() {
 assert_eq!(counter.get(), 100);

 let r = it.advance_by(10);
-assert_eq!(r, Err(NonZero::<usize>::new(10).unwrap()));
+assert_eq!(r, Err(NonZero::new(10).unwrap()));
 assert_eq!(it.len(), 0);
 assert_eq!(counter.get(), 100);
 }
@@ -601,7 +601,7 @@ fn array_intoiter_advance_back_by() {
 assert_eq!(counter.get(), 13);

 let r = it.advance_back_by(123456);
-assert_eq!(r, Err(NonZero::<usize>::new(123456 - 87).unwrap()));
+assert_eq!(r, Err(NonZero::new(123456 - 87).unwrap()));
 assert_eq!(it.len(), 0);
 assert_eq!(counter.get(), 100);

@@ -611,7 +611,7 @@ fn array_intoiter_advance_back_by() {
 assert_eq!(counter.get(), 100);

 let r = it.advance_back_by(10);
-assert_eq!(r, Err(NonZero::<usize>::new(10).unwrap()));
+assert_eq!(r, Err(NonZero::new(10).unwrap()));
 assert_eq!(it.len(), 0);
 assert_eq!(counter.get(), 100);
 }
@@ -34,10 +34,7 @@ fn test_iterator_chain_advance_by() {
 let mut iter = Unfuse::new(xs).chain(Unfuse::new(ys));
 assert_eq!(iter.advance_by(i), Ok(()));
 assert_eq!(iter.next(), Some(&xs[i]));
-assert_eq!(
-iter.advance_by(100),
-Err(NonZero::<usize>::new(100 - (len - i - 1)).unwrap())
-);
+assert_eq!(iter.advance_by(100), Err(NonZero::new(100 - (len - i - 1)).unwrap()));
 assert_eq!(iter.advance_by(0), Ok(()));
 }

@@ -45,10 +42,7 @@ fn test_iterator_chain_advance_by() {
 let mut iter = Unfuse::new(xs).chain(Unfuse::new(ys));
 assert_eq!(iter.advance_by(xs.len() + i), Ok(()));
 assert_eq!(iter.next(), Some(&ys[i]));
-assert_eq!(
-iter.advance_by(100),
-Err(NonZero::<usize>::new(100 - (ys.len() - i - 1)).unwrap())
-);
+assert_eq!(iter.advance_by(100), Err(NonZero::new(100 - (ys.len() - i - 1)).unwrap()));
 assert_eq!(iter.advance_by(0), Ok(()));
 }

@@ -58,7 +52,7 @@ fn test_iterator_chain_advance_by() {
 assert_eq!(iter.advance_by(0), Ok(()));

 let mut iter = xs.iter().chain(ys);
-assert_eq!(iter.advance_by(len + 1), Err(NonZero::<usize>::new(1).unwrap()));
+assert_eq!(iter.advance_by(len + 1), Err(NonZero::new(1).unwrap()));
 assert_eq!(iter.advance_by(0), Ok(()));
 }

@@ -77,10 +71,7 @@ fn test_iterator_chain_advance_back_by() {
 let mut iter = Unfuse::new(xs).chain(Unfuse::new(ys));
 assert_eq!(iter.advance_back_by(i), Ok(()));
 assert_eq!(iter.next_back(), Some(&ys[ys.len() - i - 1]));
-assert_eq!(
-iter.advance_back_by(100),
-Err(NonZero::<usize>::new(100 - (len - i - 1)).unwrap())
-);
+assert_eq!(iter.advance_back_by(100), Err(NonZero::new(100 - (len - i - 1)).unwrap()));
 assert_eq!(iter.advance_back_by(0), Ok(()));
 }

@@ -90,7 +81,7 @@ fn test_iterator_chain_advance_back_by() {
 assert_eq!(iter.next_back(), Some(&xs[xs.len() - i - 1]));
 assert_eq!(
 iter.advance_back_by(100),
-Err(NonZero::<usize>::new(100 - (xs.len() - i - 1)).unwrap())
+Err(NonZero::new(100 - (xs.len() - i - 1)).unwrap())
 );
 assert_eq!(iter.advance_back_by(0), Ok(()));
 }
@@ -101,7 +92,7 @@ fn test_iterator_chain_advance_back_by() {
 assert_eq!(iter.advance_back_by(0), Ok(()));

 let mut iter = xs.iter().chain(ys);
-assert_eq!(iter.advance_back_by(len + 1), Err(NonZero::<usize>::new(1).unwrap()));
+assert_eq!(iter.advance_back_by(len + 1), Err(NonZero::new(1).unwrap()));
 assert_eq!(iter.advance_back_by(0), Ok(()));
 }
@@ -66,7 +66,7 @@ fn test_iterator_enumerate_advance_by() {
 assert_eq!(it.next(), Some((2, &2)));
 assert_eq!(it.advance_by(2), Ok(()));
 assert_eq!(it.next(), Some((5, &5)));
-assert_eq!(it.advance_by(1), Err(NonZero::<usize>::new(1).unwrap()));
+assert_eq!(it.advance_by(1), Err(NonZero::new(1).unwrap()));
 assert_eq!(it.next(), None);
 }

@@ -72,8 +72,8 @@ fn test_flatten_advance_by() {
 assert_eq!(it.advance_back_by(9), Ok(()));
 assert_eq!(it.next_back(), Some(25));

-assert_eq!(it.advance_by(usize::MAX), Err(NonZero::<usize>::new(usize::MAX - 9).unwrap()));
-assert_eq!(it.advance_back_by(usize::MAX), Err(NonZero::<usize>::new(usize::MAX).unwrap()));
+assert_eq!(it.advance_by(usize::MAX), Err(NonZero::new(usize::MAX - 9).unwrap()));
+assert_eq!(it.advance_back_by(usize::MAX), Err(NonZero::new(usize::MAX).unwrap()));
 assert_eq!(it.advance_by(0), Ok(()));
 assert_eq!(it.advance_back_by(0), Ok(()));
 assert_eq!(it.size_hint(), (0, Some(0)));
@@ -75,14 +75,14 @@ fn test_iterator_skip_nth() {
 #[test]
 fn test_skip_advance_by() {
 assert_eq!((0..0).skip(10).advance_by(0), Ok(()));
-assert_eq!((0..0).skip(10).advance_by(1), Err(NonZero::<usize>::new(1).unwrap()));
+assert_eq!((0..0).skip(10).advance_by(1), Err(NonZero::new(1).unwrap()));
 assert_eq!(
 (0u128..(usize::MAX as u128) + 1).skip(usize::MAX - 10).advance_by(usize::MAX - 5),
-Err(NonZero::<usize>::new(usize::MAX - 16).unwrap())
+Err(NonZero::new(usize::MAX - 16).unwrap())
 );
 assert_eq!((0u128..u128::MAX).skip(usize::MAX - 10).advance_by(20), Ok(()));

-assert_eq!((0..2).skip(1).advance_back_by(10), Err(NonZero::<usize>::new(9).unwrap()));
+assert_eq!((0..2).skip(1).advance_back_by(10), Err(NonZero::new(9).unwrap()));
 assert_eq!((0..0).skip(1).advance_back_by(0), Ok(()));
 }

@@ -79,23 +79,23 @@ fn test_take_advance_by() {
 let mut take = (0..10).take(3);
 assert_eq!(take.advance_by(2), Ok(()));
 assert_eq!(take.next(), Some(2));
-assert_eq!(take.advance_by(1), Err(NonZero::<usize>::new(1).unwrap()));
+assert_eq!(take.advance_by(1), Err(NonZero::new(1).unwrap()));

 assert_eq!((0..0).take(10).advance_by(0), Ok(()));
-assert_eq!((0..0).take(10).advance_by(1), Err(NonZero::<usize>::new(1).unwrap()));
-assert_eq!((0..10).take(4).advance_by(5), Err(NonZero::<usize>::new(1).unwrap()));
+assert_eq!((0..0).take(10).advance_by(1), Err(NonZero::new(1).unwrap()));
+assert_eq!((0..10).take(4).advance_by(5), Err(NonZero::new(1).unwrap()));

 let mut take = (0..10).take(3);
 assert_eq!(take.advance_back_by(2), Ok(()));
 assert_eq!(take.next(), Some(0));
-assert_eq!(take.advance_back_by(1), Err(NonZero::<usize>::new(1).unwrap()));
+assert_eq!(take.advance_back_by(1), Err(NonZero::new(1).unwrap()));

-assert_eq!((0..2).take(1).advance_back_by(10), Err(NonZero::<usize>::new(9).unwrap()));
-assert_eq!((0..0).take(1).advance_back_by(1), Err(NonZero::<usize>::new(1).unwrap()));
+assert_eq!((0..2).take(1).advance_back_by(10), Err(NonZero::new(9).unwrap()));
+assert_eq!((0..0).take(1).advance_back_by(1), Err(NonZero::new(1).unwrap()));
 assert_eq!((0..0).take(1).advance_back_by(0), Ok(()));
 assert_eq!(
 (0..usize::MAX).take(100).advance_back_by(usize::MAX),
-Err(NonZero::<usize>::new(usize::MAX - 100).unwrap())
+Err(NonZero::new(usize::MAX - 100).unwrap())
 );
 }

@@ -314,7 +314,7 @@ fn test_range_advance_by() {

 assert_eq!((r.start, r.end), (1, usize::MAX - 1));

-assert_eq!(Err(NonZero::<usize>::new(2).unwrap()), r.advance_by(usize::MAX));
+assert_eq!(Err(NonZero::new(2).unwrap()), r.advance_by(usize::MAX));

 assert_eq!(Ok(()), r.advance_by(0));
 assert_eq!(Ok(()), r.advance_back_by(0));
@@ -152,14 +152,11 @@ fn test_iterator_advance_by() {
 let mut iter = v.iter();
 assert_eq!(iter.advance_by(i), Ok(()));
 assert_eq!(iter.next().unwrap(), &v[i]);
-assert_eq!(
-iter.advance_by(100),
-Err(NonZero::<usize>::new(100 - (v.len() - 1 - i)).unwrap())
-);
+assert_eq!(iter.advance_by(100), Err(NonZero::new(100 - (v.len() - 1 - i)).unwrap()));
 }

 assert_eq!(v.iter().advance_by(v.len()), Ok(()));
-assert_eq!(v.iter().advance_by(100), Err(NonZero::<usize>::new(100 - v.len()).unwrap()));
+assert_eq!(v.iter().advance_by(100), Err(NonZero::new(100 - v.len()).unwrap()));
 }

 #[test]
@@ -170,14 +167,11 @@ fn test_iterator_advance_back_by() {
 let mut iter = v.iter();
 assert_eq!(iter.advance_back_by(i), Ok(()));
 assert_eq!(iter.next_back().unwrap(), &v[v.len() - 1 - i]);
-assert_eq!(
-iter.advance_back_by(100),
-Err(NonZero::<usize>::new(100 - (v.len() - 1 - i)).unwrap())
-);
+assert_eq!(iter.advance_back_by(100), Err(NonZero::new(100 - (v.len() - 1 - i)).unwrap()));
 }

 assert_eq!(v.iter().advance_back_by(v.len()), Ok(()));
-assert_eq!(v.iter().advance_back_by(100), Err(NonZero::<usize>::new(100 - v.len()).unwrap()));
+assert_eq!(v.iter().advance_back_by(100), Err(NonZero::new(100 - v.len()).unwrap()));
 }

 #[test]
@@ -188,17 +182,11 @@ fn test_iterator_rev_advance_back_by() {
 let mut iter = v.iter().rev();
 assert_eq!(iter.advance_back_by(i), Ok(()));
 assert_eq!(iter.next_back().unwrap(), &v[i]);
-assert_eq!(
-iter.advance_back_by(100),
-Err(NonZero::<usize>::new(100 - (v.len() - 1 - i)).unwrap())
-);
+assert_eq!(iter.advance_back_by(100), Err(NonZero::new(100 - (v.len() - 1 - i)).unwrap()));
 }

 assert_eq!(v.iter().rev().advance_back_by(v.len()), Ok(()));
-assert_eq!(
-v.iter().rev().advance_back_by(100),
-Err(NonZero::<usize>::new(100 - v.len()).unwrap())
-);
+assert_eq!(v.iter().rev().advance_back_by(100), Err(NonZero::new(100 - v.len()).unwrap()));
 }

 #[test]
@@ -466,14 +454,11 @@ fn test_iterator_rev_advance_by() {
 let mut iter = v.iter().rev();
 assert_eq!(iter.advance_by(i), Ok(()));
 assert_eq!(iter.next().unwrap(), &v[v.len() - 1 - i]);
-assert_eq!(
-iter.advance_by(100),
-Err(NonZero::<usize>::new(100 - (v.len() - 1 - i)).unwrap())
-);
+assert_eq!(iter.advance_by(100), Err(NonZero::new(100 - (v.len() - 1 - i)).unwrap()));
 }

 assert_eq!(v.iter().rev().advance_by(v.len()), Ok(()));
-assert_eq!(v.iter().rev().advance_by(100), Err(NonZero::<usize>::new(100 - v.len()).unwrap()));
+assert_eq!(v.iter().rev().advance_by(100), Err(NonZero::new(100 - v.len()).unwrap()));
 }

 #[test]
@@ -4,7 +4,7 @@ use std::mem::size_of;

 #[test]
 fn test_create_nonzero_instance() {
-let _a = unsafe { NonZero::<u32>::new_unchecked(21) };
+let _a = unsafe { NonZero::new_unchecked(21) };
 }

 #[test]
@@ -18,12 +18,12 @@ fn test_match_on_nonzero_option() {
 let a = Some(unsafe { NonZero::<u32>::new_unchecked(42) });
 match a {
 Some(val) => assert_eq!(val.get(), 42),
-None => panic!("unexpected None while matching on Some(NonZeroU32(_))"),
+None => panic!("unexpected None while matching on Some(NonZero(_))"),
 }

 match unsafe { Some(NonZero::<u32>::new_unchecked(43)) } {
 Some(val) => assert_eq!(val.get(), 43),
-None => panic!("unexpected None while matching on Some(NonZeroU32(_))"),
+None => panic!("unexpected None while matching on Some(NonZero(_))"),
 }
 }

@@ -93,7 +93,7 @@ mod atom {
 index: NonZero<u32>, // private
 }

-pub const FOO_ATOM: Atom = Atom { index: unsafe { NonZero::<u32>::new_unchecked(7) } };
+pub const FOO_ATOM: Atom = Atom { index: unsafe { NonZero::new_unchecked(7) } };
 }

 macro_rules! atom {
@@ -113,21 +113,21 @@ fn test_match_nonzero_const_pattern() {

 #[test]
 fn test_from_nonzero() {
-let nz = NonZero::<u32>::new(1).unwrap();
+let nz = NonZero::new(1).unwrap();
 let num: u32 = nz.into();
 assert_eq!(num, 1u32);
 }

 #[test]
 fn test_from_signed_nonzero() {
-let nz = NonZero::<i32>::new(1).unwrap();
+let nz = NonZero::new(1).unwrap();
 let num: i32 = nz.into();
 assert_eq!(num, 1i32);
 }

 #[test]
 fn test_from_str() {
-assert_eq!("123".parse::<NonZero<u8>>(), Ok(NonZero::<u8>::new(123).unwrap()));
+assert_eq!("123".parse::<NonZero<u8>>(), Ok(NonZero::new(123).unwrap()));
 assert_eq!(
 "0".parse::<NonZero<u8>>().err().map(|e| e.kind().clone()),
 Some(IntErrorKind::Zero)
@@ -148,8 +148,8 @@ fn test_from_str() {

 #[test]
 fn test_nonzero_bitor() {
-let nz_alt = NonZero::<u8>::new(0b1010_1010).unwrap();
-let nz_low = NonZero::<u8>::new(0b0000_1111).unwrap();
+let nz_alt = NonZero::new(0b1010_1010).unwrap();
+let nz_low = NonZero::new(0b0000_1111).unwrap();

 let both_nz: NonZero<u8> = nz_alt | nz_low;
 assert_eq!(both_nz.get(), 0b1010_1111);
@@ -171,7 +171,7 @@ fn test_nonzero_bitor() {
 fn test_nonzero_bitor_assign() {
 let mut target = NonZero::<u8>::new(0b1010_1010).unwrap();

-target |= NonZero::<u8>::new(0b0000_1111).unwrap();
+target |= NonZero::new(0b0000_1111).unwrap();
 assert_eq!(target.get(), 0b1010_1111);

 target |= 0b0001_0000;
@@ -183,11 +183,11 @@ fn test_nonzero_bitor_assign() {

 #[test]
 fn test_nonzero_from_int_on_success() {
-assert_eq!(NonZero::<u8>::try_from(5), Ok(NonZero::<u8>::new(5).unwrap()));
-assert_eq!(NonZero::<u32>::try_from(5), Ok(NonZero::<u32>::new(5).unwrap()));
+assert_eq!(NonZero::<u8>::try_from(5), Ok(NonZero::new(5).unwrap()));
+assert_eq!(NonZero::<u32>::try_from(5), Ok(NonZero::new(5).unwrap()));

-assert_eq!(NonZero::<i8>::try_from(-5), Ok(NonZero::<i8>::new(-5).unwrap()));
-assert_eq!(NonZero::<i32>::try_from(-5), Ok(NonZero::<i32>::new(-5).unwrap()));
+assert_eq!(NonZero::<i8>::try_from(-5), Ok(NonZero::new(-5).unwrap()));
+assert_eq!(NonZero::<i32>::try_from(-5), Ok(NonZero::new(-5).unwrap()));
 }

 #[test]
@@ -204,15 +204,15 @@ fn nonzero_const() {
 // test that the methods of `NonZeroX>` are usable in a const context
 // Note: only tests NonZero<u8>

-const NONZERO_U8: NonZero<u8> = unsafe { NonZero::<u8>::new_unchecked(5) };
+const NONZERO_U8: NonZero<u8> = unsafe { NonZero::new_unchecked(5) };

 const GET: u8 = NONZERO_U8.get();
 assert_eq!(GET, 5);

-const ZERO: Option<NonZero<u8>> = NonZero::<u8>::new(0);
+const ZERO: Option<NonZero<u8>> = NonZero::new(0);
 assert!(ZERO.is_none());

-const ONE: Option<NonZero<u8>> = NonZero::<u8>::new(1);
+const ONE: Option<NonZero<u8>> = NonZero::new(1);
 assert!(ONE.is_some());

 /* FIXME(#110395)
@@ -323,7 +323,7 @@ fn nonzero_trailing_zeros() {

 #[test]
 fn test_nonzero_uint_div() {
-let nz = NonZero::<u32>::new(1).unwrap();
+let nz = NonZero::new(1).unwrap();

 let x: u32 = 42u32 / nz;
 assert_eq!(x, 42u32);
@@ -331,7 +331,7 @@ fn test_nonzero_uint_div() {

 #[test]
 fn test_nonzero_uint_rem() {
-let nz = NonZero::<u32>::new(10).unwrap();
+let nz = NonZero::new(10).unwrap();

 let x: u32 = 42u32 % nz;
 assert_eq!(x, 2u32);
@@ -1050,9 +1050,8 @@ fn nonnull_tagged_pointer_with_provenance() {
 /// memory location.
 pub fn pointer(self) -> NonNull<T> {
 // SAFETY: The `addr` guaranteed to have bits set in the Self::ADDRESS_MASK, so the result will be non-null.
-self.0.map_addr(|addr| unsafe {
-NonZero::<usize>::new_unchecked(addr.get() & Self::ADDRESS_MASK)
-})
+self.0
+.map_addr(|addr| unsafe { NonZero::new_unchecked(addr.get() & Self::ADDRESS_MASK) })
 }

 /// Consume this tagged pointer and produce the data it carries.
@@ -1073,7 +1072,7 @@ fn nonnull_tagged_pointer_with_provenance() {
 // ADDRESS_MASK) will always be non-zero. This a property of the type and its
 // construction.
 self.0 = self.0.map_addr(|addr| unsafe {
-NonZero::<usize>::new_unchecked((addr.get() & Self::ADDRESS_MASK) | data)
+NonZero::new_unchecked((addr.get() & Self::ADDRESS_MASK) | data)
 })
 }
 }
@@ -411,7 +411,7 @@ fn result_try_trait_v2_branch() {

 assert_eq!(Ok::<i32, i32>(4).branch(), Continue(4));
 assert_eq!(Err::<i32, i32>(4).branch(), Break(Err(4)));
-let one = NonZero::<u32>::new(1).unwrap();
+let one = NonZero::new(1).unwrap();
 assert_eq!(Ok::<(), NonZero<u32>>(()).branch(), Continue(()));
 assert_eq!(Err::<(), NonZero<u32>>(one).branch(), Break(Err(one)));
 assert_eq!(Ok::<NonZero<u32>, ()>(one).branch(), Continue(one));
@@ -147,7 +147,7 @@ fn test_iterator_advance_by() {
 }

 let mut iter = v.iter();
-assert_eq!(iter.advance_by(v.len() + 1), Err(NonZero::<usize>::new(1).unwrap()));
+assert_eq!(iter.advance_by(v.len() + 1), Err(NonZero::new(1).unwrap()));
 assert_eq!(iter.as_slice(), &[]);

 let mut iter = v.iter();
@@ -169,7 +169,7 @@ fn test_iterator_advance_back_by() {
 }

 let mut iter = v.iter();
-assert_eq!(iter.advance_back_by(v.len() + 1), Err(NonZero::<usize>::new(1).unwrap()));
+assert_eq!(iter.advance_back_by(v.len() + 1), Err(NonZero::new(1).unwrap()));
 assert_eq!(iter.as_slice(), &[]);

 let mut iter = v.iter();
@@ -137,7 +137,7 @@ thread_local! {
 names: fxhash::FxHashMap::default(),
 strings: Vec::new(),
 // Start with a base of 1 to make sure that `NonZeroU32` works.
-sym_base: NonZero::<u32>::new(1).unwrap(),
+sym_base: NonZero::new(1).unwrap(),
 });
 }

@@ -98,7 +98,7 @@ impl Thread {
 }

 pub fn available_parallelism() -> io::Result<NonZero<usize>> {
-unsafe { Ok(NonZero::<usize>::new_unchecked(abi::get_processor_count())) }
+unsafe { Ok(NonZero::new_unchecked(abi::get_processor_count())) }
 }

 pub mod guard {
@@ -38,7 +38,7 @@ impl Key {
 }

 fn from_index(index: usize) -> Self {
-Key(NonZero::<usize>::new(index + 1).unwrap())
+Key(NonZero::new(index + 1).unwrap())
 }

 pub fn as_usize(self) -> usize {
@@ -46,7 +46,7 @@ impl Key {
 }

 pub fn from_usize(index: usize) -> Self {
-Key(NonZero::<usize>::new(index).unwrap())
+Key(NonZero::new(index).unwrap())
 }
 }
@@ -195,7 +195,7 @@ macro_rules! enclave_usercalls_internal_define_usercalls {
 #[inline(always)]
 pub unsafe fn $f($n1: $t1, $n2: $t2, $n3: $t3, $n4: $t4) -> $r {
 ReturnValue::from_registers(stringify!($f), unsafe { do_usercall(
-rtunwrap!(Some, NonZero::<u64>::new(Usercalls::$f as Register)),
+rtunwrap!(Some, NonZero::new(Usercalls::$f as Register)),
 RegisterArgument::into_register($n1),
 RegisterArgument::into_register($n2),
 RegisterArgument::into_register($n3),
@@ -211,7 +211,7 @@ macro_rules! enclave_usercalls_internal_define_usercalls {
 #[inline(always)]
 pub unsafe fn $f($n1: $t1, $n2: $t2, $n3: $t3) -> $r {
 ReturnValue::from_registers(stringify!($f), unsafe { do_usercall(
-rtunwrap!(Some, NonZero::<u64>::new(Usercalls::$f as Register)),
+rtunwrap!(Some, NonZero::new(Usercalls::$f as Register)),
 RegisterArgument::into_register($n1),
 RegisterArgument::into_register($n2),
 RegisterArgument::into_register($n3),
@@ -227,7 +227,7 @@ macro_rules! enclave_usercalls_internal_define_usercalls {
 #[inline(always)]
 pub unsafe fn $f($n1: $t1, $n2: $t2) -> $r {
 ReturnValue::from_registers(stringify!($f), unsafe { do_usercall(
-rtunwrap!(Some, NonZero::<u64>::new(Usercalls::$f as Register)),
+rtunwrap!(Some, NonZero::new(Usercalls::$f as Register)),
 RegisterArgument::into_register($n1),
 RegisterArgument::into_register($n2),
 0,0,
@@ -242,7 +242,7 @@ macro_rules! enclave_usercalls_internal_define_usercalls {
 #[inline(always)]
 pub unsafe fn $f($n1: $t1) -> $r {
 ReturnValue::from_registers(stringify!($f), unsafe { do_usercall(
-rtunwrap!(Some, NonZero::<u64>::new(Usercalls::$f as Register)),
+rtunwrap!(Some, NonZero::new(Usercalls::$f as Register)),
 RegisterArgument::into_register($n1),
 0,0,0,
 return_type_is_abort!($r)
@@ -256,7 +256,7 @@ macro_rules! enclave_usercalls_internal_define_usercalls {
 #[inline(always)]
 pub unsafe fn $f() -> $r {
 ReturnValue::from_registers(stringify!($f), unsafe { do_usercall(
-rtunwrap!(Some, NonZero::<u64>::new(Usercalls::$f as Register)),
+rtunwrap!(Some, NonZero::new(Usercalls::$f as Register)),
 0,0,0,0,
 return_type_is_abort!($r)
 ) })
@@ -53,8 +53,7 @@ impl RwLock {
 // Another thread has passed the lock to us
 } else {
 // No waiting writers, acquire the read lock
-*rguard.lock_var_mut() =
-NonZero::<usize>::new(rguard.lock_var().map_or(0, |n| n.get()) + 1);
+*rguard.lock_var_mut() = NonZero::new(rguard.lock_var().map_or(0, |n| n.get()) + 1);
 }
 }

@@ -68,8 +67,7 @@ impl RwLock {
 false
 } else {
 // No waiting writers, acquire the read lock
-*rguard.lock_var_mut() =
-NonZero::<usize>::new(rguard.lock_var().map_or(0, |n| n.get()) + 1);
+*rguard.lock_var_mut() = NonZero::new(rguard.lock_var().map_or(0, |n| n.get()) + 1);
 true
 }
 }
@@ -111,7 +109,7 @@ impl RwLock {
 mut rguard: SpinMutexGuard<'_, WaitVariable<Option<NonZero<usize>>>>,
 wguard: SpinMutexGuard<'_, WaitVariable<bool>>,
 ) {
-*rguard.lock_var_mut() = NonZero::<usize>::new(rguard.lock_var().unwrap().get() - 1);
+*rguard.lock_var_mut() = NonZero::new(rguard.lock_var().unwrap().get() - 1);
 if rguard.lock_var().is_some() {
 // There are other active readers
 } else {
@@ -252,7 +252,7 @@ impl WaitQueue {
 entry_guard.wake = true;
 }

-if let Some(count) = NonZero::<usize>::new(count) {
+if let Some(count) = NonZero::new(count) {
 Ok(WaitGuard { mutex_guard: Some(guard), notified_tcs: NotifiedTcs::All { count } })
 } else {
 Err(guard)
@@ -46,7 +46,7 @@ impl Thread {

 pub fn available_parallelism() -> io::Result<NonZero<usize>> {
 // UEFI is single threaded
-Ok(NonZero::<usize>::new(1).unwrap())
+Ok(NonZero::new(1).unwrap())
 }

 pub mod guard {
@@ -338,7 +338,7 @@ pub fn available_parallelism() -> io::Result<NonZero<usize>> {
 // some old MIPS kernels were buggy and zero-initialized the mask if
 // none was explicitly set.
 // In that case we use the sysconf fallback.
-if let Some(count) = NonZero::<usize>::new(count) {
+if let Some(count) = NonZero::new(count) {
 return Ok(count)
 }
 }
@@ -351,7 +351,7 @@ pub fn available_parallelism() -> io::Result<NonZero<usize>> {
 let count = cpus as usize;
 // Cover the unusual situation where we were able to get the quota but not the affinity mask
 let count = count.min(quota);
-Ok(unsafe { NonZero::<usize>::new_unchecked(count) })
+Ok(unsafe { NonZero::new_unchecked(count) })
 }
 }
 } else if #[cfg(any(
@@ -375,7 +375,7 @@ pub fn available_parallelism() -> io::Result<NonZero<usize>> {
 ) == 0 {
 let count = libc::CPU_COUNT(&set) as usize;
 if count > 0 {
-return Ok(NonZero::<usize>::new_unchecked(count));
+return Ok(NonZero::new_unchecked(count));
 }
 }
 }
@@ -397,7 +397,7 @@ pub fn available_parallelism() -> io::Result<NonZero<usize>> {
 }
 }
 libc::_cpuset_destroy(set);
-if let Some(count) = NonZero::<usize>::new(count) {
+if let Some(count) = NonZero::new(count) {
 return Ok(count);
 }
 }
@@ -433,7 +433,7 @@ pub fn available_parallelism() -> io::Result<NonZero<usize>> {
 }
 }

-Ok(unsafe { NonZero::<usize>::new_unchecked(cpus as usize) })
+Ok(unsafe { NonZero::new_unchecked(cpus as usize) })
 } else if #[cfg(target_os = "nto")] {
 unsafe {
 use libc::_syspage_ptr;
@@ -441,7 +441,7 @@ pub fn available_parallelism() -> io::Result<NonZero<usize>> {
 Err(io::const_io_error!(io::ErrorKind::NotFound, "No syspage available"))
 } else {
 let cpus = (*_syspage_ptr).num_cpu;
-NonZero::<usize>::new(cpus as usize)
+NonZero::new(cpus as usize)
 .ok_or(io::const_io_error!(io::ErrorKind::NotFound, "The number of hardware threads is not known for the target platform"))
 }
 }
@@ -456,7 +456,7 @@ pub fn available_parallelism() -> io::Result<NonZero<usize>> {
 return Err(io::const_io_error!(io::ErrorKind::NotFound, "The number of hardware threads is not known for the target platform"));
 }

-Ok(NonZero::<usize>::new_unchecked(sinfo.cpu_count as usize))
+Ok(NonZero::new_unchecked(sinfo.cpu_count as usize))
 }
 } else {
 // FIXME: implement on vxWorks, Redox, l4re
@@ -21,12 +21,12 @@ use crate::vec;

 use crate::iter;

-/// This is the const equivalent to `NonZero::<u16>::new(n).unwrap()`
+/// This is the const equivalent to `NonZero::new(n).unwrap()`
 ///
 /// FIXME: This can be removed once `Option::unwrap` is stably const.
 /// See the `const_option` feature (#67441).
 const fn non_zero_u16(n: u16) -> NonZero<u16> {
-match NonZero::<u16>::new(n) {
+match NonZero::new(n) {
 Some(n) => n,
 None => panic!("called `unwrap` on a `None` value"),
 }
@@ -121,7 +121,7 @@ pub fn available_parallelism() -> io::Result<NonZero<usize>> {
 io::ErrorKind::NotFound,
 "The number of hardware threads is not known for the target platform",
 )),
-cpus => Ok(unsafe { NonZero::<usize>::new_unchecked(cpus) }),
+cpus => Ok(unsafe { NonZero::new_unchecked(cpus) }),
 }
 }

@@ -134,7 +134,7 @@ impl Thread {

 pub fn available_parallelism() -> io::Result<NonZero<usize>> {
 // We're unicore right now.
-Ok(unsafe { NonZero::<usize>::new_unchecked(1) })
+Ok(unsafe { NonZero::new_unchecked(1) })
 }

 pub mod guard {
@@ -26,7 +26,7 @@ impl WStrUnits<'_> {
 pub fn peek(&self) -> Option<NonZero<u16>> {
 // SAFETY: It's always safe to read the current item because we don't
 // ever move out of the array's bounds.
-unsafe { NonZero::<u16>::new(*self.lpwstr.as_ptr()) }
+unsafe { NonZero::new(*self.lpwstr.as_ptr()) }
 }

 /// Advance the iterator while `predicate` returns true.
@@ -1188,7 +1188,7 @@ impl ThreadId {
 };

 match COUNTER.compare_exchange_weak(last, id, Relaxed, Relaxed) {
-Ok(_) => return ThreadId(NonZero::<u64>::new(id).unwrap()),
+Ok(_) => return ThreadId(NonZero::new(id).unwrap()),
 Err(id) => last = id,
 }
 }
@@ -1207,7 +1207,7 @@ impl ThreadId {

 *counter = id;
 drop(counter);
-ThreadId(NonZero::<u64>::new(id).unwrap())
+ThreadId(NonZero::new(id).unwrap())
 }
 }
 }
@@ -21,7 +21,7 @@ pub struct BorTag(NonZero<u64>);

 impl BorTag {
 pub fn new(i: u64) -> Option<Self> {
-NonZero::<u64>::new(i).map(BorTag)
+NonZero::new(i).map(BorTag)
 }

 pub fn get(&self) -> u64 {
@@ -184,7 +184,7 @@ impl GlobalStateInner {
 borrow_tracker_method,
 next_ptr_tag: BorTag::one(),
 base_ptr_tags: FxHashMap::default(),
-next_call_id: NonZero::<u64>::new(1).unwrap(),
+next_call_id: NonZero::new(1).unwrap(),
 protected_tags: FxHashMap::default(),
 tracked_pointer_tags,
 tracked_call_ids,
@@ -206,7 +206,7 @@ impl GlobalStateInner {
 if self.tracked_call_ids.contains(&call_id) {
 machine.emit_diagnostic(NonHaltingDiagnostic::CreatedCallId(call_id));
 }
-self.next_call_id = NonZero::<u64>::new(call_id.get() + 1).unwrap();
+self.next_call_id = NonZero::new(call_id.get() + 1).unwrap();
 FrameState { call_id, protected_tags: SmallVec::new() }
 }

@@ -30,7 +30,7 @@ macro_rules! declare_id {
 impl SyncId for $name {
 // Panics if `id == 0`.
 fn from_u32(id: u32) -> Self {
-Self(std::num::NonZero::<u32>::new(id).unwrap())
+Self(std::num::NonZero::new(id).unwrap())
 }
 fn to_u32(&self) -> u32 {
 self.0.get()
@@ -43,7 +43,7 @@ macro_rules! declare_id {
 // therefore, need to shift by one when converting from an index
 // into a vector.
 let shifted_idx = u32::try_from(idx).unwrap().checked_add(1).unwrap();
-$name(std::num::NonZero::<u32>::new(shifted_idx).unwrap())
+$name(std::num::NonZero::new(shifted_idx).unwrap())
 }
 fn index(self) -> usize {
 // See the comment in `Self::new`.
@@ -477,7 +477,7 @@ trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
 let [id, show_unnamed] = this.check_shim(abi, Abi::Rust, link_name, args)?;
 let id = this.read_scalar(id)?.to_u64()?;
 let show_unnamed = this.read_scalar(show_unnamed)?.to_bool()?;
-if let Some(id) = std::num::NonZero::<u64>::new(id) {
+if let Some(id) = std::num::NonZero::new(id) {
 this.print_borrow_state(AllocId(id), show_unnamed)?;
 }
 }