Auto merge of #89512 - Manishearth:rollup-meh9x7r, r=Manishearth

Rollup of 14 pull requests

Successful merges:

 - #86434 (Add `Ipv6Addr::is_benchmarking`)
 - #86828 (const fn for option copied, take & replace)
 - #87679 (BTree: refine some comments)
 - #87910 (Mark unsafe methods NonZero*::unchecked_(add|mul) as const.)
 - #88286 (Remove unnecessary unsafe block in `process_unix`)
 - #88305 (Manual Debug for Unix ExitCode ExitStatus ExitStatusError)
 - #88353 (Partially stabilize `array_methods`)
 - #88370 (Add missing `# Panics` section to `Vec` method)
 - #88481 (Remove some feature gates)
 - #89138 (Fix link in Ipv6Addr::to_ipv4 docs)
 - #89401 (Add truncate note to Vec::resize)
 - #89467 (Fix typos in rustdoc/lints)
 - #89472 (Only register `WSACleanup` if `WSAStartup` is actually ever called)
 - #89505 (Add regression test for spurious const error with NLL)

Failed merges:

r? `@ghost`
`@rustbot` modify labels: rollup
bors 2021-10-04 07:25:50 +00:00
commit 44593aeb13
30 changed files with 251 additions and 135 deletions

View File

@ -144,7 +144,7 @@ impl<R> MemberConstraintSet<'tcx, R>
where
R: Copy + Hash + Eq,
{
crate fn all_indices(&self) -> impl Iterator<Item = NllMemberConstraintIndex> {
crate fn all_indices(&self) -> impl Iterator<Item = NllMemberConstraintIndex> + '_ {
self.constraints.indices()
}

View File

@ -497,7 +497,7 @@ impl<'tcx> RegionInferenceContext<'tcx> {
}
/// Returns an iterator over all the region indices.
pub fn regions(&self) -> impl Iterator<Item = RegionVid> {
pub fn regions(&self) -> impl Iterator<Item = RegionVid> + '_ {
self.definitions.indices()
}

View File

@ -7,7 +7,6 @@
//! This API is completely unstable and subject to change.
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
#![feature(allow_internal_unstable)]
#![feature(array_windows)]
#![feature(associated_type_bounds)]
#![feature(auto_traits)]

View File

@ -2,9 +2,8 @@
#![feature(bench_black_box)]
#![feature(extend_one)]
#![feature(iter_zip)]
#![feature(unboxed_closures)]
#![feature(min_specialization)]
#![feature(test)]
#![feature(fn_traits)]
pub mod bit_set;
pub mod vec;

View File

@ -3,9 +3,9 @@ use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
use std::fmt;
use std::fmt::Debug;
use std::hash::Hash;
use std::iter::{self, FromIterator};
use std::iter::FromIterator;
use std::marker::PhantomData;
use std::ops::{Index, IndexMut, Range, RangeBounds};
use std::ops::{Index, IndexMut, RangeBounds};
use std::slice;
use std::vec;
@ -518,8 +518,6 @@ impl<I: Idx, T: fmt::Debug> fmt::Debug for IndexVec<I, T> {
}
}
pub type Enumerated<I, J> = iter::Map<iter::Enumerate<J>, IntoIdx<I>>;
impl<I: Idx, T> IndexVec<I, T> {
#[inline]
pub fn new() -> Self {
@ -596,8 +594,10 @@ impl<I: Idx, T> IndexVec<I, T> {
}
#[inline]
pub fn into_iter_enumerated(self) -> Enumerated<I, vec::IntoIter<T>> {
self.raw.into_iter().enumerate().map(IntoIdx { _marker: PhantomData })
pub fn into_iter_enumerated(
self,
) -> impl DoubleEndedIterator<Item = (I, T)> + ExactSizeIterator {
self.raw.into_iter().enumerate().map(|(n, t)| (I::new(n), t))
}
#[inline]
@ -606,13 +606,15 @@ impl<I: Idx, T> IndexVec<I, T> {
}
#[inline]
pub fn iter_enumerated(&self) -> Enumerated<I, slice::Iter<'_, T>> {
self.raw.iter().enumerate().map(IntoIdx { _marker: PhantomData })
pub fn iter_enumerated(
&self,
) -> impl DoubleEndedIterator<Item = (I, &T)> + ExactSizeIterator + '_ {
self.raw.iter().enumerate().map(|(n, t)| (I::new(n), t))
}
#[inline]
pub fn indices(&self) -> iter::Map<Range<usize>, IntoIdx<I>> {
(0..self.len()).map(IntoIdx { _marker: PhantomData })
pub fn indices(&self) -> impl DoubleEndedIterator<Item = I> + ExactSizeIterator + 'static {
(0..self.len()).map(|n| I::new(n))
}
#[inline]
@ -621,8 +623,10 @@ impl<I: Idx, T> IndexVec<I, T> {
}
#[inline]
pub fn iter_enumerated_mut(&mut self) -> Enumerated<I, slice::IterMut<'_, T>> {
self.raw.iter_mut().enumerate().map(IntoIdx { _marker: PhantomData })
pub fn iter_enumerated_mut(
&mut self,
) -> impl DoubleEndedIterator<Item = (I, &mut T)> + ExactSizeIterator + '_ {
self.raw.iter_mut().enumerate().map(|(n, t)| (I::new(n), t))
}
#[inline]
@ -638,7 +642,7 @@ impl<I: Idx, T> IndexVec<I, T> {
&'a mut self,
range: R,
) -> impl Iterator<Item = (I, T)> + 'a {
self.raw.drain(range).enumerate().map(IntoIdx { _marker: PhantomData })
self.raw.drain(range).enumerate().map(|(n, t)| (I::new(n), t))
}
#[inline]
@ -832,36 +836,5 @@ impl<'a, I: Idx, T> IntoIterator for &'a mut IndexVec<I, T> {
}
}
pub struct IntoIdx<I: Idx> {
_marker: PhantomData<fn(&I)>,
}
impl<I: Idx, T> FnOnce<((usize, T),)> for IntoIdx<I> {
type Output = (I, T);
extern "rust-call" fn call_once(self, ((n, t),): ((usize, T),)) -> Self::Output {
(I::new(n), t)
}
}
impl<I: Idx, T> FnMut<((usize, T),)> for IntoIdx<I> {
extern "rust-call" fn call_mut(&mut self, ((n, t),): ((usize, T),)) -> Self::Output {
(I::new(n), t)
}
}
impl<I: Idx> FnOnce<(usize,)> for IntoIdx<I> {
type Output = I;
extern "rust-call" fn call_once(self, (n,): (usize,)) -> Self::Output {
I::new(n)
}
}
impl<I: Idx> FnMut<(usize,)> for IntoIdx<I> {
extern "rust-call" fn call_mut(&mut self, (n,): (usize,)) -> Self::Output {
I::new(n)
}
}
#[cfg(test)]
mod tests;
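
Editor's note (not part of the commit): the hunk above drops the `Enumerated` alias and the hand-written `IntoIdx` callable, which needed the unstable `unboxed_closures`/`fn_traits` features, in favour of `impl Trait` return types and ordinary closures. A minimal illustrative sketch of that pattern, with a plain `u32` standing in for rustc's `Idx` newtypes, might look like this:

```rust
// Sketch only: `impl Trait` in the return position lets a closure do the
// usize-to-index conversion, so no hand-rolled callable struct is needed.
struct TypedVec<T> {
    raw: Vec<T>,
}

impl<T> TypedVec<T> {
    // `u32` is a stand-in for the index newtype; `+ '_` ties the iterator
    // to the borrow of `self`, mirroring the signatures in the diff above.
    fn iter_enumerated(&self) -> impl DoubleEndedIterator<Item = (u32, &T)> + ExactSizeIterator + '_ {
        self.raw.iter().enumerate().map(|(n, t)| (n as u32, t))
    }
}

fn main() {
    let v = TypedVec { raw: vec!["a", "b"] };
    for (i, s) in v.iter_enumerated() {
        println!("{}: {}", i, s);
    }
}
```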

View File

@ -26,7 +26,6 @@
//! This API is completely unstable and subject to change.
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
#![cfg_attr(test, feature(test))]
#![feature(array_windows)]
#![feature(bool_to_option)]
#![feature(box_patterns)]

View File

@ -41,13 +41,11 @@
#![feature(once_cell)]
#![feature(min_specialization)]
#![feature(trusted_len)]
#![feature(test)]
#![feature(in_band_lifetimes)]
#![feature(crate_visibility_modifier)]
#![feature(associated_type_bounds)]
#![feature(rustc_attrs)]
#![feature(half_open_range_patterns)]
#![feature(exclusive_range_pattern)]
#![feature(control_flow_enum)]
#![feature(associated_type_defaults)]
#![feature(iter_zip)]

View File

@ -1,10 +1,10 @@
// This module contains some shared code for encoding and decoding various
// things from the `ty` module, and in particular implements support for
// "shorthands" which allow to have pointers back into the already encoded
// stream instead of re-encoding the same thing twice.
//
// The functionality in here is shared between persisting to crate metadata and
// persisting to incr. comp. caches.
//! This module contains some shared code for encoding and decoding various
//! things from the `ty` module, and in particular implements support for
//! "shorthands" which allow to have pointers back into the already encoded
//! stream instead of re-encoding the same thing twice.
//!
//! The functionality in here is shared between persisting to crate metadata and
//! persisting to incr. comp. caches.
use crate::arena::ArenaAllocatable;
use crate::infer::canonical::{CanonicalVarInfo, CanonicalVarInfos};

View File

@ -1,6 +1,5 @@
use core::slice::Iter;
use rustc_data_structures::fx::FxHashMap;
use rustc_index::vec::{Enumerated, IndexVec};
use rustc_index::vec::IndexVec;
use rustc_middle::mir::*;
use rustc_middle::ty::{ParamEnv, Ty, TyCtxt};
use rustc_span::Span;
@ -337,7 +336,9 @@ impl MovePathLookup {
/// An enumerated iterator of `local`s and their associated
/// `MovePathIndex`es.
pub fn iter_locals_enumerated(&self) -> Enumerated<Local, Iter<'_, MovePathIndex>> {
pub fn iter_locals_enumerated(
&self,
) -> impl DoubleEndedIterator<Item = (Local, &MovePathIndex)> + ExactSizeIterator {
self.locals.iter_enumerated()
}
}

View File

@ -15,7 +15,6 @@
#![feature(exhaustive_patterns)]
#![feature(min_specialization)]
#![feature(step_trait)]
#![feature(unchecked_math)]
use std::path::{Path, PathBuf};

View File

@ -6,8 +6,6 @@
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
#![feature(control_flow_enum)]
#![feature(half_open_range_patterns)]
#![feature(exclusive_range_pattern)]
#![feature(nll)]
#![recursion_limit = "256"]

View File

@ -19,16 +19,16 @@ mod entry;
pub use entry::{Entry, OccupiedEntry, OccupiedError, VacantEntry};
use Entry::*;
/// Minimum number of elements in nodes that are not a root.
/// Minimum number of elements in a node that is not a root.
/// We might temporarily have fewer elements during methods.
pub(super) const MIN_LEN: usize = node::MIN_LEN_AFTER_SPLIT;
// A tree in a `BTreeMap` is a tree in the `node` module with additional invariants:
// - Keys must appear in ascending order (according to the key's type).
// - If the root node is internal, it must contain at least 1 element.
// - Every non-leaf node contains at least 1 element (has at least 2 children).
// - Every non-root node contains at least MIN_LEN elements.
//
// An empty map may be represented both by the absence of a root node or by a
// An empty map is represented either by the absence of a root node or by a
// root node that is an empty leaf.
/// A map based on a [B-Tree].
@ -1735,8 +1735,8 @@ impl<'a, K: 'a, V: 'a> DrainFilterInner<'a, K, V> {
pub(super) fn size_hint(&self) -> (usize, Option<usize>) {
// In most of the btree iterators, `self.length` is the number of elements
// yet to be visited. Here, it includes elements that were visited and that
// the predicate decided not to drain. Making this upper bound more accurate
// requires maintaining an extra field and is not worth while.
// the predicate decided not to drain. Making this upper bound more tight
// during iteration would require an extra field.
(0, Some(*self.length))
}
}

View File

@ -440,8 +440,7 @@ impl<K, V> Handle<NodeRef<marker::Dying, K, V, marker::Leaf>, marker::Edge> {
/// - The given edge must not have been previously returned by counterpart
/// `deallocating_next_back`.
/// - The returned KV handle is only valid to access the key and value,
/// and only valid until the next call to this method or counterpart
/// `deallocating_next_back`.
/// and only valid until the next call to a `deallocating_` method.
unsafe fn deallocating_next(
self,
) -> Option<(Self, Handle<NodeRef<marker::Dying, K, V, marker::LeafOrInternal>, marker::KV>)>
@ -470,8 +469,7 @@ impl<K, V> Handle<NodeRef<marker::Dying, K, V, marker::Leaf>, marker::Edge> {
/// - The given edge must not have been previously returned by counterpart
/// `deallocating_next`.
/// - The returned KV handle is only valid to access the key and value,
/// and only valid until the next call to this method or counterpart
/// `deallocating_next`.
/// and only valid until the next call to a `deallocating_` method.
unsafe fn deallocating_next_back(
self,
) -> Option<(Self, Handle<NodeRef<marker::Dying, K, V, marker::LeafOrInternal>, marker::KV>)>

View File

@ -574,7 +574,7 @@ impl<K, V> NodeRef<marker::Owned, K, V, marker::LeafOrInternal> {
/// no cleanup is done on any of the keys, values and other children.
/// This decreases the height by 1 and is the opposite of `push_internal_level`.
///
/// Requires exclusive access to the `Root` object but not to the root node;
/// Requires exclusive access to the `NodeRef` object but not to the root node;
/// it will not invalidate other handles or references to the root node.
///
/// Panics if there is no internal level, i.e., if the root node is a leaf.

View File

@ -2137,6 +2137,7 @@ impl<T: Clone, A: Allocator> Vec<T, A> {
/// in order to be able to clone the passed value.
/// If you need more flexibility (or want to rely on [`Default`] instead of
/// [`Clone`]), use [`Vec::resize_with`].
/// If you only need to resize to a smaller size, use [`Vec::truncate`].
///
/// # Examples
///
@ -2188,7 +2189,12 @@ impl<T: Clone, A: Allocator> Vec<T, A> {
/// Copies elements from `src` range to the end of the vector.
///
/// ## Examples
/// # Panics
///
/// Panics if the starting point is greater than the end point or if
/// the end point is greater than the length of the vector.
///
/// # Examples
///
/// ```
/// let mut vec = vec![0, 1, 2, 3, 4];

View File

@ -368,14 +368,14 @@ impl<T, const N: usize> [T; N] {
}
/// Returns a slice containing the entire array. Equivalent to `&s[..]`.
#[unstable(feature = "array_methods", issue = "76118")]
pub fn as_slice(&self) -> &[T] {
#[stable(feature = "array_as_slice", since = "1.57.0")]
pub const fn as_slice(&self) -> &[T] {
self
}
/// Returns a mutable slice containing the entire array. Equivalent to
/// `&mut s[..]`.
#[unstable(feature = "array_methods", issue = "76118")]
#[stable(feature = "array_as_slice", since = "1.57.0")]
pub fn as_mut_slice(&mut self) -> &mut [T] {
self
}
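
Editor's note (not part of the commit): with `array_methods` partially stabilized here, these two methods can be called on stable. A small illustrative use, assuming a toolchain that includes this stabilization (1.57.0 per the attributes above):

```rust
fn main() {
    let mut arr = [1, 2, 3];
    // `as_slice` gives a `&[T]` view of the whole array (now also a `const fn`).
    let total: i32 = arr.as_slice().iter().sum();
    // `as_mut_slice` gives the mutable counterpart.
    arr.as_mut_slice()[0] = total;
    assert_eq!(arr, [6, 2, 3]);
}
```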

View File

@ -91,6 +91,7 @@
#![feature(const_maybe_uninit_assume_init)]
#![feature(const_option)]
#![feature(const_pin)]
#![feature(const_replace)]
#![feature(const_ptr_offset)]
#![feature(const_ptr_offset_from)]
#![feature(const_ptr_read)]

View File

@ -379,7 +379,7 @@ macro_rules! nonzero_unsigned_operations {
/// ```
#[unstable(feature = "nonzero_ops", issue = "84186")]
#[inline]
pub unsafe fn unchecked_add(self, other: $Int) -> $Ty {
pub const unsafe fn unchecked_add(self, other: $Int) -> $Ty {
// SAFETY: The caller ensures there is no overflow.
unsafe { $Ty::new_unchecked(self.get().unchecked_add(other)) }
}
@ -750,7 +750,7 @@ macro_rules! nonzero_unsigned_signed_operations {
/// ```
#[unstable(feature = "nonzero_ops", issue = "84186")]
#[inline]
pub unsafe fn unchecked_mul(self, other: $Ty) -> $Ty {
pub const unsafe fn unchecked_mul(self, other: $Ty) -> $Ty {
// SAFETY: The caller ensures there is no overflow.
unsafe { $Ty::new_unchecked(self.get().unchecked_mul(other.get())) }
}
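
Editor's note (not part of the commit): a rough sketch of what marking these methods `const` enables on nightly, with the `nonzero_ops` feature still required since the methods themselves remain unstable:

```rust
#![feature(nonzero_ops)]

use std::num::NonZeroU8;

// The caller must still guarantee that the result neither overflows nor
// becomes zero; the only new capability is evaluation in a const context.
const THREE: NonZeroU8 = unsafe { NonZeroU8::new_unchecked(1).unchecked_add(2) };

fn main() {
    assert_eq!(THREE.get(), 3);
}
```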

View File

@ -544,8 +544,8 @@ impl<T> Option<T> {
/// ```
#[must_use = "if you intended to assert that this has a value, consider `.unwrap()` instead"]
#[inline]
#[rustc_const_stable(feature = "const_option", since = "1.48.0")]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_option", since = "1.48.0")]
pub const fn is_some(&self) -> bool {
matches!(*self, Some(_))
}
@ -564,8 +564,8 @@ impl<T> Option<T> {
#[must_use = "if you intended to assert that this doesn't have a value, consider \
`.and_then(|_| panic!(\"`Option` had a value when expected `None`\"))` instead"]
#[inline]
#[rustc_const_stable(feature = "const_option", since = "1.48.0")]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_option", since = "1.48.0")]
pub const fn is_none(&self) -> bool {
!self.is_some()
}
@ -1318,8 +1318,10 @@ impl<T> Option<T> {
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn take(&mut self) -> Option<T> {
mem::take(self)
#[rustc_const_unstable(feature = "const_option", issue = "67441")]
pub const fn take(&mut self) -> Option<T> {
// FIXME replace `mem::replace` by `mem::take` when the latter is const ready
mem::replace(self, None)
}
/// Replaces the actual value in the option by the value given in parameter,
@ -1340,8 +1342,9 @@ impl<T> Option<T> {
/// assert_eq!(old, None);
/// ```
#[inline]
#[rustc_const_unstable(feature = "const_option", issue = "67441")]
#[stable(feature = "option_replace", since = "1.31.0")]
pub fn replace(&mut self, value: T) -> Option<T> {
pub const fn replace(&mut self, value: T) -> Option<T> {
mem::replace(self, Some(value))
}
@ -1446,8 +1449,14 @@ impl<T: Copy> Option<&T> {
/// assert_eq!(copied, Some(12));
/// ```
#[stable(feature = "copied", since = "1.35.0")]
pub fn copied(self) -> Option<T> {
self.map(|&t| t)
#[rustc_const_unstable(feature = "const_option", issue = "67441")]
pub const fn copied(self) -> Option<T> {
// FIXME: this implementation, which sidesteps using `Option::map` since it's not const
// ready yet, should be reverted when possible to avoid code repetition
match self {
Some(&v) => Some(v),
None => None,
}
}
}

View File

@ -367,6 +367,19 @@ fn option_const() {
const IS_NONE: bool = OPTION.is_none();
assert!(!IS_NONE);
const COPIED: Option<usize> = OPTION.as_ref().copied();
assert_eq!(COPIED, OPTION);
}
#[test]
const fn option_const_mut() {
// test that the methods of `Option` that take mutable references are usable in a const context
let mut option: Option<usize> = Some(32);
let _take = option.take();
let _replace = option.replace(42);
}
#[test]

View File

@ -340,6 +340,30 @@ impl IpAddr {
}
}
/// Returns [`true`] if this address is in a range designated for benchmarking.
///
/// See the documentation for [`Ipv4Addr::is_benchmarking()`] and
/// [`Ipv6Addr::is_benchmarking()`] for more details.
///
/// # Examples
///
/// ```
/// #![feature(ip)]
///
/// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
///
/// assert_eq!(IpAddr::V4(Ipv4Addr::new(198, 19, 255, 255)).is_benchmarking(), true);
/// assert_eq!(IpAddr::V6(Ipv6Addr::new(0x2001, 0x2, 0, 0, 0, 0, 0, 0)).is_benchmarking(), true);
/// ```
#[unstable(feature = "ip", issue = "27709")]
#[inline]
pub const fn is_benchmarking(&self) -> bool {
match self {
IpAddr::V4(ip) => ip.is_benchmarking(),
IpAddr::V6(ip) => ip.is_benchmarking(),
}
}
/// Returns [`true`] if this address is an [`IPv4` address], and [`false`]
/// otherwise.
///
@ -1449,6 +1473,28 @@ impl Ipv6Addr {
(self.segments()[0] == 0x2001) && (self.segments()[1] == 0xdb8)
}
/// Returns [`true`] if this is an address reserved for benchmarking (`2001:2::/48`).
///
/// This property is defined in [IETF RFC 5180], where it is mistakenly specified as covering the range `2001:0200::/48`.
/// This is corrected in [IETF RFC Errata 1752] to `2001:0002::/48`.
///
/// [IETF RFC 5180]: https://tools.ietf.org/html/rfc5180
/// [IETF RFC Errata 1752]: https://www.rfc-editor.org/errata_search.php?eid=1752
///
/// ```
/// #![feature(ip)]
///
/// use std::net::Ipv6Addr;
///
/// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc613, 0x0).is_benchmarking(), false);
/// assert_eq!(Ipv6Addr::new(0x2001, 0x2, 0, 0, 0, 0, 0, 0).is_benchmarking(), true);
/// ```
#[unstable(feature = "ip", issue = "27709")]
#[inline]
pub const fn is_benchmarking(&self) -> bool {
(self.segments()[0] == 0x2001) && (self.segments()[1] == 0x2) && (self.segments()[2] == 0)
}
/// Returns [`true`] if the address is a globally routable unicast address.
///
/// The following return false:
@ -1589,7 +1635,7 @@ impl Ipv6Addr {
/// `::a.b.c.d` and `::ffff:a.b.c.d` become `a.b.c.d`
/// All addresses *not* starting with either all zeroes or `::ffff` will return `None`.
///
/// [IPv4 address]: Ipv4Addr
/// [`IPv4` address]: Ipv4Addr
/// [IPv4-compatible]: Ipv6Addr#ipv4-compatible-ipv6-addresses
/// [IPv4-mapped]: Ipv6Addr#ipv4-mapped-ipv6-addresses
/// [IETF RFC 4291 section 2.5.5.1]: https://tools.ietf.org/html/rfc4291#section-2.5.5.1

View File

@ -224,6 +224,7 @@ fn ip_properties() {
let global: u8 = 1 << 2;
let multicast: u8 = 1 << 3;
let doc: u8 = 1 << 4;
let benchmarking: u8 = 1 << 5;
if ($mask & unspec) == unspec {
assert!(ip!($s).is_unspecified());
@ -254,6 +255,12 @@ fn ip_properties() {
} else {
assert!(!ip!($s).is_documentation());
}
if ($mask & benchmarking) == benchmarking {
assert!(ip!($s).is_benchmarking());
} else {
assert!(!ip!($s).is_benchmarking());
}
}};
}
@ -262,6 +269,7 @@ fn ip_properties() {
let global: u8 = 1 << 2;
let multicast: u8 = 1 << 3;
let doc: u8 = 1 << 4;
let benchmarking: u8 = 1 << 5;
check!("0.0.0.0", unspec);
check!("0.0.0.1");
@ -280,9 +288,9 @@ fn ip_properties() {
check!("239.255.255.255", global | multicast);
check!("255.255.255.255");
// make sure benchmarking addresses are not global
check!("198.18.0.0");
check!("198.18.54.2");
check!("198.19.255.255");
check!("198.18.0.0", benchmarking);
check!("198.18.54.2", benchmarking);
check!("198.19.255.255", benchmarking);
// make sure addresses reserved for protocol assignment are not global
check!("192.0.0.0");
check!("192.0.0.255");
@ -313,6 +321,7 @@ fn ip_properties() {
check!("ff08::", multicast);
check!("ff0e::", global | multicast);
check!("2001:db8:85a3::8a2e:370:7334", doc);
check!("2001:2::ac32:23ff:21", global | benchmarking);
check!("102:304:506:708:90a:b0c:d0e:f10", global);
}
@ -467,21 +476,22 @@ fn ipv6_properties() {
assert_eq!(&ip!($s).octets(), octets);
assert_eq!(Ipv6Addr::from(*octets), ip!($s));
let unspecified: u16 = 1 << 0;
let loopback: u16 = 1 << 1;
let unique_local: u16 = 1 << 2;
let global: u16 = 1 << 3;
let unicast_link_local: u16 = 1 << 4;
let unicast_global: u16 = 1 << 7;
let documentation: u16 = 1 << 8;
let multicast_interface_local: u16 = 1 << 9;
let multicast_link_local: u16 = 1 << 10;
let multicast_realm_local: u16 = 1 << 11;
let multicast_admin_local: u16 = 1 << 12;
let multicast_site_local: u16 = 1 << 13;
let multicast_organization_local: u16 = 1 << 14;
let multicast_global: u16 = 1 << 15;
let multicast: u16 = multicast_interface_local
let unspecified: u32 = 1 << 0;
let loopback: u32 = 1 << 1;
let unique_local: u32 = 1 << 2;
let global: u32 = 1 << 3;
let unicast_link_local: u32 = 1 << 4;
let unicast_global: u32 = 1 << 7;
let documentation: u32 = 1 << 8;
let benchmarking: u32 = 1 << 16;
let multicast_interface_local: u32 = 1 << 9;
let multicast_link_local: u32 = 1 << 10;
let multicast_realm_local: u32 = 1 << 11;
let multicast_admin_local: u32 = 1 << 12;
let multicast_site_local: u32 = 1 << 13;
let multicast_organization_local: u32 = 1 << 14;
let multicast_global: u32 = 1 << 15;
let multicast: u32 = multicast_interface_local
| multicast_admin_local
| multicast_global
| multicast_link_local
@ -524,6 +534,11 @@ fn ipv6_properties() {
} else {
assert!(!ip!($s).is_documentation());
}
if ($mask & benchmarking) == benchmarking {
assert!(ip!($s).is_benchmarking());
} else {
assert!(!ip!($s).is_benchmarking());
}
if ($mask & multicast) != 0 {
assert!(ip!($s).multicast_scope().is_some());
assert!(ip!($s).is_multicast());
@ -562,20 +577,21 @@ fn ipv6_properties() {
}
}
let unspecified: u16 = 1 << 0;
let loopback: u16 = 1 << 1;
let unique_local: u16 = 1 << 2;
let global: u16 = 1 << 3;
let unicast_link_local: u16 = 1 << 4;
let unicast_global: u16 = 1 << 7;
let documentation: u16 = 1 << 8;
let multicast_interface_local: u16 = 1 << 9;
let multicast_link_local: u16 = 1 << 10;
let multicast_realm_local: u16 = 1 << 11;
let multicast_admin_local: u16 = 1 << 12;
let multicast_site_local: u16 = 1 << 13;
let multicast_organization_local: u16 = 1 << 14;
let multicast_global: u16 = 1 << 15;
let unspecified: u32 = 1 << 0;
let loopback: u32 = 1 << 1;
let unique_local: u32 = 1 << 2;
let global: u32 = 1 << 3;
let unicast_link_local: u32 = 1 << 4;
let unicast_global: u32 = 1 << 7;
let documentation: u32 = 1 << 8;
let benchmarking: u32 = 1 << 16;
let multicast_interface_local: u32 = 1 << 9;
let multicast_link_local: u32 = 1 << 10;
let multicast_realm_local: u32 = 1 << 11;
let multicast_admin_local: u32 = 1 << 12;
let multicast_site_local: u32 = 1 << 13;
let multicast_organization_local: u32 = 1 << 14;
let multicast_global: u32 = 1 << 15;
check!("::", &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], unspecified);
@ -671,6 +687,12 @@ fn ipv6_properties() {
documentation
);
check!(
"2001:2::ac32:23ff:21",
&[0x20, 1, 0, 2, 0, 0, 0, 0, 0, 0, 0xac, 0x32, 0x23, 0xff, 0, 0x21],
global | unicast_global | benchmarking
);
check!(
"102:304:506:708:90a:b0c:d0e:f10",
&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
@ -874,6 +896,9 @@ fn ipv6_const() {
const IS_DOCUMENTATION: bool = IP_ADDRESS.is_documentation();
assert!(!IS_DOCUMENTATION);
const IS_BENCHMARKING: bool = IP_ADDRESS.is_benchmarking();
assert!(!IS_BENCHMARKING);
const IS_UNICAST_GLOBAL: bool = IP_ADDRESS.is_unicast_global();
assert!(!IS_UNICAST_GLOBAL);

View File

@ -457,9 +457,15 @@ impl fmt::Debug for Command {
}
}
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
#[derive(PartialEq, Eq, Clone, Copy)]
pub struct ExitCode(u8);
impl fmt::Debug for ExitCode {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("unix_exit_status").field(&self.0).finish()
}
}
impl ExitCode {
pub const SUCCESS: ExitCode = ExitCode(EXIT_SUCCESS as _);
pub const FAILURE: ExitCode = ExitCode(EXIT_FAILURE as _);

View File

@ -552,8 +552,7 @@ impl Process {
use crate::os::unix::io::FromRawFd;
use crate::sys_common::FromInner;
// Safety: If `pidfd` is nonnegative, we assume it's valid and otherwise unowned.
let pidfd = (pidfd >= 0)
.then(|| PidFd::from_inner(unsafe { sys::fd::FileDesc::from_raw_fd(pidfd) }));
let pidfd = (pidfd >= 0).then(|| PidFd::from_inner(sys::fd::FileDesc::from_raw_fd(pidfd)));
Process { pid, status: None, pidfd }
}
@ -607,9 +606,15 @@ impl Process {
}
/// Unix exit statuses
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
#[derive(PartialEq, Eq, Clone, Copy)]
pub struct ExitStatus(c_int);
impl fmt::Debug for ExitStatus {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("unix_wait_status").field(&self.0).finish()
}
}
impl ExitStatus {
pub fn new(status: c_int) -> ExitStatus {
ExitStatus(status)
@ -683,7 +688,7 @@ impl fmt::Display for ExitStatus {
}
}
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
#[derive(PartialEq, Eq, Clone, Copy)]
pub struct ExitStatusError(NonZero_c_int);
impl Into<ExitStatus> for ExitStatusError {
@ -692,6 +697,12 @@ impl Into<ExitStatus> for ExitStatusError {
}
}
impl fmt::Debug for ExitStatusError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("unix_wait_status").field(&self.0).finish()
}
}
impl ExitStatusError {
pub fn code(self) -> Option<NonZeroI32> {
ExitStatus(self.0.into()).code().map(|st| st.try_into().unwrap())
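
Editor's note (not part of the commit): the practical effect of these manual `Debug` impls is that the raw platform payload is labelled instead of printing as a bare integer. A rough Unix-only illustration; the exact rendering of the public wrapper type may differ:

```rust
use std::process::Command;

fn main() -> std::io::Result<()> {
    let status = Command::new("true").status()?;
    // With the manual impl above, `{:?}` shows something like
    // `ExitStatus(unix_wait_status(0))` rather than a nested bare integer.
    println!("{:?}", status);
    Ok(())
}
```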

View File

@ -2,13 +2,13 @@
use crate::cmp;
use crate::io::{self, IoSlice, IoSliceMut, Read};
use crate::lazy::SyncOnceCell;
use crate::mem;
use crate::net::{Shutdown, SocketAddr};
use crate::os::windows::io::{
AsRawSocket, AsSocket, BorrowedSocket, FromRawSocket, IntoRawSocket, OwnedSocket, RawSocket,
};
use crate::ptr;
use crate::sync::Once;
use crate::sys;
use crate::sys::c;
use crate::sys_common::net;
@ -29,26 +29,31 @@ pub mod netc {
pub struct Socket(OwnedSocket);
static INIT: Once = Once::new();
static WSA_CLEANUP: SyncOnceCell<unsafe extern "system" fn() -> i32> = SyncOnceCell::new();
/// Checks whether the Windows socket interface has been started already, and
/// if not, starts it.
pub fn init() {
INIT.call_once(|| unsafe {
let _ = WSA_CLEANUP.get_or_init(|| unsafe {
let mut data: c::WSADATA = mem::zeroed();
let ret = c::WSAStartup(
0x202, // version 2.2
&mut data,
);
assert_eq!(ret, 0);
// Only register `WSACleanup` if `WSAStartup` is actually ever called.
// Workaround to prevent linking to `WS2_32.dll` when no network functionality is used.
// See issue #85441.
c::WSACleanup
});
}
pub fn cleanup() {
if INIT.is_completed() {
// only close the socket interface if it has actually been started
// only perform cleanup if network functionality was actually initialized
if let Some(cleanup) = WSA_CLEANUP.get() {
unsafe {
c::WSACleanup();
cleanup();
}
}
}
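
Editor's note (not part of the commit): the shape of this fix, register a cleanup hook only if initialization ever runs, is a general pattern. A minimal portable sketch using the stable `OnceLock` (the later stabilized form of the `SyncOnceCell` used above) and hypothetical start/stop functions in place of `WSAStartup`/`WSACleanup`:

```rust
use std::sync::OnceLock;

// Hypothetical subsystem start/stop functions for illustration only.
fn subsystem_start() { println!("started"); }
fn subsystem_stop() { println!("stopped"); }

// The stop hook is stored only if `init` actually runs, so `cleanup`
// stays a no-op for programs that never touch the subsystem.
static STOP_HOOK: OnceLock<fn()> = OnceLock::new();

fn init() {
    let _ = STOP_HOOK.get_or_init(|| {
        subsystem_start();
        subsystem_stop as fn()
    });
}

fn cleanup() {
    if let Some(stop) = STOP_HOOK.get() {
        stop();
    }
}

fn main() {
    cleanup(); // prints nothing: the subsystem was never initialized
    init();
    init();    // started only once
    cleanup(); // now actually stops it
}
```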

View File

@ -70,7 +70,7 @@ This lint **warns by default**. This lint detects when [intra-doc links] from pu
For example:
```rust
#![warn(rustdoc::private_intra_doc_links)] // note: unecessary - warns by default.
#![warn(rustdoc::private_intra_doc_links)] // note: unnecessary - warns by default.
/// [private]
pub fn public() {}
@ -229,7 +229,7 @@ This lint **warns by default**. It detects code block attributes in
documentation examples that have potentially mis-typed values. For example:
```rust
#![warn(rustdoc::invalid_codeblock_attributes)] // note: unecessary - warns by default.
#![warn(rustdoc::invalid_codeblock_attributes)] // note: unnecessary - warns by default.
/// Example.
///
@ -348,7 +348,7 @@ This lint is **warn-by-default**. It detects URLs which are not links.
For example:
```rust
#![warn(rustdoc::bare_urls)] // note: unecessary - warns by default.
#![warn(rustdoc::bare_urls)] // note: unnecessary - warns by default.
/// http://example.org
/// [http://example.net]

View File

@ -0,0 +1,9 @@
# only-windows-msvc
-include ../../run-make-fulldeps/tools.mk
# Tests that WS2_32.dll is not unnecessarily linked, see issue #85441
all:
$(RUSTC) empty.rs
objdump -p $(TMPDIR)/empty.exe | $(CGREP) -v -i "WS2_32.dll"

View File

@ -0,0 +1 @@
fn main() {}

View File

@ -0,0 +1,8 @@
// Regression test for issue #55825
// Tests that we don't emit a spurious warning in NLL mode
#![feature(nll)]
const fn no_dyn_trait_ret() -> &'static dyn std::fmt::Debug { &() } //~ ERROR const
fn main() { }

View File

@ -0,0 +1,12 @@
error[E0658]: trait objects in const fn are unstable
--> $DIR/issue-55825-const-fn.rs:6:32
|
LL | const fn no_dyn_trait_ret() -> &'static dyn std::fmt::Debug { &() }
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
= note: see issue #57563 <https://github.com/rust-lang/rust/issues/57563> for more information
= help: add `#![feature(const_fn_trait_bound)]` to the crate attributes to enable
error: aborting due to previous error
For more information about this error, try `rustc --explain E0658`.