Auto merge of #54668 - RalfJung:use-maybe-uninit, r=SimonSapin
Use MaybeUninit in libcore

All code by @japaric. This re-submits the second half of https://github.com/rust-lang/rust/pull/53508 (the first half is at https://github.com/rust-lang/rust/pull/54667). This is likely the half containing the perf regression.
Commit 75d937c49b
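The whole series is one mechanical rewrite: wherever libcore conjured an uninitialized `T` by value with `mem::uninitialized()`, it now keeps the uninitialized bytes inside a `MaybeUninit<T>` and only produces a `T` once the memory has actually been written. A minimal sketch of the before/after shape, using the names under which this API later stabilized (`uninit`, `as_mut_ptr`, `assume_init`) rather than the pre-stabilization `uninitialized`/`get_mut`/`into_inner` spelled out in the diff below:

```rust
use std::mem::MaybeUninit;
use std::ptr;

/// Read a `T` through scratch space, the way `ptr::read` does in this diff.
///
/// Safety: `src` must point to a valid, initialized `T`.
unsafe fn read_via_scratch<T>(src: *const T) -> T {
    // Old: `let mut tmp: T = mem::uninitialized();`, which is instant UB
    // for types like `bool` or `&T`. New: the uninitialized bytes live only
    // inside `MaybeUninit`, which also never runs `T`'s destructor.
    let mut tmp = MaybeUninit::<T>::uninit();
    ptr::copy_nonoverlapping(src, tmp.as_mut_ptr(), 1);
    // The copy initialized `tmp`, so extracting the value is now sound.
    tmp.assume_init()
}

fn main() {
    let x = String::from("hi");
    let y = unsafe { read_via_scratch(&x) };
    assert_eq!(y, "hi");
    std::mem::forget(x); // avoid a double drop of the duplicated String
}
```

The point is that `tmp` is never a `T` until the copy completes, so no invalid `T` ever exists, which is exactly what `mem::uninitialized::<bool>()` could not guarantee.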
@@ -9,7 +9,7 @@
 // except according to those terms.
 
 use fmt::{Formatter, Result, LowerExp, UpperExp, Display, Debug};
-use mem;
+use mem::MaybeUninit;
 use num::flt2dec;
 
 // Don't inline this so callers don't use the stack space this function
@@ -20,11 +20,11 @@ fn float_to_decimal_common_exact<T>(fmt: &mut Formatter, num: &T,
     where T: flt2dec::DecodableFloat
 {
     unsafe {
-        let mut buf: [u8; 1024] = mem::uninitialized(); // enough for f32 and f64
-        let mut parts: [flt2dec::Part; 4] = mem::uninitialized();
+        let mut buf = MaybeUninit::<[u8; 1024]>::uninitialized(); // enough for f32 and f64
+        let mut parts = MaybeUninit::<[flt2dec::Part; 4]>::uninitialized();
         let formatted = flt2dec::to_exact_fixed_str(flt2dec::strategy::grisu::format_exact,
                                                     *num, sign, precision,
-                                                    false, &mut buf, &mut parts);
+                                                    false, buf.get_mut(), parts.get_mut());
         fmt.pad_formatted_parts(&formatted)
     }
 }
@@ -38,10 +38,11 @@ fn float_to_decimal_common_shortest<T>(fmt: &mut Formatter, num: &T,
 {
     unsafe {
         // enough for f32 and f64
-        let mut buf: [u8; flt2dec::MAX_SIG_DIGITS] = mem::uninitialized();
-        let mut parts: [flt2dec::Part; 4] = mem::uninitialized();
+        let mut buf = MaybeUninit::<[u8; flt2dec::MAX_SIG_DIGITS]>::uninitialized();
+        let mut parts = MaybeUninit::<[flt2dec::Part; 4]>::uninitialized();
         let formatted = flt2dec::to_shortest_str(flt2dec::strategy::grisu::format_shortest, *num,
-                                                 sign, precision, false, &mut buf, &mut parts);
+                                                 sign, precision, false, buf.get_mut(),
+                                                 parts.get_mut());
         fmt.pad_formatted_parts(&formatted)
     }
 }
@@ -75,11 +76,11 @@ fn float_to_exponential_common_exact<T>(fmt: &mut Formatter, num: &T,
     where T: flt2dec::DecodableFloat
 {
     unsafe {
-        let mut buf: [u8; 1024] = mem::uninitialized(); // enough for f32 and f64
-        let mut parts: [flt2dec::Part; 6] = mem::uninitialized();
+        let mut buf = MaybeUninit::<[u8; 1024]>::uninitialized(); // enough for f32 and f64
+        let mut parts = MaybeUninit::<[flt2dec::Part; 6]>::uninitialized();
         let formatted = flt2dec::to_exact_exp_str(flt2dec::strategy::grisu::format_exact,
                                                   *num, sign, precision,
-                                                  upper, &mut buf, &mut parts);
+                                                  upper, buf.get_mut(), parts.get_mut());
         fmt.pad_formatted_parts(&formatted)
     }
 }
@@ -94,11 +95,11 @@ fn float_to_exponential_common_shortest<T>(fmt: &mut Formatter,
 {
     unsafe {
         // enough for f32 and f64
-        let mut buf: [u8; flt2dec::MAX_SIG_DIGITS] = mem::uninitialized();
-        let mut parts: [flt2dec::Part; 6] = mem::uninitialized();
+        let mut buf = MaybeUninit::<[u8; flt2dec::MAX_SIG_DIGITS]>::uninitialized();
+        let mut parts = MaybeUninit::<[flt2dec::Part; 6]>::uninitialized();
         let formatted = flt2dec::to_shortest_exp_str(flt2dec::strategy::grisu::format_shortest,
                                                      *num, sign, (0, 0), upper,
-                                                     &mut buf, &mut parts);
+                                                     buf.get_mut(), parts.get_mut());
         fmt.pad_formatted_parts(&formatted)
     }
 }
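All four float-formatting hunks share one shape: reserve an uninitialized byte buffer and `Part` array on the stack, let flt2dec write into them, and pad the result. A stand-alone sketch of that shape, with a hypothetical `fill` standing in for the flt2dec entry points (modern method names; the buffer is zeroed here because handing out a `&mut [u8; 1024]` to uninitialized bytes, as `get_mut()` does above, was itself later reworked):

```rust
use std::mem::MaybeUninit;

// Hypothetical stand-in for the flt2dec entry points: writes a prefix of
// `buf` and reports how many bytes it wrote.
fn fill(buf: &mut [u8]) -> usize {
    let digits = b"3.14";
    buf[..digits.len()].copy_from_slice(digits);
    digits.len()
}

fn main() {
    // Reserving the scratch space costs nothing and initializes nothing.
    let mut buf = MaybeUninit::<[u8; 1024]>::uninit();
    // Zero it before taking a reference; `write` returns a `&mut [u8; 1024]`
    // to the now-initialized array.
    let buf = buf.write([0u8; 1024]);
    let n = fill(buf);
    assert_eq!(&buf[..n], b"3.14");
}
```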
@@ -950,7 +950,7 @@ impl<T> ManuallyDrop<T> {
     /// ManuallyDrop::new(Box::new(()));
     /// ```
     #[stable(feature = "manually_drop", since = "1.20.0")]
-    #[inline]
+    #[inline(always)]
     pub const fn new(value: T) -> ManuallyDrop<T> {
         ManuallyDrop { value }
     }
@@ -967,7 +967,7 @@ impl<T> ManuallyDrop<T> {
     /// let _: Box<()> = ManuallyDrop::into_inner(x); // This drops the `Box`.
     /// ```
     #[stable(feature = "manually_drop", since = "1.20.0")]
-    #[inline]
+    #[inline(always)]
     pub const fn into_inner(slot: ManuallyDrop<T>) -> T {
         slot.value
     }
@@ -1015,7 +1015,7 @@ impl<T: ?Sized> ManuallyDrop<T> {
 #[stable(feature = "manually_drop", since = "1.20.0")]
 impl<T: ?Sized> Deref for ManuallyDrop<T> {
     type Target = T;
-    #[inline]
+    #[inline(always)]
     fn deref(&self) -> &T {
         &self.value
     }
@@ -1023,7 +1023,7 @@ impl<T: ?Sized> Deref for ManuallyDrop<T> {
 
 #[stable(feature = "manually_drop", since = "1.20.0")]
 impl<T: ?Sized> DerefMut for ManuallyDrop<T> {
-    #[inline]
+    #[inline(always)]
     fn deref_mut(&mut self) -> &mut T {
         &mut self.value
     }
@@ -1044,6 +1044,7 @@ impl<T> MaybeUninit<T> {
     /// Note that dropping a `MaybeUninit` will never call `T`'s drop code.
     /// It is your responsibility to make sure `T` gets dropped if it got initialized.
     #[unstable(feature = "maybe_uninit", issue = "53491")]
+    #[inline(always)]
     pub const fn new(val: T) -> MaybeUninit<T> {
         MaybeUninit { value: ManuallyDrop::new(val) }
     }
@@ -1053,6 +1054,7 @@ impl<T> MaybeUninit<T> {
     /// Note that dropping a `MaybeUninit` will never call `T`'s drop code.
     /// It is your responsibility to make sure `T` gets dropped if it got initialized.
     #[unstable(feature = "maybe_uninit", issue = "53491")]
+    #[inline(always)]
     pub const fn uninitialized() -> MaybeUninit<T> {
         MaybeUninit { uninit: () }
     }
@@ -1066,6 +1068,7 @@ impl<T> MaybeUninit<T> {
     /// Note that dropping a `MaybeUninit` will never call `T`'s drop code.
     /// It is your responsibility to make sure `T` gets dropped if it got initialized.
     #[unstable(feature = "maybe_uninit", issue = "53491")]
+    #[inline]
     pub fn zeroed() -> MaybeUninit<T> {
         let mut u = MaybeUninit::<T>::uninitialized();
         unsafe {
@@ -1076,6 +1079,7 @@ impl<T> MaybeUninit<T> {
 
     /// Set the value of the `MaybeUninit`. This overwrites any previous value without dropping it.
     #[unstable(feature = "maybe_uninit", issue = "53491")]
+    #[inline(always)]
     pub fn set(&mut self, val: T) {
         unsafe {
             self.value = ManuallyDrop::new(val);
@@ -1091,6 +1095,7 @@ impl<T> MaybeUninit<T> {
     /// It is up to the caller to guarantee that the `MaybeUninit` really is in an initialized
     /// state, otherwise this will immediately cause undefined behavior.
     #[unstable(feature = "maybe_uninit", issue = "53491")]
+    #[inline(always)]
     pub unsafe fn into_inner(self) -> T {
         ManuallyDrop::into_inner(self.value)
     }
@@ -1102,6 +1107,7 @@ impl<T> MaybeUninit<T> {
     /// It is up to the caller to guarantee that the `MaybeUninit` really is in an initialized
     /// state, otherwise this will immediately cause undefined behavior.
     #[unstable(feature = "maybe_uninit", issue = "53491")]
+    #[inline(always)]
     pub unsafe fn get_ref(&self) -> &T {
         &*self.value
     }
@@ -1113,6 +1119,7 @@ impl<T> MaybeUninit<T> {
     /// It is up to the caller to guarantee that the `MaybeUninit` really is in an initialized
     /// state, otherwise this will immediately cause undefined behavior.
     #[unstable(feature = "maybe_uninit", issue = "53491")]
+    #[inline(always)]
    pub unsafe fn get_mut(&mut self) -> &mut T {
         &mut *self.value
     }
@@ -1120,6 +1127,7 @@ impl<T> MaybeUninit<T> {
     /// Get a pointer to the contained value. Reading from this pointer will be undefined
     /// behavior unless the `MaybeUninit` is initialized.
     #[unstable(feature = "maybe_uninit", issue = "53491")]
+    #[inline(always)]
     pub fn as_ptr(&self) -> *const T {
         unsafe { &*self.value as *const T }
     }
@@ -1127,6 +1135,7 @@ impl<T> MaybeUninit<T> {
     /// Get a mutable pointer to the contained value. Reading from this pointer will be undefined
     /// behavior unless the `MaybeUninit` is initialized.
     #[unstable(feature = "maybe_uninit", issue = "53491")]
+    #[inline(always)]
     pub fn as_mut_ptr(&mut self) -> *mut T {
         unsafe { &mut *self.value as *mut T }
     }
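The doc comments in these hunks pin down the contract the rest of the PR relies on: dropping a `MaybeUninit` never runs `T`'s destructor, `set` overwrites without dropping, and `get_ref`/`get_mut`/`into_inner` are only sound once the value has been initialized. A small demonstration of the drop behavior, again in the stabilized spelling (where `set` became `write` and `into_inner` became `assume_init`):

```rust
use std::mem::MaybeUninit;

struct Noisy(&'static str);
impl Drop for Noisy {
    fn drop(&mut self) {
        println!("dropping {}", self.0);
    }
}

fn main() {
    // Dropping a `MaybeUninit` never calls `T`'s drop code...
    let leaked = MaybeUninit::new(Noisy("leaked"));
    drop(leaked); // prints nothing: the Noisy inside is leaked

    // ...so whoever initialized it must extract and drop the value.
    let mut slot = MaybeUninit::<Noisy>::uninit();
    slot.write(Noisy("extracted")); // like this commit's `set`
    let value = unsafe { slot.assume_init() }; // like `into_inner`
    drop(value); // prints "dropping extracted"
}
```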
@@ -79,7 +79,7 @@ use ops::{CoerceUnsized, DispatchFromDyn};
 use fmt;
 use hash;
 use marker::{PhantomData, Unsize};
-use mem;
+use mem::{self, MaybeUninit};
 use nonzero::NonZero;
 
 use cmp::Ordering::{self, Less, Equal, Greater};
@@ -295,17 +295,14 @@ pub const fn null_mut<T>() -> *mut T { 0 as *mut T }
 #[inline]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub unsafe fn swap<T>(x: *mut T, y: *mut T) {
-    // Give ourselves some scratch space to work with
-    let mut tmp: T = mem::uninitialized();
+    // Give ourselves some scratch space to work with.
+    // We do not have to worry about drops: `MaybeUninit` does nothing when dropped.
+    let mut tmp = MaybeUninit::<T>::uninitialized();
 
     // Perform the swap
-    copy_nonoverlapping(x, &mut tmp, 1);
+    copy_nonoverlapping(x, tmp.as_mut_ptr(), 1);
     copy(y, x, 1); // `x` and `y` may overlap
-    copy_nonoverlapping(&tmp, y, 1);
-
-    // y and t now point to the same thing, but we need to completely forget `tmp`
-    // because it's no longer relevant.
-    mem::forget(tmp);
+    copy_nonoverlapping(tmp.get_ref(), y, 1);
 }
 
 /// Swaps `count * size_of::<T>()` bytes between the two regions of memory
@@ -392,8 +389,8 @@ unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
     while i + block_size <= len {
         // Create some uninitialized memory as scratch space
         // Declaring `t` here avoids aligning the stack when this loop is unused
-        let mut t: Block = mem::uninitialized();
-        let t = &mut t as *mut _ as *mut u8;
+        let mut t = mem::MaybeUninit::<Block>::uninitialized();
+        let t = t.as_mut_ptr() as *mut u8;
         let x = x.add(i);
         let y = y.add(i);
 
@@ -407,10 +404,10 @@ unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
 
     if i < len {
         // Swap any remaining bytes
-        let mut t: UnalignedBlock = mem::uninitialized();
+        let mut t = mem::MaybeUninit::<UnalignedBlock>::uninitialized();
         let rem = len - i;
 
-        let t = &mut t as *mut _ as *mut u8;
+        let t = t.as_mut_ptr() as *mut u8;
         let x = x.add(i);
         let y = y.add(i);
 
@@ -575,9 +572,9 @@ pub unsafe fn replace<T>(dst: *mut T, mut src: T) -> T {
 #[inline]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub unsafe fn read<T>(src: *const T) -> T {
-    let mut tmp: T = mem::uninitialized();
-    copy_nonoverlapping(src, &mut tmp, 1);
-    tmp
+    let mut tmp = MaybeUninit::<T>::uninitialized();
+    copy_nonoverlapping(src, tmp.as_mut_ptr(), 1);
+    tmp.into_inner()
 }
 
 /// Reads the value from `src` without moving it. This leaves the
@@ -642,11 +639,11 @@ pub unsafe fn read<T>(src: *const T) -> T {
 #[inline]
 #[stable(feature = "ptr_unaligned", since = "1.17.0")]
 pub unsafe fn read_unaligned<T>(src: *const T) -> T {
-    let mut tmp: T = mem::uninitialized();
+    let mut tmp = MaybeUninit::<T>::uninitialized();
     copy_nonoverlapping(src as *const u8,
-                        &mut tmp as *mut T as *mut u8,
+                        tmp.as_mut_ptr() as *mut u8,
                         mem::size_of::<T>());
-    tmp
+    tmp.into_inner()
 }
 
 /// Overwrites a memory location with the given value without reading or
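The `swap` hunk shows the payoff most clearly: with a plain `T` temporary, the old code had to `mem::forget(tmp)` after copying its bytes out, or `tmp`'s destructor would run on a value that now also lives at `y`. Because dropping a `MaybeUninit` runs no destructor, the forget dance disappears. The same shape in the stabilized spelling:

```rust
use std::mem::MaybeUninit;
use std::ptr;

/// Same shape as the new `ptr::swap` above, modern names.
///
/// Safety: `x` and `y` must be valid for reads and writes; they may overlap.
unsafe fn swap_ptrs<T>(x: *mut T, y: *mut T) {
    // Scratch space; no `mem::forget` needed afterwards, because dropping
    // a `MaybeUninit` never touches the `T` bytes inside it.
    let mut tmp = MaybeUninit::<T>::uninit();
    ptr::copy_nonoverlapping(x, tmp.as_mut_ptr(), 1);
    ptr::copy(y, x, 1); // `x` and `y` may overlap
    ptr::copy_nonoverlapping(tmp.as_ptr(), y, 1);
}

fn main() {
    let mut a = vec![1];
    let mut b = vec![2];
    unsafe { swap_ptrs(&mut a, &mut b) };
    assert_eq!((a, b), (vec![2], vec![1]));
}
```

`read` and `read_unaligned` get the matching treatment: copy into `tmp.as_mut_ptr()`, then move the now-initialized value out with this PR's `into_inner` (today's `assume_init`).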
@@ -9,7 +9,7 @@
 // except according to those terms.
 
 use cmp;
-use mem;
+use mem::{self, MaybeUninit};
 use ptr;
 
 /// Rotation is much faster if it has access to a little bit of memory. This
@@ -26,12 +26,6 @@ union RawArray<T> {
 }
 
 impl<T> RawArray<T> {
-    fn new() -> Self {
-        unsafe { mem::uninitialized() }
-    }
-    fn ptr(&self) -> *mut T {
-        unsafe { &self.typed as *const T as *mut T }
-    }
     fn cap() -> usize {
         if mem::size_of::<T>() == 0 {
             usize::max_value()
@@ -88,8 +82,8 @@ pub unsafe fn ptr_rotate<T>(mut left: usize, mid: *mut T, mut right: usize) {
         }
     }
 
-    let rawarray = RawArray::new();
-    let buf = rawarray.ptr();
+    let mut rawarray = MaybeUninit::<RawArray<T>>::uninitialized();
+    let buf = &mut (*rawarray.as_mut_ptr()).typed as *mut [T; 2] as *mut T;
 
     let dim = mid.sub(left).add(right);
     if left <= right {
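`RawArray::new()` materialized a whole uninitialized union by value; the replacement keeps it behind `MaybeUninit` and projects a raw pointer to the `typed` field instead. A sketch of that projection, where the union is a simplified stand-in for `RawArray`, and `ptr::addr_of_mut!` is the modern way to take the field's address without creating a reference to uninitialized memory (which the `&mut (*...).typed` in the hunk still does):

```rust
use std::mem::MaybeUninit;
use std::ptr;

// Simplified stand-in for rotate's `RawArray`: a union used purely as
// correctly-aligned scratch space.
#[allow(dead_code)]
union RawArray<T: Copy> {
    typed: [T; 2],
    _align: u64,
}

fn main() {
    let mut rawarray = MaybeUninit::<RawArray<u8>>::uninit();
    // Project a raw pointer to the `typed` field without ever reading the
    // (still uninitialized) union.
    let buf: *mut u8 =
        unsafe { ptr::addr_of_mut!((*rawarray.as_mut_ptr()).typed) as *mut u8 };
    unsafe {
        buf.write(7); // initialize the first element
        assert_eq!(buf.read(), 7);
    }
}
```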
@@ -17,7 +17,7 @@
 //! stable sorting implementation.
 
 use cmp;
-use mem;
+use mem::{self, MaybeUninit};
 use ptr;
 
 /// When dropped, copies from `src` into `dest`.
@@ -226,14 +226,14 @@ fn partition_in_blocks<T, F>(v: &mut [T], pivot: &T, is_less: &mut F) -> usize
     let mut block_l = BLOCK;
     let mut start_l = ptr::null_mut();
     let mut end_l = ptr::null_mut();
-    let mut offsets_l: [u8; BLOCK] = unsafe { mem::uninitialized() };
+    let mut offsets_l = MaybeUninit::<[u8; BLOCK]>::uninitialized();
 
     // The current block on the right side (from `r.sub(block_r)` to `r`).
     let mut r = unsafe { l.add(v.len()) };
     let mut block_r = BLOCK;
     let mut start_r = ptr::null_mut();
     let mut end_r = ptr::null_mut();
-    let mut offsets_r: [u8; BLOCK] = unsafe { mem::uninitialized() };
+    let mut offsets_r = MaybeUninit::<[u8; BLOCK]>::uninitialized();
 
     // FIXME: When we get VLAs, try creating one array of length `min(v.len(), 2 * BLOCK)` rather
     // than two fixed-size arrays of length `BLOCK`. VLAs might be more cache-efficient.
@@ -272,8 +272,8 @@ fn partition_in_blocks<T, F>(v: &mut [T], pivot: &T, is_less: &mut F) -> usize
 
         if start_l == end_l {
             // Trace `block_l` elements from the left side.
-            start_l = offsets_l.as_mut_ptr();
-            end_l = offsets_l.as_mut_ptr();
+            start_l = offsets_l.as_mut_ptr() as *mut u8;
+            end_l = offsets_l.as_mut_ptr() as *mut u8;
             let mut elem = l;
 
             for i in 0..block_l {
@@ -288,8 +288,8 @@ fn partition_in_blocks<T, F>(v: &mut [T], pivot: &T, is_less: &mut F) -> usize
 
         if start_r == end_r {
             // Trace `block_r` elements from the right side.
-            start_r = offsets_r.as_mut_ptr();
-            end_r = offsets_r.as_mut_ptr();
+            start_r = offsets_r.as_mut_ptr() as *mut u8;
+            end_r = offsets_r.as_mut_ptr() as *mut u8;
             let mut elem = r;
 
             for i in 0..block_r {
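The extra `as *mut u8` casts are the one subtlety in the sort hunks: on the old `[u8; BLOCK]` array, `as_mut_ptr()` was the slice method and returned a pointer to the first element, but on `MaybeUninit<[u8; BLOCK]>` it is the method from this commit and returns a pointer to the whole array. A compact illustration of the type difference:

```rust
use std::mem::MaybeUninit;

fn main() {
    const BLOCK: usize = 128;
    let mut offsets = MaybeUninit::<[u8; BLOCK]>::uninit();

    // On a plain array, `.as_mut_ptr()` resolves to the slice method and
    // already yields a `*mut u8` (pointer to the first element)...
    let mut plain = [0u8; BLOCK];
    let _first: *mut u8 = plain.as_mut_ptr();

    // ...but on `MaybeUninit<[u8; BLOCK]>` it yields a pointer to the whole
    // array, hence the added casts in the hunks above.
    let whole: *mut [u8; BLOCK] = offsets.as_mut_ptr();
    let first: *mut u8 = whole as *mut u8;

    unsafe {
        first.write(1); // initialize element 0 through the raw pointer
        assert_eq!(first.read(), 1);
    }
}
```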