mirror of
https://github.com/Lokathor/bytemuck.git
synced 2024-11-25 00:02:22 +00:00
commit
7603a0f01d
@ -16,3 +16,6 @@ extern_crate_alloc = []
|
||||
[badges]
|
||||
appveyor = { repository = "Lokathor/bytemuck" }
|
||||
travis-ci = { repository = "Lokathor/bytemuck" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
all-features = true
|
||||
|
116
src/allocation.rs
Normal file
116
src/allocation.rs
Normal file
@ -0,0 +1,116 @@
|
||||
//! Stuff to boost things in the `alloc` crate.
|
||||
//!
|
||||
//! You must use the crate with the `extern_crate_alloc` feature for the content
|
||||
//! in this module to be compiled in!
|
||||
|
||||
use super::*;
|
||||
use alloc::{
|
||||
alloc::{alloc_zeroed, Layout},
|
||||
boxed::Box,
|
||||
vec::Vec,
|
||||
};
|
||||
|
||||
/// As [`try_cast_box`](try_cast_box), but unwraps for you.
|
||||
#[inline]
|
||||
pub fn cast_box<A: Pod, B: Pod>(input: Box<A>) -> Box<B> {
|
||||
try_cast_box(input).map_err(|(e, _v)| e).unwrap()
|
||||
}
|
||||
|
||||
/// Attempts to cast the content type of a [`Box`](alloc::boxed::Box).
|
||||
///
|
||||
/// On failure you get back an error along with the starting `Box`.
|
||||
///
|
||||
/// ## Failure
|
||||
///
|
||||
/// * The start and end content type of the `Box` must have the exact same
|
||||
/// alignment.
|
||||
/// * The start and end size of the `Box` must have the exact same size.
|
||||
#[inline]
|
||||
pub fn try_cast_box<A: Pod, B: Pod>(input: Box<A>) -> Result<Box<B>, (PodCastError, Box<A>)> {
|
||||
if align_of::<A>() != align_of::<B>() {
|
||||
Err((PodCastError::AlignmentMismatch, input))
|
||||
} else if size_of::<A>() != size_of::<B>() {
|
||||
Err((PodCastError::SizeMismatch, input))
|
||||
} else {
|
||||
// Note(Lokathor): This is much simpler than with the Vec casting!
|
||||
let ptr: *mut B = Box::into_raw(input) as *mut B;
|
||||
Ok(unsafe { Box::from_raw(ptr) })
|
||||
}
|
||||
}
|
||||
|
||||
/// Allocates a `Box<T>` with all of the contents being zeroed out.
|
||||
///
|
||||
/// This uses the global allocator to create a zeroed allocation and _then_
|
||||
/// turns it into a Box. In other words, it's 100% assured that the zeroed data
|
||||
/// won't be put temporarily on the stack. You can make a box of any size
|
||||
/// without fear of a stack overflow.
|
||||
///
|
||||
/// (As a _small_ detail, a zero sized type will box up `T::zeroed()` normally,
|
||||
/// but since it's zero sized you still can't overflow the stack with it.)
|
||||
#[inline]
|
||||
pub fn try_zeroed_box<T: Zeroable>() -> Result<Box<T>, ()> {
|
||||
if size_of::<T>() == 0 {
|
||||
return Ok(Box::new(T::zeroed()));
|
||||
}
|
||||
let layout = Layout::from_size_align(size_of::<T>(), align_of::<T>()).unwrap();
|
||||
let ptr = unsafe { alloc_zeroed(layout) };
|
||||
if ptr.is_null() {
|
||||
// we don't know what the error is because `alloc_zeroed` is a dumb API
|
||||
Err(())
|
||||
} else {
|
||||
Ok(unsafe { Box::<T>::from_raw(ptr as *mut T) })
|
||||
}
|
||||
}
|
||||
|
||||
/// As [`try_zeroed_box`], but unwraps for you.
|
||||
#[inline]
|
||||
pub fn zeroed_box<T: Zeroable>() -> Box<T> {
|
||||
try_zeroed_box().unwrap()
|
||||
}
|
||||
|
||||
/// As [`try_cast_vec`](try_cast_vec), but unwraps for you.
|
||||
#[inline]
|
||||
pub fn cast_vec<A: Pod, B: Pod>(input: Vec<A>) -> Vec<B> {
|
||||
try_cast_vec(input).map_err(|(e, _v)| e).unwrap()
|
||||
}
|
||||
|
||||
/// Attempts to cast the content type of a [`Vec`](alloc::vec::Vec).
|
||||
///
|
||||
/// On failure you get back an error along with the starting `Vec`.
|
||||
///
|
||||
/// ## Failure
|
||||
///
|
||||
/// * The start and end content type of the `Vec` must have the exact same
|
||||
/// alignment.
|
||||
/// * The start and end size of the `Vec` must have the exact same size.
|
||||
/// * In the future this second restriction might be lessened by having the
|
||||
/// capacity and length get adjusted during transmutation, but for now it's
|
||||
/// absolute.
|
||||
#[inline]
|
||||
pub fn try_cast_vec<A: Pod, B: Pod>(input: Vec<A>) -> Result<Vec<B>, (PodCastError, Vec<A>)> {
|
||||
if align_of::<A>() != align_of::<B>() {
|
||||
Err((PodCastError::AlignmentMismatch, input))
|
||||
} else if size_of::<A>() != size_of::<B>() {
|
||||
// Note(Lokathor): Under some conditions it would be possible to cast
|
||||
// between Vec content types of the same alignment but different sizes by
|
||||
// changing the capacity and len values in the output Vec. However, we will
|
||||
// not attempt that for now.
|
||||
Err((PodCastError::SizeMismatch, input))
|
||||
} else {
|
||||
// Note(Lokathor): First we record the length and capacity, which don't have
|
||||
// any secret provenance metadata.
|
||||
let length: usize = input.len();
|
||||
let capacity: usize = input.capacity();
|
||||
// Note(Lokathor): Next we "pre-forget" the old Vec by wrapping with
|
||||
// ManuallyDrop, because if we used `core::mem::forget` after taking the
|
||||
// pointer then that would invalidate our pointer (I think? If not this
|
||||
// still doesn't hurt).
|
||||
let mut manual_drop_vec = ManuallyDrop::new(input);
|
||||
// Note(Lokathor): Finally, we carefully get the pointer we need, cast the
|
||||
// type, and then make a new Vec to return. This works all the way back to
|
||||
// 1.7, if you're on 1.37 or later you can use `Vec::as_mut_ptr` directly.
|
||||
let vec_ptr: *mut A = Vec::as_mut_slice(&mut *manual_drop_vec).as_mut_ptr();
|
||||
let ptr: *mut B = vec_ptr as *mut B;
|
||||
Ok(unsafe { Vec::from_raw_parts(ptr, length, capacity) })
|
||||
}
|
||||
}
|
236
src/lib.rs
Normal file
236
src/lib.rs
Normal file
@ -0,0 +1,236 @@
|
||||
#![no_std]
|
||||
|
||||
#[cfg(target_arch = "x86")]
|
||||
pub(crate) use core::arch::x86;
|
||||
#[cfg(target_arch = "x86_64")]
|
||||
pub(crate) use core::arch::x86_64;
|
||||
//
|
||||
pub(crate) use core::{marker::*, mem::*, num::*, ptr::*};
|
||||
|
||||
// Implements the given unsafe marker trait for `[T; $n]` at each listed
// length, provided `T` itself implements the trait. This is a
// pre-const-generics workaround: each array length needs its own impl.
macro_rules! impl_unsafe_marker_for_array {
  ( $marker:ident , $( $n:expr ),* ) => {
    $(unsafe impl<T> $marker for [T; $n] where T: $marker {})*
  }
}
|
||||
|
||||
#[cfg(feature = "extern_crate_alloc")]
|
||||
extern crate alloc;
|
||||
#[cfg(feature = "extern_crate_alloc")]
|
||||
pub mod allocation;
|
||||
#[cfg(feature = "extern_crate_alloc")]
|
||||
pub use allocation::*;
|
||||
|
||||
mod zeroable;
|
||||
pub use zeroable::*;
|
||||
|
||||
mod pod;
|
||||
pub use pod::*;
|
||||
|
||||
/// Re-interprets `&T` as `&[u8]`.
|
||||
///
|
||||
/// Any ZST becomes an empty slice, and in that case the pointer value of that
|
||||
/// empty slice might not match the pointer value of the input reference.
|
||||
#[inline]
|
||||
pub fn bytes_of<T: Pod>(t: &T) -> &[u8] {
|
||||
try_cast_slice::<T, u8>(core::slice::from_ref(t)).unwrap_or(&[])
|
||||
}
|
||||
|
||||
/// Re-interprets `&mut T` as `&mut [u8]`.
|
||||
///
|
||||
/// Any ZST becomes an empty slice, and in that case the pointer value of that
|
||||
/// empty slice might not match the pointer value of the input reference.
|
||||
#[inline]
|
||||
pub fn bytes_of_mut<T: Pod>(t: &mut T) -> &mut [u8] {
|
||||
try_cast_slice_mut::<T, u8>(core::slice::from_mut(t)).unwrap_or(&mut [])
|
||||
}
|
||||
|
||||
/// The things that can go wrong when casting between [`Pod`] data forms.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum PodCastError {
  /// You tried to cast a slice to an element type with a higher alignment
  /// requirement but the slice wasn't aligned.
  TargetAlignmentGreaterAndInputNotAligned,
  /// If the element size changes then the output slice changes length
  /// accordingly. If the output slice wouldn't be a whole number of elements
  /// then the conversion fails.
  OutputSliceWouldHaveSlop,
  /// When casting a slice you can't convert between ZST elements and non-ZST
  /// elements. When casting an individual `T`, `&T`, or `&mut T` value the
  /// source size and destination size must be an exact match.
  SizeMismatch,
  /// For this type of cast the alignments must be exactly the same and they
  /// were not so now you're sad.
  AlignmentMismatch,
}
|
||||
|
||||
/// Cast `T` into `U`
|
||||
///
|
||||
/// ## Panics
|
||||
///
|
||||
/// This is [`try_cast`] with an unwrap.
|
||||
#[inline]
|
||||
pub fn cast<A: Pod, B: Pod>(a: A) -> B {
|
||||
try_cast(a).unwrap()
|
||||
}
|
||||
|
||||
/// Cast `&mut T` into `&mut U`.
|
||||
///
|
||||
/// ## Panics
|
||||
///
|
||||
/// This is [`try_cast_mut`] with an unwrap.
|
||||
#[inline]
|
||||
pub fn cast_mut<A: Pod, B: Pod>(a: &mut A) -> &mut B {
|
||||
try_cast_mut(a).unwrap()
|
||||
}
|
||||
|
||||
/// Cast `&T` into `&U`.
|
||||
///
|
||||
/// ## Panics
|
||||
///
|
||||
/// This is [`try_cast_ref`] with an unwrap.
|
||||
#[inline]
|
||||
pub fn cast_ref<A: Pod, B: Pod>(a: &A) -> &B {
|
||||
try_cast_ref(a).unwrap()
|
||||
}
|
||||
|
||||
/// Cast `&[T]` into `&[U]`.
|
||||
///
|
||||
/// ## Panics
|
||||
///
|
||||
/// This is [`try_cast_slice`] with an unwrap.
|
||||
#[inline]
|
||||
pub fn cast_slice<A: Pod, B: Pod>(a: &[A]) -> &[B] {
|
||||
try_cast_slice(a).unwrap()
|
||||
}
|
||||
|
||||
/// Cast `&mut [T]` into `&mut [U]`.
|
||||
///
|
||||
/// ## Panics
|
||||
///
|
||||
/// This is [`try_cast_slice_mut`] with an unwrap.
|
||||
#[inline]
|
||||
pub fn cast_slice_mut<A: Pod, B: Pod>(a: &mut [A]) -> &mut [B] {
|
||||
try_cast_slice_mut(a).unwrap()
|
||||
}
|
||||
|
||||
/// As `align_to`, but safe because of the [`Pod`] bound.
///
/// Splits the slice into an unaligned prefix, a maximal middle section viewed
/// as `U`, and an unaligned suffix.
#[inline]
pub fn pod_align_to<T: Pod, U: Pod>(vals: &[T]) -> (&[T], &[U], &[T]) {
  // SAFETY: `align_to`'s contract is that transmuting `T` bytes to `U` must
  // be valid; both element types being `Pod` (any bit pattern allowed, no
  // padding) guarantees exactly that.
  unsafe { vals.align_to::<U>() }
}
|
||||
|
||||
/// As `align_to_mut`, but safe because of the [`Pod`] bound.
///
/// Mutable version of [`pod_align_to`].
#[inline]
pub fn pod_align_to_mut<T: Pod, U: Pod>(vals: &mut [T]) -> (&mut [T], &mut [U], &mut [T]) {
  // SAFETY: same argument as `pod_align_to` — `Pod` on both element types
  // makes any byte reinterpretation between them a valid value.
  unsafe { vals.align_to_mut::<U>() }
}
|
||||
|
||||
/// Try to cast `T` into `U`.
|
||||
///
|
||||
/// ## Failure
|
||||
///
|
||||
/// * If the types don't have the same size this fails.
|
||||
#[inline]
|
||||
pub fn try_cast<A: Pod, B: Pod>(a: A) -> Result<B, PodCastError> {
|
||||
if size_of::<A>() == size_of::<B>() {
|
||||
let mut b = B::zeroed();
|
||||
// Note(Lokathor): We copy in terms of `u8` because that allows us to bypass
|
||||
// any potential alignment difficulties.
|
||||
let ap = &a as *const A as *const u8;
|
||||
let bp = &mut b as *mut B as *mut u8;
|
||||
unsafe { ap.copy_to_nonoverlapping(bp, size_of::<A>()) };
|
||||
Ok(b)
|
||||
} else {
|
||||
Err(PodCastError::SizeMismatch)
|
||||
}
|
||||
}
|
||||
|
||||
/// Try to convert a `&T` into `&U`.
|
||||
///
|
||||
/// ## Failure
|
||||
///
|
||||
/// * If the reference isn't aligned in the new type
|
||||
/// * If the source type and target type aren't the same size.
|
||||
#[inline]
|
||||
pub fn try_cast_ref<A: Pod, B: Pod>(a: &A) -> Result<&B, PodCastError> {
|
||||
// Note(Lokathor): everything with `align_of` and `size_of` will optimize away
|
||||
// after monomorphization.
|
||||
if align_of::<B>() > align_of::<A>() && (a as *const A as usize) % align_of::<B>() != 0 {
|
||||
Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
|
||||
} else if size_of::<B>() == size_of::<A>() {
|
||||
Ok(unsafe { &*(a as *const A as *const B) })
|
||||
} else {
|
||||
Err(PodCastError::SizeMismatch)
|
||||
}
|
||||
}
|
||||
|
||||
/// Try to convert a `&mut T` into `&mut U`.
|
||||
///
|
||||
/// As [`try_cast_ref`], but `mut`.
|
||||
#[inline]
|
||||
pub fn try_cast_mut<A: Pod, B: Pod>(a: &mut A) -> Result<&mut B, PodCastError> {
|
||||
// Note(Lokathor): everything with `align_of` and `size_of` will optimize away
|
||||
// after monomorphization.
|
||||
if align_of::<B>() > align_of::<A>() && (a as *mut A as usize) % align_of::<B>() != 0 {
|
||||
Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
|
||||
} else if size_of::<B>() == size_of::<A>() {
|
||||
Ok(unsafe { &mut *(a as *mut A as *mut B) })
|
||||
} else {
|
||||
Err(PodCastError::SizeMismatch)
|
||||
}
|
||||
}
|
||||
|
||||
/// Try to convert `&[T]` into `&[U]` (possibly with a change in length).
|
||||
///
|
||||
/// * `input.as_ptr() as usize == output.as_ptr() as usize`
|
||||
/// * `input.len() * size_of::<A>() == output.len() * size_of::<B>()`
|
||||
///
|
||||
/// ## Failure
|
||||
///
|
||||
/// * If the target type has a greater alignment requirement and the input slice
|
||||
/// isn't aligned.
|
||||
/// * If the target element type is a different size from the current element
|
||||
/// type, and the output slice wouldn't be a whole number of elements when
|
||||
/// accounting for the size change (eg: three `u16` values is 1.5 `u32`
|
||||
/// values, so that's a failure).
|
||||
/// * Similarly, you can't convert between a
|
||||
/// [ZST](https://doc.rust-lang.org/nomicon/exotic-sizes.html#zero-sized-types-zsts)
|
||||
/// and a non-ZST.
|
||||
#[inline]
|
||||
pub fn try_cast_slice<A: Pod, B: Pod>(a: &[A]) -> Result<&[B], PodCastError> {
|
||||
// Note(Lokathor): everything with `align_of` and `size_of` will optimize away
|
||||
// after monomorphization.
|
||||
if align_of::<B>() > align_of::<A>() && (a.as_ptr() as usize) % align_of::<B>() != 0 {
|
||||
Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
|
||||
} else if size_of::<B>() == size_of::<A>() {
|
||||
Ok(unsafe { core::slice::from_raw_parts(a.as_ptr() as *const B, a.len()) })
|
||||
} else if size_of::<A>() == 0 || size_of::<B>() == 0 {
|
||||
Err(PodCastError::SizeMismatch)
|
||||
} else if core::mem::size_of_val(a) % size_of::<B>() == 0 {
|
||||
let new_len = core::mem::size_of_val(a) / size_of::<B>();
|
||||
Ok(unsafe { core::slice::from_raw_parts(a.as_ptr() as *const B, new_len) })
|
||||
} else {
|
||||
Err(PodCastError::OutputSliceWouldHaveSlop)
|
||||
}
|
||||
}
|
||||
|
||||
/// Try to convert `&mut [T]` into `mut [U]` (possibly with a change in length).
|
||||
///
|
||||
/// As [`try_cast_slice`], but `mut`.
|
||||
#[inline]
|
||||
pub fn try_cast_slice_mut<A: Pod, B: Pod>(a: &mut [A]) -> Result<&mut [B], PodCastError> {
|
||||
// Note(Lokathor): everything with `align_of` and `size_of` will optimize away
|
||||
// after monomorphization.
|
||||
if align_of::<B>() > align_of::<A>() && (a.as_ptr() as usize) % align_of::<B>() != 0 {
|
||||
Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
|
||||
} else if size_of::<B>() == size_of::<A>() {
|
||||
Ok(unsafe { core::slice::from_raw_parts_mut(a.as_ptr() as *mut B, a.len()) })
|
||||
} else if size_of::<A>() == 0 || size_of::<B>() == 0 {
|
||||
Err(PodCastError::SizeMismatch)
|
||||
} else if core::mem::size_of_val(a) % size_of::<B>() == 0 {
|
||||
let new_len = core::mem::size_of_val(a) / size_of::<B>();
|
||||
Ok(unsafe { core::slice::from_raw_parts_mut(a.as_ptr() as *mut B, new_len) })
|
||||
} else {
|
||||
Err(PodCastError::OutputSliceWouldHaveSlop)
|
||||
}
|
||||
}
|
90
src/pod.rs
Normal file
90
src/pod.rs
Normal file
@ -0,0 +1,90 @@
|
||||
use super::*;
|
||||
|
||||
/// Marker trait for "plain old data".
///
/// The point of this trait is that once something is marked "plain old data"
/// you can really go to town with the bit fiddling and bit casting. Therefore,
/// it's a relatively strong claim to make about a type. Do not add this to your
/// type casually.
///
/// **Reminder:** The results of casting around bytes between data types are
/// _endian dependant_. Little-endian machines are the most common, but
/// big-endian machines do exist (and big-endian is also used for "network
/// order" bytes).
///
/// ## Safety
///
/// * The type must be inhabited (eg: no
///   [Infallible](core::convert::Infallible)).
/// * The type must allow any bit pattern (eg: no `bool` or `char`).
/// * The type must not contain any padding bytes (eg: no `(u8, u16)`).
/// * A struct needs to have all fields be `Pod` and be `repr(C)`,
///   `repr(transparent)`, or `repr(packed)`.
// Supertraits: `Zeroable` because all-zero is one of the "any bit pattern"
// values, `Copy + 'static` so values can be duplicated byte-wise freely.
pub unsafe trait Pod: Zeroable + Copy + 'static {}
|
||||
|
||||
// Primitive integer/float types: inhabited, padding-free, and every bit
// pattern is a valid value. `()` is a ZST with no bits at all.
unsafe impl Pod for () {}
unsafe impl Pod for u8 {}
unsafe impl Pod for i8 {}
unsafe impl Pod for u16 {}
unsafe impl Pod for i16 {}
unsafe impl Pod for u32 {}
unsafe impl Pod for i32 {}
unsafe impl Pod for u64 {}
unsafe impl Pod for i64 {}
unsafe impl Pod for usize {}
unsafe impl Pod for isize {}
unsafe impl Pod for u128 {}
unsafe impl Pod for i128 {}
unsafe impl Pod for f32 {}
unsafe impl Pod for f64 {}
// `Wrapping<T>` is a transparent wrapper over `T`.
unsafe impl<T: Pod> Pod for Wrapping<T> {}

// `Option<NonZero*>` has the same layout as the underlying integer, with the
// all-zero pattern meaning `None` — so every bit pattern is still valid.
unsafe impl Pod for Option<NonZeroI8> {}
unsafe impl Pod for Option<NonZeroI16> {}
unsafe impl Pod for Option<NonZeroI32> {}
unsafe impl Pod for Option<NonZeroI64> {}
unsafe impl Pod for Option<NonZeroI128> {}
unsafe impl Pod for Option<NonZeroIsize> {}
unsafe impl Pod for Option<NonZeroU8> {}
unsafe impl Pod for Option<NonZeroU16> {}
unsafe impl Pod for Option<NonZeroU32> {}
unsafe impl Pod for Option<NonZeroU64> {}
unsafe impl Pod for Option<NonZeroU128> {}
unsafe impl Pod for Option<NonZeroUsize> {}

// Raw pointers: any address value is a representable pointer.
// NOTE(review): treating pointer bytes as plain data is at odds with strict
// pointer-provenance thinking; kept as-is since it's part of this crate's
// public API surface.
unsafe impl<T: 'static> Pod for *mut T {}
unsafe impl<T: 'static> Pod for *const T {}
unsafe impl<T: 'static> Pod for Option<NonNull<T>> {}
unsafe impl<T: Pod> Pod for PhantomData<T> {}
unsafe impl<T: Pod> Pod for ManuallyDrop<T> {}

// Arrays of Pod elements at the listed lengths (pre-const-generics, each
// length needs its own impl — see the macro in lib.rs).
impl_unsafe_marker_for_array!(
  Pod, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
  25, 26, 27, 28, 29, 30, 31, 32, 48, 64, 96, 128, 256, 512, 1024
);

// x86 SIMD vector types: opaque 128/256-bit data, any bit pattern valid.
#[cfg(target_arch = "x86")]
unsafe impl Pod for x86::__m128i {}
#[cfg(target_arch = "x86")]
unsafe impl Pod for x86::__m128 {}
#[cfg(target_arch = "x86")]
unsafe impl Pod for x86::__m128d {}
#[cfg(target_arch = "x86")]
unsafe impl Pod for x86::__m256i {}
#[cfg(target_arch = "x86")]
unsafe impl Pod for x86::__m256 {}
#[cfg(target_arch = "x86")]
unsafe impl Pod for x86::__m256d {}

// Same SIMD types, x86_64 module path.
#[cfg(target_arch = "x86_64")]
unsafe impl Pod for x86_64::__m128i {}
#[cfg(target_arch = "x86_64")]
unsafe impl Pod for x86_64::__m128 {}
#[cfg(target_arch = "x86_64")]
unsafe impl Pod for x86_64::__m128d {}
#[cfg(target_arch = "x86_64")]
unsafe impl Pod for x86_64::__m256i {}
#[cfg(target_arch = "x86_64")]
unsafe impl Pod for x86_64::__m256 {}
#[cfg(target_arch = "x86_64")]
unsafe impl Pod for x86_64::__m256d {}
|
118
src/zeroable.rs
Normal file
118
src/zeroable.rs
Normal file
@ -0,0 +1,118 @@
|
||||
use super::*;
|
||||
|
||||
/// Trait for types that can be safely created with [`zeroed`](core::mem::zeroed).
///
/// ## Safety
///
/// * Your type must be _inhabited_ (eg: no
///   [Infallible](core::convert::Infallible)).
/// * Your type must be allowed to be an "all zeroes" bit pattern (eg: no
///   [`NonNull<T>`](core::ptr::NonNull)).
pub unsafe trait Zeroable: Sized {
  /// Calls [`zeroed`](core::mem::zeroed).
  ///
  /// This is a trait method so that you can write `MyType::zeroed()` in your
  /// code. It is a contract of this trait that if you implement it on your type
  /// you **must not** override this method.
  #[inline]
  fn zeroed() -> Self {
    // Sound because implementing `Zeroable` asserts that the all-zero bit
    // pattern is a valid `Self`.
    unsafe { core::mem::zeroed() }
  }
}
|
||||
// Primitives: the all-zero pattern is a valid value of each of these
// (`false` for bool, `'\0'` for char, numeric zero for the rest).
unsafe impl Zeroable for () {}
unsafe impl Zeroable for bool {}
unsafe impl Zeroable for char {}
unsafe impl Zeroable for u8 {}
unsafe impl Zeroable for i8 {}
unsafe impl Zeroable for u16 {}
unsafe impl Zeroable for i16 {}
unsafe impl Zeroable for u32 {}
unsafe impl Zeroable for i32 {}
unsafe impl Zeroable for u64 {}
unsafe impl Zeroable for i64 {}
unsafe impl Zeroable for usize {}
unsafe impl Zeroable for isize {}
unsafe impl Zeroable for u128 {}
unsafe impl Zeroable for i128 {}
unsafe impl Zeroable for f32 {}
unsafe impl Zeroable for f64 {}
// `Wrapping<T>` is a transparent wrapper over `T`.
unsafe impl<T: Zeroable> Zeroable for Wrapping<T> {}

// `Option<NonZero*>`: the all-zero pattern is exactly the `None` value, per
// the guaranteed niche layout of these std types.
unsafe impl Zeroable for Option<NonZeroI8> {}
unsafe impl Zeroable for Option<NonZeroI16> {}
unsafe impl Zeroable for Option<NonZeroI32> {}
unsafe impl Zeroable for Option<NonZeroI64> {}
unsafe impl Zeroable for Option<NonZeroI128> {}
unsafe impl Zeroable for Option<NonZeroIsize> {}
unsafe impl Zeroable for Option<NonZeroU8> {}
unsafe impl Zeroable for Option<NonZeroU16> {}
unsafe impl Zeroable for Option<NonZeroU32> {}
unsafe impl Zeroable for Option<NonZeroU64> {}
unsafe impl Zeroable for Option<NonZeroU128> {}
unsafe impl Zeroable for Option<NonZeroUsize> {}

// Raw pointers: all-zero is the null pointer, a valid pointer value. The
// null niche also makes zeroed `Option<NonNull<T>>` a valid `None`.
unsafe impl<T> Zeroable for *mut T {}
unsafe impl<T> Zeroable for *const T {}
unsafe impl<T> Zeroable for Option<NonNull<T>> {}
unsafe impl<T: Zeroable> Zeroable for PhantomData<T> {}
unsafe impl<T: Zeroable> Zeroable for ManuallyDrop<T> {}

// Tuples up to arity 8 are zeroable when every field is.
unsafe impl<A: Zeroable> Zeroable for (A,) {}
unsafe impl<A: Zeroable, B: Zeroable> Zeroable for (A, B) {}
unsafe impl<A: Zeroable, B: Zeroable, C: Zeroable> Zeroable for (A, B, C) {}
unsafe impl<A: Zeroable, B: Zeroable, C: Zeroable, D: Zeroable> Zeroable for (A, B, C, D) {}
unsafe impl<A: Zeroable, B: Zeroable, C: Zeroable, D: Zeroable, E: Zeroable> Zeroable
  for (A, B, C, D, E)
{
}
unsafe impl<A: Zeroable, B: Zeroable, C: Zeroable, D: Zeroable, E: Zeroable, F: Zeroable> Zeroable
  for (A, B, C, D, E, F)
{
}
unsafe impl<A: Zeroable, B: Zeroable, C: Zeroable, D: Zeroable, E: Zeroable, F: Zeroable, G: Zeroable>
  Zeroable for (A, B, C, D, E, F, G)
{
}
unsafe impl<
  A: Zeroable,
  B: Zeroable,
  C: Zeroable,
  D: Zeroable,
  E: Zeroable,
  F: Zeroable,
  G: Zeroable,
  H: Zeroable,
> Zeroable for (A, B, C, D, E, F, G, H)
{
}

// Arrays of Zeroable elements at the listed lengths (pre-const-generics,
// each length needs its own impl — see the macro in lib.rs).
impl_unsafe_marker_for_array!(
  Zeroable, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
  24, 25, 26, 27, 28, 29, 30, 31, 32, 48, 64, 96, 128, 256, 512, 1024
);

// x86 SIMD vector types: any bit pattern, including all zeroes, is valid.
#[cfg(target_arch = "x86")]
unsafe impl Zeroable for x86::__m128i {}
#[cfg(target_arch = "x86")]
unsafe impl Zeroable for x86::__m128 {}
#[cfg(target_arch = "x86")]
unsafe impl Zeroable for x86::__m128d {}
#[cfg(target_arch = "x86")]
unsafe impl Zeroable for x86::__m256i {}
#[cfg(target_arch = "x86")]
unsafe impl Zeroable for x86::__m256 {}
#[cfg(target_arch = "x86")]
unsafe impl Zeroable for x86::__m256d {}

// Same SIMD types, x86_64 module path.
#[cfg(target_arch = "x86_64")]
unsafe impl Zeroable for x86_64::__m128i {}
#[cfg(target_arch = "x86_64")]
unsafe impl Zeroable for x86_64::__m128 {}
#[cfg(target_arch = "x86_64")]
unsafe impl Zeroable for x86_64::__m128d {}
#[cfg(target_arch = "x86_64")]
unsafe impl Zeroable for x86_64::__m256i {}
#[cfg(target_arch = "x86_64")]
unsafe impl Zeroable for x86_64::__m256 {}
#[cfg(target_arch = "x86_64")]
unsafe impl Zeroable for x86_64::__m256d {}
|
Loading…
Reference in New Issue
Block a user