Remove .cast method usage and rustfmt

Lokathor 2019-11-25 18:46:23 -07:00
parent 02cd7c1ba5
commit af499f4c7c
5 changed files with 72 additions and 32 deletions

View File

@@ -3,5 +3,6 @@ merge_imports = true
 reorder_imports = true
 use_try_shorthand = true
 tab_spaces = 2
-max_width = 100
+max_width = 80
 color = "Never"
+use_small_heuristics = "Max"
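
Note: for reference, the rustfmt.toml content after this hunk, reconstructed from the context lines above (keys outside the hunk, if any, are not shown in the diff):

merge_imports = true
reorder_imports = true
use_try_shorthand = true
tab_spaces = 2
max_width = 80
color = "Never"
use_small_heuristics = "Max"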

View File

@@ -26,14 +26,16 @@ pub fn cast_box<A: Pod, B: Pod>(input: Box<A>) -> Box<B> {
 /// alignment.
 /// * The start and end size of the `Box` must have the exact same size.
 #[inline]
-pub fn try_cast_box<A: Pod, B: Pod>(input: Box<A>) -> Result<Box<B>, (PodCastError, Box<A>)> {
+pub fn try_cast_box<A: Pod, B: Pod>(
+  input: Box<A>,
+) -> Result<Box<B>, (PodCastError, Box<A>)> {
   if align_of::<A>() != align_of::<B>() {
     Err((PodCastError::AlignmentMismatch, input))
   } else if size_of::<A>() != size_of::<B>() {
     Err((PodCastError::SizeMismatch, input))
   } else {
     // Note(Lokathor): This is much simpler than with the Vec casting!
-    let ptr: *mut B = Box::into_raw(input).cast::<B>();
+    let ptr: *mut B = Box::into_raw(input) as *mut B;
     Ok(unsafe { Box::from_raw(ptr) })
   }
 }
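
Note: the recurring change in this commit swaps `pointer::cast` for plain `as` casts. A minimal standalone sketch of the equivalence (my own example, not from the commit); `pointer::cast` was only stabilized in Rust 1.38, so the `as` form presumably keeps the crate building on older toolchains:

fn main() {
  let mut x: u32 = 0xAABB_CCDD;
  let p: *mut u32 = &mut x;
  let via_cast: *mut u8 = p.cast::<u8>(); // needs Rust 1.38+
  let via_as: *mut u8 = p as *mut u8; // compiles on older toolchains too
  assert_eq!(via_cast, via_as);
}
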
@@ -53,13 +55,14 @@ pub fn try_zeroed_box<T: Zeroable>() -> Result<Box<T>, ()> {
   if size_of::<T>() == 0 {
     return Ok(Box::new(T::zeroed()));
   }
-  let layout = Layout::from_size_align(size_of::<T>(), align_of::<T>()).unwrap();
+  let layout =
+    Layout::from_size_align(size_of::<T>(), align_of::<T>()).unwrap();
   let ptr = unsafe { alloc_zeroed(layout) };
   if ptr.is_null() {
     // we don't know what the error is because `alloc_zeroed` is a dumb API
     Err(())
   } else {
     Ok(unsafe { Box::<T>::from_raw(ptr as *mut T) })
   }
 }
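
Note: a hedged usage sketch for try_zeroed_box (assuming the crate-root re-export of this module's items); the Layout/alloc_zeroed path above is what lets a large value be zero-initialized directly on the heap rather than built on the stack first. [u64; 4096] is Zeroable via the array impls touched later in this commit:

let big: Box<[u64; 4096]> = bytemuck::try_zeroed_box().expect("alloc failed");
assert!(big.iter().all(|&x| x == 0));
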
@@ -88,7 +91,9 @@ pub fn cast_vec<A: Pod, B: Pod>(input: Vec<A>) -> Vec<B> {
 /// capacity and length get adjusted during transmutation, but for now it's
 /// absolute.
 #[inline]
-pub fn try_cast_vec<A: Pod, B: Pod>(input: Vec<A>) -> Result<Vec<B>, (PodCastError, Vec<A>)> {
+pub fn try_cast_vec<A: Pod, B: Pod>(
+  input: Vec<A>,
+) -> Result<Vec<B>, (PodCastError, Vec<A>)> {
   if align_of::<A>() != align_of::<B>() {
     Err((PodCastError::AlignmentMismatch, input))
   } else if size_of::<A>() != size_of::<B>() {
@@ -108,7 +113,7 @@ pub fn try_cast_vec<A: Pod, B: Pod>(input: Vec<A>) -> Result<Vec<B>, (PodCastErr
     // "into raw parts" method, which we can switch this too eventually.
     let mut manual_drop_vec = ManuallyDrop::new(input);
     let vec_ptr: *mut A = manual_drop_vec.as_mut_ptr();
-    let ptr: *mut B = vec_ptr.cast::<B>();
+    let ptr: *mut B = vec_ptr as *mut B;
     Ok(unsafe { Vec::from_raw_parts(ptr, length, capacity) })
   }
 }
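
Note: the comment above alludes to the then-unstable Vec::into_raw_parts; until that lands, the ManuallyDrop dance is how the Vec is taken apart without running its destructor. A self-contained sketch of the same pattern (my own illustration):

use core::mem::ManuallyDrop;

fn roundtrip(v: Vec<u32>) -> Vec<u32> {
  // Wrap in ManuallyDrop so the original Vec's destructor never runs.
  let mut md = ManuallyDrop::new(v);
  let (ptr, len, cap) = (md.as_mut_ptr(), md.len(), md.capacity());
  // Safety: ptr/len/cap describe a live allocation we now own exactly once.
  unsafe { Vec::from_raw_parts(ptr, len, cap) }
}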

View File

@@ -121,7 +121,9 @@ pub fn pod_align_to<T: Pod, U: Pod>(vals: &[T]) -> (&[T], &[U], &[T]) {
 /// As `align_to_mut`, but safe because of the [`Pod`] bound.
 #[inline]
-pub fn pod_align_to_mut<T: Pod, U: Pod>(vals: &mut [T]) -> (&mut [T], &mut [U], &mut [T]) {
+pub fn pod_align_to_mut<T: Pod, U: Pod>(
+  vals: &mut [T],
+) -> (&mut [T], &mut [U], &mut [T]) {
   unsafe { vals.align_to_mut::<U>() }
 }
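
Note: a hedged usage sketch for the immutable variant named in the hunk header (pod_align_to); the three returned slices partition the input around the largest U-aligned middle:

let bytes = [0u8; 16];
let (head, body, tail) = bytemuck::pod_align_to::<u8, u32>(&bytes);
// Every input element lands in exactly one of the three slices.
assert_eq!(head.len() + body.len() * 4 + tail.len(), 16);
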
@@ -136,8 +138,8 @@ pub fn try_cast<A: Pod, B: Pod>(a: A) -> Result<B, PodCastError> {
     let mut b = B::zeroed();
     // Note(Lokathor): We copy in terms of `u8` because that allows us to bypass
     // any potential alignment difficulties.
-    let ap = (&a as *const A).cast::<u8>();
-    let bp = (&mut b as *mut B).cast::<u8>();
+    let ap = &a as *const A as *const u8;
+    let bp = &mut b as *mut B as *mut u8;
     unsafe { ap.copy_to_nonoverlapping(bp, size_of::<A>()) };
     Ok(b)
   } else {
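
Note: the Note in this hunk is the key idea here: routing the copy through u8 pointers removes any alignment obligation on either side. A self-contained sketch of the same trick (my own example):

use core::mem::size_of;

fn bit_copy_u32_to_f32(a: u32) -> f32 {
  let mut b = 0.0_f32;
  // Casting both sides to byte pointers sidesteps alignment entirely.
  let ap = &a as *const u32 as *const u8;
  let bp = &mut b as *mut f32 as *mut u8;
  // Safety: both values are exactly 4 bytes and do not overlap.
  unsafe { ap.copy_to_nonoverlapping(bp, size_of::<u32>()) };
  b
}
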
@@ -155,10 +157,12 @@ pub fn try_cast<A: Pod, B: Pod>(a: A) -> Result<B, PodCastError> {
 pub fn try_cast_ref<A: Pod, B: Pod>(a: &A) -> Result<&B, PodCastError> {
   // Note(Lokathor): everything with `align_of` and `size_of` will optimize away
   // after monomorphization.
-  if align_of::<B>() > align_of::<A>() && (a as *const A as usize) % align_of::<B>() != 0 {
+  if align_of::<B>() > align_of::<A>()
+    && (a as *const A as usize) % align_of::<B>() != 0
+  {
     Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
   } else if size_of::<B>() == size_of::<A>() {
-    Ok(unsafe { &*(a as *const A).cast::<B>() })
+    Ok(unsafe { &*(a as *const A as *const B) })
   } else {
     Err(PodCastError::SizeMismatch)
   }
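
Note: a hedged usage sketch: reinterpreting a &u32 as &[u8; 4] always succeeds, since the sizes match and [u8; 4] has alignment 1:

let x: u32 = 0x0102_0304;
let bytes: &[u8; 4] = bytemuck::try_cast_ref(&x).unwrap();
assert_eq!(*bytes, x.to_ne_bytes());
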
@@ -171,10 +175,12 @@ pub fn try_cast_ref<A: Pod, B: Pod>(a: &A) -> Result<&B, PodCastError> {
 pub fn try_cast_mut<A: Pod, B: Pod>(a: &mut A) -> Result<&mut B, PodCastError> {
   // Note(Lokathor): everything with `align_of` and `size_of` will optimize away
   // after monomorphization.
-  if align_of::<B>() > align_of::<A>() && (a as *mut A as usize) % align_of::<B>() != 0 {
+  if align_of::<B>() > align_of::<A>()
+    && (a as *mut A as usize) % align_of::<B>() != 0
+  {
     Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
   } else if size_of::<B>() == size_of::<A>() {
-    Ok(unsafe { &mut *(a as *mut A).cast::<B>() })
+    Ok(unsafe { &mut *(a as *mut A as *mut B) })
   } else {
     Err(PodCastError::SizeMismatch)
   }
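
Note: both alignment guards above reduce to the same address arithmetic; as a worked example, a pointer is suitably aligned for B exactly when its address is a multiple of align_of::<B>():

use core::mem::align_of;

fn is_aligned_for<B>(addr: usize) -> bool {
  // e.g. address 0x1004 is aligned for u32 (multiple of 4) but not for u64.
  addr % align_of::<B>() == 0
}
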
@@ -200,15 +206,17 @@ pub fn try_cast_mut<A: Pod, B: Pod>(a: &mut A) -> Result<&mut B, PodCastError> {
 pub fn try_cast_slice<A: Pod, B: Pod>(a: &[A]) -> Result<&[B], PodCastError> {
   // Note(Lokathor): everything with `align_of` and `size_of` will optimize away
   // after monomorphization.
-  if align_of::<B>() > align_of::<A>() && (a.as_ptr() as usize) % align_of::<B>() != 0 {
+  if align_of::<B>() > align_of::<A>()
+    && (a.as_ptr() as usize) % align_of::<B>() != 0
+  {
     Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
   } else if size_of::<B>() == size_of::<A>() {
-    Ok(unsafe { core::slice::from_raw_parts(a.as_ptr().cast::<B>(), a.len()) })
+    Ok(unsafe { core::slice::from_raw_parts(a.as_ptr() as *const B, a.len()) })
   } else if size_of::<A>() == 0 || size_of::<B>() == 0 {
     Err(PodCastError::SizeMismatch)
   } else if core::mem::size_of_val(a) % size_of::<B>() == 0 {
     let new_len = core::mem::size_of_val(a) / size_of::<B>();
-    Ok(unsafe { core::slice::from_raw_parts(a.as_ptr().cast::<B>(), new_len) })
+    Ok(unsafe { core::slice::from_raw_parts(a.as_ptr() as *const B, new_len) })
   } else {
     Err(PodCastError::OutputSliceWouldHaveSlop)
   }
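
Note: a hedged usage sketch of the length arithmetic in this hunk: the element count is recomputed from the total byte size, and leftover bytes are rejected as slop:

let words: [u16; 3] = [1, 2, 3];
let bytes: &[u8] = bytemuck::try_cast_slice(&words[..]).unwrap();
assert_eq!(bytes.len(), 6); // size_of_val is 6 bytes, 6 / size_of::<u8>() = 6
// 5 bytes cannot be regrouped into whole u16s, so this errs (slop).
assert!(bytemuck::try_cast_slice::<u8, u16>(&bytes[..5]).is_err());
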
@@ -218,18 +226,26 @@ pub fn try_cast_slice<A: Pod, B: Pod>(a: &[A]) -> Result<&[B], PodCastError> {
 ///
 /// As [`try_cast_slice`], but `&mut`.
 #[inline]
-pub fn try_cast_slice_mut<A: Pod, B: Pod>(a: &mut [A]) -> Result<&mut [B], PodCastError> {
+pub fn try_cast_slice_mut<A: Pod, B: Pod>(
+  a: &mut [A],
+) -> Result<&mut [B], PodCastError> {
   // Note(Lokathor): everything with `align_of` and `size_of` will optimize away
   // after monomorphization.
-  if align_of::<B>() > align_of::<A>() && (a.as_mut_ptr() as usize) % align_of::<B>() != 0 {
+  if align_of::<B>() > align_of::<A>()
+    && (a.as_mut_ptr() as usize) % align_of::<B>() != 0
+  {
     Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
   } else if size_of::<B>() == size_of::<A>() {
-    Ok(unsafe { core::slice::from_raw_parts_mut(a.as_mut_ptr().cast::<B>(), a.len()) })
+    Ok(unsafe {
+      core::slice::from_raw_parts_mut(a.as_mut_ptr() as *mut B, a.len())
+    })
   } else if size_of::<A>() == 0 || size_of::<B>() == 0 {
     Err(PodCastError::SizeMismatch)
   } else if core::mem::size_of_val(a) % size_of::<B>() == 0 {
     let new_len = core::mem::size_of_val(a) / size_of::<B>();
-    Ok(unsafe { core::slice::from_raw_parts_mut(a.as_mut_ptr().cast::<B>(), new_len) })
+    Ok(unsafe {
+      core::slice::from_raw_parts_mut(a.as_mut_ptr() as *mut B, new_len)
+    })
   } else {
     Err(PodCastError::OutputSliceWouldHaveSlop)
   }
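
Note: and a hedged sketch for the &mut variant; writes through the casted view land in the original storage:

let mut words = [0u16; 2];
{
  let bytes: &mut [u8] =
    bytemuck::try_cast_slice_mut(&mut words[..]).unwrap();
  for b in bytes.iter_mut() {
    *b = 0xFF;
  }
}
assert_eq!(words, [0xFFFF, 0xFFFF]);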

View File

@@ -59,8 +59,9 @@ unsafe impl<T: Pod> Pod for PhantomData<T> {}
 unsafe impl<T: Pod> Pod for ManuallyDrop<T> {}
 impl_unsafe_marker_for_array!(
-  Pod, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
-  25, 26, 27, 28, 29, 30, 31, 32, 48, 64, 96, 128, 256, 512, 1024, 2048, 4096
+  Pod, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+  20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 48, 64, 96, 128, 256,
+  512, 1024, 2048, 4096
 );
 #[cfg(target_arch = "x86")]
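
Note: the reflowed call feeds a list of array lengths to impl_unsafe_marker_for_array!; before const generics, each length needed its own impl. A plausible sketch of such a macro (hypothetical; the real definition is not shown in this diff):

macro_rules! impl_unsafe_marker_for_array {
  ( $marker:ident , $( $n:expr ),* ) => {
    // One impl per listed length; [T; N] is $marker whenever T is.
    $( unsafe impl<T: $marker> $marker for [T; $n] {} )*
  };
}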

View File

@@ -61,17 +61,33 @@ unsafe impl<T> Zeroable for MaybeUninit<T> {}
 unsafe impl<A: Zeroable> Zeroable for (A,) {}
 unsafe impl<A: Zeroable, B: Zeroable> Zeroable for (A, B) {}
 unsafe impl<A: Zeroable, B: Zeroable, C: Zeroable> Zeroable for (A, B, C) {}
-unsafe impl<A: Zeroable, B: Zeroable, C: Zeroable, D: Zeroable> Zeroable for (A, B, C, D) {}
-unsafe impl<A: Zeroable, B: Zeroable, C: Zeroable, D: Zeroable, E: Zeroable> Zeroable
-  for (A, B, C, D, E)
-{
-}
-unsafe impl<A: Zeroable, B: Zeroable, C: Zeroable, D: Zeroable, E: Zeroable, F: Zeroable> Zeroable
-  for (A, B, C, D, E, F)
-{
-}
-unsafe impl<A: Zeroable, B: Zeroable, C: Zeroable, D: Zeroable, E: Zeroable, F: Zeroable, G: Zeroable>
-  Zeroable for (A, B, C, D, E, F, G)
-{
-}
+unsafe impl<A: Zeroable, B: Zeroable, C: Zeroable, D: Zeroable> Zeroable
+  for (A, B, C, D)
+{
+}
+unsafe impl<A: Zeroable, B: Zeroable, C: Zeroable, D: Zeroable, E: Zeroable>
+  Zeroable for (A, B, C, D, E)
+{
+}
+unsafe impl<
+  A: Zeroable,
+  B: Zeroable,
+  C: Zeroable,
+  D: Zeroable,
+  E: Zeroable,
+  F: Zeroable,
+> Zeroable for (A, B, C, D, E, F)
+{
+}
+unsafe impl<
+  A: Zeroable,
+  B: Zeroable,
+  C: Zeroable,
+  D: Zeroable,
+  E: Zeroable,
+  F: Zeroable,
+  G: Zeroable,
+> Zeroable for (A, B, C, D, E, F, G)
+{
+}
 unsafe impl<
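
Note: these reflowed impls are behavior-neutral; for illustration, a hedged sketch of what they enable (any tuple of Zeroable fields is itself Zeroable):

let t: (u8, u16, u32, u64) = bytemuck::Zeroable::zeroed();
assert_eq!(t, (0, 0, 0, 0));
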
@@ -88,8 +104,9 @@ unsafe impl<
 }
 impl_unsafe_marker_for_array!(
-  Zeroable, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
-  24, 25, 26, 27, 28, 29, 30, 31, 32, 48, 64, 96, 128, 256, 512, 1024, 2048, 4096
+  Zeroable, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+  19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 48, 64, 96, 128, 256,
+  512, 1024, 2048, 4096
 );
 #[cfg(target_arch = "x86")]
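
Note: as with the Pod list, every length named in the macro call gets an impl; a hedged usage sketch for one of them:

let big: [u32; 4096] = bytemuck::Zeroable::zeroed();
assert!(big.iter().all(|&x| x == 0));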