diff --git a/rustfmt.toml b/rustfmt.toml
index 67d949d..50860b8 100644
--- a/rustfmt.toml
+++ b/rustfmt.toml
@@ -3,5 +3,6 @@ merge_imports = true
 reorder_imports = true
 use_try_shorthand = true
 tab_spaces = 2
-max_width = 100
+max_width = 80
 color = "Never"
+use_small_heuristics = "Max"
diff --git a/src/allocation.rs b/src/allocation.rs
index d07fc71..2c92dce 100644
--- a/src/allocation.rs
+++ b/src/allocation.rs
@@ -26,14 +26,16 @@ pub fn cast_box<A: Pod, B: Pod>(input: Box<A>) -> Box<B> {
 /// alignment.
 /// * The start and end size of the `Box` must have the exact same size.
 #[inline]
-pub fn try_cast_box<A: Pod, B: Pod>(input: Box<A>) -> Result<Box<B>, (PodCastError, Box<A>)> {
+pub fn try_cast_box<A: Pod, B: Pod>(
+  input: Box<A>,
+) -> Result<Box<B>, (PodCastError, Box<A>)> {
   if align_of::<A>() != align_of::<B>() {
     Err((PodCastError::AlignmentMismatch, input))
   } else if size_of::<A>() != size_of::<B>() {
     Err((PodCastError::SizeMismatch, input))
   } else {
     // Note(Lokathor): This is much simpler than with the Vec casting!
-    let ptr: *mut B = Box::into_raw(input).cast::<B>();
+    let ptr: *mut B = Box::into_raw(input) as *mut B;
     Ok(unsafe { Box::from_raw(ptr) })
   }
 }
@@ -53,13 +55,14 @@ pub fn try_zeroed_box<T: Zeroable>() -> Result<Box<T>, ()> {
   if size_of::<T>() == 0 {
     return Ok(Box::new(T::zeroed()));
   }
-  let layout = Layout::from_size_align(size_of::<T>(), align_of::<T>()).unwrap();
+  let layout =
+    Layout::from_size_align(size_of::<T>(), align_of::<T>()).unwrap();
   let ptr = unsafe { alloc_zeroed(layout) };
   if ptr.is_null() {
     // we don't know what the error is because `alloc_zeroed` is a dumb API
     Err(())
   } else {
-    Ok(unsafe { Box::<T>::from_raw(ptr.cast::<T>()) })
+    Ok(unsafe { Box::<T>::from_raw(ptr as *mut T) })
   }
 }
@@ -88,7 +91,9 @@ pub fn cast_vec<A: Pod, B: Pod>(input: Vec<A>) -> Vec<B> {
 /// capacity and length get adjusted during transmutation, but for now it's
 /// absolute.
 #[inline]
-pub fn try_cast_vec<A: Pod, B: Pod>(input: Vec<A>) -> Result<Vec<B>, (PodCastError, Vec<A>)> {
+pub fn try_cast_vec<A: Pod, B: Pod>(
+  input: Vec<A>,
+) -> Result<Vec<B>, (PodCastError, Vec<A>)> {
   if align_of::<A>() != align_of::<B>() {
     Err((PodCastError::AlignmentMismatch, input))
   } else if size_of::<A>() != size_of::<B>() {
@@ -108,7 +113,7 @@ pub fn try_cast_vec<A: Pod, B: Pod>(input: Vec<A>) -> Result<Vec<B>, (PodCastEr
     // "into raw parts" method, which we can switch this too eventually.
     let mut manual_drop_vec = ManuallyDrop::new(input);
     let vec_ptr: *mut A = manual_drop_vec.as_mut_ptr();
-    let ptr: *mut B = vec_ptr.cast::<B>();
+    let ptr: *mut B = vec_ptr as *mut B;
     Ok(unsafe { Vec::from_raw_parts(ptr, length, capacity) })
   }
 }
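For reference, a minimal sketch of the allocation helpers touched above in action. This assumes the diff is against the `bytemuck` crate with its `extern_crate_alloc` feature enabled (the crate is never named in the diff itself), and the concrete types here are illustrative only:

```rust
use bytemuck::{try_cast_vec, try_zeroed_box};

fn main() {
  // Same size and alignment on both sides: the cast succeeds and the
  // original heap allocation is reused via `Vec::from_raw_parts`.
  let floats: Vec<f32> = vec![0.0, 1.5];
  let bits: Vec<u32> = try_cast_vec(floats).unwrap();
  assert_eq!(bits, [0, 0x3FC0_0000]);

  // Any size or alignment difference hands the input back inside the
  // error tuple, so the Vec is neither dropped nor leaked.
  let words: Vec<u32> = vec![1, 2, 3];
  let (_err, words) = try_cast_vec::<u32, u16>(words).unwrap_err();
  assert_eq!(words, [1, 2, 3]);

  // `try_zeroed_box` allocates zeroed memory directly, so the large
  // array never has to exist on the stack first.
  let big: Box<[u64; 4096]> = try_zeroed_box().unwrap();
  assert_eq!(big[4095], 0);
}
```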
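Note that the `.cast::<T>()` to `as` rewrites in this diff are behavioral no-ops: both spellings produce the same pointer, and `pointer::cast` is simply the newer API. The diff does not state its motivation, but the `as` form compiles on older toolchains. A tiny equivalence check (`demo` is a made-up name for this sketch):

```rust
// Both pointer-cast spellings yield the identical pointer value; only
// the surface syntax differs.
fn demo(p: *mut u32) {
  let a: *mut u8 = p.cast::<u8>();
  let b: *mut u8 = p as *mut u8;
  assert_eq!(a, b);
}
```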
diff --git a/src/lib.rs b/src/lib.rs
index a6f7b5b..818af35 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -121,7 +121,9 @@ pub fn pod_align_to<T: Pod, U: Pod>(vals: &[T]) -> (&[T], &[U], &[T]) {
 
 /// As `align_to_mut`, but safe because of the [`Pod`] bound.
 #[inline]
-pub fn pod_align_to_mut<T: Pod, U: Pod>(vals: &mut [T]) -> (&mut [T], &mut [U], &mut [T]) {
+pub fn pod_align_to_mut<T: Pod, U: Pod>(
+  vals: &mut [T],
+) -> (&mut [T], &mut [U], &mut [T]) {
   unsafe { vals.align_to_mut::<U>() }
 }
 
@@ -136,8 +138,8 @@ pub fn try_cast<A: Pod, B: Pod>(a: A) -> Result<B, PodCastError> {
     let mut b = B::zeroed();
     // Note(Lokathor): We copy in terms of `u8` because that allows us to bypass
     // any potential alignment difficulties.
-    let ap = (&a as *const A).cast::<u8>();
-    let bp = (&mut b as *mut B).cast::<u8>();
+    let ap = &a as *const A as *const u8;
+    let bp = &mut b as *mut B as *mut u8;
     unsafe { ap.copy_to_nonoverlapping(bp, size_of::<A>()) };
     Ok(b)
   } else {
@@ -155,10 +157,12 @@ pub fn try_cast<A: Pod, B: Pod>(a: A) -> Result<B, PodCastError> {
 pub fn try_cast_ref<A: Pod, B: Pod>(a: &A) -> Result<&B, PodCastError> {
   // Note(Lokathor): everything with `align_of` and `size_of` will optimize away
   // after monomorphization.
-  if align_of::<B>() > align_of::<A>() && (a as *const A as usize) % align_of::<B>() != 0 {
+  if align_of::<B>() > align_of::<A>()
+    && (a as *const A as usize) % align_of::<B>() != 0
+  {
     Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
   } else if size_of::<A>() == size_of::<B>() {
-    Ok(unsafe { &*(a as *const A).cast::<B>() })
+    Ok(unsafe { &*(a as *const A as *const B) })
   } else {
     Err(PodCastError::SizeMismatch)
   }
@@ -171,10 +175,12 @@ pub fn try_cast_ref<A: Pod, B: Pod>(a: &A) -> Result<&B, PodCastError> {
 pub fn try_cast_mut<A: Pod, B: Pod>(a: &mut A) -> Result<&mut B, PodCastError> {
   // Note(Lokathor): everything with `align_of` and `size_of` will optimize away
   // after monomorphization.
-  if align_of::<B>() > align_of::<A>() && (a as *mut A as usize) % align_of::<B>() != 0 {
+  if align_of::<B>() > align_of::<A>()
+    && (a as *mut A as usize) % align_of::<B>() != 0
+  {
     Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
   } else if size_of::<A>() == size_of::<B>() {
-    Ok(unsafe { &mut *(a as *mut A).cast::<B>() })
+    Ok(unsafe { &mut *(a as *mut A as *mut B) })
   } else {
     Err(PodCastError::SizeMismatch)
   }
@@ -200,15 +206,17 @@ pub fn try_cast_mut<A: Pod, B: Pod>(a: &mut A) -> Result<&mut B, PodCastError> {
 pub fn try_cast_slice<A: Pod, B: Pod>(a: &[A]) -> Result<&[B], PodCastError> {
   // Note(Lokathor): everything with `align_of` and `size_of` will optimize away
   // after monomorphization.
-  if align_of::<B>() > align_of::<A>() && (a.as_ptr() as usize) % align_of::<B>() != 0 {
+  if align_of::<B>() > align_of::<A>()
+    && (a.as_ptr() as usize) % align_of::<B>() != 0
+  {
     Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
   } else if size_of::<A>() == size_of::<B>() {
-    Ok(unsafe { core::slice::from_raw_parts(a.as_ptr().cast::<B>(), a.len()) })
+    Ok(unsafe { core::slice::from_raw_parts(a.as_ptr() as *const B, a.len()) })
   } else if size_of::<A>() == 0 || size_of::<B>() == 0 {
     Err(PodCastError::SizeMismatch)
   } else if core::mem::size_of_val(a) % size_of::<B>() == 0 {
     let new_len = core::mem::size_of_val(a) / size_of::<B>();
-    Ok(unsafe { core::slice::from_raw_parts(a.as_ptr().cast::<B>(), new_len) })
+    Ok(unsafe { core::slice::from_raw_parts(a.as_ptr() as *const B, new_len) })
   } else {
     Err(PodCastError::OutputSliceWouldHaveSlop)
   }
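A short sketch of the three `try_cast_slice` outcomes handled above, again assuming the `bytemuck` crate (the values are illustrative):

```rust
use bytemuck::{try_cast_slice, PodCastError};

fn main() {
  let words: [u32; 2] = [0x0A0B_0C0D, 0x0102_0304];

  // Casting to a smaller element type recomputes the length from the
  // total byte size: 2 * 4 bytes = 8 `u8` elements.
  let bytes: &[u8] = try_cast_slice(&words).unwrap();
  assert_eq!(bytes.len(), 8);

  // Casting back up only works if the total byte size divides evenly;
  // 3 bytes of `u8` cannot become whole `u32` elements.
  let res = try_cast_slice::<u8, u32>(&bytes[..3]);
  assert!(matches!(res, Err(PodCastError::OutputSliceWouldHaveSlop)));

  // An unaligned start position fails the alignment check instead,
  // even though 4 bytes would otherwise be exactly one `u32`.
  let res = try_cast_slice::<u8, u32>(&bytes[1..5]);
  assert!(matches!(
    res,
    Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
  ));
}
```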
@@ -218,18 +226,26 @@ pub fn try_cast_slice<A: Pod, B: Pod>(a: &[A]) -> Result<&[B], PodCastError> {
 ///
 /// As [`try_cast_slice`], but `&mut`.
 #[inline]
-pub fn try_cast_slice_mut<A: Pod, B: Pod>(a: &mut [A]) -> Result<&mut [B], PodCastError> {
+pub fn try_cast_slice_mut<A: Pod, B: Pod>(
+  a: &mut [A],
+) -> Result<&mut [B], PodCastError> {
   // Note(Lokathor): everything with `align_of` and `size_of` will optimize away
   // after monomorphization.
-  if align_of::<B>() > align_of::<A>() && (a.as_mut_ptr() as usize) % align_of::<B>() != 0 {
+  if align_of::<B>() > align_of::<A>()
+    && (a.as_mut_ptr() as usize) % align_of::<B>() != 0
+  {
     Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
   } else if size_of::<A>() == size_of::<B>() {
-    Ok(unsafe { core::slice::from_raw_parts_mut(a.as_mut_ptr().cast::<B>(), a.len()) })
+    Ok(unsafe {
+      core::slice::from_raw_parts_mut(a.as_mut_ptr() as *mut B, a.len())
+    })
   } else if size_of::<A>() == 0 || size_of::<B>() == 0 {
     Err(PodCastError::SizeMismatch)
   } else if core::mem::size_of_val(a) % size_of::<B>() == 0 {
     let new_len = core::mem::size_of_val(a) / size_of::<B>();
-    Ok(unsafe { core::slice::from_raw_parts_mut(a.as_mut_ptr().cast::<B>(), new_len) })
+    Ok(unsafe {
+      core::slice::from_raw_parts_mut(a.as_mut_ptr() as *mut B, new_len)
+    })
   } else {
     Err(PodCastError::OutputSliceWouldHaveSlop)
   }
diff --git a/src/pod.rs b/src/pod.rs
index 36848c5..42c31c0 100644
--- a/src/pod.rs
+++ b/src/pod.rs
@@ -59,8 +59,9 @@ unsafe impl<T: ?Sized + 'static> Pod for PhantomData<T> {}
 unsafe impl<T: Pod> Pod for ManuallyDrop<T> {}
 
 impl_unsafe_marker_for_array!(
-  Pod, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
-  25, 26, 27, 28, 29, 30, 31, 32, 48, 64, 96, 128, 256, 512, 1024, 2048, 4096
+  Pod, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+  20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 48, 64, 96, 128, 256,
+  512, 1024, 2048, 4096
 );
 
 #[cfg(target_arch = "x86")]
diff --git a/src/zeroable.rs b/src/zeroable.rs
index 49948a4..e07e770 100644
--- a/src/zeroable.rs
+++ b/src/zeroable.rs
@@ -61,17 +61,33 @@ unsafe impl<T> Zeroable for MaybeUninit<T> {}
 unsafe impl<A: Zeroable> Zeroable for (A,) {}
 unsafe impl<A: Zeroable, B: Zeroable> Zeroable for (A, B) {}
 unsafe impl<A: Zeroable, B: Zeroable, C: Zeroable> Zeroable for (A, B, C) {}
-unsafe impl<A: Zeroable, B: Zeroable, C: Zeroable, D: Zeroable> Zeroable for (A, B, C, D) {}
-unsafe impl<A: Zeroable, B: Zeroable, C: Zeroable, D: Zeroable, E: Zeroable> Zeroable
-  for (A, B, C, D, E)
+unsafe impl<A: Zeroable, B: Zeroable, C: Zeroable, D: Zeroable> Zeroable
+  for (A, B, C, D)
 {
 }
-unsafe impl<A: Zeroable, B: Zeroable, C: Zeroable, D: Zeroable, E: Zeroable, F: Zeroable> Zeroable
-  for (A, B, C, D, E, F)
+unsafe impl<A: Zeroable, B: Zeroable, C: Zeroable, D: Zeroable, E: Zeroable>
+  Zeroable for (A, B, C, D, E)
 {
 }
-unsafe impl<A: Zeroable, B: Zeroable, C: Zeroable, D: Zeroable, E: Zeroable, F: Zeroable, G: Zeroable>
-  Zeroable for (A, B, C, D, E, F, G)
+unsafe impl<
+    A: Zeroable,
+    B: Zeroable,
+    C: Zeroable,
+    D: Zeroable,
+    E: Zeroable,
+    F: Zeroable,
+  > Zeroable for (A, B, C, D, E, F)
+{
+}
+unsafe impl<
+    A: Zeroable,
+    B: Zeroable,
+    C: Zeroable,
+    D: Zeroable,
+    E: Zeroable,
+    F: Zeroable,
+    G: Zeroable,
+  > Zeroable for (A, B, C, D, E, F, G)
 {
 }
 unsafe impl<
@@ -88,8 +104,9 @@ unsafe impl<
 }
 
 impl_unsafe_marker_for_array!(
-  Zeroable, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
-  24, 25, 26, 27, 28, 29, 30, 31, 32, 48, 64, 96, 128, 256, 512, 1024, 2048, 4096
+  Zeroable, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+  19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 48, 64, 96, 128, 256,
+  512, 1024, 2048, 4096
 );
 
 #[cfg(target_arch = "x86")]
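The `pod.rs` and `zeroable.rs` hunks above are pure re-wrapping for the new 80-column limit. A small sketch of what those marker impls provide, with a hypothetical `Rgba` type and again assuming the `bytemuck` crate:

```rust
use bytemuck::Zeroable;

// A hypothetical user type opting in to the marker trait. The `unsafe`
// is a promise that the all-zero bit pattern is a valid `Rgba`.
#[derive(Clone, Copy)]
struct Rgba {
  r: u8,
  g: u8,
  b: u8,
  a: u8,
}
unsafe impl Zeroable for Rgba {}

fn main() {
  // The tuple impls rewrapped above make heterogeneous tuples of
  // `Zeroable` types themselves `Zeroable`...
  let (x, y, px): (u32, f32, Rgba) = Zeroable::zeroed();
  assert_eq!(x, 0);
  assert_eq!(y, 0.0);
  assert_eq!(px.a, 0);

  // ...and the array macro covers every listed length, 4096 included.
  let buf: [u64; 4096] = Zeroable::zeroed();
  assert_eq!(buf[4095], 0);
}
```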