Merge branch 'main' of https://github.com/Lokathor/bytemuck into main

Lokathor 2023-09-05 13:26:13 -06:00
commit 763d69eb15
10 changed files with 414 additions and 27 deletions

View File

@ -46,8 +46,8 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
# note: the mips targets are here so that we have big-endian coverage (both 32bit and 64bit)
target: [i686-unknown-linux-gnu, mips-unknown-linux-gnu, mips64-unknown-linux-gnuabi64]
# We once had MIPS runners here for big-endian coverage (32-bit and 64-bit), but those targets got demoted to tier 3.
target: [i686-unknown-linux-gnu]
steps:
- uses: hecrj/setup-rust-action@v1
with:

View File

@ -1,7 +1,7 @@
[package]
name = "bytemuck"
description = "A crate for mucking around with piles of bytes."
version = "1.13.1"
version = "1.13.2"
authors = ["Lokathor <zefria@gmail.com>"]
repository = "https://github.com/Lokathor/bytemuck"
readme = "README.md"
@ -20,6 +20,7 @@ zeroable_maybe_uninit = []
zeroable_atomics = []
min_const_generics = []
wasm_simd = [] # Until >= 1.54.0 is MSRV this is an off-by-default feature.
must_cast = [] # Until >= 1.57.0 is MSRV this is an off-by-default feature.
aarch64_simd = [] # Until >= 1.59.0 is MSRV this is an off-by-default feature.
# Do not use if you can avoid it, because this is unsound.
@ -42,6 +43,7 @@ features = [
"zeroable_atomics",
"min_const_generics",
"wasm_simd",
"must_cast",
]
[package.metadata.playground]
@ -54,4 +56,5 @@ features = [
"zeroable_atomics",
"min_const_generics",
"wasm_simd",
"must_cast",
]

View File

@ -18,7 +18,7 @@ proc-macro = true
# syn seems to have broken backwards compatibility in this version https://github.com/dtolnay/syn/issues/1194
syn = "2.0.1"
quote = "1"
proc-macro2 = "1"
proc-macro2 = "1.0.60"
[dev-dependencies]
bytemuck = { path = "../", features = ["derive"] }

View File

@ -77,7 +77,6 @@ pub fn derive_anybitpattern(
///
/// ```rust
/// # use bytemuck_derive::{Zeroable};
///
/// #[derive(Copy, Clone, Zeroable)]
/// #[repr(C)]
/// struct Test {
@ -85,7 +84,65 @@ pub fn derive_anybitpattern(
/// b: u16,
/// }
/// ```
#[proc_macro_derive(Zeroable)]
///
/// # Custom bounds
///
/// Custom bounds for the derived `Zeroable` impl can be given using the
/// `#[zeroable(bound = "")]` helper attribute.
///
/// Using this attribute additionally opts in to "perfect derive" semantics,
/// where instead of adding bounds for each generic type parameter, bounds are
/// added for each field's type.
///
/// ## Examples
///
/// ```rust
/// # use bytemuck::Zeroable;
/// # use std::marker::PhantomData;
/// #[derive(Clone, Zeroable)]
/// #[zeroable(bound = "")]
/// struct AlwaysZeroable<T> {
/// a: PhantomData<T>,
/// }
///
/// AlwaysZeroable::<std::num::NonZeroU8>::zeroed();
/// ```
///
/// ```rust,compile_fail
/// # use bytemuck::Zeroable;
/// # use std::marker::PhantomData;
/// #[derive(Clone, Zeroable)]
/// #[zeroable(bound = "T: Copy")]
/// struct ZeroableWhenTIsCopy<T> {
/// a: PhantomData<T>,
/// }
///
/// ZeroableWhenTIsCopy::<String>::zeroed();
/// ```
///
/// The restriction that all fields must implement `Zeroable` still applies,
/// and it is enforced using the aforementioned "perfect derive" semantics.
///
/// ```rust
/// # use bytemuck::Zeroable;
/// #[derive(Clone, Zeroable)]
/// #[zeroable(bound = "")]
/// struct ZeroableWhenTIsZeroable<T> {
/// a: T,
/// }
/// ZeroableWhenTIsZeroable::<u32>::zeroed();
/// ```
///
/// ```rust,compile_fail
/// # use bytemuck::Zeroable;
/// # #[derive(Clone, Zeroable)]
/// # #[zeroable(bound = "")]
/// # struct ZeroableWhenTIsZeroable<T> {
/// # a: T,
/// # }
/// ZeroableWhenTIsZeroable::<String>::zeroed();
/// ```
#[proc_macro_derive(Zeroable, attributes(zeroable))]
pub fn derive_zeroable(
input: proc_macro::TokenStream,
) -> proc_macro::TokenStream {
@ -317,12 +374,127 @@ fn derive_marker_trait<Trait: Derivable>(input: DeriveInput) -> TokenStream {
.unwrap_or_else(|err| err.into_compile_error())
}
/// Find `#[name(key = "value")]` helper attributes on the struct, and return
/// their `"value"`s parsed with `parser`.
///
/// Returns an error if any attributes with the given `name` do not match the
/// expected format. Returns `Ok([])` if no attributes with `name` are found.
fn find_and_parse_helper_attributes<P: syn::parse::Parser + Copy>(
attributes: &[syn::Attribute], name: &str, key: &str, parser: P,
example_value: &str, invalid_value_msg: &str,
) -> Result<Vec<P::Output>> {
let invalid_format_msg =
format!("{name} attribute must be `{name}({key} = \"{example_value}\")`",);
let values_to_check = attributes.iter().filter_map(|attr| match &attr.meta {
// If a `Path` matches our `name`, return an error, else ignore it.
// e.g. `#[zeroable]`
syn::Meta::Path(path) => path
.is_ident(name)
.then(|| Err(syn::Error::new_spanned(path, &invalid_format_msg))),
// If a `NameValue` matches our `name`, return an error, else ignore it.
// e.g. `#[zeroable = "hello"]`
syn::Meta::NameValue(namevalue) => {
namevalue.path.is_ident(name).then(|| {
Err(syn::Error::new_spanned(&namevalue.path, &invalid_format_msg))
})
}
// If a `List` matches our `name`, match its contents to our format, else
// ignore it. If its contents match our format, return the value, else
// return an error.
syn::Meta::List(list) => list.path.is_ident(name).then(|| {
let namevalue: syn::MetaNameValue = syn::parse2(list.tokens.clone())
.map_err(|_| {
syn::Error::new_spanned(&list.tokens, &invalid_format_msg)
})?;
if namevalue.path.is_ident(key) {
match namevalue.value {
syn::Expr::Lit(syn::ExprLit {
lit: syn::Lit::Str(strlit), ..
}) => Ok(strlit),
_ => {
Err(syn::Error::new_spanned(&namevalue.path, &invalid_format_msg))
}
}
} else {
Err(syn::Error::new_spanned(&namevalue.path, &invalid_format_msg))
}
}),
});
// Parse each value found with the given parser, and return them if no errors
// occur.
values_to_check
.map(|lit| {
let lit = lit?;
lit.parse_with(parser).map_err(|err| {
syn::Error::new_spanned(&lit, format!("{invalid_value_msg}: {err}"))
})
})
.collect()
}
fn derive_marker_trait_inner<Trait: Derivable>(
mut input: DeriveInput,
) -> Result<TokenStream> {
// Enforce Pod on all generic fields.
let trait_ = Trait::ident(&input)?;
add_trait_marker(&mut input.generics, &trait_);
// If this trait allows explicit bounds, and any explicit bounds were given,
// then use those explicit bounds. Else, apply the default bounds (bound
// each generic type on this trait).
if let Some(name) = Trait::explicit_bounds_attribute_name() {
// See if any explicit bounds were given in attributes.
let explicit_bounds = find_and_parse_helper_attributes(
&input.attrs,
name,
"bound",
<syn::punctuated::Punctuated<syn::WherePredicate, syn::Token![,]>>::parse_terminated,
"Type: Trait",
"invalid where predicate",
)?;
if !explicit_bounds.is_empty() {
// Explicit bounds were given.
// Enforce explicitly given bounds, and emit "perfect derive" (i.e. add
// bounds for each field's type).
let explicit_bounds = explicit_bounds
.into_iter()
.flatten()
.collect::<Vec<syn::WherePredicate>>();
let predicates = &mut input.generics.make_where_clause().predicates;
predicates.extend(explicit_bounds);
let fields = match &input.data {
syn::Data::Struct(syn::DataStruct { fields, .. }) => fields.clone(),
syn::Data::Union(_) => {
return Err(syn::Error::new_spanned(
trait_,
&"perfect derive is not supported for unions",
));
}
syn::Data::Enum(_) => {
return Err(syn::Error::new_spanned(
trait_,
&"perfect derive is not supported for enums",
));
}
};
for field in fields {
let ty = field.ty;
predicates.push(syn::parse_quote!(
#ty: #trait_
));
}
} else {
// No explicit bounds were given.
// Enforce trait bound on all type generics.
add_trait_marker(&mut input.generics, &trait_);
}
} else {
// This trait does not allow explicit bounds.
// Enforce trait bound on all type generics.
add_trait_marker(&mut input.generics, &trait_);
}
let name = &input.ident;
@ -339,11 +511,8 @@ fn derive_marker_trait_inner<Trait: Derivable>(
quote!()
};
let where_clause = if Trait::requires_where_clause() {
where_clause
} else {
None
};
let where_clause =
if Trait::requires_where_clause() { where_clause } else { None };
Ok(quote! {
#asserts
@ -364,9 +533,12 @@ fn add_trait_marker(generics: &mut syn::Generics, trait_name: &syn::Path) {
let type_params = generics
.type_params()
.map(|param| &param.ident)
.map(|param| syn::parse_quote!(
#param: #trait_name
)).collect::<Vec<syn::WherePredicate>>();
.map(|param| {
syn::parse_quote!(
#param: #trait_name
)
})
.collect::<Vec<syn::WherePredicate>>();
generics.make_where_clause().predicates.extend(type_params);
}
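
To make the "perfect derive" semantics concrete, here is a hedged sketch (not the macro's literal output) of roughly what `#[derive(Zeroable)]` with `#[zeroable(bound = "")]` expands to for the `AlwaysZeroable` example from the docs above: the bound lands on each field's type instead of on `T` itself.

```rust
use core::marker::PhantomData;
use bytemuck::Zeroable;

struct AlwaysZeroable<T> {
  a: PhantomData<T>,
}

// Sketch of the expansion under "perfect derive" semantics: no `T: Zeroable`
// bound, only a bound on each field's type.
unsafe impl<T> Zeroable for AlwaysZeroable<T> where PhantomData<T>: Zeroable {}

fn main() {
  // `String` is not `Zeroable`, but `PhantomData<String>` is, so this compiles.
  let _: AlwaysZeroable<String> = AlwaysZeroable::zeroed();
}
```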

View File

@ -35,6 +35,9 @@ pub trait Derivable {
fn requires_where_clause() -> bool {
true
}
fn explicit_bounds_attribute_name() -> Option<&'static str> {
None
}
}
pub struct Pod;
@ -126,6 +129,10 @@ impl Derivable for Zeroable {
Data::Enum(_) => bail!("Deriving Zeroable is not supported for enums"),
}
}
fn explicit_bounds_attribute_name() -> Option<&'static str> {
Some("zeroable")
}
}
pub struct NoUninit;
@ -532,12 +539,13 @@ fn generate_assert_no_padding(input: &DeriveInput) -> Result<TokenStream> {
let size_rest =
quote_spanned!(span => #( + ::core::mem::size_of::<#field_types>() )*);
quote_spanned!(span => #size_first#size_rest)
quote_spanned!(span => #size_first #size_rest)
} else {
quote_spanned!(span => 0)
};
Ok(quote_spanned! {span => const _: fn() = || {
#[doc(hidden)]
struct TypeWithoutPadding([u8; #size_sum]);
let _ = ::core::mem::transmute::<#struct_type, TypeWithoutPadding>;
};})
@ -554,6 +562,7 @@ fn generate_fields_are_trait(
let field_types = get_field_types(&fields);
Ok(quote_spanned! {span => #(const _: fn() = || {
#[allow(clippy::missing_const_for_fn)]
#[doc(hidden)]
fn check #impl_generics () #where_clause {
fn assert_impl<T: #trait_>() {}
assert_impl::<#field_types>();

View File

@ -685,4 +685,5 @@ pub trait TransparentWrapperAlloc<Inner: ?Sized>:
}
}
}
impl<I: ?Sized, T: TransparentWrapper<I>> TransparentWrapperAlloc<I> for T {}
impl<I: ?Sized, T: ?Sized + TransparentWrapper<I>> TransparentWrapperAlloc<I> for T {}
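
A quick illustration of what the `T: ?Sized` relaxation enables: unsized `#[repr(transparent)]` wrappers can now use the alloc helpers. A minimal sketch, assuming the `extern_crate_alloc` feature is enabled (`CaseInsensitive` is a hypothetical wrapper, not part of the crate):

```rust
use bytemuck::{TransparentWrapper, TransparentWrapperAlloc};

// An unsized transparent wrapper around `str`.
#[repr(transparent)]
struct CaseInsensitive(str);

unsafe impl TransparentWrapper<str> for CaseInsensitive {}

fn main() {
  let s: Box<str> = "hello".into();
  // With `T: ?Sized`, the blanket impl now covers `CaseInsensitive`,
  // so a `Box<str>` can be rewrapped without reallocating.
  let wrapped: Box<CaseInsensitive> = CaseInsensitive::wrap_box(s);
  let unwrapped: Box<str> = CaseInsensitive::peel_box(wrapped);
  assert_eq!(&*unwrapped, "hello");
}
```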

View File

@ -170,6 +170,7 @@ unsafe impl CheckedBitPattern for bool {
}
}
// Rust 1.70.0 documents that NonZero[int] has the same layout as [int].
macro_rules! impl_checked_for_nonzero {
($($nonzero:ty: $primitive:ty),* $(,)?) => {
$(
@ -178,14 +179,7 @@ macro_rules! impl_checked_for_nonzero {
#[inline]
fn is_valid_bit_pattern(bits: &Self::Bits) -> bool {
// Note(zachs18): The size and alignment check are almost certainly
// not necessary, but Rust currently doesn't explicitly document that
// NonZero[int] has the same layout as [int], so we check it to be safe.
// In a const to reduce debug-profile overhead.
const LAYOUT_SAME: bool =
core::mem::size_of::<$nonzero>() == core::mem::size_of::<$primitive>()
&& core::mem::align_of::<$nonzero>() == core::mem::align_of::<$primitive>();
LAYOUT_SAME && *bits != 0
*bits != 0
}
}
)*
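
For context, a small hedged example of the behavior this check backs, using the crate's public `checked::try_cast`: the zero bit pattern is rejected, any nonzero byte passes.

```rust
use core::num::NonZeroU8;

fn main() {
  // A zero byte is not a valid `NonZeroU8` bit pattern, so the cast is rejected.
  assert!(bytemuck::checked::try_cast::<u8, NonZeroU8>(0).is_err());
  // Any nonzero byte passes `is_valid_bit_pattern`.
  assert_eq!(
    bytemuck::checked::try_cast::<u8, NonZeroU8>(5).unwrap(),
    NonZeroU8::new(5).unwrap(),
  );
}
```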

View File

@ -113,6 +113,11 @@ pub use pod::*;
mod pod_in_option;
pub use pod_in_option::*;
#[cfg(feature = "must_cast")]
mod must;
#[cfg(feature = "must_cast")]
pub use must::*;
mod no_uninit;
pub use no_uninit::*;

src/must.rs (new file, 203 lines)
View File

@ -0,0 +1,203 @@
#![allow(clippy::module_name_repetitions)]
#![allow(clippy::let_unit_value)]
#![allow(clippy::let_underscore_untyped)]
#![allow(clippy::ptr_as_ptr)]
use crate::{AnyBitPattern, NoUninit};
use core::mem::{align_of, size_of};
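// Referencing these associated consts (via `let _ = ...` in the functions
// below) forces the `assert!`s to run during const evaluation, so a size or
// alignment mismatch becomes a post-monomorphization compile error.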
struct Cast<A, B>((A, B));
impl<A, B> Cast<A, B> {
const ASSERT_ALIGN_GREATER_THAN_EQUAL: () =
assert!(align_of::<A>() >= align_of::<B>());
const ASSERT_SIZE_EQUAL: () = assert!(size_of::<A>() == size_of::<B>());
const ASSERT_SIZE_MULTIPLE_OF: () = assert!(
(size_of::<A>() == 0) == (size_of::<B>() == 0)
&& (size_of::<A>() % size_of::<B>() == 0)
);
}
// Workaround for https://github.com/rust-lang/miri/issues/2423.
// Miri currently doesn't see post-monomorphization errors until runtime,
// so `compile_fail` tests relying on post-monomorphization errors don't
// actually fail. Instead use `should_panic` under miri as a workaround.
#[cfg(miri)]
macro_rules! post_mono_compile_fail_doctest {
() => {
"```should_panic"
};
}
#[cfg(not(miri))]
macro_rules! post_mono_compile_fail_doctest {
() => {
"```compile_fail,E0080"
};
}
/// Cast `A` into `B` if infallible, or fail to compile.
///
/// Note that for this particular type of cast, alignment isn't a factor: the
/// input value is semantically copied into the function and then returned at a
/// new memory location, which will have whatever alignment the output type
/// requires.
///
/// ## Failure
///
/// * If the types don't have the same size this fails to compile.
///
/// ## Examples
/// ```
/// // compiles:
/// let bytes: [u8; 2] = bytemuck::must_cast(12_u16);
/// ```
#[doc = post_mono_compile_fail_doctest!()]
/// // fails to compile (size mismatch):
/// let bytes: [u8; 3] = bytemuck::must_cast(12_u16);
/// ```
#[inline]
pub fn must_cast<A: NoUninit, B: AnyBitPattern>(a: A) -> B {
let _ = Cast::<A, B>::ASSERT_SIZE_EQUAL;
unsafe { transmute!(a) }
}
/// Convert `&A` into `&B` if infallible, or fail to compile.
///
/// ## Failure
///
/// * If the target type has a greater alignment requirement.
/// * If the source type and target type aren't the same size.
///
/// ## Examples
/// ```
/// // compiles:
/// let bytes: &[u8; 2] = bytemuck::must_cast_ref(&12_u16);
/// ```
#[doc = post_mono_compile_fail_doctest!()]
/// // fails to compile (size mismatch):
/// let bytes: &[u8; 3] = bytemuck::must_cast_ref(&12_u16);
/// ```
#[doc = post_mono_compile_fail_doctest!()]
/// // fails to compile (alignment requirements increased):
/// let bytes: &u16 = bytemuck::must_cast_ref(&[1u8, 2u8]);
/// ```
#[inline]
pub fn must_cast_ref<A: NoUninit, B: AnyBitPattern>(a: &A) -> &B {
let _ = Cast::<A, B>::ASSERT_SIZE_EQUAL;
let _ = Cast::<A, B>::ASSERT_ALIGN_GREATER_THAN_EQUAL;
unsafe { &*(a as *const A as *const B) }
}
/// Convert a `&mut A` into `&mut B` if infallible, or fail to compile.
///
/// As [`must_cast_ref`], but `mut`.
///
/// ## Examples
/// ```
/// let mut i = 12_u16;
/// // compiles:
/// let bytes: &mut [u8; 2] = bytemuck::must_cast_mut(&mut i);
/// ```
#[doc = post_mono_compile_fail_doctest!()]
/// # let mut bytes: &mut [u8; 2] = &mut [1, 2];
/// // fails to compile (alignment requirements increased):
/// let i: &mut u16 = bytemuck::must_cast_mut(bytes);
/// ```
#[doc = post_mono_compile_fail_doctest!()]
/// # let mut i = 12_u16;
/// // fails to compile (size mismatch):
/// let bytes: &mut [u8; 3] = bytemuck::must_cast_mut(&mut i);
/// ```
#[inline]
pub fn must_cast_mut<
A: NoUninit + AnyBitPattern,
B: NoUninit + AnyBitPattern,
>(
a: &mut A,
) -> &mut B {
let _ = Cast::<A, B>::ASSERT_SIZE_EQUAL;
let _ = Cast::<A, B>::ASSERT_ALIGN_GREATER_THAN_EQUAL;
unsafe { &mut *(a as *mut A as *mut B) }
}
/// Convert `&[A]` into `&[B]` (possibly with a change in length) if
/// infallible, or fail to compile.
///
/// * `input.as_ptr() as usize == output.as_ptr() as usize`
/// * `input.len() * size_of::<A>() == output.len() * size_of::<B>()`
///
/// ## Failure
///
/// * If the target type has a greater alignment requirement.
/// * If the target element type doesn't evenly fit into the current element
///   type (e.g. 3 `u16` values are 1.5 `u32` values, so that's a failure).
/// * Similarly, you can't convert between a [ZST](https://doc.rust-lang.org/nomicon/exotic-sizes.html#zero-sized-types-zsts)
/// and a non-ZST.
///
/// ## Examples
/// ```
/// let indices: &[u16] = &[1, 2, 3];
/// // compiles:
/// let bytes: &[u8] = bytemuck::must_cast_slice(indices);
/// ```
#[doc = post_mono_compile_fail_doctest!()]
/// # let bytes: &[u8] = &[1, 0, 2, 0, 3, 0];
/// // fails to compile (bytes.len() might not be a multiple of 2):
/// let byte_pairs: &[[u8; 2]] = bytemuck::must_cast_slice(bytes);
/// ```
#[doc = post_mono_compile_fail_doctest!()]
/// # let byte_pairs: &[[u8; 2]] = &[[1, 0], [2, 0], [3, 0]];
/// // fails to compile (alignment requirements increased):
/// let indices: &[u16] = bytemuck::must_cast_slice(byte_pairs);
/// ```
#[inline]
pub fn must_cast_slice<A: NoUninit, B: AnyBitPattern>(a: &[A]) -> &[B] {
let _ = Cast::<A, B>::ASSERT_SIZE_MULTIPLE_OF;
let _ = Cast::<A, B>::ASSERT_ALIGN_GREATER_THAN_EQUAL;
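// The asserts above guarantee that when the element sizes differ,
// `size_of::<A>()` is an exact nonzero multiple of `size_of::<B>()`,
// so this division is exact.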
let new_len = if size_of::<A>() == size_of::<B>() {
a.len()
} else {
a.len() * (size_of::<A>() / size_of::<B>())
};
unsafe { core::slice::from_raw_parts(a.as_ptr() as *const B, new_len) }
}
/// Convert `&mut [A]` into `&mut [B]` (possibly with a change in length) if
/// infallible, or fail to compile.
///
/// As [`must_cast_slice`], but `&mut`.
///
/// ## Examples
/// ```
/// let mut indices = [1, 2, 3];
/// let indices: &mut [u16] = &mut indices;
/// // compiles:
/// let bytes: &mut [u8] = bytemuck::must_cast_slice_mut(indices);
/// ```
#[doc = post_mono_compile_fail_doctest!()]
/// # let mut bytes = [1, 0, 2, 0, 3, 0];
/// # let bytes: &mut [u8] = &mut bytes[..];
/// // fails to compile (bytes.len() might not be a multiple of 2):
/// let byte_pairs: &mut [[u8; 2]] = bytemuck::must_cast_slice_mut(bytes);
/// ```
#[doc = post_mono_compile_fail_doctest!()]
/// # let mut byte_pairs = [[1, 0], [2, 0], [3, 0]];
/// # let byte_pairs: &mut [[u8; 2]] = &mut byte_pairs[..];
/// // fails to compile (alignment requirements increased):
/// let indices: &mut [u16] = bytemuck::must_cast_slice_mut(byte_pairs);
/// ```
#[inline]
pub fn must_cast_slice_mut<
A: NoUninit + AnyBitPattern,
B: NoUninit + AnyBitPattern,
>(
a: &mut [A],
) -> &mut [B] {
let _ = Cast::<A, B>::ASSERT_SIZE_MULTIPLE_OF;
let _ = Cast::<A, B>::ASSERT_ALIGN_GREATER_THAN_EQUAL;
let new_len = if size_of::<A>() == size_of::<B>() {
a.len()
} else {
a.len() * (size_of::<A>() / size_of::<B>())
};
unsafe { core::slice::from_raw_parts_mut(a.as_mut_ptr() as *mut B, new_len) }
}

View File

@ -60,7 +60,7 @@ unsafe impl<T: 'static> Pod for *const T {}
#[cfg(feature = "unsound_ptr_pod_impl")]
unsafe impl<T: 'static> PodInOption for NonNull<T> {}
unsafe impl<T: Pod> Pod for PhantomData<T> {}
unsafe impl<T: ?Sized + 'static> Pod for PhantomData<T> {}
unsafe impl Pod for PhantomPinned {}
unsafe impl<T: Pod> Pod for ManuallyDrop<T> {}
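
Relatedly, a hedged sketch of what the relaxed `PhantomData` bound (`T: ?Sized + 'static` instead of `T: Pod`) permits:

```rust
use core::marker::PhantomData;

fn main() {
  // `PhantomData<str>` is zero-sized, and with the relaxed bound it is `Pod`,
  // so the byte-view helpers accept it (yielding an empty slice).
  let pd: PhantomData<str> = PhantomData;
  assert!(bytemuck::bytes_of(&pd).is_empty());
}
```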