diff --git a/Cargo.lock b/Cargo.lock
index c987bf44ec0..d8612b3a256 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3202,6 +3202,20 @@ dependencies = [
"winapi",
]
+[[package]]
+name = "rustc_abi"
+version = "0.0.0"
+dependencies = [
+ "bitflags",
+ "rand 0.8.5",
+ "rand_xoshiro",
+ "rustc_data_structures",
+ "rustc_index",
+ "rustc_macros",
+ "rustc_serialize",
+ "tracing",
+]
+
[[package]]
name = "rustc_apfloat"
version = "0.0.0"
@@ -4281,6 +4295,7 @@ name = "rustc_target"
version = "0.0.0"
dependencies = [
"bitflags",
+ "rustc_abi",
"rustc_data_structures",
"rustc_feature",
"rustc_index",
@@ -4336,6 +4351,7 @@ dependencies = [
"rustc_infer",
"rustc_middle",
"rustc_span",
+ "rustc_target",
"rustc_trait_selection",
"smallvec",
"tracing",
@@ -4360,8 +4376,6 @@ dependencies = [
name = "rustc_ty_utils"
version = "0.0.0"
dependencies = [
- "rand 0.8.5",
- "rand_xoshiro",
"rustc_data_structures",
"rustc_errors",
"rustc_hir",
diff --git a/compiler/rustc_abi/Cargo.toml b/compiler/rustc_abi/Cargo.toml
new file mode 100644
index 00000000000..48b199cb8ee
--- /dev/null
+++ b/compiler/rustc_abi/Cargo.toml
@@ -0,0 +1,24 @@
+[package]
+name = "rustc_abi"
+version = "0.0.0"
+edition = "2021"
+
+[dependencies]
+bitflags = "1.2.1"
+tracing = "0.1"
+rand = { version = "0.8.4", default-features = false, optional = true }
+rand_xoshiro = { version = "0.6.0", optional = true }
+rustc_data_structures = { path = "../rustc_data_structures", optional = true }
+rustc_index = { path = "../rustc_index", default-features = false }
+rustc_macros = { path = "../rustc_macros", optional = true }
+rustc_serialize = { path = "../rustc_serialize", optional = true }
+
+[features]
+default = ["nightly", "randomize"]
+randomize = ["rand", "rand_xoshiro"]
+nightly = [
+ "rustc_data_structures",
+ "rustc_index/nightly",
+ "rustc_macros",
+ "rustc_serialize",
+]
diff --git a/compiler/rustc_abi/src/layout.rs b/compiler/rustc_abi/src/layout.rs
new file mode 100644
index 00000000000..39ea7a85be6
--- /dev/null
+++ b/compiler/rustc_abi/src/layout.rs
@@ -0,0 +1,947 @@
+use super::*;
+use std::{
+ borrow::Borrow,
+ cmp,
+ fmt::Debug,
+ iter,
+ ops::{Bound, Deref},
+};
+
+#[cfg(feature = "randomize")]
+use rand::{seq::SliceRandom, SeedableRng};
+#[cfg(feature = "randomize")]
+use rand_xoshiro::Xoshiro128StarStar;
+
+use tracing::debug;
+
+// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
+// This is used to go between `memory_index` (source field order to memory order)
+// and `inverse_memory_index` (memory order to source field order).
+// See also `FieldsShape::Arbitrary::memory_index` for more details.
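+// For example, `invert_mapping(&[2, 0, 1])` returns `vec![1, 2, 0]`.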
+// FIXME(eddyb) build a better abstraction for permutations, if possible.
+fn invert_mapping(map: &[u32]) -> Vec<u32> {
+ let mut inverse = vec![0; map.len()];
+ for i in 0..map.len() {
+ inverse[map[i] as usize] = i as u32;
+ }
+ inverse
+}
+
+pub trait LayoutCalculator {
+ type TargetDataLayoutRef: Borrow<TargetDataLayout>;
+
+ fn delay_bug(&self, txt: &str);
+ fn current_data_layout(&self) -> Self::TargetDataLayoutRef;
+
+ fn scalar_pair<V: Idx>(&self, a: Scalar, b: Scalar) -> LayoutS<V> {
+ let dl = self.current_data_layout();
+ let dl = dl.borrow();
+ let b_align = b.align(dl);
+ let align = a.align(dl).max(b_align).max(dl.aggregate_align);
+ let b_offset = a.size(dl).align_to(b_align.abi);
+ let size = (b_offset + b.size(dl)).align_to(align.abi);
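+ // e.g. for a = u8 and b = u32 on a typical 64-bit target: b_offset = 4, ABI align = 4, size = 8.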
+
+ // HACK(nox): We iter on `b` and then `a` because `max_by_key`
+ // returns the last maximum.
+ let largest_niche = Niche::from_scalar(dl, b_offset, b)
+ .into_iter()
+ .chain(Niche::from_scalar(dl, Size::ZERO, a))
+ .max_by_key(|niche| niche.available(dl));
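+ // On a tie, `max_by_key` keeps the last maximum, i.e. the niche of `a` at offset 0.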
+
+ LayoutS {
+ variants: Variants::Single { index: V::new(0) },
+ fields: FieldsShape::Arbitrary {
+ offsets: vec![Size::ZERO, b_offset],
+ memory_index: vec![0, 1],
+ },
+ abi: Abi::ScalarPair(a, b),
+ largest_niche,
+ align,
+ size,
+ }
+ }
+
+ fn univariant<'a, V: Idx, F: Deref<Target = &'a LayoutS<V>> + Debug>(
+ &self,
+ dl: &TargetDataLayout,
+ fields: &[F],
+ repr: &ReprOptions,
+ kind: StructKind,
+ ) -> Option<LayoutS<V>> {
+ let pack = repr.pack;
+ let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
+ let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
+ let optimize = !repr.inhibit_struct_field_reordering_opt();
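+ // Reordering is disabled for e.g. #[repr(C)] types, whose fields must keep their source order.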
+ if optimize {
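+ // A possibly-unsized (StructKind::MaybeUnsized) last field must stay last, so it is excluded from reordering.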
+ let end =
+ if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
+ let optimizing = &mut inverse_memory_index[..end];
+ let effective_field_align = |f: &F| {
+ if let Some(pack) = pack {
+ // return the packed alignment in bytes
+ f.align.abi.min(pack).bytes()
+ } else {
+ // returns log2(effective-align).
+ // This is ok since `pack` applies to all fields equally.
+ // The calculation assumes that size is an integer multiple of align, except for ZSTs.
+ //
+ // group [u8; 4] with align-4 or [u8; 6] with align-2 fields
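+ // e.g. [u8; 6]: max(align 1, size 6) = 6 has one trailing zero, so it joins the align-2 group.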
+ f.align.abi.bytes().max(f.size.bytes()).trailing_zeros() as u64
+ }
+ };
+
+ // If `-Z randomize-layout` was enabled for the type definition we can shuffle
+ // the field ordering to try and catch some code making assumptions about layouts
+ // we don't guarantee
+ if repr.can_randomize_type_layout() && cfg!(feature = "randomize") {
+ #[cfg(feature = "randomize")]
+ {
+ // `ReprOptions.layout_seed` is a deterministic seed that we can use to
+ // randomize field ordering with
+ let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);
+
+ // Shuffle the ordering of the fields
+ optimizing.shuffle(&mut rng);
+ }
+ // Otherwise we just leave things alone and actually optimize the type's fields
+ } else {
+ match kind {
+ StructKind::AlwaysSized | StructKind::MaybeUnsized => {
+ optimizing.sort_by_key(|&x| {
+ // Place ZSTs first to avoid "interesting offsets",
+ // especially with only one or two non-ZST fields.
+ // Then place largest alignments first, largest niches within an alignment group last
+ let f = &fields[x as usize];
+ let niche_size = f.largest_niche.map_or(0, |n| n.available(dl));
+ (!f.is_zst(), cmp::Reverse(effective_field_align(f)), niche_size)
+ });
+ }
+
+ StructKind::Prefixed(..) => {
+ // Sort in ascending alignment so that the layout stays optimal
+ // regardless of the prefix.
+ // And put the largest niche in an alignment group at the end
+ // so it can be used as discriminant in jagged enums
+ optimizing.sort_by_key(|&x| {
+ let f = &fields[x as usize];
+ let niche_size = f.largest_niche.map_or(0, |n| n.available(dl));
+ (effective_field_align(f), niche_size)
+ });
+ }
+ }
+
+ // FIXME(Kixiron): We can always shuffle fields within a given alignment class
+ // regardless of the status of `-Z randomize-layout`
+ }
+ }
+ // inverse_memory_index holds field indices by increasing memory offset.
+ // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
+ // We now write field offsets to the corresponding offset slot;
+ // field 5 with offset 0 puts 0 in offsets[5].
+ // At the bottom of this function, we invert `inverse_memory_index` to
+ // produce `memory_index` (see `invert_mapping`).
+ let mut sized = true;
+ let mut offsets = vec![Size::ZERO; fields.len()];
+ let mut offset = Size::ZERO;
+ let mut largest_niche = None;
+ let mut largest_niche_available = 0;
+ if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
+ let prefix_align =
+ if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
+ align = align.max(AbiAndPrefAlign::new(prefix_align));
+ offset = prefix_size.align_to(prefix_align);
+ }
+ for &i in &inverse_memory_index {
+ let field = &fields[i as usize];
+ if !sized {
+ self.delay_bug(&format!(
+ "univariant: field #{} comes after unsized field",
+ offsets.len(),
+ ));
+ }
+
+ if field.is_unsized() {
+ sized = false;
+ }
+
+ // Invariant: offset < dl.obj_size_bound() <= 1<<61
+ let field_align = if let Some(pack) = pack {
+ field.align.min(AbiAndPrefAlign::new(pack))
+ } else {
+ field.align
+ };
+ offset = offset.align_to(field_align.abi);
+ align = align.max(field_align);
+
+ debug!("univariant offset: {:?} field: {:#?}", offset, field);
+ offsets[i as usize] = offset;
+
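+ // Track the largest niche seen so far; a field's niche offset is relative to the field,
+ // so it is rebased onto the struct by adding the field's own offset.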
+ if let Some(mut niche) = field.largest_niche {
+ let available = niche.available(dl);
+ if available > largest_niche_available {
+ largest_niche_available = available;
+ niche.offset += offset;
+ largest_niche = Some(niche);
+ }
+ }
+
+ offset = offset.checked_add(field.size, dl)?;
+ }
+ if let Some(repr_align) = repr.align {
+ align = align.max(AbiAndPrefAlign::new(repr_align));
+ }
+ debug!("univariant min_size: {:?}", offset);
+ let min_size = offset;
+ // As stated above, inverse_memory_index holds field indices by increasing offset.
+ // This makes it an already-sorted view of the offsets vec.
+ // To invert it, consider:
+ // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
+ // Field 5 would be the first element, so memory_index is i:
+ // Note: if we didn't optimize, it's already right.
+ let memory_index =
+ if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };
+ let size = min_size.align_to(align.abi);
+ let mut abi = Abi::Aggregate { sized };
+ // Unpack newtype ABIs and find scalar pairs.
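+ // e.g. a newtype around a single non-ZST scalar field can reuse that field's Abi::Scalar,
+ // and a struct whose two non-ZST fields are both scalars may become an Abi::ScalarPair.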
+ if sized && size.bytes() > 0 {
+ // All other fields must be ZSTs.
+ let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
+
+ match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
+ // We have exactly one non-ZST field.
+ (Some((i, field)), None, None) => {
+ // Field fills the struct and it has a scalar or scalar pair ABI.
+ if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
+ {
+ match field.abi {
+ // For plain scalars, or vectors of them, we can't unpack
+ // newtypes for `#[repr(C)]`, as that affects C ABIs.
+ Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
+ abi = field.abi;
+ }
+ // But scalar pairs are Rust-specific and get
+ // treated as aggregates by C ABIs anyway.
+ Abi::ScalarPair(..) => {
+ abi = field.abi;
+ }
+ _ => {}
+ }
+ }
+ }
+
+ // Two non-ZST fields, and they're both scalars.
+ (Some((i, a)), Some((j, b)), None) => {
+ match (a.abi, b.abi) {
+ (Abi::Scalar(a), Abi::Scalar(b)) => {
+ // Order by the memory placement, not source order.
+ let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
+ ((i, a), (j, b))
+ } else {
+ ((j, b), (i, a))
+ };
+ let pair = self.scalar_pair::<V>(a, b);
+ let pair_offsets = match pair.fields {
+ FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
+ assert_eq!(memory_index, &[0, 1]);
+ offsets
+ }
+ _ => panic!(),
+ };
+ if offsets[i] == pair_offsets[0]
+ && offsets[j] == pair_offsets[1]
+ && align == pair.align
+ && size == pair.size
+ {
+ // We can use `ScalarPair` only when it matches our
+ // already computed layout (including `#[repr(C)]`).
+ abi = pair.abi;
+ }
+ }
+ _ => {}
+ }
+ }
+
+ _ => {}
+ }
+ }
+ if fields.iter().any(|f| f.abi.is_uninhabited()) {
+ abi = Abi::Uninhabited;
+ }
+ Some(LayoutS {
+ variants: Variants::Single { index: V::new(0) },
+ fields: FieldsShape::Arbitrary { offsets, memory_index },
+ abi,
+ largest_niche,
+ align,
+ size,
+ })
+ }
+
+ fn layout_of_never_type<V: Idx>(&self) -> LayoutS<V> {
+ let dl = self.current_data_layout();
+ let dl = dl.borrow();
+ LayoutS {
+ variants: Variants::Single { index: V::new(0) },
+ fields: FieldsShape::Primitive,
+ abi: Abi::Uninhabited,
+ largest_niche: None,
+ align: dl.i8_align,
+ size: Size::ZERO,
+ }
+ }
+
+ fn layout_of_struct_or_enum<'a, V: Idx, F: Deref<Target = &'a LayoutS<V>> + Debug>(
+ &self,
+ repr: &ReprOptions,
+ variants: &IndexVec<V, Vec<F>>,
+ is_enum: bool,
+ is_unsafe_cell: bool,
+ scalar_valid_range: (Bound<u128>, Bound<u128>),
+ discr_range_of_repr: impl Fn(i128, i128) -> (Integer, bool),
+ discriminants: impl Iterator<Item = (V, i128)>,
+ niche_optimize_enum: bool,
+ always_sized: bool,
+ ) -> Option<LayoutS<V>> {
+ let dl = self.current_data_layout();
+ let dl = dl.borrow();
+
+ let scalar_unit = |value: Primitive| {
+ let size = value.size(dl);
+ assert!(size.bits() <= 128);
+ Scalar::Initialized { value, valid_range: WrappingRange::full(size) }
+ };
+
+ // A variant is absent if it's uninhabited and only has ZST fields.
+ // Present uninhabited variants only require space for their fields,
+ // but *not* an encoding of the discriminant (e.g., a tag value).
+ // See issue #49298 for more details on the need to leave space
+ // for non-ZST uninhabited data (mostly partial initialization).
+ let absent = |fields: &[F]| {
+ let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
+ let is_zst = fields.iter().all(|f| f.is_zst());
+ uninhabited && is_zst
+ };
+ let (present_first, present_second) = {
+ let mut present_variants = variants
+ .iter_enumerated()
+ .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
+ (present_variants.next(), present_variants.next())
+ };
+ let present_first = match present_first {
+ Some(present_first) => present_first,
+ // Uninhabited because it has no variants, or only absent ones.
+ None if is_enum => {
+ return Some(self.layout_of_never_type());
+ }
+ // If it's a struct, still compute a layout so that we can still compute the
+ // field offsets.
+ None => V::new(0),
+ };
+
+ let is_struct = !is_enum ||
+ // Only one variant is present.
+ (present_second.is_none() &&
+ // Representation optimizations are allowed.
+ !repr.inhibit_enum_layout_opt());
+ if is_struct {
+ // Struct, or univariant enum equivalent to a struct.
+ // (Typechecking will reject discriminant-sizing attrs.)
+
+ let v = present_first;
+ let kind = if is_enum || variants[v].is_empty() {
+ StructKind::AlwaysSized
+ } else {
+ if !always_sized { StructKind::MaybeUnsized } else { StructKind::AlwaysSized }
+ };
+
+ let mut st = self.univariant(dl, &variants[v], &repr, kind)?;
+ st.variants = Variants::Single { index: v };
+
+ if is_unsafe_cell {
+ let hide_niches = |scalar: &mut _| match scalar {
+ Scalar::Initialized { value, valid_range } => {
+ *valid_range = WrappingRange::full(value.size(dl))
+ }
+ // Already doesn't have any niches
+ Scalar::Union { .. } => {}
+ };
+ match &mut st.abi {
+ Abi::Uninhabited => {}
+ Abi::Scalar(scalar) => hide_niches(scalar),
+ Abi::ScalarPair(a, b) => {
+ hide_niches(a);
+ hide_niches(b);
+ }
+ Abi::Vector { element, count: _ } => hide_niches(element),
+ Abi::Aggregate { sized: _ } => {}
+ }
+ st.largest_niche = None;
+ return Some(st);
+ }
+
+ let (start, end) = scalar_valid_range;
+ match st.abi {
+ Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
+ // the asserts ensure that we are not using the
+ // `#[rustc_layout_scalar_valid_range(n)]`
+ // attribute to widen the range of anything as that would probably
+ // result in UB somewhere
+ // FIXME(eddyb) the asserts are probably not needed,
+ // as larger validity ranges would result in missed
+ // optimizations, *not* wrongly assuming the inner
+ // value is valid. e.g. unions enlarge validity ranges,
+ // because the values may be uninitialized.
+ if let Bound::Included(start) = start {
+ // FIXME(eddyb) this might be incorrect - it doesn't
+ // account for wrap-around (end < start) ranges.
+ let valid_range = scalar.valid_range_mut();
+ assert!(valid_range.start <= start);
+ valid_range.start = start;
+ }
+ if let Bound::Included(end) = end {
+ // FIXME(eddyb) this might be incorrect - it doesn't
+ // account for wrap-around (end < start) ranges.
+ let valid_range = scalar.valid_range_mut();
+ assert!(valid_range.end >= end);
+ valid_range.end = end;
+ }
+
+ // Update `largest_niche` if we have introduced a larger niche.
+ let niche = Niche::from_scalar(dl, Size::ZERO, *scalar);
+ if let Some(niche) = niche {
+ match st.largest_niche {
+ Some(largest_niche) => {
+ // Replace the existing niche even if they're equal,
+ // because this one is at a lower offset.
+ if largest_niche.available(dl) <= niche.available(dl) {
+ st.largest_niche = Some(niche);
+ }
+ }
+ None => st.largest_niche = Some(niche),
+ }
+ }
+ }
+ _ => assert!(
+ start == Bound::Unbounded && end == Bound::Unbounded,
+ "nonscalar layout for layout_scalar_valid_range type: {:#?}",
+ st,
+ ),
+ }
+
+ return Some(st);
+ }
+
+ // At this point, we have handled all unions and
+ // structs. (We have also handled univariant enums
+ // that allow representation optimization.)
+ assert!(is_enum);
+
+ // Until we've decided whether to use the tagged or
+ // niche filling LayoutS, we don't want to intern the
+ // variant layouts, so we can't store them in the
+ // overall LayoutS. Store the overall LayoutS
+ // and the variant LayoutSs here until then.
+ struct TmpLayout<V: Idx> {
+ layout: LayoutS<V>,
+ variants: IndexVec<V, LayoutS<V>>,
+ }
+
+ let calculate_niche_filling_layout = || -> Option<TmpLayout<V>> {
+ if niche_optimize_enum {
+ return None;
+ }
+
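+ // Niche filling needs at least two variants: one is left untagged and the rest are encoded in one of its niches.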
+ if variants.len() < 2 {
+ return None;
+ }
+
+ let mut align = dl.aggregate_align;
+ let mut variant_layouts = variants
+ .iter_enumerated()
+ .map(|(j, v)| {
+ let mut st = self.univariant(dl, v, &repr, StructKind::AlwaysSized)?;
+ st.variants = Variants::Single { index: j };
+
+ align = align.max(st.align);
+
+ Some(st)
+ })
+ .collect::<Option<IndexVec<V, _>>>()?;
"
);
w.write_str("Size: ");
- write_size_of_layout(w, ty_layout.layout, 0);
+ write_size_of_layout(w, &ty_layout.layout.0, 0);
writeln!(w, "
");
if let Variants::Multiple { variants, tag, tag_encoding, .. } =
&ty_layout.layout.variants()
@@ -1953,7 +1953,7 @@ fn document_type_layout(w: &mut Buffer, cx: &Context<'_>, ty_def_id: DefId) {
for (index, layout) in variants.iter_enumerated() {
let name = adt.variant(index).name;
write!(w, "{name}
: ", name = name);
- write_size_of_layout(w, *layout, tag_size);
+ write_size_of_layout(w, layout, tag_size);
writeln!(w, "");
}
w.write_str("");
diff --git a/src/tools/clippy/clippy_lints/src/casts/cast_possible_truncation.rs b/src/tools/clippy/clippy_lints/src/casts/cast_possible_truncation.rs
index 88deb4565eb..adbcfd3189b 100644
--- a/src/tools/clippy/clippy_lints/src/casts/cast_possible_truncation.rs
+++ b/src/tools/clippy/clippy_lints/src/casts/cast_possible_truncation.rs
@@ -2,12 +2,11 @@ use clippy_utils::consts::{constant, Constant};
use clippy_utils::diagnostics::span_lint;
use clippy_utils::expr_or_init;
use clippy_utils::ty::{get_discriminant_value, is_isize_or_usize};
-use rustc_ast::ast;
-use rustc_attr::IntType;
use rustc_hir::def::{DefKind, Res};
use rustc_hir::{BinOpKind, Expr, ExprKind};
use rustc_lint::LateContext;
use rustc_middle::ty::{self, FloatTy, Ty};
+use rustc_target::abi::IntegerType;
use super::{utils, CAST_ENUM_TRUNCATION, CAST_POSSIBLE_TRUNCATION};
@@ -122,7 +121,7 @@ pub(super) fn check(cx: &LateContext<'_>, expr: &Expr<'_>, cast_expr: &Expr<'_>,
let cast_from_ptr_size = def.repr().int.map_or(true, |ty| {
matches!(
ty,
- IntType::SignedInt(ast::IntTy::Isize) | IntType::UnsignedInt(ast::UintTy::Usize)
+ IntegerType::Pointer(_),
)
});
let suffix = match (cast_from_ptr_size, is_isize_or_usize(cast_to)) {
diff --git a/src/tools/clippy/clippy_lints/src/lib.rs b/src/tools/clippy/clippy_lints/src/lib.rs
index b481314abed..601990cd6a3 100644
--- a/src/tools/clippy/clippy_lints/src/lib.rs
+++ b/src/tools/clippy/clippy_lints/src/lib.rs
@@ -26,7 +26,6 @@
extern crate rustc_arena;
extern crate rustc_ast;
extern crate rustc_ast_pretty;
-extern crate rustc_attr;
extern crate rustc_data_structures;
extern crate rustc_driver;
extern crate rustc_errors;