Mirror of https://github.com/rust-lang/rust.git
Auto merge of #138267 - matthiaskrgr:rollup-vt76bhs, r=matthiaskrgr
Rollup of 12 pull requests

Successful merges:

 - #136127 (Allow `*const W<dyn A> -> *const dyn A` ptr cast)
 - #136968 (Turn order dependent trait objects future incompat warning into a hard error)
 - #137319 (Stabilize `const_vec_string_slice`)
 - #137885 (tidy: add triagebot checks)
 - #138040 (compiler: Use `size_of` from the prelude instead of imported)
 - #138084 (Use workspace lints for crates in `compiler/`)
 - #138158 (Move more layouting logic to `rustc_abi`)
 - #138160 (depend more on attr_data_structures and move find_attr! there)
 - #138192 (crashes: couple more tests)
 - #138216 (bootstrap: Fix stack printing when a step cycle is detected)
 - #138232 (Reduce verbosity of GCC build log)
 - #138242 (Revert "Don't test new error messages with the stage 0 compiler")

r? `@ghost`
`@rustbot` modify labels: rollup
Commit ed897d5f85
Cargo.lock

@@ -3297,6 +3297,7 @@ dependencies = [
 "rustc_hir",
 "rustc_lexer",
 "rustc_macros",
+"rustc_middle",
 "rustc_serialize",
 "rustc_session",
 "rustc_span",
@@ -3752,7 +3753,7 @@ dependencies = [
 "rustc_abi",
 "rustc_ast",
 "rustc_ast_pretty",
-"rustc_attr_parsing",
+"rustc_attr_data_structures",
 "rustc_hir",
 "rustc_span",
 ]
@@ -4020,7 +4021,8 @@ dependencies = [
 "rustc_apfloat",
 "rustc_arena",
 "rustc_ast",
-"rustc_attr_parsing",
+"rustc_ast_ir",
+"rustc_attr_data_structures",
 "rustc_data_structures",
 "rustc_error_messages",
 "rustc_errors",
@@ -5271,6 +5273,7 @@ dependencies = [
 "serde",
 "similar",
 "termcolor",
+"toml 0.7.8",
 "walkdir",
 ]
Cargo.toml (14 lines changed)
@@ -63,6 +63,20 @@ exclude = [
    "src/tools/x",
]

+# These lints are applied to many crates in the workspace. In practice, this is
+# all crates under `compiler/`.
+#
+# NOTE: rustc-specific lints (e.g. `rustc::internal`) aren't supported by
+# Cargo. (Support for them is possibly blocked by #44690 (attributes for
+# tools).) Those lints are instead specified for `compiler/` crates in
+# `src/bootstrap/src/core/builder/cargo.rs`.
+[workspace.lints.rust]
+# FIXME(edition_2024): Change this to `-Wrust_2024_idioms` when all of the
+# individual lints are satisfied.
+keyword_idents_2024 = "warn"
+unreachable_pub = "warn"
+unsafe_op_in_unsafe_fn = "warn"
+
[profile.release.package.rustc-rayon-core]
# The rustc fork of Rayon has deadlock detection code which intermittently
# causes overflows in the CI (see https://github.com/rust-lang/rust/issues/90227)
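Of the workspace lints added above, `unsafe_op_in_unsafe_fn` is the one with the most visible effect on code. As a quick illustration (a standalone sketch, not code from this diff), it requires unsafe operations inside an `unsafe fn` to be wrapped in their own `unsafe` block:

#![warn(unsafe_op_in_unsafe_fn)]

/// # Safety
/// `ptr` must be valid for reads.
unsafe fn read_byte(ptr: *const u8) -> u8 {
    // Without this inner block, the lint reports the raw-pointer dereference,
    // because the body of an `unsafe fn` is no longer an implicit unsafe block.
    unsafe { *ptr }
}

fn main() {
    let x = 7u8;
    // SAFETY: `&x` is valid for the duration of the call.
    assert_eq!(unsafe { read_byte(&x) }, 7);
}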
@@ -32,3 +32,6 @@ llvm = ['rustc_driver_impl/llvm']
max_level_info = ['rustc_driver_impl/max_level_info']
rustc_randomized_layouts = ['rustc_driver_impl/rustc_randomized_layouts']
# tidy-alphabetical-end
+
+[lints]
+workspace = true
@@ -31,3 +31,6 @@ nightly = [
]
randomize = ["dep:rand", "dep:rand_xoshiro", "nightly"]
# tidy-alphabetical-end
+
+[lints]
+workspace = true
@@ -4,6 +4,7 @@ use std::{cmp, iter};

use rustc_hashes::Hash64;
use rustc_index::Idx;
+use rustc_index::bit_set::BitMatrix;
use tracing::debug;

use crate::{
@@ -12,6 +13,9 @@ use crate::{
    Variants, WrappingRange,
};

+mod coroutine;
+mod simple;
+
#[cfg(feature = "nightly")]
mod ty;

@@ -60,17 +64,28 @@ pub enum LayoutCalculatorError<F> {

    /// The fields or variants have irreconcilable reprs
    ReprConflict,
+
+    /// The length of an SIMD type is zero
+    ZeroLengthSimdType,
+
+    /// The length of an SIMD type exceeds the maximum number of lanes
+    OversizedSimdType { max_lanes: u64 },
+
+    /// An element type of an SIMD type isn't a primitive
+    NonPrimitiveSimdType(F),
}

impl<F> LayoutCalculatorError<F> {
    pub fn without_payload(&self) -> LayoutCalculatorError<()> {
-        match self {
-            LayoutCalculatorError::UnexpectedUnsized(_) => {
-                LayoutCalculatorError::UnexpectedUnsized(())
-            }
-            LayoutCalculatorError::SizeOverflow => LayoutCalculatorError::SizeOverflow,
-            LayoutCalculatorError::EmptyUnion => LayoutCalculatorError::EmptyUnion,
-            LayoutCalculatorError::ReprConflict => LayoutCalculatorError::ReprConflict,
+        use LayoutCalculatorError::*;
+        match *self {
+            UnexpectedUnsized(_) => UnexpectedUnsized(()),
+            SizeOverflow => SizeOverflow,
+            EmptyUnion => EmptyUnion,
+            ReprConflict => ReprConflict,
+            ZeroLengthSimdType => ZeroLengthSimdType,
+            OversizedSimdType { max_lanes } => OversizedSimdType { max_lanes },
+            NonPrimitiveSimdType(_) => NonPrimitiveSimdType(()),
        }
    }

@@ -78,13 +93,15 @@ impl<F> LayoutCalculatorError<F> {
    ///
    /// Intended for use by rust-analyzer, as neither it nor `rustc_abi` depend on fluent infra.
    pub fn fallback_fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        use LayoutCalculatorError::*;
        f.write_str(match self {
-            LayoutCalculatorError::UnexpectedUnsized(_) => {
-                "an unsized type was found where a sized type was expected"
+            UnexpectedUnsized(_) => "an unsized type was found where a sized type was expected",
+            SizeOverflow => "size overflow",
+            EmptyUnion => "type is a union with no fields",
+            ReprConflict => "type has an invalid repr",
+            ZeroLengthSimdType | OversizedSimdType { .. } | NonPrimitiveSimdType(_) => {
+                "invalid simd type definition"
            }
-            LayoutCalculatorError::SizeOverflow => "size overflow",
-            LayoutCalculatorError::EmptyUnion => "type is a union with no fields",
-            LayoutCalculatorError::ReprConflict => "type has an invalid repr",
        })
    }
}
@@ -102,41 +119,115 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
        Self { cx }
    }

-    pub fn scalar_pair<FieldIdx: Idx, VariantIdx: Idx>(
-        &self,
-        a: Scalar,
-        b: Scalar,
-    ) -> LayoutData<FieldIdx, VariantIdx> {
-        let dl = self.cx.data_layout();
-        let b_align = b.align(dl);
-        let align = a.align(dl).max(b_align).max(dl.aggregate_align);
-        let b_offset = a.size(dl).align_to(b_align.abi);
-        let size = (b_offset + b.size(dl)).align_to(align.abi);
-
-        // HACK(nox): We iter on `b` and then `a` because `max_by_key`
-        // returns the last maximum.
-        let largest_niche = Niche::from_scalar(dl, b_offset, b)
-            .into_iter()
-            .chain(Niche::from_scalar(dl, Size::ZERO, a))
-            .max_by_key(|niche| niche.available(dl));
-
-        let combined_seed = a.size(&self.cx).bytes().wrapping_add(b.size(&self.cx).bytes());
-
-        LayoutData {
-            variants: Variants::Single { index: VariantIdx::new(0) },
-            fields: FieldsShape::Arbitrary {
-                offsets: [Size::ZERO, b_offset].into(),
-                memory_index: [0, 1].into(),
-            },
-            backend_repr: BackendRepr::ScalarPair(a, b),
-            largest_niche,
-            uninhabited: false,
-            align,
-            size,
-            max_repr_align: None,
-            unadjusted_abi_align: align.abi,
-            randomization_seed: Hash64::new(combined_seed),
-        }
+    pub fn array_like<FieldIdx: Idx, VariantIdx: Idx, F>(
+        &self,
+        element: &LayoutData<FieldIdx, VariantIdx>,
+        count_if_sized: Option<u64>, // None for slices
+    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
+        let count = count_if_sized.unwrap_or(0);
+        let size =
+            element.size.checked_mul(count, &self.cx).ok_or(LayoutCalculatorError::SizeOverflow)?;
+
+        Ok(LayoutData {
+            variants: Variants::Single { index: VariantIdx::new(0) },
+            fields: FieldsShape::Array { stride: element.size, count },
+            backend_repr: BackendRepr::Memory { sized: count_if_sized.is_some() },
+            largest_niche: element.largest_niche.filter(|_| count != 0),
+            uninhabited: element.uninhabited && count != 0,
+            align: element.align,
+            size,
+            max_repr_align: None,
+            unadjusted_abi_align: element.align.abi,
+            randomization_seed: element.randomization_seed.wrapping_add(Hash64::new(count)),
+        })
+    }
+
+    pub fn simd_type<
+        FieldIdx: Idx,
+        VariantIdx: Idx,
+        F: AsRef<LayoutData<FieldIdx, VariantIdx>> + fmt::Debug,
+    >(
+        &self,
+        element: F,
+        count: u64,
+        repr_packed: bool,
+    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
+        let elt = element.as_ref();
+        if count == 0 {
+            return Err(LayoutCalculatorError::ZeroLengthSimdType);
+        } else if count > crate::MAX_SIMD_LANES {
+            return Err(LayoutCalculatorError::OversizedSimdType {
+                max_lanes: crate::MAX_SIMD_LANES,
+            });
+        }
+
+        let BackendRepr::Scalar(e_repr) = elt.backend_repr else {
+            return Err(LayoutCalculatorError::NonPrimitiveSimdType(element));
+        };
+
+        // Compute the size and alignment of the vector
+        let dl = self.cx.data_layout();
+        let size =
+            elt.size.checked_mul(count, dl).ok_or_else(|| LayoutCalculatorError::SizeOverflow)?;
+        let (repr, align) = if repr_packed && !count.is_power_of_two() {
+            // Non-power-of-two vectors have padding up to the next power-of-two.
+            // If we're a packed repr, remove the padding while keeping the alignment as close
+            // to a vector as possible.
+            (
+                BackendRepr::Memory { sized: true },
+                AbiAndPrefAlign {
+                    abi: Align::max_aligned_factor(size),
+                    pref: dl.llvmlike_vector_align(size).pref,
+                },
+            )
+        } else {
+            (BackendRepr::SimdVector { element: e_repr, count }, dl.llvmlike_vector_align(size))
+        };
+        let size = size.align_to(align.abi);
+
+        Ok(LayoutData {
+            variants: Variants::Single { index: VariantIdx::new(0) },
+            fields: FieldsShape::Arbitrary {
+                offsets: [Size::ZERO].into(),
+                memory_index: [0].into(),
+            },
+            backend_repr: repr,
+            largest_niche: elt.largest_niche,
+            uninhabited: false,
+            size,
+            align,
+            max_repr_align: None,
+            unadjusted_abi_align: elt.align.abi,
+            randomization_seed: elt.randomization_seed.wrapping_add(Hash64::new(count)),
+        })
+    }
+
+    /// Compute the layout for a coroutine.
+    ///
+    /// This uses dedicated code instead of [`Self::layout_of_struct_or_enum`], as coroutine
+    /// fields may be shared between multiple variants (see the [`coroutine`] module for details).
+    pub fn coroutine<
+        'a,
+        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
+        VariantIdx: Idx,
+        FieldIdx: Idx,
+        LocalIdx: Idx,
+    >(
+        &self,
+        local_layouts: &IndexSlice<LocalIdx, F>,
+        prefix_layouts: IndexVec<FieldIdx, F>,
+        variant_fields: &IndexSlice<VariantIdx, IndexVec<FieldIdx, LocalIdx>>,
+        storage_conflicts: &BitMatrix<LocalIdx, LocalIdx>,
+        tag_to_layout: impl Fn(Scalar) -> F,
+    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
+        coroutine::layout(
+            self,
+            local_layouts,
+            prefix_layouts,
+            variant_fields,
+            storage_conflicts,
+            tag_to_layout,
+        )
    }

    pub fn univariant<
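As a side note on the `array_like` hunk above: the only arithmetic it performs for a sized array is element size times element count, with overflow reported as `SizeOverflow` rather than wrapping. A standalone sketch with plain integers (not the `rustc_abi` types):

// Plain-integer sketch of the overflow-checked size computation in `array_like`.
fn array_size_bytes(element_size: u64, count: u64) -> Result<u64, &'static str> {
    element_size.checked_mul(count).ok_or("size overflow")
}

fn main() {
    // e.g. a 10-element array of 4-byte elements occupies 40 bytes.
    assert_eq!(array_size_bytes(4, 10), Ok(40));
    // An impossibly large count is rejected rather than silently wrapping.
    assert!(array_size_bytes(8, u64::MAX).is_err());
}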
@@ -214,25 +305,6 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
        layout
    }

-    pub fn layout_of_never_type<FieldIdx: Idx, VariantIdx: Idx>(
-        &self,
-    ) -> LayoutData<FieldIdx, VariantIdx> {
-        let dl = self.cx.data_layout();
-        // This is also used for uninhabited enums, so we use `Variants::Empty`.
-        LayoutData {
-            variants: Variants::Empty,
-            fields: FieldsShape::Primitive,
-            backend_repr: BackendRepr::Memory { sized: true },
-            largest_niche: None,
-            uninhabited: true,
-            align: dl.i8_align,
-            size: Size::ZERO,
-            max_repr_align: None,
-            unadjusted_abi_align: dl.i8_align.abi,
-            randomization_seed: Hash64::ZERO,
-        }
-    }
-
    pub fn layout_of_struct_or_enum<
        'a,
        FieldIdx: Idx,
@@ -260,7 +332,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
            Some(present_first) => present_first,
            // Uninhabited because it has no variants, or only absent ones.
            None if is_enum => {
-                return Ok(self.layout_of_never_type());
+                return Ok(LayoutData::never_type(&self.cx));
            }
            // If it's a struct, still compute a layout so that we can still compute the
            // field offsets.
@@ -949,7 +1021,8 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                    // Common prim might be uninit.
                    Scalar::Union { value: prim }
                };
-                let pair = self.scalar_pair::<FieldIdx, VariantIdx>(tag, prim_scalar);
+                let pair =
+                    LayoutData::<FieldIdx, VariantIdx>::scalar_pair(&self.cx, tag, prim_scalar);
                let pair_offsets = match pair.fields {
                    FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                        assert_eq!(memory_index.raw, [0, 1]);
@@ -1341,7 +1414,8 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                } else {
                    ((j, b), (i, a))
                };
-                let pair = self.scalar_pair::<FieldIdx, VariantIdx>(a, b);
+                let pair =
+                    LayoutData::<FieldIdx, VariantIdx>::scalar_pair(&self.cx, a, b);
                let pair_offsets = match pair.fields {
                    FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                        assert_eq!(memory_index.raw, [0, 1]);
compiler/rustc_abi/src/layout/coroutine.rs (new file, 320 lines)

@@ -0,0 +1,320 @@
//! Coroutine layout logic.
//!
//! When laying out coroutines, we divide our saved local fields into two
//! categories: overlap-eligible and overlap-ineligible.
//!
//! Those fields which are ineligible for overlap go in a "prefix" at the
//! beginning of the layout, and always have space reserved for them.
//!
//! Overlap-eligible fields are only assigned to one variant, so we lay
//! those fields out for each variant and put them right after the
//! prefix.
//!
//! Finally, in the layout details, we point to the fields from the
//! variants they are assigned to. It is possible for some fields to be
//! included in multiple variants. No field ever "moves around" in the
//! layout; its offset is always the same.
//!
//! Also included in the layout are the upvars and the discriminant.
//! These are included as fields on the "outer" layout; they are not part
//! of any variant.

use std::iter;

use rustc_index::bit_set::{BitMatrix, DenseBitSet};
use rustc_index::{Idx, IndexSlice, IndexVec};
use tracing::{debug, trace};

use crate::{
    BackendRepr, FieldsShape, HasDataLayout, Integer, LayoutData, Primitive, ReprOptions, Scalar,
    StructKind, TagEncoding, Variants, WrappingRange,
};

/// Overlap eligibility and variant assignment for each CoroutineSavedLocal.
#[derive(Clone, Debug, PartialEq)]
enum SavedLocalEligibility<VariantIdx, FieldIdx> {
    Unassigned,
    Assigned(VariantIdx),
    Ineligible(Option<FieldIdx>),
}

/// Compute the eligibility and assignment of each local.
fn coroutine_saved_local_eligibility<VariantIdx: Idx, FieldIdx: Idx, LocalIdx: Idx>(
    nb_locals: usize,
    variant_fields: &IndexSlice<VariantIdx, IndexVec<FieldIdx, LocalIdx>>,
    storage_conflicts: &BitMatrix<LocalIdx, LocalIdx>,
) -> (DenseBitSet<LocalIdx>, IndexVec<LocalIdx, SavedLocalEligibility<VariantIdx, FieldIdx>>) {
    use SavedLocalEligibility::*;

    let mut assignments: IndexVec<LocalIdx, _> = IndexVec::from_elem_n(Unassigned, nb_locals);

    // The saved locals not eligible for overlap. These will get
    // "promoted" to the prefix of our coroutine.
    let mut ineligible_locals = DenseBitSet::new_empty(nb_locals);

    // Figure out which of our saved locals are fields in only
    // one variant. The rest are deemed ineligible for overlap.
    for (variant_index, fields) in variant_fields.iter_enumerated() {
        for local in fields {
            match assignments[*local] {
                Unassigned => {
                    assignments[*local] = Assigned(variant_index);
                }
                Assigned(idx) => {
                    // We've already seen this local at another suspension
                    // point, so it is no longer a candidate.
                    trace!(
                        "removing local {:?} in >1 variant ({:?}, {:?})",
                        local, variant_index, idx
                    );
                    ineligible_locals.insert(*local);
                    assignments[*local] = Ineligible(None);
                }
                Ineligible(_) => {}
            }
        }
    }

    // Next, check every pair of eligible locals to see if they
    // conflict.
    for local_a in storage_conflicts.rows() {
        let conflicts_a = storage_conflicts.count(local_a);
        if ineligible_locals.contains(local_a) {
            continue;
        }

        for local_b in storage_conflicts.iter(local_a) {
            // local_a and local_b are storage live at the same time, therefore they
            // cannot overlap in the coroutine layout. The only way to guarantee
            // this is if they are in the same variant, or one is ineligible
            // (which means it is stored in every variant).
            if ineligible_locals.contains(local_b) || assignments[local_a] == assignments[local_b] {
                continue;
            }

            // If they conflict, we will choose one to make ineligible.
            // This is not always optimal; it's just a greedy heuristic that
            // seems to produce good results most of the time.
            let conflicts_b = storage_conflicts.count(local_b);
            let (remove, other) =
                if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
            ineligible_locals.insert(remove);
            assignments[remove] = Ineligible(None);
            trace!("removing local {:?} due to conflict with {:?}", remove, other);
        }
    }

    // Count the number of variants in use. If only one of them, then it is
    // impossible to overlap any locals in our layout. In this case it's
    // always better to make the remaining locals ineligible, so we can
    // lay them out with the other locals in the prefix and eliminate
    // unnecessary padding bytes.
    {
        let mut used_variants = DenseBitSet::new_empty(variant_fields.len());
        for assignment in &assignments {
            if let Assigned(idx) = assignment {
                used_variants.insert(*idx);
            }
        }
        if used_variants.count() < 2 {
            for assignment in assignments.iter_mut() {
                *assignment = Ineligible(None);
            }
            ineligible_locals.insert_all();
        }
    }

    // Write down the order of our locals that will be promoted to the prefix.
    {
        for (idx, local) in ineligible_locals.iter().enumerate() {
            assignments[local] = Ineligible(Some(FieldIdx::new(idx)));
        }
    }
    debug!("coroutine saved local assignments: {:?}", assignments);

    (ineligible_locals, assignments)
}

/// Compute the full coroutine layout.
pub(super) fn layout<
    'a,
    F: core::ops::Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + core::fmt::Debug + Copy,
    VariantIdx: Idx,
    FieldIdx: Idx,
    LocalIdx: Idx,
>(
    calc: &super::LayoutCalculator<impl HasDataLayout>,
    local_layouts: &IndexSlice<LocalIdx, F>,
    mut prefix_layouts: IndexVec<FieldIdx, F>,
    variant_fields: &IndexSlice<VariantIdx, IndexVec<FieldIdx, LocalIdx>>,
    storage_conflicts: &BitMatrix<LocalIdx, LocalIdx>,
    tag_to_layout: impl Fn(Scalar) -> F,
) -> super::LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
    use SavedLocalEligibility::*;

    let (ineligible_locals, assignments) =
        coroutine_saved_local_eligibility(local_layouts.len(), variant_fields, storage_conflicts);

    // Build a prefix layout, including "promoting" all ineligible
    // locals as part of the prefix. We compute the layout of all of
    // these fields at once to get optimal packing.
    let tag_index = prefix_layouts.len();

    // `variant_fields` already accounts for the reserved variants, so no need to add them.
    let max_discr = (variant_fields.len() - 1) as u128;
    let discr_int = Integer::fit_unsigned(max_discr);
    let tag = Scalar::Initialized {
        value: Primitive::Int(discr_int, /* signed = */ false),
        valid_range: WrappingRange { start: 0, end: max_discr },
    };

    let promoted_layouts = ineligible_locals.iter().map(|local| local_layouts[local]);
    prefix_layouts.push(tag_to_layout(tag));
    prefix_layouts.extend(promoted_layouts);
    let prefix =
        calc.univariant(&prefix_layouts, &ReprOptions::default(), StructKind::AlwaysSized)?;

    let (prefix_size, prefix_align) = (prefix.size, prefix.align);

    // Split the prefix layout into the "outer" fields (upvars and
    // discriminant) and the "promoted" fields. Promoted fields will
    // get included in each variant that requested them in
    // CoroutineLayout.
    debug!("prefix = {:#?}", prefix);
    let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
        FieldsShape::Arbitrary { mut offsets, memory_index } => {
            let mut inverse_memory_index = memory_index.invert_bijective_mapping();

            // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
            // "outer" and "promoted" fields respectively.
            let b_start = FieldIdx::new(tag_index + 1);
            let offsets_b = IndexVec::from_raw(offsets.raw.split_off(b_start.index()));
            let offsets_a = offsets;

            // Disentangle the "a" and "b" components of `inverse_memory_index`
            // by preserving the order but keeping only one disjoint "half" each.
            // FIXME(eddyb) build a better abstraction for permutations, if possible.
            let inverse_memory_index_b: IndexVec<u32, FieldIdx> = inverse_memory_index
                .iter()
                .filter_map(|&i| i.index().checked_sub(b_start.index()).map(FieldIdx::new))
                .collect();
            inverse_memory_index.raw.retain(|&i| i.index() < b_start.index());
            let inverse_memory_index_a = inverse_memory_index;

            // Since `inverse_memory_index_{a,b}` each only refer to their
            // respective fields, they can be safely inverted
            let memory_index_a = inverse_memory_index_a.invert_bijective_mapping();
            let memory_index_b = inverse_memory_index_b.invert_bijective_mapping();

            let outer_fields =
                FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
            (outer_fields, offsets_b, memory_index_b)
        }
        _ => unreachable!(),
    };

    let mut size = prefix.size;
    let mut align = prefix.align;
    let variants = variant_fields
        .iter_enumerated()
        .map(|(index, variant_fields)| {
            // Only include overlap-eligible fields when we compute our variant layout.
            let variant_only_tys = variant_fields
                .iter()
                .filter(|local| match assignments[**local] {
                    Unassigned => unreachable!(),
                    Assigned(v) if v == index => true,
                    Assigned(_) => unreachable!("assignment does not match variant"),
                    Ineligible(_) => false,
                })
                .map(|local| local_layouts[*local]);

            let mut variant = calc.univariant(
                &variant_only_tys.collect::<IndexVec<_, _>>(),
                &ReprOptions::default(),
                StructKind::Prefixed(prefix_size, prefix_align.abi),
            )?;
            variant.variants = Variants::Single { index };

            let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
                unreachable!();
            };

            // Now, stitch the promoted and variant-only fields back together in
            // the order they are mentioned by our CoroutineLayout.
            // Because we only use some subset (that can differ between variants)
            // of the promoted fields, we can't just pick those elements of the
            // `promoted_memory_index` (as we'd end up with gaps).
            // So instead, we build an "inverse memory_index", as if all of the
            // promoted fields were being used, but leave the elements not in the
            // subset as `invalid_field_idx`, which we can filter out later to
            // obtain a valid (bijective) mapping.
            let invalid_field_idx = promoted_memory_index.len() + memory_index.len();
            let mut combined_inverse_memory_index =
                IndexVec::from_elem_n(FieldIdx::new(invalid_field_idx), invalid_field_idx);

            let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
            let combined_offsets = variant_fields
                .iter_enumerated()
                .map(|(i, local)| {
                    let (offset, memory_index) = match assignments[*local] {
                        Unassigned => unreachable!(),
                        Assigned(_) => {
                            let (offset, memory_index) = offsets_and_memory_index.next().unwrap();
                            (offset, promoted_memory_index.len() as u32 + memory_index)
                        }
                        Ineligible(field_idx) => {
                            let field_idx = field_idx.unwrap();
                            (promoted_offsets[field_idx], promoted_memory_index[field_idx])
                        }
                    };
                    combined_inverse_memory_index[memory_index] = i;
                    offset
                })
                .collect();

            // Remove the unused slots and invert the mapping to obtain the
            // combined `memory_index` (also see previous comment).
            combined_inverse_memory_index.raw.retain(|&i| i.index() != invalid_field_idx);
            let combined_memory_index = combined_inverse_memory_index.invert_bijective_mapping();

            variant.fields = FieldsShape::Arbitrary {
                offsets: combined_offsets,
                memory_index: combined_memory_index,
            };

            size = size.max(variant.size);
            align = align.max(variant.align);
            Ok(variant)
        })
        .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

    size = size.align_to(align.abi);

    let uninhabited = prefix.uninhabited || variants.iter().all(|v| v.is_uninhabited());
    let abi = BackendRepr::Memory { sized: true };

    Ok(LayoutData {
        variants: Variants::Multiple {
            tag,
            tag_encoding: TagEncoding::Direct,
            tag_field: tag_index,
            variants,
        },
        fields: outer_fields,
        backend_repr: abi,
        // Suppress niches inside coroutines. If the niche is inside a field that is aliased (due to
        // self-referentiality), getting the discriminant can cause aliasing violations.
        // `UnsafeCell` blocks niches for the same reason, but we don't yet have `UnsafePinned` that
        // would do the same for us here.
        // See <https://github.com/rust-lang/rust/issues/63818>, <https://github.com/rust-lang/miri/issues/3780>.
        // FIXME: Remove when <https://github.com/rust-lang/rust/issues/125735> is implemented and aliased coroutine fields are wrapped in `UnsafePinned`.
        largest_niche: None,
        uninhabited,
        size,
        align,
        max_repr_align: None,
        unadjusted_abi_align: align.abi,
        randomization_seed: Default::default(),
    })
}
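To make the module docs above concrete, here is a toy, self-contained model of the two rules the eligibility pass applies (plain Vec/HashSet instead of the rustc_index bitsets, so not code from this diff): a saved local used by more than one variant is promoted to the shared prefix, and of two conflicting locals in different variants, one is greedily demoted.

use std::collections::HashSet;

// `variant_fields[v]` lists the saved locals of variant `v`; `conflicts` lists
// pairs of locals that are storage-live at the same time.
fn ineligible_locals(
    nb_locals: usize,
    variant_fields: &[Vec<usize>],
    conflicts: &[(usize, usize)],
) -> HashSet<usize> {
    let mut assigned: Vec<Option<usize>> = vec![None; nb_locals];
    let mut ineligible = HashSet::new();

    // Rule 1: a local saved in more than one variant must live in the prefix.
    for (variant, fields) in variant_fields.iter().enumerate() {
        for &local in fields {
            match assigned[local] {
                None => assigned[local] = Some(variant),
                Some(v) if v == variant => {}
                Some(_) => {
                    ineligible.insert(local);
                }
            }
        }
    }

    // Rule 2: two conflicting locals in different variants cannot overlap, so
    // greedily demote one of them to the prefix.
    for &(a, b) in conflicts {
        if !ineligible.contains(&a) && !ineligible.contains(&b) && assigned[a] != assigned[b] {
            ineligible.insert(b);
        }
    }
    ineligible
}

fn main() {
    // Local 0 is saved by both variants, so it is promoted to the prefix;
    // locals 1 and 2 stay overlap-eligible within their own variants.
    let variants = vec![vec![0, 1], vec![0, 2]];
    let promoted = ineligible_locals(3, &variants, &[]);
    assert!(promoted.contains(&0));
    assert!(!promoted.contains(&1) && !promoted.contains(&2));
}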
compiler/rustc_abi/src/layout/simple.rs (new file, 148 lines)

@@ -0,0 +1,148 @@
use std::num::NonZero;

use rustc_hashes::Hash64;
use rustc_index::{Idx, IndexVec};

use crate::{
    BackendRepr, FieldsShape, HasDataLayout, LayoutData, Niche, Primitive, Scalar, Size, Variants,
};

/// "Simple" layout constructors that cannot fail.
impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
    pub fn unit<C: HasDataLayout>(cx: &C, sized: bool) -> Self {
        let dl = cx.data_layout();
        LayoutData {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary {
                offsets: IndexVec::new(),
                memory_index: IndexVec::new(),
            },
            backend_repr: BackendRepr::Memory { sized },
            largest_niche: None,
            uninhabited: false,
            align: dl.i8_align,
            size: Size::ZERO,
            max_repr_align: None,
            unadjusted_abi_align: dl.i8_align.abi,
            randomization_seed: Hash64::new(0),
        }
    }

    pub fn never_type<C: HasDataLayout>(cx: &C) -> Self {
        let dl = cx.data_layout();
        // This is also used for uninhabited enums, so we use `Variants::Empty`.
        LayoutData {
            variants: Variants::Empty,
            fields: FieldsShape::Primitive,
            backend_repr: BackendRepr::Memory { sized: true },
            largest_niche: None,
            uninhabited: true,
            align: dl.i8_align,
            size: Size::ZERO,
            max_repr_align: None,
            unadjusted_abi_align: dl.i8_align.abi,
            randomization_seed: Hash64::ZERO,
        }
    }

    pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self {
        let largest_niche = Niche::from_scalar(cx, Size::ZERO, scalar);
        let size = scalar.size(cx);
        let align = scalar.align(cx);

        let range = scalar.valid_range(cx);

        // All primitive types for which we don't have subtype coercions should get a distinct seed,
        // so that types wrapping them can use randomization to arrive at distinct layouts.
        //
        // Some type information is already lost at this point, so as an approximation we derive
        // the seed from what remains. For example on 64-bit targets usize and u64 can no longer
        // be distinguished.
        let randomization_seed = size
            .bytes()
            .wrapping_add(
                match scalar.primitive() {
                    Primitive::Int(_, true) => 1,
                    Primitive::Int(_, false) => 2,
                    Primitive::Float(_) => 3,
                    Primitive::Pointer(_) => 4,
                } << 32,
            )
            // distinguishes references from pointers
            .wrapping_add((range.start as u64).rotate_right(16))
            // distinguishes char from u32 and bool from u8
            .wrapping_add((range.end as u64).rotate_right(16));

        LayoutData {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Primitive,
            backend_repr: BackendRepr::Scalar(scalar),
            largest_niche,
            uninhabited: false,
            size,
            align,
            max_repr_align: None,
            unadjusted_abi_align: align.abi,
            randomization_seed: Hash64::new(randomization_seed),
        }
    }

    pub fn scalar_pair<C: HasDataLayout>(cx: &C, a: Scalar, b: Scalar) -> Self {
        let dl = cx.data_layout();
        let b_align = b.align(dl);
        let align = a.align(dl).max(b_align).max(dl.aggregate_align);
        let b_offset = a.size(dl).align_to(b_align.abi);
        let size = (b_offset + b.size(dl)).align_to(align.abi);

        // HACK(nox): We iter on `b` and then `a` because `max_by_key`
        // returns the last maximum.
        let largest_niche = Niche::from_scalar(dl, b_offset, b)
            .into_iter()
            .chain(Niche::from_scalar(dl, Size::ZERO, a))
            .max_by_key(|niche| niche.available(dl));

        let combined_seed = a.size(dl).bytes().wrapping_add(b.size(dl).bytes());

        LayoutData {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary {
                offsets: [Size::ZERO, b_offset].into(),
                memory_index: [0, 1].into(),
            },
            backend_repr: BackendRepr::ScalarPair(a, b),
            largest_niche,
            uninhabited: false,
            align,
            size,
            max_repr_align: None,
            unadjusted_abi_align: align.abi,
            randomization_seed: Hash64::new(combined_seed),
        }
    }

    /// Returns a dummy layout for an uninhabited variant.
    ///
    /// Uninhabited variants get pruned as part of the layout calculation,
    /// so this can be used after the fact to reconstitute a layout.
    pub fn uninhabited_variant<C: HasDataLayout>(cx: &C, index: VariantIdx, fields: usize) -> Self {
        let dl = cx.data_layout();
        LayoutData {
            variants: Variants::Single { index },
            fields: match NonZero::new(fields) {
                Some(fields) => FieldsShape::Union(fields),
                None => FieldsShape::Arbitrary {
                    offsets: IndexVec::new(),
                    memory_index: IndexVec::new(),
                },
            },
            backend_repr: BackendRepr::Memory { sized: true },
            largest_niche: None,
            uninhabited: true,
            align: dl.i8_align,
            size: Size::ZERO,
            max_repr_align: None,
            unadjusted_abi_align: dl.i8_align.abi,
            randomization_seed: Hash64::ZERO,
        }
    }
}
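For the `scalar` constructor above, the seed derivation is plain integer mixing; a standalone sketch with bare u64 values (not the rustc_abi `Scalar`/`Hash64` types) shows why bool and u8 get different seeds despite having the same size:

// Plain-integer sketch of the seed mixing done in `LayoutData::scalar`.
fn scalar_seed(size_bytes: u64, primitive_tag: u64, range_start: u64, range_end: u64) -> u64 {
    size_bytes
        .wrapping_add(primitive_tag << 32)
        .wrapping_add(range_start.rotate_right(16))
        .wrapping_add(range_end.rotate_right(16))
}

fn main() {
    let bool_seed = scalar_seed(1, 2, 0, 1); // unsigned int, valid range 0..=1
    let u8_seed = scalar_seed(1, 2, 0, 255); // unsigned int, full 0..=255 range
    assert_ne!(bool_seed, u8_seed);
}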
@@ -150,6 +150,12 @@ impl<'a, Ty> Deref for TyAndLayout<'a, Ty> {
    }
}

+impl<'a, Ty> AsRef<LayoutData<FieldIdx, VariantIdx>> for TyAndLayout<'a, Ty> {
+    fn as_ref(&self) -> &LayoutData<FieldIdx, VariantIdx> {
+        &*self.layout.0.0
+    }
+}
+
/// Trait that needs to be implemented by the higher-level type representation
/// (e.g. `rustc_middle::ty::Ty`), to provide `rustc_target::abi` functionality.
pub trait TyAbiInterface<'a, C>: Sized + std::fmt::Debug {
@@ -5,7 +5,6 @@
#![cfg_attr(feature = "nightly", feature(rustc_attrs))]
#![cfg_attr(feature = "nightly", feature(rustdoc_internals))]
#![cfg_attr(feature = "nightly", feature(step_trait))]
-#![warn(unreachable_pub)]
// tidy-alphabetical-end

/*! ABI handling for rustc
@@ -205,6 +204,13 @@ impl ReprOptions {
    }
}

+/// The maximum supported number of lanes in a SIMD vector.
+///
+/// This value is selected based on backend support:
+/// * LLVM does not appear to have a vector width limit.
+/// * Cranelift stores the base-2 log of the lane count in a 4 bit integer.
+pub const MAX_SIMD_LANES: u64 = 1 << 0xF;
+
/// Parsed [Data layout](https://llvm.org/docs/LangRef.html#data-layout)
/// for a target, which contains everything needed to compute layouts.
#[derive(Debug, PartialEq, Eq)]
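A quick sanity check of the arithmetic behind the new constant (not part of the diff itself): a 4-bit field can hold a base-2 exponent of at most 0b1111 = 15, so the largest lane count Cranelift can represent is 2^15.

fn main() {
    const MAX_SIMD_LANES: u64 = 1 << 0xF;
    assert_eq!(MAX_SIMD_LANES, 32768);
    assert_eq!(MAX_SIMD_LANES, 2u64.pow(15));
}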
@@ -1744,48 +1750,6 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
    pub fn is_uninhabited(&self) -> bool {
        self.uninhabited
    }
-
-    pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self {
-        let largest_niche = Niche::from_scalar(cx, Size::ZERO, scalar);
-        let size = scalar.size(cx);
-        let align = scalar.align(cx);
-
-        let range = scalar.valid_range(cx);
-
-        // All primitive types for which we don't have subtype coercions should get a distinct seed,
-        // so that types wrapping them can use randomization to arrive at distinct layouts.
-        //
-        // Some type information is already lost at this point, so as an approximation we derive
-        // the seed from what remains. For example on 64-bit targets usize and u64 can no longer
-        // be distinguished.
-        let randomization_seed = size
-            .bytes()
-            .wrapping_add(
-                match scalar.primitive() {
-                    Primitive::Int(_, true) => 1,
-                    Primitive::Int(_, false) => 2,
-                    Primitive::Float(_) => 3,
-                    Primitive::Pointer(_) => 4,
-                } << 32,
-            )
-            // distinguishes references from pointers
-            .wrapping_add((range.start as u64).rotate_right(16))
-            // distinguishes char from u32 and bool from u8
-            .wrapping_add((range.end as u64).rotate_right(16));
-
-        LayoutData {
-            variants: Variants::Single { index: VariantIdx::new(0) },
-            fields: FieldsShape::Primitive,
-            backend_repr: BackendRepr::Scalar(scalar),
-            largest_niche,
-            uninhabited: false,
-            size,
-            align,
-            max_repr_align: None,
-            unadjusted_abi_align: align.abi,
-            randomization_seed: Hash64::new(randomization_seed),
-        }
-    }
}

impl<FieldIdx: Idx, VariantIdx: Idx> fmt::Debug for LayoutData<FieldIdx, VariantIdx>
@ -7,3 +7,6 @@ edition = "2024"
|
|||||||
# tidy-alphabetical-start
|
# tidy-alphabetical-start
|
||||||
smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
|
smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
|
||||||
# tidy-alphabetical-end
|
# tidy-alphabetical-end
|
||||||
|
|
||||||
|
[lints]
|
||||||
|
workspace = true
|
||||||
|
@@ -23,7 +23,6 @@
#![feature(maybe_uninit_slice)]
#![feature(rustc_attrs)]
#![feature(rustdoc_internals)]
-#![warn(unreachable_pub)]
// tidy-alphabetical-end

use std::alloc::Layout;
@@ -93,7 +92,7 @@ impl<T> ArenaChunk<T> {
    #[inline]
    fn end(&mut self) -> *mut T {
        unsafe {
-            if mem::size_of::<T>() == 0 {
+            if size_of::<T>() == 0 {
                // A pointer as large as possible for zero-sized elements.
                ptr::without_provenance_mut(!0)
            } else {
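This and the following rustc_arena hunks are the same mechanical change from #138040: calling `size_of`/`align_of` unqualified. That works because the free functions were added to the standard prelude in Rust 1.80; a minimal standalone check:

fn main() {
    // No `use std::mem::...` needed: `size_of` and `align_of` come from the prelude.
    assert_eq!(size_of::<u32>(), 4);
    assert_eq!(align_of::<u8>(), 1);
}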
@@ -151,7 +150,7 @@ impl<T> TypedArena<T> {
        }

        unsafe {
-            if mem::size_of::<T>() == 0 {
+            if size_of::<T>() == 0 {
                self.ptr.set(self.ptr.get().wrapping_byte_add(1));
                let ptr = ptr::NonNull::<T>::dangling().as_ptr();
                // Don't drop the object. This `write` is equivalent to `forget`.
@@ -173,13 +172,13 @@ impl<T> TypedArena<T> {
        // FIXME: this should *likely* use `offset_from`, but more
        // investigation is needed (including running tests in miri).
        let available_bytes = self.end.get().addr() - self.ptr.get().addr();
-        let additional_bytes = additional.checked_mul(mem::size_of::<T>()).unwrap();
+        let additional_bytes = additional.checked_mul(size_of::<T>()).unwrap();
        available_bytes >= additional_bytes
    }

    #[inline]
    fn alloc_raw_slice(&self, len: usize) -> *mut T {
-        assert!(mem::size_of::<T>() != 0);
+        assert!(size_of::<T>() != 0);
        assert!(len != 0);

        // Ensure the current chunk can fit `len` objects.
@@ -213,7 +212,7 @@ impl<T> TypedArena<T> {
        // So we collect all the elements beforehand, which takes care of reentrancy and panic
        // safety. This function is much less hot than `DroplessArena::alloc_from_iter`, so it
        // doesn't need to be hyper-optimized.
-        assert!(mem::size_of::<T>() != 0);
+        assert!(size_of::<T>() != 0);

        let mut vec: SmallVec<[_; 8]> = iter.into_iter().collect();
        if vec.is_empty() {
@@ -236,7 +235,7 @@ impl<T> TypedArena<T> {
        unsafe {
            // We need the element size to convert chunk sizes (ranging from
            // PAGE to HUGE_PAGE bytes) to element counts.
-            let elem_size = cmp::max(1, mem::size_of::<T>());
+            let elem_size = cmp::max(1, size_of::<T>());
            let mut chunks = self.chunks.borrow_mut();
            let mut new_cap;
            if let Some(last_chunk) = chunks.last_mut() {
@@ -246,7 +245,7 @@ impl<T> TypedArena<T> {
                // FIXME: this should *likely* use `offset_from`, but more
                // investigation is needed (including running tests in miri).
                let used_bytes = self.ptr.get().addr() - last_chunk.start().addr();
-                last_chunk.entries = used_bytes / mem::size_of::<T>();
+                last_chunk.entries = used_bytes / size_of::<T>();
            }

            // If the previous chunk's len is less than HUGE_PAGE
@@ -276,7 +275,7 @@ impl<T> TypedArena<T> {
        let end = self.ptr.get().addr();
        // We then calculate the number of elements to be dropped in the last chunk,
        // which is the filled area's length.
-        let diff = if mem::size_of::<T>() == 0 {
+        let diff = if size_of::<T>() == 0 {
            // `T` is ZST. It can't have a drop flag, so the value here doesn't matter. We get
            // the number of zero-sized values in the last and only chunk, just out of caution.
            // Recall that `end` was incremented for each allocated value.
@@ -284,7 +283,7 @@ impl<T> TypedArena<T> {
        } else {
            // FIXME: this should *likely* use `offset_from`, but more
            // investigation is needed (including running tests in miri).
-            (end - start) / mem::size_of::<T>()
+            (end - start) / size_of::<T>()
        };
        // Pass that to the `destroy` method.
        unsafe {
@@ -329,7 +328,7 @@ fn align_up(val: usize, align: usize) -> usize {

// Pointer alignment is common in compiler types, so keep `DroplessArena` aligned to them
// to optimize away alignment code.
-const DROPLESS_ALIGNMENT: usize = mem::align_of::<usize>();
+const DROPLESS_ALIGNMENT: usize = align_of::<usize>();

/// An arena that can hold objects of multiple different types that impl `Copy`
/// and/or satisfy `!mem::needs_drop`.
@@ -447,7 +446,7 @@ impl DroplessArena {
    #[inline]
    pub fn alloc<T>(&self, object: T) -> &mut T {
        assert!(!mem::needs_drop::<T>());
-        assert!(mem::size_of::<T>() != 0);
+        assert!(size_of::<T>() != 0);

        let mem = self.alloc_raw(Layout::new::<T>()) as *mut T;

@@ -471,7 +470,7 @@ impl DroplessArena {
        T: Copy,
    {
        assert!(!mem::needs_drop::<T>());
-        assert!(mem::size_of::<T>() != 0);
+        assert!(size_of::<T>() != 0);
        assert!(!slice.is_empty());

        let mem = self.alloc_raw(Layout::for_value::<[T]>(slice)) as *mut T;
@@ -546,7 +545,7 @@ impl DroplessArena {
        // Warning: this function is reentrant: `iter` could hold a reference to `&self` and
        // allocate additional elements while we're iterating.
        let iter = iter.into_iter();
-        assert!(mem::size_of::<T>() != 0);
+        assert!(size_of::<T>() != 0);
        assert!(!mem::needs_drop::<T>());

        let size_hint = iter.size_hint();
@ -18,3 +18,6 @@ smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
|
|||||||
thin-vec = "0.2.12"
|
thin-vec = "0.2.12"
|
||||||
tracing = "0.1"
|
tracing = "0.1"
|
||||||
# tidy-alphabetical-end
|
# tidy-alphabetical-end
|
||||||
|
|
||||||
|
[lints]
|
||||||
|
workspace = true
|
||||||
|
@@ -19,7 +19,6 @@
#![feature(never_type)]
#![feature(rustdoc_internals)]
#![feature(stmt_expr_attributes)]
-#![warn(unreachable_pub)]
// tidy-alphabetical-end

pub mod util {
@@ -19,3 +19,6 @@ nightly = [
    "dep:rustc_macros",
    "dep:rustc_span",
]
+
+[lints]
+workspace = true
@@ -9,7 +9,6 @@
#![cfg_attr(feature = "nightly", allow(internal_features))]
#![cfg_attr(feature = "nightly", feature(never_type))]
#![cfg_attr(feature = "nightly", feature(rustc_attrs))]
-#![warn(unreachable_pub)]
// tidy-alphabetical-end

#[cfg(feature = "nightly")]
@ -28,3 +28,6 @@ smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
|
|||||||
thin-vec = "0.2.12"
|
thin-vec = "0.2.12"
|
||||||
tracing = "0.1"
|
tracing = "0.1"
|
||||||
# tidy-alphabetical-end
|
# tidy-alphabetical-end
|
||||||
|
|
||||||
|
[lints]
|
||||||
|
workspace = true
|
||||||
|
@@ -38,7 +38,6 @@
#![feature(if_let_guard)]
#![feature(let_chains)]
#![feature(rustdoc_internals)]
-#![warn(unreachable_pub)]
// tidy-alphabetical-end

use std::sync::Arc;
@ -20,3 +20,6 @@ rustc_session = { path = "../rustc_session" }
|
|||||||
rustc_span = { path = "../rustc_span" }
|
rustc_span = { path = "../rustc_span" }
|
||||||
thin-vec = "0.2.12"
|
thin-vec = "0.2.12"
|
||||||
# tidy-alphabetical-end
|
# tidy-alphabetical-end
|
||||||
|
|
||||||
|
[lints]
|
||||||
|
workspace = true
|
||||||
|
@@ -10,7 +10,6 @@
#![feature(iter_is_partitioned)]
#![feature(let_chains)]
#![feature(rustdoc_internals)]
-#![warn(unreachable_pub)]
// tidy-alphabetical-end

pub mod ast_validation;
@ -12,3 +12,6 @@ rustc_lexer = { path = "../rustc_lexer" }
|
|||||||
rustc_span = { path = "../rustc_span" }
|
rustc_span = { path = "../rustc_span" }
|
||||||
thin-vec = "0.2.12"
|
thin-vec = "0.2.12"
|
||||||
# tidy-alphabetical-end
|
# tidy-alphabetical-end
|
||||||
|
|
||||||
|
[lints]
|
||||||
|
workspace = true
|
||||||
|
@@ -3,7 +3,6 @@
#![doc(rust_logo)]
#![feature(box_patterns)]
#![feature(rustdoc_internals)]
-#![warn(unreachable_pub)]
// tidy-alphabetical-end

mod helpers;
@ -14,3 +14,6 @@ rustc_serialize = {path = "../rustc_serialize"}
|
|||||||
rustc_span = {path = "../rustc_span"}
|
rustc_span = {path = "../rustc_span"}
|
||||||
thin-vec = "0.2.12"
|
thin-vec = "0.2.12"
|
||||||
# tidy-alphabetical-end
|
# tidy-alphabetical-end
|
||||||
|
|
||||||
|
[lints]
|
||||||
|
workspace = true
|
||||||
|
@@ -3,7 +3,6 @@
#![doc(rust_logo)]
#![feature(let_chains)]
#![feature(rustdoc_internals)]
-#![warn(unreachable_pub)]
// tidy-alphabetical-end

mod attributes;
@ -149,3 +148,47 @@ print_tup!(A B C D E F G H);
|
|||||||
print_skip!(Span, ());
|
print_skip!(Span, ());
|
||||||
print_disp!(Symbol, u16, bool, NonZero<u32>);
|
print_disp!(Symbol, u16, bool, NonZero<u32>);
|
||||||
print_debug!(UintTy, IntTy, Align, AttrStyle, CommentKind, Transparency);
|
print_debug!(UintTy, IntTy, Align, AttrStyle, CommentKind, Transparency);
|
||||||
|
|
||||||
|
/// Finds attributes in sequences of attributes by pattern matching.
|
||||||
|
///
|
||||||
|
/// A little like `matches` but for attributes.
|
||||||
|
///
|
||||||
|
/// ```rust,ignore (illustrative)
|
||||||
|
/// // finds the repr attribute
|
||||||
|
/// if let Some(r) = find_attr!(attrs, AttributeKind::Repr(r) => r) {
|
||||||
|
///
|
||||||
|
/// }
|
||||||
|
///
|
||||||
|
/// // checks if one has matched
|
||||||
|
/// if find_attr!(attrs, AttributeKind::Repr(_)) {
|
||||||
|
///
|
||||||
|
/// }
|
||||||
|
/// ```
|
||||||
|
///
|
||||||
|
/// Often this requires you to first end up with a list of attributes.
|
||||||
|
/// A common way to get those is through `tcx.get_all_attrs(did)`
|
||||||
|
#[macro_export]
|
||||||
|
macro_rules! find_attr {
|
||||||
|
($attributes_list: expr, $pattern: pat $(if $guard: expr)?) => {{
|
||||||
|
$crate::find_attr!($attributes_list, $pattern $(if $guard)? => ()).is_some()
|
||||||
|
}};
|
||||||
|
|
||||||
|
($attributes_list: expr, $pattern: pat $(if $guard: expr)? => $e: expr) => {{
|
||||||
|
fn check_attribute_iterator<'a>(_: &'_ impl IntoIterator<Item = &'a rustc_hir::Attribute>) {}
|
||||||
|
check_attribute_iterator(&$attributes_list);
|
||||||
|
|
||||||
|
let find_attribute = |iter| {
|
||||||
|
for i in $attributes_list {
|
||||||
|
match i {
|
||||||
|
rustc_hir::Attribute::Parsed($pattern) $(if $guard)? => {
|
||||||
|
return Some($e);
|
||||||
|
}
|
||||||
|
_ => {}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
None
|
||||||
|
};
|
||||||
|
find_attribute($attributes_list)
|
||||||
|
}};
|
||||||
|
}
|
||||||
|
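The hunk above adds `find_attr!` to `rustc_attr_data_structures` (a later hunk in this diff removes the identical copy from the crate that previously hosted it). As a reading aid, here is a small self-contained sketch of the pattern the macro implements; `Attr` and `find_attr_demo!` are illustrative stand-ins, not the compiler's `rustc_hir::Attribute` or the real macro.

```rust
// Toy analogue of `find_attr!`: scan a list of parsed attributes and either
// extract a payload from the first match or just test whether one matched.
enum Attr {
    Repr(u32),
    Inline,
}

macro_rules! find_attr_demo {
    // Boolean form: reuse the value form and ask whether it produced a value.
    ($attrs:expr, $pattern:pat $(if $guard:expr)?) => {{
        find_attr_demo!($attrs, $pattern $(if $guard)? => ()).is_some()
    }};
    // Value form: return `Some(expr)` for the first attribute matching the pattern.
    ($attrs:expr, $pattern:pat $(if $guard:expr)? => $e:expr) => {{
        let mut found = None;
        for attr in $attrs.iter() {
            match attr {
                $pattern $(if $guard)? => {
                    found = Some($e);
                    break;
                }
                _ => {}
            }
        }
        found
    }};
}

fn main() {
    let attrs = vec![Attr::Inline, Attr::Repr(4)];
    // Extract the payload of the first matching attribute...
    assert_eq!(find_attr_demo!(attrs, Attr::Repr(n) => *n), Some(4));
    // ...or only check that some attribute matched.
    assert!(find_attr_demo!(attrs, Attr::Inline));
}
```

The real macro's extra `check_attribute_iterator` helper appears to exist only to produce a clearer type error when the first argument is not an iterator of attributes.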
@@ -16,8 +16,12 @@ rustc_fluent_macro = { path = "../rustc_fluent_macro" }
 rustc_hir = { path = "../rustc_hir" }
 rustc_lexer = { path = "../rustc_lexer" }
 rustc_macros = { path = "../rustc_macros" }
+rustc_middle = { path = "../rustc_middle" }
 rustc_serialize = { path = "../rustc_serialize" }
 rustc_session = { path = "../rustc_session" }
 rustc_span = { path = "../rustc_span" }
 thin-vec = "0.2.12"
 # tidy-alphabetical-end
+
+[lints]
+workspace = true
@@ -80,7 +80,6 @@
 #![doc(rust_logo)]
 #![feature(let_chains)]
 #![feature(rustdoc_internals)]
-#![warn(unreachable_pub)]
 // tidy-alphabetical-end
 
 #[macro_use]
@@ -95,47 +94,3 @@ pub use context::{AttributeParser, OmitDoc};
 pub use rustc_attr_data_structures::*;
 
 rustc_fluent_macro::fluent_messages! { "../messages.ftl" }
-
-/// Finds attributes in sequences of attributes by pattern matching.
-///
-/// A little like `matches` but for attributes.
-///
-/// ```rust,ignore (illustrative)
-/// // finds the repr attribute
-/// if let Some(r) = find_attr!(attrs, AttributeKind::Repr(r) => r) {
-///
-/// }
-///
-/// // checks if one has matched
-/// if find_attr!(attrs, AttributeKind::Repr(_)) {
-///
-/// }
-/// ```
-///
-/// Often this requires you to first end up with a list of attributes.
-/// A common way to get those is through `tcx.get_all_attrs(did)`
-#[macro_export]
-macro_rules! find_attr {
-    ($attributes_list: expr, $pattern: pat $(if $guard: expr)?) => {{
-        $crate::find_attr!($attributes_list, $pattern $(if $guard)? => ()).is_some()
-    }};
-
-    ($attributes_list: expr, $pattern: pat $(if $guard: expr)? => $e: expr) => {{
-        fn check_attribute_iterator<'a>(_: &'_ impl IntoIterator<Item = &'a rustc_hir::Attribute>) {}
-        check_attribute_iterator(&$attributes_list);
-
-        let find_attribute = |iter| {
-            for i in $attributes_list {
-                match i {
-                    rustc_hir::Attribute::Parsed($pattern) $(if $guard)? => {
-                        return Some($e);
-                    }
-                    _ => {}
-                }
-            }
-
-            None
-        };
-        find_attribute($attributes_list)
-    }};
-}
@@ -11,3 +11,6 @@ icu_locid_transform = "1.3.2"
 icu_provider = { version = "1.2", features = ["sync"] }
 zerovec = "0.10.0"
 # tidy-alphabetical-end
+
+[lints]
+workspace = true
@@ -23,9 +23,9 @@
 // tidy-alphabetical-start
 #![allow(elided_lifetimes_in_paths)]
 #![allow(internal_features)]
+#![allow(unreachable_pub)] // because this crate is mostly generated code
 #![doc(rust_logo)]
 #![feature(rustdoc_internals)]
-// #![warn(unreachable_pub)] // don't use because this crate is mostly generated code
 // tidy-alphabetical-end
 
 mod data {
@@ -27,3 +27,6 @@ rustc_traits = { path = "../rustc_traits" }
 smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
 tracing = "0.1"
 # tidy-alphabetical-end
+
+[lints]
+workspace = true
@@ -13,7 +13,6 @@
 #![feature(rustdoc_internals)]
 #![feature(stmt_expr_attributes)]
 #![feature(try_blocks)]
-#![warn(unreachable_pub)]
 // tidy-alphabetical-end
 
 use std::borrow::Cow;
@@ -3,10 +3,6 @@ name = "rustc_builtin_macros"
 version = "0.0.0"
 edition = "2024"
 
-
-[lints.rust]
-unexpected_cfgs = { level = "warn", check-cfg = ['cfg(llvm_enzyme)'] }
-
 [lib]
 doctest = false
 
@@ -34,3 +30,6 @@ smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
 thin-vec = "0.2.12"
 tracing = "0.1"
 # tidy-alphabetical-end
+
+[lints]
+workspace = true
@@ -18,7 +18,6 @@
 #![feature(rustdoc_internals)]
 #![feature(string_from_utf8_lossy_owned)]
 #![feature(try_blocks)]
-#![warn(unreachable_pub)]
 // tidy-alphabetical-end
 
 extern crate proc_macro;
@@ -2439,9 +2439,5 @@ fn get_maybe_pointer_size(value: RValue<'_>) -> u32 {
 #[cfg(not(feature = "master"))]
 fn get_maybe_pointer_size(value: RValue<'_>) -> u32 {
     let type_ = value.get_type();
-    if type_.get_pointee().is_some() {
-        std::mem::size_of::<*const ()>() as _
-    } else {
-        type_.get_size()
-    }
+    if type_.get_pointee().is_some() { size_of::<*const ()>() as _ } else { type_.get_size() }
 }
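Several hunks in this rollup, including the one above, drop explicit `std::mem::` / `mem::` paths in favour of the bare `size_of`, `size_of_val` and `align_of` names. A minimal sketch of why that works, assuming a toolchain new enough to have these functions in the standard prelude (they were added to it in Rust 1.80):

```rust
// With `size_of` and `size_of_val` in the prelude, the bare names resolve
// without an import and without spelling out `std::mem::`.
fn main() {
    // Same function, reachable through either path.
    assert_eq!(size_of::<*const ()>(), std::mem::size_of::<*const ()>());

    let buf = [0u8; 16];
    assert_eq!(size_of_val(&buf), 16);
}
```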
@@ -43,3 +43,6 @@ serde_json = "1"
 smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
 tracing = "0.1"
 # tidy-alphabetical-end
+
+[lints]
+workspace = true
@@ -19,7 +19,6 @@
 #![feature(rustdoc_internals)]
 #![feature(slice_as_array)]
 #![feature(try_blocks)]
-#![warn(unreachable_pub)]
 // tidy-alphabetical-end
 
 use std::any::Any;
@@ -63,3 +63,6 @@ features = ["read_core", "elf", "macho", "pe", "xcoff", "unaligned", "archive",
 [target.'cfg(windows)'.dependencies.windows]
 version = "0.59.0"
 features = ["Win32_Globalization"]
+
+[lints]
+workspace = true
@@ -1177,7 +1177,7 @@ mod win {
         let mut cp: u32 = 0;
         // We're using the `LOCALE_RETURN_NUMBER` flag to return a u32.
        // But the API requires us to pass the data as though it's a [u16] string.
-        let len = std::mem::size_of::<u32>() / std::mem::size_of::<u16>();
+        let len = size_of::<u32>() / size_of::<u16>();
         let data = std::slice::from_raw_parts_mut(&mut cp as *mut u32 as *mut u16, len);
         let len_written = GetLocaleInfoEx(
             LOCALE_NAME_SYSTEM_DEFAULT,
@@ -14,7 +14,6 @@
 #![feature(rustdoc_internals)]
 #![feature(trait_alias)]
 #![feature(try_blocks)]
-#![warn(unreachable_pub)]
 // tidy-alphabetical-end
 
 //! This crate contains codegen code that is used by all codegen backends (LLVM and others).
@@ -26,3 +26,6 @@ rustc_trait_selection = { path = "../rustc_trait_selection" }
 rustc_type_ir = { path = "../rustc_type_ir" }
 tracing = "0.1"
 # tidy-alphabetical-end
+
+[lints]
+workspace = true
@@ -16,7 +16,6 @@
 #![feature(unqualified_local_imports)]
 #![feature(yeet_expr)]
 #![warn(unqualified_local_imports)]
-#![warn(unreachable_pub)]
 // tidy-alphabetical-end
 
 pub mod check_consts;
@@ -54,3 +54,6 @@ memmap2 = "0.2.1"
 
 [target.'cfg(not(target_has_atomic = "64"))'.dependencies]
 portable-atomic = "1.5.1"
+
+[lints]
+workspace = true
@@ -2,10 +2,8 @@ use std::ptr::Alignment;
 
 /// Returns the ABI-required minimum alignment of a type in bytes.
 ///
-/// This is equivalent to [`mem::align_of`], but also works for some unsized
+/// This is equivalent to [`align_of`], but also works for some unsized
 /// types (e.g. slices or rustc's `List`s).
-///
-/// [`mem::align_of`]: std::mem::align_of
 pub const fn align_of<T: ?Sized + Aligned>() -> Alignment {
     T::ALIGN
 }
@@ -15,10 +13,10 @@ pub const fn align_of<T: ?Sized + Aligned>() -> Alignment {
 /// # Safety
 ///
 /// `Self::ALIGN` must be equal to the alignment of `Self`. For sized types it
-/// is [`mem::align_of<Self>()`], for unsized types it depends on the type, for
+/// is [`align_of::<Self>()`], for unsized types it depends on the type, for
 /// example `[T]` has alignment of `T`.
 ///
-/// [`mem::align_of<Self>()`]: std::mem::align_of
+/// [`align_of::<Self>()`]: align_of
 pub unsafe trait Aligned {
     /// Alignment of `Self`.
     const ALIGN: Alignment;
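The doc-comment fix above also shows the shape of the `Aligned` trait: the alignment is an associated constant, so it can be provided even for some unsized types, which a plain `align_of::<T>()` call cannot name. A self-contained analogue under assumed, hypothetical names (`AlignedDemo`, with `usize` standing in for the unstable `std::ptr::Alignment`):

```rust
// Toy analogue of the `Aligned` idea, not rustc's actual trait.
unsafe trait AlignedDemo {
    /// Alignment of `Self`; the implementor promises this is correct.
    const ALIGN: usize;
}

unsafe impl AlignedDemo for u64 {
    const ALIGN: usize = align_of::<u64>();
}

unsafe impl AlignedDemo for [u16] {
    // A slice is aligned like its element type, even though `[u16]` is unsized.
    const ALIGN: usize = align_of::<u16>();
}

fn main() {
    assert_eq!(<u64 as AlignedDemo>::ALIGN, align_of::<u64>());
    assert_eq!(<[u16] as AlignedDemo>::ALIGN, align_of::<u16>());
}
```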
@@ -3,7 +3,7 @@ use std::cmp::max;
 use super::*;
 use crate::fx::FxHashMap;
 
-pub struct TestGraph {
+pub(super) struct TestGraph {
     num_nodes: usize,
     start_node: usize,
     successors: FxHashMap<usize, Vec<usize>>,
@@ -11,7 +11,7 @@ pub struct TestGraph {
 }
 
 impl TestGraph {
-    pub fn new(start_node: usize, edges: &[(usize, usize)]) -> Self {
+    pub(super) fn new(start_node: usize, edges: &[(usize, usize)]) -> Self {
         let mut graph = TestGraph {
             num_nodes: start_node + 1,
             start_node,
@@ -313,7 +313,7 @@ pub struct Error<O, E> {
 
 mod helper {
     use super::*;
-    pub type ObligationTreeIdGenerator = impl Iterator<Item = ObligationTreeId>;
+    pub(super) type ObligationTreeIdGenerator = impl Iterator<Item = ObligationTreeId>;
     impl<O: ForestObligation> ObligationForest<O> {
         pub fn new() -> ObligationForest<O> {
             ObligationForest {
@@ -863,15 +863,13 @@ fn get_thread_id() -> u32 {
 cfg_match! {
     windows => {
         pub fn get_resident_set_size() -> Option<usize> {
-            use std::mem;
-
             use windows::{
                 Win32::System::ProcessStatus::{K32GetProcessMemoryInfo, PROCESS_MEMORY_COUNTERS},
                 Win32::System::Threading::GetCurrentProcess,
             };
 
             let mut pmc = PROCESS_MEMORY_COUNTERS::default();
-            let pmc_size = mem::size_of_val(&pmc);
+            let pmc_size = size_of_val(&pmc);
             unsafe {
                 K32GetProcessMemoryInfo(
                     GetCurrentProcess(),
@@ -889,7 +887,7 @@ cfg_match! {
         pub fn get_resident_set_size() -> Option<usize> {
             use libc::{c_int, c_void, getpid, proc_pidinfo, proc_taskinfo, PROC_PIDTASKINFO};
             use std::mem;
-            const PROC_TASKINFO_SIZE: c_int = mem::size_of::<proc_taskinfo>() as c_int;
+            const PROC_TASKINFO_SIZE: c_int = size_of::<proc_taskinfo>() as c_int;
 
             unsafe {
                 let mut info: proc_taskinfo = mem::zeroed();
@@ -1,7 +1,7 @@
 use std::borrow::Borrow;
 use std::collections::hash_map::RawEntryMut;
 use std::hash::{Hash, Hasher};
-use std::{iter, mem};
+use std::iter;
 
 use either::Either;
 
@@ -221,7 +221,7 @@ pub fn make_hash<K: Hash + ?Sized>(val: &K) -> u64 {
 /// consistently for each `Sharded` instance.
 #[inline]
 fn get_shard_hash(hash: u64) -> usize {
-    let hash_len = mem::size_of::<usize>();
+    let hash_len = size_of::<usize>();
     // Ignore the top 7 bits as hashbrown uses these and get the next SHARD_BITS highest bits.
     // hashbrown also uses the lowest bits, so we can't use those
     (hash >> (hash_len * 8 - 7 - SHARD_BITS)) as usize
@@ -88,7 +88,7 @@ mod mode {
 
     // Whether thread safety might be enabled.
     #[inline]
-    pub fn might_be_dyn_thread_safe() -> bool {
+    pub(super) fn might_be_dyn_thread_safe() -> bool {
         DYN_THREAD_SAFE_MODE.load(Ordering::Relaxed) != DYN_NOT_THREAD_SAFE
     }
 
@@ -46,7 +46,7 @@ pub fn parallel_guard<R>(f: impl FnOnce(&ParallelGuard) -> R) -> R {
     ret
 }
 
-pub fn serial_join<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB)
+fn serial_join<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB)
 where
     A: FnOnce() -> RA,
     B: FnOnce() -> RB,
@@ -7,7 +7,7 @@ use crate::stable_hasher::{HashStable, StableHasher};
 
 /// A tag type used in [`TaggedRef`] tests.
 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
-pub enum Tag2 {
+enum Tag2 {
     B00 = 0b00,
     B01 = 0b01,
     B10 = 0b10,
@@ -10,3 +10,6 @@ crate-type = ["dylib"]
 # tidy-alphabetical-start
 rustc_driver_impl = { path = "../rustc_driver_impl" }
 # tidy-alphabetical-end
+
+[lints]
+workspace = true
@@ -79,3 +79,6 @@ rustc_randomized_layouts = [
 'rustc_middle/rustc_randomized_layouts'
 ]
 # tidy-alphabetical-end
+
+[lints]
+workspace = true
@@ -16,7 +16,6 @@
 #![feature(result_flattening)]
 #![feature(rustdoc_internals)]
 #![feature(try_blocks)]
-#![warn(unreachable_pub)]
 // tidy-alphabetical-end
 
 use std::cmp::max;
@@ -6,3 +6,6 @@ edition = "2024"
 [dependencies]
 # tidy-alphabetical-start
 # tidy-alphabetical-end
+
+[lints]
+workspace = true
@@ -6,7 +6,6 @@
 #![deny(rustdoc::invalid_codeblock_attributes)]
 #![doc(rust_logo)]
 #![feature(rustdoc_internals)]
-#![warn(unreachable_pub)]
 // tidy-alphabetical-end
 
 // This higher-order macro defines the error codes that are in use. It is used
@@ -19,3 +19,6 @@ rustc_span = { path = "../rustc_span" }
 tracing = "0.1"
 unic-langid = { version = "0.9.0", features = ["macros"] }
 # tidy-alphabetical-end
+
+[lints]
+workspace = true
@@ -4,7 +4,6 @@
 #![feature(rustc_attrs)]
 #![feature(rustdoc_internals)]
 #![feature(type_alias_impl_trait)]
-#![warn(unreachable_pub)]
 // tidy-alphabetical-end
 
 use std::borrow::Cow;
@@ -39,3 +39,6 @@ features = [
 "Win32_Security",
 "Win32_System_Threading",
 ]
+
+[lints]
+workspace = true
@@ -490,7 +490,7 @@ pub struct Diag<'a, G: EmissionGuarantee = ErrorGuaranteed> {
 // would be bad.
 impl<G> !Clone for Diag<'_, G> {}
 
-rustc_data_structures::static_assert_size!(Diag<'_, ()>, 3 * std::mem::size_of::<usize>());
+rustc_data_structures::static_assert_size!(Diag<'_, ()>, 3 * size_of::<usize>());
 
 impl<G: EmissionGuarantee> Deref for Diag<'_, G> {
     type Target = DiagInner;
@@ -25,7 +25,6 @@
 #![feature(trait_alias)]
 #![feature(try_blocks)]
 #![feature(yeet_expr)]
-#![warn(unreachable_pub)]
 // tidy-alphabetical-end
 
 extern crate self as rustc_errors;
@@ -29,3 +29,6 @@ smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
 thin-vec = "0.2.12"
 tracing = "0.1"
 # tidy-alphabetical-end
+
+[lints]
+workspace = true
@@ -13,7 +13,6 @@
 #![feature(rustdoc_internals)]
 #![feature(try_blocks)]
 #![feature(yeet_expr)]
-#![warn(unreachable_pub)]
 // tidy-alphabetical-end
 
 extern crate proc_macro as pm;
@@ -10,3 +10,6 @@ rustc_span = { path = "../rustc_span" }
 serde = { version = "1.0.125", features = [ "derive" ] }
 serde_json = "1.0.59"
 # tidy-alphabetical-end
+
+[lints]
+workspace = true
@@ -15,7 +15,6 @@
 #![allow(internal_features)]
 #![doc(rust_logo)]
 #![feature(rustdoc_internals)]
-#![warn(unreachable_pub)]
 // tidy-alphabetical-end
 
 mod accepted;
@@ -16,3 +16,6 @@ quote = "1"
 syn = { version = "2", features = ["full"] }
 unic-langid = { version = "0.9.0", features = ["macros"] }
 # tidy-alphabetical-end
+
+[lints]
+workspace = true
@@ -7,7 +7,6 @@
 #![feature(proc_macro_span)]
 #![feature(rustdoc_internals)]
 #![feature(track_path)]
-#![warn(unreachable_pub)]
 // tidy-alphabetical-end
 
 use proc_macro::TokenStream;
@@ -6,3 +6,6 @@ edition = "2024"
 [dependencies]
 # tidy-alphabetical-start
 # tidy-alphabetical-end
+
+[lints]
+workspace = true
@@ -6,3 +6,6 @@ edition = "2024"
 [dependencies]
 # tidy-alphabetical-start
 # tidy-alphabetical-end
+
+[lints]
+workspace = true
@@ -277,7 +277,6 @@
 )]
 #![doc(rust_logo)]
 #![feature(rustdoc_internals)]
-#![warn(unreachable_pub)]
 // tidy-alphabetical-end
 
 use std::borrow::Cow;
@@ -7,3 +7,6 @@ edition = "2024"
 # tidy-alphabetical-start
 rustc-stable-hash = { version = "0.1.0" }
 # tidy-alphabetical-end
+
+[lints]
+workspace = true
@@ -21,3 +21,6 @@ smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
 thin-vec = "0.2.12"
 tracing = "0.1"
 # tidy-alphabetical-end
+
+[lints]
+workspace = true
@@ -435,7 +435,7 @@ pub enum Res<Id = hir::HirId> {
     /// mention any generic parameters to allow the following with `min_const_generics`:
     /// ```
     /// # struct Foo;
-    /// impl Foo { fn test() -> [u8; std::mem::size_of::<Self>()] { todo!() } }
+    /// impl Foo { fn test() -> [u8; size_of::<Self>()] { todo!() } }
     ///
     /// struct Bar([u8; baz::<Self>()]);
     /// const fn baz<T>() -> usize { 10 }
@@ -445,7 +445,7 @@ pub enum Res<Id = hir::HirId> {
     /// compat lint:
     /// ```
     /// fn foo<T>() {
-    ///     let _bar = [1_u8; std::mem::size_of::<*mut T>()];
+    ///     let _bar = [1_u8; size_of::<*mut T>()];
     /// }
     /// ```
     // FIXME(generic_const_exprs): Remove this bodge once that feature is stable.
@@ -13,7 +13,6 @@
 #![feature(never_type)]
 #![feature(rustc_attrs)]
 #![feature(variant_count)]
-#![warn(unreachable_pub)]
 // tidy-alphabetical-end
 
 extern crate self as rustc_hir;
@@ -32,3 +32,6 @@ rustc_type_ir = { path = "../rustc_type_ir" }
 smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
 tracing = "0.1"
 # tidy-alphabetical-end
+
+[lints]
+workspace = true
@@ -73,7 +73,6 @@ This API is completely unstable and subject to change.
 #![feature(slice_partition_dedup)]
 #![feature(try_blocks)]
 #![feature(unwrap_infallible)]
-#![warn(unreachable_pub)]
 // tidy-alphabetical-end
 
 // These are used by Clippy.
@@ -8,7 +8,10 @@ edition = "2024"
 rustc_abi = { path = "../rustc_abi" }
 rustc_ast = { path = "../rustc_ast" }
 rustc_ast_pretty = { path = "../rustc_ast_pretty" }
-rustc_attr_parsing = { path = "../rustc_attr_parsing" }
+rustc_attr_data_structures = { path = "../rustc_attr_data_structures" }
 rustc_hir = { path = "../rustc_hir" }
 rustc_span = { path = "../rustc_span" }
 # tidy-alphabetical-end
+
+[lints]
+workspace = true
@@ -3,7 +3,6 @@
 
 // tidy-alphabetical-start
 #![recursion_limit = "256"]
-#![warn(unreachable_pub)]
 // tidy-alphabetical-end
 
 use std::cell::Cell;
@@ -16,7 +15,7 @@ use rustc_ast_pretty::pp::Breaks::{Consistent, Inconsistent};
 use rustc_ast_pretty::pp::{self, Breaks};
 use rustc_ast_pretty::pprust::state::MacHeader;
 use rustc_ast_pretty::pprust::{Comments, PrintState};
-use rustc_attr_parsing::{AttributeKind, PrintAttribute};
+use rustc_attr_data_structures::{AttributeKind, PrintAttribute};
 use rustc_hir::{
     BindingMode, ByRef, ConstArgKind, GenericArg, GenericBound, GenericParam, GenericParamKind,
     HirId, ImplicitSelfKind, LifetimeParamKind, Node, PatKind, PreciseCapturingArg, RangeEnd, Term,
@@ -27,3 +27,6 @@ rustc_type_ir = { path = "../rustc_type_ir" }
 smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
 tracing = "0.1"
 # tidy-alphabetical-end
+
+[lints]
+workspace = true
@@ -41,8 +41,8 @@ use rustc_abi::ExternAbi;
 use rustc_attr_parsing::InlineAttr;
 use rustc_errors::codes::*;
 use rustc_errors::{Applicability, Diag, struct_span_code_err};
-use rustc_hir as hir;
 use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::{self as hir, LangItem};
 use rustc_hir_analysis::hir_ty_lowering::HirTyLowerer;
 use rustc_infer::infer::relate::RelateResult;
 use rustc_infer::infer::{Coercion, DefineOpaqueTypes, InferOk, InferResult};
@@ -56,7 +56,7 @@ use rustc_middle::ty::adjustment::{
 };
 use rustc_middle::ty::error::TypeError;
 use rustc_middle::ty::visit::TypeVisitableExt;
-use rustc_middle::ty::{self, GenericArgsRef, Ty, TyCtxt};
+use rustc_middle::ty::{self, AliasTy, GenericArgsRef, Ty, TyCtxt};
 use rustc_span::{BytePos, DUMMY_SP, DesugaringKind, Span};
 use rustc_trait_selection::infer::InferCtxtExt as _;
 use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt;
@@ -593,6 +593,63 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> {
 
         // Create an obligation for `Source: CoerceUnsized<Target>`.
         let cause = self.cause(self.cause.span, ObligationCauseCode::Coercion { source, target });
+        let root_obligation = Obligation::new(
+            self.tcx,
+            cause.clone(),
+            self.fcx.param_env,
+            ty::TraitRef::new(self.tcx, coerce_unsized_did, [coerce_source, coerce_target]),
+        );
+
+        // If the root `Source: CoerceUnsized<Target>` obligation can't possibly hold,
+        // we don't have to assume that this is unsizing coercion (it will always lead to an error)
+        //
+        // However, we don't want to bail early all the time, since the unholdable obligations
+        // may be interesting for diagnostics (such as trying to coerce `&T` to `&dyn Id<This = U>`),
+        // so we only bail if there (likely) is another way to convert the types.
+        if !self.infcx.predicate_may_hold(&root_obligation) {
+            if let Some(dyn_metadata_adt_def_id) = self.tcx.lang_items().get(LangItem::DynMetadata)
+                && let Some(metadata_type_def_id) = self.tcx.lang_items().get(LangItem::Metadata)
+            {
+                self.probe(|_| {
+                    let ocx = ObligationCtxt::new(&self.infcx);
+
+                    // returns `true` if `<ty as Pointee>::Metadata` is `DynMetadata<_>`
+                    let has_dyn_trait_metadata = |ty| {
+                        let metadata_ty: Result<_, _> = ocx.structurally_normalize_ty(
+                            &ObligationCause::dummy(),
+                            self.fcx.param_env,
+                            Ty::new_alias(
+                                self.tcx,
+                                ty::AliasTyKind::Projection,
+                                AliasTy::new(self.tcx, metadata_type_def_id, [ty]),
+                            ),
+                        );
+
+                        metadata_ty.is_ok_and(|metadata_ty| {
+                            metadata_ty
+                                .ty_adt_def()
+                                .is_some_and(|d| d.did() == dyn_metadata_adt_def_id)
+                        })
+                    };
+
+                    // If both types are raw pointers to a (wrapper over a) trait object,
+                    // this might be a cast like `*const W<dyn Trait> -> *const dyn Trait`.
+                    // So it's better to bail and try that. (even if the cast is not possible, for
+                    // example due to vtables not matching, cast diagnostic will likely still be better)
+                    //
+                    // N.B. use `target`, not `coerce_target` (the latter is a var)
+                    if let &ty::RawPtr(source_pointee, _) = coerce_source.kind()
+                        && let &ty::RawPtr(target_pointee, _) = target.kind()
+                        && has_dyn_trait_metadata(source_pointee)
+                        && has_dyn_trait_metadata(target_pointee)
+                    {
+                        return Err(TypeError::Mismatch);
+                    }
+
+                    Ok(())
+                })?;
+            }
+        }
 
         // Use a FIFO queue for this custom fulfillment procedure.
         //
@@ -601,12 +658,7 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> {
         // and almost never more than 3. By using a SmallVec we avoid an
         // allocation, at the (very small) cost of (occasionally) having to
         // shift subsequent elements down when removing the front element.
-        let mut queue: SmallVec<[PredicateObligation<'tcx>; 4]> = smallvec![Obligation::new(
-            self.tcx,
-            cause,
-            self.fcx.param_env,
-            ty::TraitRef::new(self.tcx, coerce_unsized_did, [coerce_source, coerce_target])
-        )];
+        let mut queue: SmallVec<[PredicateObligation<'tcx>; 4]> = smallvec![root_obligation];
 
         // Keep resolving `CoerceUnsized` and `Unsize` predicates to avoid
         // emitting a coercion in cases like `Foo<$1>` -> `Foo<$2>`, where
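The two hunks above build the root `CoerceUnsized` obligation up front and, when it cannot hold and both sides are raw pointers whose pointees use `DynMetadata`, bail out of the coercion so that the ordinary cast checks get a chance instead. A minimal sketch of the kind of cast this is aimed at; `A` and `W` are illustrative names, and the final cast is only expected to compile on a toolchain that includes this change:

```rust
trait A {
    fn val(&self) -> i32;
}

impl A for i32 {
    fn val(&self) -> i32 {
        *self
    }
}

// Wrapper whose only field is the trait object, so `W<dyn A>` and `dyn A`
// use the same kind of pointer metadata (a `DynMetadata` vtable pointer).
struct W<T: ?Sized>(T);

fn main() {
    // Ordinary unsizing coercion: Box<W<i32>> -> Box<W<dyn A>>.
    let w: Box<W<dyn A>> = Box::new(W(5));
    let p: *const W<dyn A> = &*w;

    // The raw-pointer case the coercion code now leaves to the cast machinery:
    // `*const W<dyn A>` -> `*const dyn A`.
    let q = p as *const dyn A;
    assert_eq!(unsafe { (*q).val() }, 5);
}
```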
@@ -8,7 +8,6 @@
 #![feature(let_chains)]
 #![feature(never_type)]
 #![feature(try_blocks)]
-#![warn(unreachable_pub)]
 // tidy-alphabetical-end
 
 mod _match;
@@ -22,3 +22,6 @@ rustc_span = { path = "../rustc_span" }
 thin-vec = "0.2.12"
 tracing = "0.1"
 # tidy-alphabetical-end
+
+[lints]
+workspace = true
@@ -7,7 +7,6 @@
 #![doc(rust_logo)]
 #![feature(file_buffered)]
 #![feature(rustdoc_internals)]
-#![warn(unreachable_pub)]
 // tidy-alphabetical-end
 
 mod assert_dep_graph;
@@ -123,7 +123,7 @@ pub(crate) fn read_file(
 
     // Check HEADER_FORMAT_VERSION
     {
-        debug_assert!(::std::mem::size_of_val(&HEADER_FORMAT_VERSION) == 2);
+        debug_assert!(size_of_val(&HEADER_FORMAT_VERSION) == 2);
         let mut header_format_version = [0u8; 2];
         file.read_exact(&mut header_format_version)?;
         let header_format_version =
@@ -21,3 +21,6 @@ nightly = [
 ]
 rustc_randomized_layouts = []
 # tidy-alphabetical-end
+
+[lints]
+workspace = true
@@ -1,7 +1,9 @@
 use std::marker::PhantomData;
+#[cfg(not(feature = "nightly"))]
+use std::mem;
 use std::ops::{BitAnd, BitAndAssign, BitOrAssign, Bound, Not, Range, RangeBounds, Shl};
 use std::rc::Rc;
-use std::{fmt, iter, mem, slice};
+use std::{fmt, iter, slice};
 
 use Chunk::*;
 #[cfg(feature = "nightly")]
@@ -14,7 +16,7 @@ use crate::{Idx, IndexVec};
 mod tests;
 
 type Word = u64;
-const WORD_BYTES: usize = mem::size_of::<Word>();
+const WORD_BYTES: usize = size_of::<Word>();
 const WORD_BITS: usize = WORD_BYTES * 8;
 
 // The choice of chunk size has some trade-offs.
@@ -4,7 +4,6 @@
 #![cfg_attr(feature = "nightly", feature(extend_one, step_trait, test))]
 #![cfg_attr(feature = "nightly", feature(new_range_api))]
 #![cfg_attr(feature = "nightly", feature(new_zeroed_alloc))]
-#![warn(unreachable_pub)]
 // tidy-alphabetical-end
 
 pub mod bit_set;
@@ -9,8 +9,6 @@ crate::newtype_index! {
 
 #[test]
 fn index_size_is_optimized() {
-    use std::mem::size_of;
-
     assert_eq!(size_of::<MyIdx>(), 4);
     // Uses 0xFFFF_FFFB
     assert_eq!(size_of::<Option<MyIdx>>(), 4);
@@ -13,3 +13,6 @@ quote = "1"
 
 [features]
 nightly = []
+
+[lints]
+workspace = true
@@ -305,7 +305,7 @@ impl Parse for Newtype {
     }
 }
 
-pub fn newtype(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
+pub(crate) fn newtype(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
     let input = parse_macro_input!(input as Newtype);
     input.0.into()
 }
@@ -21,3 +21,6 @@ smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
 thin-vec = "0.2.12"
 tracing = "0.1"
 # tidy-alphabetical-end
+
+[lints]
+workspace = true
@@ -24,7 +24,6 @@
 #![feature(let_chains)]
 #![feature(rustdoc_internals)]
 #![recursion_limit = "512"] // For rustdoc
-#![warn(unreachable_pub)]
 // tidy-alphabetical-end
 
 mod errors;
@@ -56,3 +56,6 @@ tracing = "0.1"
 # tidy-alphabetical-start
 llvm = ['dep:rustc_codegen_llvm']
 # tidy-alphabetical-end
+
+[lints]
+workspace = true
@@ -4,7 +4,6 @@
 #![feature(iter_intersperse)]
 #![feature(let_chains)]
 #![feature(try_blocks)]
-#![warn(unreachable_pub)]
 // tidy-alphabetical-end
 
 mod callbacks;
@@ -24,3 +24,6 @@ features = ["emoji"]
 
 [dev-dependencies]
 expect-test = "1.4.0"
+
+[lints]
+workspace = true
@@ -23,7 +23,6 @@
 // We want to be able to build this crate with a stable compiler,
 // so no `#![feature]` attributes should be added.
 #![deny(unstable_features)]
-#![warn(unreachable_pub)]
 // tidy-alphabetical-end
 
 mod cursor;
@@ -28,3 +28,6 @@ smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
 tracing = "0.1"
 unicode-security = "0.1.0"
 # tidy-alphabetical-end
+
+[lints]
+workspace = true
@@ -32,7 +32,6 @@
 #![feature(rustc_attrs)]
 #![feature(rustdoc_internals)]
 #![feature(try_blocks)]
-#![warn(unreachable_pub)]
 // tidy-alphabetical-end
 
 mod async_closures;
@@ -15,3 +15,6 @@ rustc_serialize = { path = "../rustc_serialize" }
 rustc_span = { path = "../rustc_span" }
 serde = { version = "1.0.125", features = ["derive"] }
 # tidy-alphabetical-end
+
+[lints]
+workspace = true
@@ -73,7 +73,6 @@ declare_lint_pass! {
        NEVER_TYPE_FALLBACK_FLOWING_INTO_UNSAFE,
        NON_CONTIGUOUS_RANGE_ENDPOINTS,
        NON_EXHAUSTIVE_OMITTED_PATTERNS,
-        ORDER_DEPENDENT_TRAIT_OBJECTS,
        OUT_OF_SCOPE_MACRO_CALLS,
        OVERLAPPING_RANGE_ENDPOINTS,
        PATTERNS_IN_FNS_WITHOUT_BODY,
@@ -1501,42 +1500,6 @@ declare_lint! {
     };
 }
 
-declare_lint! {
-    /// The `order_dependent_trait_objects` lint detects a trait coherency
-    /// violation that would allow creating two trait impls for the same
-    /// dynamic trait object involving marker traits.
-    ///
-    /// ### Example
-    ///
-    /// ```rust,compile_fail
-    /// pub trait Trait {}
-    ///
-    /// impl Trait for dyn Send + Sync { }
-    /// impl Trait for dyn Sync + Send { }
-    /// ```
-    ///
-    /// {{produces}}
-    ///
-    /// ### Explanation
-    ///
-    /// A previous bug caused the compiler to interpret traits with different
-    /// orders (such as `Send + Sync` and `Sync + Send`) as distinct types
-    /// when they were intended to be treated the same. This allowed code to
-    /// define separate trait implementations when there should be a coherence
-    /// error. This is a [future-incompatible] lint to transition this to a
-    /// hard error in the future. See [issue #56484] for more details.
-    ///
-    /// [issue #56484]: https://github.com/rust-lang/rust/issues/56484
-    /// [future-incompatible]: ../index.md#future-incompatible-lints
-    pub ORDER_DEPENDENT_TRAIT_OBJECTS,
-    Deny,
-    "trait-object types were treated as different depending on marker-trait order",
-    @future_incompatible = FutureIncompatibleInfo {
-        reason: FutureIncompatibilityReason::FutureReleaseErrorReportInDeps,
-        reference: "issue #56484 <https://github.com/rust-lang/rust/issues/56484>",
-    };
-}
-
 declare_lint! {
     /// The `coherence_leak_check` lint detects conflicting implementations of
     /// a trait that are only distinguished by the old leak-check code.
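The hunk above deletes the `ORDER_DEPENDENT_TRAIT_OBJECTS` declaration, matching its removal from the lint pass earlier in this diff, consistent with the future-incompatibility it described being promoted to a hard error. For reference, the removed documentation's own example, which intentionally does not compile:

```rust
pub trait Trait {}

// `dyn Send + Sync` and `dyn Sync + Send` are the same trait-object type, so
// these are conflicting implementations and are now rejected outright instead
// of only triggering the old future-compatibility warning.
impl Trait for dyn Send + Sync {}
impl Trait for dyn Sync + Send {}
```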
@@ -2710,7 +2673,7 @@
     ///
     /// ```rust
     /// const fn foo<T>() -> usize {
-    ///     if std::mem::size_of::<*mut T>() < 8 { // size of *mut T does not depend on T
+    ///     if size_of::<*mut T>() < 8 { // size of *mut T does not depend on T
     ///         4
     ///     } else {
     ///         8
@@ -1,7 +1,3 @@
-// tidy-alphabetical-start
-#![warn(unreachable_pub)]
-// tidy-alphabetical-end
-
 use rustc_abi::ExternAbi;
 use rustc_ast::AttrId;
 use rustc_ast::attr::AttributeExt;
@@ -14,3 +14,6 @@ libc = "0.2.73"
 # pinned `cc` in `rustc_codegen_ssa` if you update `cc` here.
 cc = "=1.2.16"
 # tidy-alphabetical-end
+
+[lints]
+workspace = true
@@ -4,7 +4,6 @@
 #![doc(rust_logo)]
 #![feature(extern_types)]
 #![feature(rustdoc_internals)]
-#![warn(unreachable_pub)]
 // tidy-alphabetical-end
 
 use std::cell::RefCell;
Some files were not shown because too many files have changed in this diff.