Mirror of https://github.com/rust-lang/rust.git (synced 2024-11-22 14:55:26 +00:00)

Commit bd1e4eeaea: Merge from rustc
@@ -3367,7 +3367,6 @@ dependencies = [
 "rustc_type_ir",
 "serde_json",
 "smallvec",
 "snap",
 "tempfile",
 "thorin-dwp",
 "tracing",
@@ -49,6 +49,14 @@ bitflags! {
    }
}

/// Which niches (beyond the `null` niche) are available on references.
#[derive(Default, Copy, Clone, Hash, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
pub struct ReferenceNichePolicy {
    pub size: bool,
    pub align: bool,
}

#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
pub enum IntegerType {
@@ -346,6 +354,33 @@ impl TargetDataLayout {
        }
    }

    #[inline]
    pub fn target_usize_max(&self) -> u64 {
        self.pointer_size.unsigned_int_max().try_into().unwrap()
    }

    #[inline]
    pub fn target_isize_min(&self) -> i64 {
        self.pointer_size.signed_int_min().try_into().unwrap()
    }

    #[inline]
    pub fn target_isize_max(&self) -> i64 {
        self.pointer_size.signed_int_max().try_into().unwrap()
    }

    /// Returns the (inclusive) range of possible addresses for an allocation with
    /// the given size and alignment.
    ///
    /// Note that this doesn't take into account target-specific limitations.
    #[inline]
    pub fn address_range_for(&self, size: Size, align: Align) -> (u64, u64) {
        let end = Size::from_bytes(self.target_usize_max());
        let min = align.bytes();
        let max = (end - size).align_down_to(align).bytes();
        (min, max)
    }

    #[inline]
    pub fn vector_align(&self, vec_size: Size) -> AbiAndPrefAlign {
        for &(size, align) in &self.vector_align {
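Aside, not part of the diff: the arithmetic behind `address_range_for` and `align_down_to` can be checked in isolation. A minimal standalone sketch follows, using plain u64 byte values instead of `Size`/`Align`; the function names mirror the methods above, everything else is illustrative.

// Standalone sketch (assumes a power-of-two `align`, as `Align` guarantees).
fn align_down_to(x: u64, align: u64) -> u64 {
    x & !(align - 1)
}

fn address_range_for(usize_max: u64, size: u64, align: u64) -> (u64, u64) {
    // Lowest possible address: the first non-null aligned address.
    let min = align;
    // Highest possible address: the last aligned address at which the
    // allocation still fits below the end of the address space.
    let max = align_down_to(usize_max - size, align);
    (min, max)
}

fn main() {
    // A 16-byte allocation aligned to 8 bytes on a 64-bit target.
    let (min, max) = address_range_for(u64::MAX, 16, 8);
    assert_eq!(min, 8);
    assert_eq!(max, (u64::MAX - 16) & !7);
    println!("possible addresses: {min}..={max}");
}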
@@ -473,6 +508,12 @@ impl Size {
        Size::from_bytes((self.bytes() + mask) & !mask)
    }

    #[inline]
    pub fn align_down_to(self, align: Align) -> Size {
        let mask = align.bytes() - 1;
        Size::from_bytes(self.bytes() & !mask)
    }

    #[inline]
    pub fn is_aligned(self, align: Align) -> bool {
        let mask = align.bytes() - 1;
@@ -967,6 +1008,43 @@ impl WrappingRange {
        }
    }

    /// Returns `true` if `range` is contained in `self`.
    #[inline(always)]
    pub fn contains_range<I: Into<u128> + Ord>(&self, range: RangeInclusive<I>) -> bool {
        if range.is_empty() {
            return true;
        }

        let (vmin, vmax) = range.into_inner();
        let (vmin, vmax) = (vmin.into(), vmax.into());

        if self.start <= self.end {
            self.start <= vmin && vmax <= self.end
        } else {
            // The last check is needed to cover the following case:
            // `vmin ... start, end ... vmax`. In this special case there is no gap
            // between `start` and `end` so we must return true.
            self.start <= vmin || vmax <= self.end || self.start == self.end + 1
        }
    }

    /// Returns `true` if `range` has an overlap with `self`.
    #[inline(always)]
    pub fn overlaps_range<I: Into<u128> + Ord>(&self, range: RangeInclusive<I>) -> bool {
        if range.is_empty() {
            return false;
        }

        let (vmin, vmax) = range.into_inner();
        let (vmin, vmax) = (vmin.into(), vmax.into());

        if self.start <= self.end {
            self.start <= vmax && vmin <= self.end
        } else {
            self.start <= vmax || vmin <= self.end
        }
    }

    /// Returns `self` with replaced `start`
    #[inline(always)]
    pub fn with_start(mut self, start: u128) -> Self {
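Aside, not part of the diff: a minimal sketch of the containment check that `contains_range` performs, with `WrappingRange { start, end }` flattened into two plain u128 parameters; the example values are illustrative.

// `start..=end` is inclusive and may wrap around the maximum value; the query
// range `vmin..=vmax` is assumed non-empty and non-wrapping.
fn contains_range(start: u128, end: u128, vmin: u128, vmax: u128) -> bool {
    if start <= end {
        start <= vmin && vmax <= end
    } else {
        // Wrapping case: valid values are `start..=MAX` plus `0..=end`. The final
        // check handles the degenerate range that covers every value.
        start <= vmin || vmax <= end || start == end + 1
    }
}

fn main() {
    // A wrapping range with start = 2, end = 0 accepts 2..=MAX and 0.
    assert!(contains_range(2, 0, 5, 10)); // entirely above `start`
    assert!(contains_range(2, 0, 0, 0));  // entirely below `end`
    assert!(!contains_range(2, 0, 0, 3)); // straddles the gap at 1
}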
@@ -984,9 +1062,15 @@ impl WrappingRange {
    /// Returns `true` if `size` completely fills the range.
    #[inline]
    pub fn is_full_for(&self, size: Size) -> bool {
        debug_assert!(self.is_in_range_for(size));
        self.start == (self.end.wrapping_add(1) & size.unsigned_int_max())
    }

    /// Returns `true` if the range is valid for `size`.
    #[inline(always)]
    pub fn is_in_range_for(&self, size: Size) -> bool {
        let max_value = size.unsigned_int_max();
        debug_assert!(self.start <= max_value && self.end <= max_value);
        self.start == (self.end.wrapping_add(1) & max_value)
        self.start <= max_value && self.end <= max_value
    }
}

@@ -1427,16 +1511,21 @@ impl Niche {

    pub fn reserve<C: HasDataLayout>(&self, cx: &C, count: u128) -> Option<(u128, Scalar)> {
        assert!(count > 0);
        if count > self.available(cx) {
            return None;
        }

        let Self { value, valid_range: v, .. } = *self;
        let size = value.size(cx);
        assert!(size.bits() <= 128);
        let max_value = size.unsigned_int_max();
        let max_value = value.size(cx).unsigned_int_max();
        let distance_end_zero = max_value - v.end;

        let niche = v.end.wrapping_add(1)..v.start;
        let available = niche.end.wrapping_sub(niche.start) & max_value;
        if count > available {
            return None;
        // Null-pointer optimization. This is guaranteed by Rust (at least for `Option<_>`),
        // and offers better codegen opportunities.
        if count == 1 && matches!(value, Pointer(_)) && !v.contains(0) {
            // Select which bound to move to minimize the number of lost niches.
            let valid_range =
                if v.start - 1 > distance_end_zero { v.with_end(0) } else { v.with_start(0) };
            return Some((0, Scalar::Initialized { value, valid_range }));
        }

        // Extend the range of valid values being reserved by moving either `v.start` or `v.end` bound.
@@ -1459,7 +1548,6 @@ impl Niche {
            let end = v.end.wrapping_add(count) & max_value;
            Some((start, Scalar::Initialized { value, valid_range: v.with_end(end) }))
        };
        let distance_end_zero = max_value - v.end;
        if v.start > v.end {
            // zero is unavailable because wrapping occurs
            move_end(v)
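Aside, not part of the diff: the `count == 1 && matches!(value, Pointer(_)) && !v.contains(0)` branch above hands out the value 0, which is exactly the null-pointer optimization that stable Rust already guarantees for `Option<&T>`:

fn main() {
    // `None` is encoded as the (invalid) null reference, so no extra tag byte is needed.
    assert_eq!(
        std::mem::size_of::<Option<&u8>>(),
        std::mem::size_of::<&u8>()
    );
}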
@ -551,17 +551,6 @@ impl<'hir> LoweringContext<'_, 'hir> {
|
||||
for &(ref use_tree, id) in trees {
|
||||
let new_hir_id = self.local_def_id(id);
|
||||
|
||||
let mut prefix = prefix.clone();
|
||||
|
||||
// Give the segments new node-ids since they are being cloned.
|
||||
for seg in &mut prefix.segments {
|
||||
// Give the cloned segment the same resolution information
|
||||
// as the old one (this is needed for stability checking).
|
||||
let new_id = self.next_node_id();
|
||||
self.resolver.clone_res(seg.id, new_id);
|
||||
seg.id = new_id;
|
||||
}
|
||||
|
||||
// Each `use` import is an item and thus are owners of the
|
||||
// names in the path. Up to this point the nested import is
|
||||
// the current owner, since we want each desugared import to
|
||||
@ -570,6 +559,9 @@ impl<'hir> LoweringContext<'_, 'hir> {
|
||||
self.with_hir_id_owner(id, |this| {
|
||||
let mut ident = *ident;
|
||||
|
||||
// `prefix` is lowered multiple times, but in different HIR owners.
|
||||
// So each segment gets renewed `HirId` with the same
|
||||
// `ItemLocalId` and the new owner. (See `lower_node_id`)
|
||||
let kind =
|
||||
this.lower_use_tree(use_tree, &prefix, id, vis_span, &mut ident, attrs);
|
||||
if let Some(attrs) = attrs {
|
||||
|
@ -148,10 +148,6 @@ trait ResolverAstLoweringExt {
|
||||
fn legacy_const_generic_args(&self, expr: &Expr) -> Option<Vec<usize>>;
|
||||
fn get_partial_res(&self, id: NodeId) -> Option<PartialRes>;
|
||||
fn get_import_res(&self, id: NodeId) -> PerNS<Option<Res<NodeId>>>;
|
||||
// Clones the resolution (if any) on 'source' and applies it
|
||||
// to 'target'. Used when desugaring a `UseTreeKind::Nested` to
|
||||
// multiple `UseTreeKind::Simple`s
|
||||
fn clone_res(&mut self, source: NodeId, target: NodeId);
|
||||
fn get_label_res(&self, id: NodeId) -> Option<NodeId>;
|
||||
fn get_lifetime_res(&self, id: NodeId) -> Option<LifetimeRes>;
|
||||
fn take_extra_lifetime_params(&mut self, id: NodeId) -> Vec<(Ident, NodeId, LifetimeRes)>;
|
||||
@ -184,12 +180,6 @@ impl ResolverAstLoweringExt for ResolverAstLowering {
|
||||
None
|
||||
}
|
||||
|
||||
fn clone_res(&mut self, source: NodeId, target: NodeId) {
|
||||
if let Some(res) = self.partial_res_map.get(&source) {
|
||||
self.partial_res_map.insert(target, *res);
|
||||
}
|
||||
}
|
||||
|
||||
/// Obtains resolution for a `NodeId` with a single resolution.
|
||||
fn get_partial_res(&self, id: NodeId) -> Option<PartialRes> {
|
||||
self.partial_res_map.get(&id).copied()
|
||||
|
@ -239,6 +239,10 @@ impl WriteBackendMethods for GccCodegenBackend {
|
||||
unimplemented!();
|
||||
}
|
||||
|
||||
fn print_statistics(&self) {
|
||||
unimplemented!()
|
||||
}
|
||||
|
||||
unsafe fn optimize(_cgcx: &CodegenContext<Self>, _diag_handler: &Handler, module: &ModuleCodegen<Self::Module>, config: &ModuleConfig) -> Result<(), FatalError> {
|
||||
module.module_llvm.context.set_optimization_level(to_gcc_opt_level(config.opt_level));
|
||||
Ok(())
|
||||
|
@ -339,7 +339,8 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
|
||||
return pointee;
|
||||
}
|
||||
|
||||
let result = Ty::ty_and_layout_pointee_info_at(*self, cx, offset);
|
||||
let assume_valid_ptr = true;
|
||||
let result = Ty::ty_and_layout_pointee_info_at(*self, cx, offset, assume_valid_ptr);
|
||||
|
||||
cx.pointee_infos.borrow_mut().insert((self.ty, offset), result);
|
||||
result
|
||||
|
@ -33,6 +33,7 @@ use rustc_target::abi::{
|
||||
use rustc_target::spec::{HasTargetSpec, RelocModel, Target, TlsModel};
|
||||
use smallvec::SmallVec;
|
||||
|
||||
use libc::c_uint;
|
||||
use std::cell::{Cell, RefCell};
|
||||
use std::ffi::CStr;
|
||||
use std::str;
|
||||
@ -349,6 +350,23 @@ pub unsafe fn create_module<'ll>(
|
||||
);
|
||||
}
|
||||
|
||||
// Insert `llvm.ident` metadata.
|
||||
//
|
||||
// On the wasm targets it will get hooked up to the "producer" sections
|
||||
// `processed-by` information.
|
||||
let rustc_producer =
|
||||
format!("rustc version {}", option_env!("CFG_VERSION").expect("CFG_VERSION"));
|
||||
let name_metadata = llvm::LLVMMDStringInContext(
|
||||
llcx,
|
||||
rustc_producer.as_ptr().cast(),
|
||||
rustc_producer.as_bytes().len() as c_uint,
|
||||
);
|
||||
llvm::LLVMAddNamedMetadataOperand(
|
||||
llmod,
|
||||
cstr!("llvm.ident").as_ptr(),
|
||||
llvm::LLVMMDNodeInContext(llcx, &name_metadata, 1),
|
||||
);
|
||||
|
||||
llmod
|
||||
}
|
||||
|
||||
|
@ -888,21 +888,6 @@ pub fn build_compile_unit_di_node<'ll, 'tcx>(
|
||||
llvm::LLVMAddNamedMetadataOperand(debug_context.llmod, llvm_gcov_ident.as_ptr(), val);
|
||||
}
|
||||
|
||||
// Insert `llvm.ident` metadata on the wasm targets since that will
|
||||
// get hooked up to the "producer" sections `processed-by` information.
|
||||
if tcx.sess.target.is_like_wasm {
|
||||
let name_metadata = llvm::LLVMMDStringInContext(
|
||||
debug_context.llcontext,
|
||||
rustc_producer.as_ptr().cast(),
|
||||
rustc_producer.as_bytes().len() as c_uint,
|
||||
);
|
||||
llvm::LLVMAddNamedMetadataOperand(
|
||||
debug_context.llmod,
|
||||
cstr!("llvm.ident").as_ptr(),
|
||||
llvm::LLVMMDNodeInContext(debug_context.llcontext, &name_metadata, 1),
|
||||
);
|
||||
}
|
||||
|
||||
return unit_metadata;
|
||||
};
|
||||
|
||||
|
@ -40,12 +40,13 @@ use rustc_metadata::EncodedMetadata;
|
||||
use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
|
||||
use rustc_middle::query::Providers;
|
||||
use rustc_middle::ty::TyCtxt;
|
||||
use rustc_session::config::{OptLevel, OutputFilenames, PrintRequest};
|
||||
use rustc_session::config::{OptLevel, OutputFilenames, PrintKind, PrintRequest};
|
||||
use rustc_session::Session;
|
||||
use rustc_span::symbol::Symbol;
|
||||
|
||||
use std::any::Any;
|
||||
use std::ffi::CStr;
|
||||
use std::io::Write;
|
||||
|
||||
mod back {
|
||||
pub mod archive;
|
||||
@@ -178,7 +179,28 @@ impl WriteBackendMethods for LlvmCodegenBackend {
    type ThinBuffer = back::lto::ThinBuffer;
    fn print_pass_timings(&self) {
        unsafe {
            llvm::LLVMRustPrintPassTimings();
            let mut size = 0;
            let cstr = llvm::LLVMRustPrintPassTimings(&mut size as *mut usize);
            if cstr.is_null() {
                println!("failed to get pass timings");
            } else {
                let timings = std::slice::from_raw_parts(cstr as *const u8, size);
                std::io::stdout().write_all(timings).unwrap();
                libc::free(cstr as *mut _);
            }
        }
    }
    fn print_statistics(&self) {
        unsafe {
            let mut size = 0;
            let cstr = llvm::LLVMRustPrintStatistics(&mut size as *mut usize);
            if cstr.is_null() {
                println!("failed to get pass stats");
            } else {
                let stats = std::slice::from_raw_parts(cstr as *const u8, size);
                std::io::stdout().write_all(stats).unwrap();
                libc::free(cstr as *mut _);
            }
        }
    }
    fn run_link(
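Aside, not part of the diff: both methods above use the same FFI ownership pattern. LLVM returns a malloc'd buffer plus its length, and the Rust side prints the bytes and releases them with the allocator that produced them. A standalone sketch of that pattern, assuming the `libc` crate; the helper name is made up.

unsafe fn print_and_free(ptr: *const libc::c_char, len: usize) {
    if ptr.is_null() {
        eprintln!("no data returned");
        return;
    }
    let bytes = std::slice::from_raw_parts(ptr as *const u8, len);
    print!("{}", String::from_utf8_lossy(bytes));
    // Must be freed with the C allocator, not Rust's.
    libc::free(ptr as *mut libc::c_void);
}

fn main() {
    unsafe {
        // Fake a C-allocated buffer to drive the helper.
        let msg = b"pass timings would go here\n";
        let buf = libc::malloc(msg.len()) as *mut u8;
        std::ptr::copy_nonoverlapping(msg.as_ptr(), buf, msg.len());
        print_and_free(buf as *const libc::c_char, msg.len());
    }
}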
@ -262,10 +284,10 @@ impl CodegenBackend for LlvmCodegenBackend {
|
||||
|tcx, ()| llvm_util::global_llvm_features(tcx.sess, true)
|
||||
}
|
||||
|
||||
fn print(&self, req: PrintRequest, sess: &Session) {
|
||||
match req {
|
||||
PrintRequest::RelocationModels => {
|
||||
println!("Available relocation models:");
|
||||
fn print(&self, req: &PrintRequest, out: &mut dyn PrintBackendInfo, sess: &Session) {
|
||||
match req.kind {
|
||||
PrintKind::RelocationModels => {
|
||||
writeln!(out, "Available relocation models:");
|
||||
for name in &[
|
||||
"static",
|
||||
"pic",
|
||||
@ -276,26 +298,27 @@ impl CodegenBackend for LlvmCodegenBackend {
|
||||
"ropi-rwpi",
|
||||
"default",
|
||||
] {
|
||||
println!(" {}", name);
|
||||
writeln!(out, " {}", name);
|
||||
}
|
||||
println!();
|
||||
writeln!(out);
|
||||
}
|
||||
PrintRequest::CodeModels => {
|
||||
println!("Available code models:");
|
||||
PrintKind::CodeModels => {
|
||||
writeln!(out, "Available code models:");
|
||||
for name in &["tiny", "small", "kernel", "medium", "large"] {
|
||||
println!(" {}", name);
|
||||
writeln!(out, " {}", name);
|
||||
}
|
||||
println!();
|
||||
writeln!(out);
|
||||
}
|
||||
PrintRequest::TlsModels => {
|
||||
println!("Available TLS models:");
|
||||
PrintKind::TlsModels => {
|
||||
writeln!(out, "Available TLS models:");
|
||||
for name in &["global-dynamic", "local-dynamic", "initial-exec", "local-exec"] {
|
||||
println!(" {}", name);
|
||||
writeln!(out, " {}", name);
|
||||
}
|
||||
println!();
|
||||
writeln!(out);
|
||||
}
|
||||
PrintRequest::StackProtectorStrategies => {
|
||||
println!(
|
||||
PrintKind::StackProtectorStrategies => {
|
||||
writeln!(
|
||||
out,
|
||||
r#"Available stack protector strategies:
|
||||
all
|
||||
Generate stack canaries in all functions.
|
||||
@ -319,7 +342,7 @@ impl CodegenBackend for LlvmCodegenBackend {
|
||||
"#
|
||||
);
|
||||
}
|
||||
req => llvm_util::print(req, sess),
|
||||
_other => llvm_util::print(req, out, sess),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1868,7 +1868,10 @@ extern "C" {
|
||||
pub fn LLVMRustGetLastError() -> *const c_char;
|
||||
|
||||
/// Print the pass timings since static dtors aren't picking them up.
|
||||
pub fn LLVMRustPrintPassTimings();
|
||||
pub fn LLVMRustPrintPassTimings(size: *const size_t) -> *const c_char;
|
||||
|
||||
/// Print the statistics since static dtors aren't picking them up.
|
||||
pub fn LLVMRustPrintStatistics(size: *const size_t) -> *const c_char;
|
||||
|
||||
pub fn LLVMStructCreateNamed(C: &Context, Name: *const c_char) -> &Type;
|
||||
|
||||
@ -2280,7 +2283,12 @@ extern "C" {
|
||||
|
||||
pub fn LLVMRustHasFeature(T: &TargetMachine, s: *const c_char) -> bool;
|
||||
|
||||
pub fn LLVMRustPrintTargetCPUs(T: &TargetMachine, cpu: *const c_char);
|
||||
pub fn LLVMRustPrintTargetCPUs(
|
||||
T: &TargetMachine,
|
||||
cpu: *const c_char,
|
||||
print: unsafe extern "C" fn(out: *mut c_void, string: *const c_char, len: usize),
|
||||
out: *mut c_void,
|
||||
);
|
||||
pub fn LLVMRustGetTargetFeaturesCount(T: &TargetMachine) -> size_t;
|
||||
pub fn LLVMRustGetTargetFeature(
|
||||
T: &TargetMachine,
|
||||
|
@ -8,16 +8,17 @@ use libc::c_int;
|
||||
use rustc_codegen_ssa::target_features::{
|
||||
supported_target_features, tied_target_features, RUSTC_SPECIFIC_FEATURES,
|
||||
};
|
||||
use rustc_codegen_ssa::traits::PrintBackendInfo;
|
||||
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
|
||||
use rustc_data_structures::small_c_str::SmallCStr;
|
||||
use rustc_fs_util::path_to_c_string;
|
||||
use rustc_middle::bug;
|
||||
use rustc_session::config::PrintRequest;
|
||||
use rustc_session::config::{PrintKind, PrintRequest};
|
||||
use rustc_session::Session;
|
||||
use rustc_span::symbol::Symbol;
|
||||
use rustc_target::spec::{MergeFunctions, PanicStrategy};
|
||||
use std::ffi::{CStr, CString};
|
||||
|
||||
use std::ffi::{c_char, c_void, CStr, CString};
|
||||
use std::path::Path;
|
||||
use std::ptr;
|
||||
use std::slice;
|
||||
@ -110,6 +111,10 @@ unsafe fn configure_llvm(sess: &Session) {
|
||||
// Use non-zero `import-instr-limit` multiplier for cold callsites.
|
||||
add("-import-cold-multiplier=0.1", false);
|
||||
|
||||
if sess.print_llvm_stats() {
|
||||
add("-stats", false);
|
||||
}
|
||||
|
||||
for arg in sess_args {
|
||||
add(&(*arg), true);
|
||||
}
|
||||
@ -350,7 +355,7 @@ fn llvm_target_features(tm: &llvm::TargetMachine) -> Vec<(&str, &str)> {
|
||||
ret
|
||||
}
|
||||
|
||||
fn print_target_features(sess: &Session, tm: &llvm::TargetMachine) {
|
||||
fn print_target_features(out: &mut dyn PrintBackendInfo, sess: &Session, tm: &llvm::TargetMachine) {
|
||||
let mut llvm_target_features = llvm_target_features(tm);
|
||||
let mut known_llvm_target_features = FxHashSet::<&'static str>::default();
|
||||
let mut rustc_target_features = supported_target_features(sess)
|
||||
@ -383,36 +388,48 @@ fn print_target_features(sess: &Session, tm: &llvm::TargetMachine) {
|
||||
.max()
|
||||
.unwrap_or(0);
|
||||
|
||||
println!("Features supported by rustc for this target:");
|
||||
writeln!(out, "Features supported by rustc for this target:");
|
||||
for (feature, desc) in &rustc_target_features {
|
||||
println!(" {1:0$} - {2}.", max_feature_len, feature, desc);
|
||||
writeln!(out, " {1:0$} - {2}.", max_feature_len, feature, desc);
|
||||
}
|
||||
println!("\nCode-generation features supported by LLVM for this target:");
|
||||
writeln!(out, "\nCode-generation features supported by LLVM for this target:");
|
||||
for (feature, desc) in &llvm_target_features {
|
||||
println!(" {1:0$} - {2}.", max_feature_len, feature, desc);
|
||||
writeln!(out, " {1:0$} - {2}.", max_feature_len, feature, desc);
|
||||
}
|
||||
if llvm_target_features.is_empty() {
|
||||
println!(" Target features listing is not supported by this LLVM version.");
|
||||
writeln!(out, " Target features listing is not supported by this LLVM version.");
|
||||
}
|
||||
println!("\nUse +feature to enable a feature, or -feature to disable it.");
|
||||
println!("For example, rustc -C target-cpu=mycpu -C target-feature=+feature1,-feature2\n");
|
||||
println!("Code-generation features cannot be used in cfg or #[target_feature],");
|
||||
println!("and may be renamed or removed in a future version of LLVM or rustc.\n");
|
||||
writeln!(out, "\nUse +feature to enable a feature, or -feature to disable it.");
|
||||
writeln!(out, "For example, rustc -C target-cpu=mycpu -C target-feature=+feature1,-feature2\n");
|
||||
writeln!(out, "Code-generation features cannot be used in cfg or #[target_feature],");
|
||||
writeln!(out, "and may be renamed or removed in a future version of LLVM or rustc.\n");
|
||||
}
|
||||
|
||||
pub(crate) fn print(req: PrintRequest, sess: &Session) {
|
||||
pub(crate) fn print(req: &PrintRequest, mut out: &mut dyn PrintBackendInfo, sess: &Session) {
|
||||
require_inited();
|
||||
let tm = create_informational_target_machine(sess);
|
||||
match req {
|
||||
PrintRequest::TargetCPUs => {
|
||||
match req.kind {
|
||||
PrintKind::TargetCPUs => {
|
||||
// SAFETY generate a C compatible string from a byte slice to pass
|
||||
// the target CPU name into LLVM, the lifetime of the reference is
|
||||
// at least as long as the C function
|
||||
let cpu_cstring = CString::new(handle_native(sess.target.cpu.as_ref()))
|
||||
.unwrap_or_else(|e| bug!("failed to convert to cstring: {}", e));
|
||||
unsafe { llvm::LLVMRustPrintTargetCPUs(tm, cpu_cstring.as_ptr()) };
|
||||
unsafe extern "C" fn callback(out: *mut c_void, string: *const c_char, len: usize) {
|
||||
let out = &mut *(out as *mut &mut dyn PrintBackendInfo);
|
||||
let bytes = slice::from_raw_parts(string as *const u8, len);
|
||||
write!(out, "{}", String::from_utf8_lossy(bytes));
|
||||
}
|
||||
unsafe {
|
||||
llvm::LLVMRustPrintTargetCPUs(
|
||||
tm,
|
||||
cpu_cstring.as_ptr(),
|
||||
callback,
|
||||
&mut out as *mut &mut dyn PrintBackendInfo as *mut c_void,
|
||||
);
|
||||
}
|
||||
}
|
||||
PrintRequest::TargetFeatures => print_target_features(sess, tm),
|
||||
PrintKind::TargetFeatures => print_target_features(out, sess, tm),
|
||||
_ => bug!("rustc_codegen_llvm can't handle print request: {:?}", req),
|
||||
}
|
||||
}
|
||||
|
@ -411,8 +411,8 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
|
||||
if let Some(&pointee) = cx.pointee_infos.borrow().get(&(self.ty, offset)) {
|
||||
return pointee;
|
||||
}
|
||||
|
||||
let result = Ty::ty_and_layout_pointee_info_at(*self, cx, offset);
|
||||
let assume_valid_ptr = true;
|
||||
let result = Ty::ty_and_layout_pointee_info_at(*self, cx, offset, assume_valid_ptr);
|
||||
|
||||
cx.pointee_infos.borrow_mut().insert((self.ty, offset), result);
|
||||
result
|
||||
|
@@ -17,7 +17,6 @@ tempfile = "3.2"
thorin-dwp = "0.6"
pathdiff = "0.2.0"
serde_json = "1.0.59"
snap = "1"
smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
regex = "1.4"

@ -197,6 +197,8 @@ codegen_ssa_specify_libraries_to_link = use the `-l` flag to specify native libr
|
||||
|
||||
codegen_ssa_static_library_native_artifacts = Link against the following native artifacts when linking against this static library. The order and any duplication can be significant on some platforms.
|
||||
|
||||
codegen_ssa_static_library_native_artifacts_to_file = Native artifacts to link against have been written to {$path}. The order and any duplication can be significant on some platforms.
|
||||
|
||||
codegen_ssa_stripping_debug_info_failed = stripping debug info with `{$util}` failed: {$status}
|
||||
.note = {$output}
|
||||
|
||||
|
@ -12,8 +12,8 @@ use rustc_metadata::fs::{copy_to_stdout, emit_wrapper_file, METADATA_FILENAME};
|
||||
use rustc_middle::middle::debugger_visualizer::DebuggerVisualizerFile;
|
||||
use rustc_middle::middle::dependency_format::Linkage;
|
||||
use rustc_middle::middle::exported_symbols::SymbolExportKind;
|
||||
use rustc_session::config::{self, CFGuard, CrateType, DebugInfo, Strip};
|
||||
use rustc_session::config::{OutputFilenames, OutputType, PrintRequest, SplitDwarfKind};
|
||||
use rustc_session::config::{self, CFGuard, CrateType, DebugInfo, OutFileName, Strip};
|
||||
use rustc_session::config::{OutputFilenames, OutputType, PrintKind, SplitDwarfKind};
|
||||
use rustc_session::cstore::DllImport;
|
||||
use rustc_session::output::{check_file_is_writeable, invalid_output_for_target, out_filename};
|
||||
use rustc_session::search_paths::PathKind;
|
||||
@ -596,8 +596,10 @@ fn link_staticlib<'a>(
|
||||
|
||||
all_native_libs.extend_from_slice(&codegen_results.crate_info.used_libraries);
|
||||
|
||||
if sess.opts.prints.contains(&PrintRequest::NativeStaticLibs) {
|
||||
print_native_static_libs(sess, &all_native_libs, &all_rust_dylibs);
|
||||
for print in &sess.opts.prints {
|
||||
if print.kind == PrintKind::NativeStaticLibs {
|
||||
print_native_static_libs(sess, &print.out, &all_native_libs, &all_rust_dylibs);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@ -744,8 +746,11 @@ fn link_natively<'a>(
|
||||
cmd.env_remove(k.as_ref());
|
||||
}
|
||||
|
||||
if sess.opts.prints.contains(&PrintRequest::LinkArgs) {
|
||||
println!("{:?}", &cmd);
|
||||
for print in &sess.opts.prints {
|
||||
if print.kind == PrintKind::LinkArgs {
|
||||
let content = format!("{:?}", cmd);
|
||||
print.out.overwrite(&content, sess);
|
||||
}
|
||||
}
|
||||
|
||||
// May have not found libraries in the right formats.
|
||||
@ -1386,6 +1391,7 @@ enum RlibFlavor {
|
||||
|
||||
fn print_native_static_libs(
|
||||
sess: &Session,
|
||||
out: &OutFileName,
|
||||
all_native_libs: &[NativeLib],
|
||||
all_rust_dylibs: &[&Path],
|
||||
) {
|
||||
@ -1459,11 +1465,22 @@ fn print_native_static_libs(
|
||||
lib_args.push(format!("-l{}", lib));
|
||||
}
|
||||
}
|
||||
if !lib_args.is_empty() {
|
||||
sess.emit_note(errors::StaticLibraryNativeArtifacts);
|
||||
// Prefix for greppability
|
||||
// Note: This must not be translated as tools are allowed to depend on this exact string.
|
||||
sess.note_without_error(format!("native-static-libs: {}", &lib_args.join(" ")));
|
||||
|
||||
match out {
|
||||
OutFileName::Real(path) => {
|
||||
out.overwrite(&lib_args.join(" "), sess);
|
||||
if !lib_args.is_empty() {
|
||||
sess.emit_note(errors::StaticLibraryNativeArtifactsToFile { path });
|
||||
}
|
||||
}
|
||||
OutFileName::Stdout => {
|
||||
if !lib_args.is_empty() {
|
||||
sess.emit_note(errors::StaticLibraryNativeArtifacts);
|
||||
// Prefix for greppability
|
||||
// Note: This must not be translated as tools are allowed to depend on this exact string.
|
||||
sess.note_without_error(format!("native-static-libs: {}", &lib_args.join(" ")));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -10,8 +10,6 @@ use object::{
|
||||
ObjectSymbol, SectionFlags, SectionKind, SymbolFlags, SymbolKind, SymbolScope,
|
||||
};
|
||||
|
||||
use snap::write::FrameEncoder;
|
||||
|
||||
use rustc_data_structures::memmap::Mmap;
|
||||
use rustc_data_structures::owned_slice::{try_slice_owned, OwnedSlice};
|
||||
use rustc_metadata::fs::METADATA_FILENAME;
|
||||
@ -481,19 +479,15 @@ pub fn create_compressed_metadata_file(
|
||||
metadata: &EncodedMetadata,
|
||||
symbol_name: &str,
|
||||
) -> Vec<u8> {
|
||||
let mut compressed = rustc_metadata::METADATA_HEADER.to_vec();
|
||||
// Our length will be backfilled once we're done writing
|
||||
compressed.write_all(&[0; 4]).unwrap();
|
||||
FrameEncoder::new(&mut compressed).write_all(metadata.raw_data()).unwrap();
|
||||
let meta_len = rustc_metadata::METADATA_HEADER.len();
|
||||
let data_len = (compressed.len() - meta_len - 4) as u32;
|
||||
compressed[meta_len..meta_len + 4].copy_from_slice(&data_len.to_be_bytes());
|
||||
let mut packed_metadata = rustc_metadata::METADATA_HEADER.to_vec();
|
||||
packed_metadata.write_all(&(metadata.raw_data().len() as u32).to_be_bytes()).unwrap();
|
||||
packed_metadata.extend(metadata.raw_data());
|
||||
|
||||
let Some(mut file) = create_object_file(sess) else {
|
||||
return compressed.to_vec();
|
||||
return packed_metadata.to_vec();
|
||||
};
|
||||
if file.format() == BinaryFormat::Xcoff {
|
||||
return create_compressed_metadata_file_for_xcoff(file, &compressed, symbol_name);
|
||||
return create_compressed_metadata_file_for_xcoff(file, &packed_metadata, symbol_name);
|
||||
}
|
||||
let section = file.add_section(
|
||||
file.segment_name(StandardSegment::Data).to_vec(),
|
||||
@ -507,14 +501,14 @@ pub fn create_compressed_metadata_file(
|
||||
}
|
||||
_ => {}
|
||||
};
|
||||
let offset = file.append_section_data(section, &compressed, 1);
|
||||
let offset = file.append_section_data(section, &packed_metadata, 1);
|
||||
|
||||
// For MachO and probably PE this is necessary to prevent the linker from throwing away the
|
||||
// .rustc section. For ELF this isn't necessary, but it also doesn't harm.
|
||||
file.add_symbol(Symbol {
|
||||
name: symbol_name.as_bytes().to_vec(),
|
||||
value: offset,
|
||||
size: compressed.len() as u64,
|
||||
size: packed_metadata.len() as u64,
|
||||
kind: SymbolKind::Data,
|
||||
scope: SymbolScope::Dynamic,
|
||||
weak: false,
|
||||
|
@ -1945,6 +1945,10 @@ impl<B: ExtraBackendMethods> OngoingCodegen<B> {
|
||||
self.backend.print_pass_timings()
|
||||
}
|
||||
|
||||
if sess.print_llvm_stats() {
|
||||
self.backend.print_statistics()
|
||||
}
|
||||
|
||||
(
|
||||
CodegenResults {
|
||||
metadata: self.metadata,
|
||||
|
@ -455,6 +455,12 @@ pub struct LinkerFileStem;
|
||||
#[diag(codegen_ssa_static_library_native_artifacts)]
|
||||
pub struct StaticLibraryNativeArtifacts;
|
||||
|
||||
#[derive(Diagnostic)]
|
||||
#[diag(codegen_ssa_static_library_native_artifacts_to_file)]
|
||||
pub struct StaticLibraryNativeArtifactsToFile<'a> {
|
||||
pub path: &'a Path,
|
||||
}
|
||||
|
||||
#[derive(Diagnostic)]
|
||||
#[diag(codegen_ssa_link_script_unavailable)]
|
||||
pub struct LinkScriptUnavailable;
|
||||
|
@ -65,8 +65,22 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
|
||||
&self,
|
||||
constant: &mir::Constant<'tcx>,
|
||||
) -> Result<Option<ty::ValTree<'tcx>>, ErrorHandled> {
|
||||
let uv = match constant.literal {
|
||||
let uv = match self.monomorphize(constant.literal) {
|
||||
mir::ConstantKind::Unevaluated(uv, _) => uv.shrink(),
|
||||
mir::ConstantKind::Ty(c) => match c.kind() {
|
||||
// A constant that came from a const generic but was then used as an argument to old-style
|
||||
// simd_shuffle (passing as argument instead of as a generic param).
|
||||
rustc_type_ir::ConstKind::Value(valtree) => return Ok(Some(valtree)),
|
||||
other => span_bug!(constant.span, "{other:#?}"),
|
||||
},
|
||||
// We should never encounter `ConstantKind::Val` unless MIR opts (like const prop) evaluate
|
||||
// a constant and write that value back into `Operand`s. This could happen, but is unlikely.
|
||||
// Also: all users of `simd_shuffle` are on unstable and already need to take a lot of care
|
||||
// around intrinsics. For an issue to happen here, it would require a macro expanding to a
|
||||
// `simd_shuffle` call without wrapping the constant argument in a `const {}` block, but
|
||||
// the user pass through arbitrary expressions.
|
||||
// FIXME(oli-obk): replace the magic const generic argument of `simd_shuffle` with a real
|
||||
// const generic.
|
||||
other => span_bug!(constant.span, "{other:#?}"),
|
||||
};
|
||||
let uv = self.monomorphize(uv);
|
||||
|
@ -23,6 +23,8 @@ use rustc_span::symbol::Symbol;
|
||||
use rustc_target::abi::call::FnAbi;
|
||||
use rustc_target::spec::Target;
|
||||
|
||||
use std::fmt;
|
||||
|
||||
pub trait BackendTypes {
|
||||
type Value: CodegenObject;
|
||||
type Function: CodegenObject;
|
||||
@ -61,7 +63,7 @@ pub trait CodegenBackend {
|
||||
fn locale_resource(&self) -> &'static str;
|
||||
|
||||
fn init(&self, _sess: &Session) {}
|
||||
fn print(&self, _req: PrintRequest, _sess: &Session) {}
|
||||
fn print(&self, _req: &PrintRequest, _out: &mut dyn PrintBackendInfo, _sess: &Session) {}
|
||||
fn target_features(&self, _sess: &Session, _allow_unstable: bool) -> Vec<Symbol> {
|
||||
vec![]
|
||||
}
|
||||
@ -162,3 +164,19 @@ pub trait ExtraBackendMethods:
|
||||
std::thread::Builder::new().name(name).spawn(f)
|
||||
}
|
||||
}
|
||||
|
||||
pub trait PrintBackendInfo {
|
||||
fn infallible_write_fmt(&mut self, args: fmt::Arguments<'_>);
|
||||
}
|
||||
|
||||
impl PrintBackendInfo for String {
|
||||
fn infallible_write_fmt(&mut self, args: fmt::Arguments<'_>) {
|
||||
fmt::Write::write_fmt(self, args).unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
impl dyn PrintBackendInfo + '_ {
|
||||
pub fn write_fmt(&mut self, args: fmt::Arguments<'_>) {
|
||||
self.infallible_write_fmt(args);
|
||||
}
|
||||
}
|
||||
|
@ -5,7 +5,13 @@ use rustc_target::abi;
|
||||
pub trait ConstMethods<'tcx>: BackendTypes {
|
||||
// Constant constructors
|
||||
fn const_null(&self, t: Self::Type) -> Self::Value;
|
||||
/// Generate an uninitialized value (matching uninitialized memory in MIR).
|
||||
/// Whether memory is initialized or not is tracked byte-for-byte.
|
||||
fn const_undef(&self, t: Self::Type) -> Self::Value;
|
||||
/// Generate a fake value. Poison always affects the entire value, even if just a single byte is
|
||||
/// poison. This can only be used in codepaths that are already UB, i.e., UB-free Rust code
|
||||
/// (including code that e.g. copies uninit memory with `MaybeUninit`) can never encounter a
|
||||
/// poison value.
|
||||
fn const_poison(&self, t: Self::Type) -> Self::Value;
|
||||
fn const_int(&self, t: Self::Type, i: i64) -> Self::Value;
|
||||
fn const_uint(&self, t: Self::Type, i: u64) -> Self::Value;
|
||||
|
@ -30,7 +30,9 @@ mod write;
|
||||
|
||||
pub use self::abi::AbiBuilderMethods;
|
||||
pub use self::asm::{AsmBuilderMethods, AsmMethods, GlobalAsmOperandRef, InlineAsmOperandRef};
|
||||
pub use self::backend::{Backend, BackendTypes, CodegenBackend, ExtraBackendMethods};
|
||||
pub use self::backend::{
|
||||
Backend, BackendTypes, CodegenBackend, ExtraBackendMethods, PrintBackendInfo,
|
||||
};
|
||||
pub use self::builder::{BuilderMethods, OverflowOp};
|
||||
pub use self::consts::ConstMethods;
|
||||
pub use self::coverageinfo::CoverageInfoBuilderMethods;
|
||||
|
@ -35,6 +35,7 @@ pub trait WriteBackendMethods: 'static + Sized + Clone {
|
||||
cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
|
||||
) -> Result<(Vec<LtoModuleCodegen<Self>>, Vec<WorkProduct>), FatalError>;
|
||||
fn print_pass_timings(&self);
|
||||
fn print_statistics(&self);
|
||||
unsafe fn optimize(
|
||||
cgcx: &CodegenContext<Self>,
|
||||
diag_handler: &Handler,
|
||||
|
@ -244,7 +244,6 @@ const_eval_not_enough_caller_args =
|
||||
const_eval_null_box = {$front_matter}: encountered a null box
|
||||
const_eval_null_fn_ptr = {$front_matter}: encountered a null function pointer
|
||||
const_eval_null_ref = {$front_matter}: encountered a null reference
|
||||
const_eval_nullable_ptr_out_of_range = {$front_matter}: encountered a potentially null pointer, but expected something that cannot possibly fail to be {$in_range}
|
||||
const_eval_nullary_intrinsic_fail =
|
||||
could not evaluate nullary intrinsic
|
||||
|
||||
|
@ -1,7 +1,6 @@
|
||||
use rustc_hir::def::DefKind;
|
||||
use rustc_hir::{LangItem, CRATE_HIR_ID};
|
||||
use rustc_middle::mir;
|
||||
use rustc_middle::mir::interpret::PointerArithmetic;
|
||||
use rustc_middle::ty::layout::{FnAbiOf, TyAndLayout};
|
||||
use rustc_middle::ty::{self, Ty, TyCtxt};
|
||||
use rustc_session::lint::builtin::INVALID_ALIGNMENT;
|
||||
@ -17,7 +16,7 @@ use rustc_ast::Mutability;
|
||||
use rustc_hir::def_id::DefId;
|
||||
use rustc_middle::mir::AssertMessage;
|
||||
use rustc_span::symbol::{sym, Symbol};
|
||||
use rustc_target::abi::{Align, Size};
|
||||
use rustc_target::abi::{Align, HasDataLayout as _, Size};
|
||||
use rustc_target::spec::abi::Abi as CallAbi;
|
||||
|
||||
use crate::errors::{LongRunning, LongRunningWarn};
|
||||
@ -304,8 +303,8 @@ impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> {
|
||||
Ok(ControlFlow::Break(()))
|
||||
} else {
|
||||
// Not alignable in const, return `usize::MAX`.
|
||||
let usize_max = Scalar::from_target_usize(self.target_usize_max(), self);
|
||||
self.write_scalar(usize_max, dest)?;
|
||||
let usize_max = self.data_layout().target_usize_max();
|
||||
self.write_scalar(Scalar::from_target_usize(usize_max, self), dest)?;
|
||||
self.return_to_block(ret)?;
|
||||
Ok(ControlFlow::Break(()))
|
||||
}
|
||||
@ -333,7 +332,7 @@ impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> {
|
||||
// Inequality with integers other than null can never be known for sure.
|
||||
(Scalar::Int(int), ptr @ Scalar::Ptr(..))
|
||||
| (ptr @ Scalar::Ptr(..), Scalar::Int(int))
|
||||
if int.is_null() && !self.scalar_may_be_null(ptr)? =>
|
||||
if int.is_null() && !self.ptr_scalar_range(ptr)?.contains(&0) =>
|
||||
{
|
||||
0
|
||||
}
|
||||
|
@ -617,7 +617,6 @@ impl<'tcx> ReportErrorExt for ValidationErrorInfo<'tcx> {
|
||||
MutableRefInConst => const_eval_mutable_ref_in_const,
|
||||
NullFnPtr => const_eval_null_fn_ptr,
|
||||
NeverVal => const_eval_never_val,
|
||||
NullablePtrOutOfRange { .. } => const_eval_nullable_ptr_out_of_range,
|
||||
PtrOutOfRange { .. } => const_eval_ptr_out_of_range,
|
||||
OutOfRange { .. } => const_eval_out_of_range,
|
||||
UnsafeCell => const_eval_unsafe_cell,
|
||||
@ -732,9 +731,7 @@ impl<'tcx> ReportErrorExt for ValidationErrorInfo<'tcx> {
|
||||
| InvalidFnPtr { value } => {
|
||||
err.set_arg("value", value);
|
||||
}
|
||||
NullablePtrOutOfRange { range, max_value } | PtrOutOfRange { range, max_value } => {
|
||||
add_range_arg(range, max_value, handler, err)
|
||||
}
|
||||
PtrOutOfRange { range, max_value } => add_range_arg(range, max_value, handler, err),
|
||||
OutOfRange { range, max_value, value } => {
|
||||
err.set_arg("value", value);
|
||||
add_range_arg(range, max_value, handler, err);
|
||||
|
@ -2,8 +2,7 @@
|
||||
|
||||
use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt};
|
||||
use rustc_middle::{mir, ty};
|
||||
use rustc_target::abi::{self, TagEncoding};
|
||||
use rustc_target::abi::{VariantIdx, Variants};
|
||||
use rustc_target::abi::{self, TagEncoding, VariantIdx, Variants, WrappingRange};
|
||||
|
||||
use super::{ImmTy, InterpCx, InterpResult, Machine, OpTy, PlaceTy, Scalar};
|
||||
|
||||
@ -180,19 +179,24 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
|
||||
// discriminant (encoded in niche/tag) and variant index are the same.
|
||||
let variants_start = niche_variants.start().as_u32();
|
||||
let variants_end = niche_variants.end().as_u32();
|
||||
let variants_len = u128::from(variants_end - variants_start);
|
||||
let variant = match tag_val.try_to_int() {
|
||||
Err(dbg_val) => {
|
||||
// So this is a pointer then, and casting to an int failed.
|
||||
// Can only happen during CTFE.
|
||||
// The niche must be just 0, and the ptr not null, then we know this is
|
||||
// okay. Everything else, we conservatively reject.
|
||||
let ptr_valid = niche_start == 0
|
||||
&& variants_start == variants_end
|
||||
&& !self.scalar_may_be_null(tag_val)?;
|
||||
if !ptr_valid {
|
||||
// The pointer and niches ranges must be disjoint, then we know
|
||||
// this is the untagged variant (as the value is not in the niche).
|
||||
// Everything else, we conservatively reject.
|
||||
let range = self.ptr_scalar_range(tag_val)?;
|
||||
let niches = WrappingRange {
|
||||
start: niche_start,
|
||||
end: niche_start.wrapping_add(variants_len),
|
||||
};
|
||||
if niches.overlaps_range(range) {
|
||||
throw_ub!(InvalidTag(dbg_val))
|
||||
} else {
|
||||
untagged_variant
|
||||
}
|
||||
untagged_variant
|
||||
}
|
||||
Ok(tag_bits) => {
|
||||
let tag_bits = tag_bits.assert_bits(tag_layout.size);
|
||||
@ -205,7 +209,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
|
||||
let variant_index_relative =
|
||||
variant_index_relative_val.to_scalar().assert_bits(tag_val.layout.size);
|
||||
// Check if this is in the range that indicates an actual discriminant.
|
||||
if variant_index_relative <= u128::from(variants_end - variants_start) {
|
||||
if variant_index_relative <= variants_len {
|
||||
let variant_index_relative = u32::try_from(variant_index_relative)
|
||||
.expect("we checked that this fits into a u32");
|
||||
// Then computing the absolute variant idx should not overflow any more.
|
||||
|
@ -5,9 +5,7 @@
|
||||
use rustc_hir::def_id::DefId;
|
||||
use rustc_middle::mir::{
|
||||
self,
|
||||
interpret::{
|
||||
Allocation, ConstAllocation, ConstValue, GlobalId, InterpResult, PointerArithmetic, Scalar,
|
||||
},
|
||||
interpret::{Allocation, ConstAllocation, ConstValue, GlobalId, InterpResult, Scalar},
|
||||
BinOp, NonDivergingIntrinsic,
|
||||
};
|
||||
use rustc_middle::ty;
|
||||
@ -15,7 +13,7 @@ use rustc_middle::ty::layout::{LayoutOf as _, ValidityRequirement};
|
||||
use rustc_middle::ty::GenericArgsRef;
|
||||
use rustc_middle::ty::{Ty, TyCtxt};
|
||||
use rustc_span::symbol::{sym, Symbol};
|
||||
use rustc_target::abi::{Abi, Align, Primitive, Size};
|
||||
use rustc_target::abi::{Abi, Align, HasDataLayout as _, Primitive, Size};
|
||||
|
||||
use super::{
|
||||
util::ensure_monomorphic_enough, CheckInAllocMsg, ImmTy, InterpCx, Machine, OpTy, PlaceTy,
|
||||
@ -361,11 +359,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
|
||||
)?;
|
||||
|
||||
// Perform division by size to compute return value.
|
||||
let dl = self.data_layout();
|
||||
let ret_layout = if intrinsic_name == sym::ptr_offset_from_unsigned {
|
||||
assert!(0 <= dist && dist <= self.target_isize_max());
|
||||
assert!(0 <= dist && dist <= dl.target_isize_max());
|
||||
usize_layout
|
||||
} else {
|
||||
assert!(self.target_isize_min() <= dist && dist <= self.target_isize_max());
|
||||
assert!(dl.target_isize_min() <= dist && dist <= dl.target_isize_max());
|
||||
isize_layout
|
||||
};
|
||||
let pointee_layout = self.layout_of(instance_args.type_at(0))?;
|
||||
|
@ -10,6 +10,7 @@ use std::assert_matches::assert_matches;
|
||||
use std::borrow::Cow;
|
||||
use std::collections::VecDeque;
|
||||
use std::fmt;
|
||||
use std::ops::RangeInclusive;
|
||||
use std::ptr;
|
||||
|
||||
use rustc_ast::Mutability;
|
||||
@ -1222,24 +1223,34 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
|
||||
|
||||
/// Machine pointer introspection.
|
||||
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
|
||||
/// Test if this value might be null.
|
||||
/// Turn a pointer-sized scalar into a (non-empty) range of possible values.
|
||||
/// If the machine does not support ptr-to-int casts, this is conservative.
|
||||
pub fn scalar_may_be_null(&self, scalar: Scalar<M::Provenance>) -> InterpResult<'tcx, bool> {
|
||||
Ok(match scalar.try_to_int() {
|
||||
Ok(int) => int.is_null(),
|
||||
Err(_) => {
|
||||
// Can only happen during CTFE.
|
||||
let ptr = scalar.to_pointer(self)?;
|
||||
match self.ptr_try_get_alloc_id(ptr) {
|
||||
Ok((alloc_id, offset, _)) => {
|
||||
let (size, _align, _kind) = self.get_alloc_info(alloc_id);
|
||||
// If the pointer is out-of-bounds, it may be null.
|
||||
// Note that one-past-the-end (offset == size) is still inbounds, and never null.
|
||||
offset > size
|
||||
}
|
||||
Err(_offset) => bug!("a non-int scalar is always a pointer"),
|
||||
pub fn ptr_scalar_range(
|
||||
&self,
|
||||
scalar: Scalar<M::Provenance>,
|
||||
) -> InterpResult<'tcx, RangeInclusive<u64>> {
|
||||
if let Ok(int) = scalar.to_target_usize(self) {
|
||||
return Ok(int..=int);
|
||||
}
|
||||
|
||||
let ptr = scalar.to_pointer(self)?;
|
||||
|
||||
// Can only happen during CTFE.
|
||||
Ok(match self.ptr_try_get_alloc_id(ptr) {
|
||||
Ok((alloc_id, offset, _)) => {
|
||||
let offset = offset.bytes();
|
||||
let (size, align, _) = self.get_alloc_info(alloc_id);
|
||||
let dl = self.data_layout();
|
||||
if offset > size.bytes() {
|
||||
// If the pointer is out-of-bounds, we do not have a
|
||||
// meaningful range to return.
|
||||
0..=dl.target_usize_max()
|
||||
} else {
|
||||
let (min, max) = dl.address_range_for(size, align);
|
||||
(min + offset)..=(max + offset)
|
||||
}
|
||||
}
|
||||
Err(_offset) => bug!("a non-int scalar is always a pointer"),
|
||||
})
|
||||
}
|
||||
|
||||
|
@ -31,7 +31,7 @@ pub enum Immediate<Prov: Provenance = AllocId> {
|
||||
/// A pair of two scalar value (must have `ScalarPair` ABI where both fields are
|
||||
/// `Scalar::Initialized`).
|
||||
ScalarPair(Scalar<Prov>, Scalar<Prov>),
|
||||
/// A value of fully uninitialized memory. Can have and size and layout.
|
||||
/// A value of fully uninitialized memory. Can have arbitrary size and layout.
|
||||
Uninit,
|
||||
}
|
||||
|
||||
|
@ -19,9 +19,7 @@ use rustc_middle::mir::interpret::{
|
||||
use rustc_middle::ty;
|
||||
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
|
||||
use rustc_span::symbol::{sym, Symbol};
|
||||
use rustc_target::abi::{
|
||||
Abi, FieldIdx, Scalar as ScalarAbi, Size, VariantIdx, Variants, WrappingRange,
|
||||
};
|
||||
use rustc_target::abi::{Abi, FieldIdx, Scalar as ScalarAbi, Size, VariantIdx, Variants};
|
||||
|
||||
use std::hash::Hash;
|
||||
|
||||
@ -554,7 +552,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
|
||||
// FIXME: Check if the signature matches
|
||||
} else {
|
||||
// Otherwise (for standalone Miri), we have to still check it to be non-null.
|
||||
if self.ecx.scalar_may_be_null(value)? {
|
||||
if self.ecx.ptr_scalar_range(value)?.contains(&0) {
|
||||
throw_validation_failure!(self.path, NullFnPtr);
|
||||
}
|
||||
}
|
||||
@ -595,46 +593,36 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
|
||||
) -> InterpResult<'tcx> {
|
||||
let size = scalar_layout.size(self.ecx);
|
||||
let valid_range = scalar_layout.valid_range(self.ecx);
|
||||
let WrappingRange { start, end } = valid_range;
|
||||
let max_value = size.unsigned_int_max();
|
||||
assert!(end <= max_value);
|
||||
let bits = match scalar.try_to_int() {
|
||||
Ok(int) => int.assert_bits(size),
|
||||
assert!(valid_range.end <= max_value);
|
||||
match scalar.try_to_int() {
|
||||
Ok(int) => {
|
||||
// We have an explicit int: check it against the valid range.
|
||||
let bits = int.assert_bits(size);
|
||||
if valid_range.contains(bits) {
|
||||
Ok(())
|
||||
} else {
|
||||
throw_validation_failure!(
|
||||
self.path,
|
||||
OutOfRange { value: format!("{bits}"), range: valid_range, max_value }
|
||||
)
|
||||
}
|
||||
}
|
||||
Err(_) => {
|
||||
// So this is a pointer then, and casting to an int failed.
|
||||
// Can only happen during CTFE.
|
||||
// We support 2 kinds of ranges here: full range, and excluding zero.
|
||||
if start == 1 && end == max_value {
|
||||
// Only null is the niche. So make sure the ptr is NOT null.
|
||||
if self.ecx.scalar_may_be_null(scalar)? {
|
||||
throw_validation_failure!(
|
||||
self.path,
|
||||
NullablePtrOutOfRange { range: valid_range, max_value }
|
||||
)
|
||||
} else {
|
||||
return Ok(());
|
||||
}
|
||||
} else if scalar_layout.is_always_valid(self.ecx) {
|
||||
// Easy. (This is reachable if `enforce_number_validity` is set.)
|
||||
return Ok(());
|
||||
// We check if the possible addresses are compatible with the valid range.
|
||||
let range = self.ecx.ptr_scalar_range(scalar)?;
|
||||
if valid_range.contains_range(range) {
|
||||
Ok(())
|
||||
} else {
|
||||
// Conservatively, we reject, because the pointer *could* have a bad
|
||||
// value.
|
||||
// Reject conservatively, because the pointer *could* have a bad value.
|
||||
throw_validation_failure!(
|
||||
self.path,
|
||||
PtrOutOfRange { range: valid_range, max_value }
|
||||
)
|
||||
}
|
||||
}
|
||||
};
|
||||
// Now compare.
|
||||
if valid_range.contains(bits) {
|
||||
Ok(())
|
||||
} else {
|
||||
throw_validation_failure!(
|
||||
self.path,
|
||||
OutOfRange { value: format!("{bits}"), range: valid_range, max_value }
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -19,5 +19,3 @@ driver_impl_rlink_rustc_version_mismatch = .rlink file was produced by rustc ver
|
||||
driver_impl_rlink_unable_to_read = failed to read rlink file: `{$err}`
|
||||
|
||||
driver_impl_rlink_wrong_file_type = The input does not look like a .rlink file
|
||||
|
||||
driver_impl_unpretty_dump_fail = pretty-print failed to write `{$path}` due to error `{$err}`
|
||||
|
@ -37,9 +37,7 @@ use rustc_interface::{interface, Queries};
|
||||
use rustc_lint::LintStore;
|
||||
use rustc_metadata::locator;
|
||||
use rustc_session::config::{nightly_options, CG_OPTIONS, Z_OPTIONS};
|
||||
use rustc_session::config::{
|
||||
ErrorOutputType, Input, OutFileName, OutputType, PrintRequest, TrimmedDefPaths,
|
||||
};
|
||||
use rustc_session::config::{ErrorOutputType, Input, OutFileName, OutputType, TrimmedDefPaths};
|
||||
use rustc_session::cstore::MetadataLoader;
|
||||
use rustc_session::getopts::{self, Matches};
|
||||
use rustc_session::lint::{Lint, LintId};
|
||||
@ -53,6 +51,7 @@ use std::cmp::max;
|
||||
use std::collections::BTreeMap;
|
||||
use std::env;
|
||||
use std::ffi::OsString;
|
||||
use std::fmt::Write as _;
|
||||
use std::fs;
|
||||
use std::io::{self, IsTerminal, Read, Write};
|
||||
use std::panic::{self, catch_unwind};
|
||||
@ -72,6 +71,11 @@ macro do_not_use_print($($t:tt)*) {
|
||||
)
|
||||
}
|
||||
|
||||
#[allow(unused_macros)]
|
||||
macro do_not_use_safe_print($($t:tt)*) {
|
||||
std::compile_error!("Don't use `safe_print` or `safe_println` here, use `println_info` instead")
|
||||
}
|
||||
|
||||
// This import blocks the use of panicking `print` and `println` in all the code
|
||||
// below. Please use `safe_print` and `safe_println` to avoid ICE when
|
||||
// encountering an I/O error during print.
|
||||
@ -720,10 +724,17 @@ fn print_crate_info(
|
||||
sess: &Session,
|
||||
parse_attrs: bool,
|
||||
) -> Compilation {
|
||||
use rustc_session::config::PrintRequest::*;
|
||||
use rustc_session::config::PrintKind::*;
|
||||
|
||||
// This import prevents the following code from using the printing macros
|
||||
// used by the rest of the module. Within this function, we only write to
|
||||
// the output specified by `sess.io.output_file`.
|
||||
#[allow(unused_imports)]
|
||||
use {do_not_use_safe_print as safe_print, do_not_use_safe_print as safe_println};
|
||||
|
||||
// NativeStaticLibs and LinkArgs are special - printed during linking
|
||||
// (empty iterator returns true)
|
||||
if sess.opts.prints.iter().all(|&p| p == NativeStaticLibs || p == LinkArgs) {
|
||||
if sess.opts.prints.iter().all(|p| p.kind == NativeStaticLibs || p.kind == LinkArgs) {
|
||||
return Compilation::Continue;
|
||||
}
|
||||
|
||||
@ -739,17 +750,23 @@ fn print_crate_info(
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
for req in &sess.opts.prints {
|
||||
match *req {
|
||||
let mut crate_info = String::new();
|
||||
macro println_info($($arg:tt)*) {
|
||||
crate_info.write_fmt(format_args!("{}\n", format_args!($($arg)*))).unwrap()
|
||||
}
|
||||
|
||||
match req.kind {
|
||||
TargetList => {
|
||||
let mut targets = rustc_target::spec::TARGETS.to_vec();
|
||||
targets.sort_unstable();
|
||||
safe_println!("{}", targets.join("\n"));
|
||||
println_info!("{}", targets.join("\n"));
|
||||
}
|
||||
Sysroot => safe_println!("{}", sess.sysroot.display()),
|
||||
TargetLibdir => safe_println!("{}", sess.target_tlib_path.dir.display()),
|
||||
Sysroot => println_info!("{}", sess.sysroot.display()),
|
||||
TargetLibdir => println_info!("{}", sess.target_tlib_path.dir.display()),
|
||||
TargetSpec => {
|
||||
safe_println!("{}", serde_json::to_string_pretty(&sess.target.to_json()).unwrap());
|
||||
println_info!("{}", serde_json::to_string_pretty(&sess.target.to_json()).unwrap());
|
||||
}
|
||||
AllTargetSpecs => {
|
||||
let mut targets = BTreeMap::new();
|
||||
@ -758,26 +775,30 @@ fn print_crate_info(
|
||||
let target = Target::expect_builtin(&triple);
|
||||
targets.insert(name, target.to_json());
|
||||
}
|
||||
safe_println!("{}", serde_json::to_string_pretty(&targets).unwrap());
|
||||
println_info!("{}", serde_json::to_string_pretty(&targets).unwrap());
|
||||
}
|
||||
FileNames | CrateName => {
|
||||
FileNames => {
|
||||
let Some(attrs) = attrs.as_ref() else {
|
||||
// no crate attributes, print out an error and exit
|
||||
return Compilation::Continue;
|
||||
};
|
||||
let t_outputs = rustc_interface::util::build_output_filenames(attrs, sess);
|
||||
let id = rustc_session::output::find_crate_name(sess, attrs);
|
||||
if *req == PrintRequest::CrateName {
|
||||
safe_println!("{id}");
|
||||
continue;
|
||||
}
|
||||
let crate_types = collect_crate_types(sess, attrs);
|
||||
for &style in &crate_types {
|
||||
let fname =
|
||||
rustc_session::output::filename_for_input(sess, style, id, &t_outputs);
|
||||
safe_println!("{}", fname.as_path().file_name().unwrap().to_string_lossy());
|
||||
println_info!("{}", fname.as_path().file_name().unwrap().to_string_lossy());
|
||||
}
|
||||
}
|
||||
CrateName => {
|
||||
let Some(attrs) = attrs.as_ref() else {
|
||||
// no crate attributes, print out an error and exit
|
||||
return Compilation::Continue;
|
||||
};
|
||||
let id = rustc_session::output::find_crate_name(sess, attrs);
|
||||
println_info!("{id}");
|
||||
}
|
||||
Cfg => {
|
||||
let mut cfgs = sess
|
||||
.parse_sess
|
||||
@ -809,13 +830,13 @@ fn print_crate_info(
|
||||
|
||||
cfgs.sort();
|
||||
for cfg in cfgs {
|
||||
safe_println!("{cfg}");
|
||||
println_info!("{cfg}");
|
||||
}
|
||||
}
|
||||
CallingConventions => {
|
||||
let mut calling_conventions = rustc_target::spec::abi::all_names();
|
||||
calling_conventions.sort_unstable();
|
||||
safe_println!("{}", calling_conventions.join("\n"));
|
||||
println_info!("{}", calling_conventions.join("\n"));
|
||||
}
|
||||
RelocationModels
|
||||
| CodeModels
|
||||
@ -823,7 +844,7 @@ fn print_crate_info(
|
||||
| TargetCPUs
|
||||
| StackProtectorStrategies
|
||||
| TargetFeatures => {
|
||||
codegen_backend.print(*req, sess);
|
||||
codegen_backend.print(req, &mut crate_info, sess);
|
||||
}
|
||||
// Any output here interferes with Cargo's parsing of other printed output
|
||||
NativeStaticLibs => {}
|
||||
@ -833,7 +854,7 @@ fn print_crate_info(
|
||||
|
||||
for split in &[Off, Packed, Unpacked] {
|
||||
if sess.target.options.supported_split_debuginfo.contains(split) {
|
||||
safe_println!("{split}");
|
||||
println_info!("{split}");
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -841,7 +862,7 @@ fn print_crate_info(
|
||||
use rustc_target::spec::current_apple_deployment_target;
|
||||
|
||||
if sess.target.is_like_osx {
|
||||
safe_println!(
|
||||
println_info!(
|
||||
"deployment_target={}",
|
||||
current_apple_deployment_target(&sess.target)
|
||||
.expect("unknown Apple target OS")
|
||||
@ -852,6 +873,8 @@ fn print_crate_info(
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
req.out.overwrite(&crate_info, sess);
|
||||
}
|
||||
Compilation::Stop
|
||||
}
|
||||
|
@ -1,6 +1,5 @@
|
||||
//! The various pretty-printing routines.
|
||||
|
||||
use crate::session_diagnostics::UnprettyDumpFail;
|
||||
use rustc_ast as ast;
|
||||
use rustc_ast_pretty::pprust;
|
||||
use rustc_errors::ErrorGuaranteed;
|
||||
@ -358,17 +357,7 @@ fn get_source(sess: &Session) -> (String, FileName) {
|
||||
}
|
||||
|
||||
fn write_or_print(out: &str, sess: &Session) {
|
||||
match &sess.io.output_file {
|
||||
None | Some(OutFileName::Stdout) => print!("{out}"),
|
||||
Some(OutFileName::Real(p)) => {
|
||||
if let Err(e) = std::fs::write(p, out) {
|
||||
sess.emit_fatal(UnprettyDumpFail {
|
||||
path: p.display().to_string(),
|
||||
err: e.to_string(),
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
sess.io.output_file.as_ref().unwrap_or(&OutFileName::Stdout).overwrite(out, sess);
|
||||
}
|
||||
|
||||
pub fn print_after_parsing(sess: &Session, krate: &ast::Crate, ppm: PpMode) {
|
||||
|
@ -32,13 +32,6 @@ pub(crate) struct RLinkRustcVersionMismatch<'a> {
|
||||
#[diag(driver_impl_rlink_no_a_file)]
|
||||
pub(crate) struct RlinkNotAFile;
|
||||
|
||||
#[derive(Diagnostic)]
|
||||
#[diag(driver_impl_unpretty_dump_fail)]
|
||||
pub(crate) struct UnprettyDumpFail {
|
||||
pub path: String,
|
||||
pub err: String,
|
||||
}
|
||||
|
||||
#[derive(Diagnostic)]
|
||||
#[diag(driver_impl_ice)]
|
||||
pub(crate) struct Ice;
|
||||
|
@ -11,7 +11,8 @@ struct ForceAlign32;
|
||||
|
||||
#[repr(transparent)]
|
||||
struct Wrapper(f32, ForceAlign32); // error: zero-sized field in transparent
|
||||
// struct has alignment larger than 1
|
||||
// struct has alignment of 32, which
|
||||
// is larger than 1
|
||||
```
|
||||
|
||||
A transparent struct, enum, or union is supposed to be represented exactly like
|
||||
|
@ -1078,9 +1078,9 @@ pub(super) fn check_transparent<'tcx>(tcx: TyCtxt<'tcx>, adt: ty::AdtDef<'tcx>)
|
||||
// We are currently checking the type this field came from, so it must be local
|
||||
let span = tcx.hir().span_if_local(field.did).unwrap();
|
||||
let zst = layout.is_ok_and(|layout| layout.is_zst());
|
||||
let align1 = layout.is_ok_and(|layout| layout.align.abi.bytes() == 1);
|
||||
let align = layout.ok().map(|layout| layout.align.abi.bytes());
|
||||
if !zst {
|
||||
return (span, zst, align1, None);
|
||||
return (span, zst, align, None);
|
||||
}
|
||||
|
||||
fn check_non_exhaustive<'tcx>(
|
||||
@ -1115,12 +1115,12 @@ pub(super) fn check_transparent<'tcx>(tcx: TyCtxt<'tcx>, adt: ty::AdtDef<'tcx>)
|
||||
}
|
||||
}
|
||||
|
||||
(span, zst, align1, check_non_exhaustive(tcx, ty).break_value())
|
||||
(span, zst, align, check_non_exhaustive(tcx, ty).break_value())
|
||||
});
|
||||
|
||||
let non_zst_fields = field_infos
|
||||
.clone()
|
||||
.filter_map(|(span, zst, _align1, _non_exhaustive)| if !zst { Some(span) } else { None });
|
||||
.filter_map(|(span, zst, _align, _non_exhaustive)| if !zst { Some(span) } else { None });
|
||||
let non_zst_count = non_zst_fields.clone().count();
|
||||
if non_zst_count >= 2 {
|
||||
bad_non_zero_sized_fields(tcx, adt, non_zst_count, non_zst_fields, tcx.def_span(adt.did()));
|
||||
@ -1128,17 +1128,26 @@ pub(super) fn check_transparent<'tcx>(tcx: TyCtxt<'tcx>, adt: ty::AdtDef<'tcx>)
|
||||
let incompatible_zst_fields =
|
||||
field_infos.clone().filter(|(_, _, _, opt)| opt.is_some()).count();
|
||||
let incompat = incompatible_zst_fields + non_zst_count >= 2 && non_zst_count < 2;
|
||||
for (span, zst, align1, non_exhaustive) in field_infos {
|
||||
if zst && !align1 {
|
||||
struct_span_err!(
|
||||
for (span, zst, align, non_exhaustive) in field_infos {
|
||||
if zst && align != Some(1) {
|
||||
let mut err = struct_span_err!(
|
||||
tcx.sess,
|
||||
span,
|
||||
E0691,
|
||||
"zero-sized field in transparent {} has alignment larger than 1",
|
||||
adt.descr(),
|
||||
)
|
||||
.span_label(span, "has alignment larger than 1")
|
||||
.emit();
|
||||
);
|
||||
|
||||
if let Some(align_bytes) = align {
|
||||
err.span_label(
|
||||
span,
|
||||
format!("has alignment of {align_bytes}, which is larger than 1"),
|
||||
);
|
||||
} else {
|
||||
err.span_label(span, "may have alignment larger than 1");
|
||||
}
|
||||
|
||||
err.emit();
|
||||
}
|
||||
if incompat && let Some((descr, def_id, args, non_exhaustive)) = non_exhaustive {
|
||||
tcx.struct_span_lint_hir(
|
||||
|
@ -1,11 +1,16 @@
|
||||
use rustc_data_structures::unord::{ExtendUnord, UnordSet};
|
||||
use rustc_hir::def::DefKind;
|
||||
use rustc_hir::def_id::LocalDefId;
|
||||
use rustc_middle::query::Providers;
|
||||
use rustc_middle::ty::TyCtxt;
|
||||
use rustc_session::lint;
|
||||
|
||||
pub fn check_crate(tcx: TyCtxt<'_>) {
|
||||
let mut used_trait_imports: UnordSet<LocalDefId> = Default::default();
|
||||
pub fn provide(providers: &mut Providers) {
|
||||
*providers = Providers { check_unused_traits, ..*providers };
|
||||
}
|
||||
|
||||
fn check_unused_traits(tcx: TyCtxt<'_>, (): ()) {
|
||||
let mut used_trait_imports = UnordSet::<LocalDefId>::default();
|
||||
|
||||
// FIXME: Use `tcx.hir().par_body_owners()` when we implement creating `DefId`s
|
||||
// for anon constants during their parents' typeck.
|
||||
|
@ -568,10 +568,10 @@ fn fast_reject_auto_impl<'tcx>(tcx: TyCtxt<'tcx>, trait_def_id: DefId, self_ty:
|
||||
|
||||
impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for DisableAutoTraitVisitor<'tcx> {
|
||||
type BreakTy = ();
|
||||
fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
|
||||
fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
|
||||
let tcx = self.tcx;
|
||||
if t != self.self_ty_root {
|
||||
for impl_def_id in tcx.non_blanket_impls_for_ty(self.trait_def_id, t) {
|
||||
if ty != self.self_ty_root {
|
||||
for impl_def_id in tcx.non_blanket_impls_for_ty(self.trait_def_id, ty) {
|
||||
match tcx.impl_polarity(impl_def_id) {
|
||||
ImplPolarity::Negative => return ControlFlow::Break(()),
|
||||
ImplPolarity::Reservation => {}
|
||||
@ -584,7 +584,7 @@ fn fast_reject_auto_impl<'tcx>(tcx: TyCtxt<'tcx>, trait_def_id: DefId, self_ty:
|
||||
}
|
||||
}
|
||||
|
||||
match t.kind() {
|
||||
match ty.kind() {
|
||||
ty::Adt(def, args) if def.is_phantom_data() => args.visit_with(self),
|
||||
ty::Adt(def, args) => {
|
||||
// @lcnr: This is the only place where cycles can happen. We avoid this
|
||||
@ -599,7 +599,7 @@ fn fast_reject_auto_impl<'tcx>(tcx: TyCtxt<'tcx>, trait_def_id: DefId, self_ty:
|
||||
|
||||
ControlFlow::Continue(())
|
||||
}
|
||||
_ => t.super_visit_with(self),
|
||||
_ => ty.super_visit_with(self),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -177,6 +177,7 @@ pub fn provide(providers: &mut Providers) {
|
||||
collect::provide(providers);
|
||||
coherence::provide(providers);
|
||||
check::provide(providers);
|
||||
check_unused::provide(providers);
|
||||
variance::provide(providers);
|
||||
outlives::provide(providers);
|
||||
impl_wf_check::provide(providers);
|
||||
@ -247,7 +248,7 @@ pub fn check_crate(tcx: TyCtxt<'_>) -> Result<(), ErrorGuaranteed> {
|
||||
}
|
||||
});
|
||||
|
||||
check_unused::check_crate(tcx);
|
||||
tcx.ensure().check_unused_traits(());
|
||||
|
||||
if let Some(reported) = tcx.sess.has_errors() { Err(reported) } else { Ok(()) }
|
||||
}
|
||||
|
@ -25,6 +25,13 @@ impl<'tcx> PredicateSet<'tcx> {
|
||||
Self { tcx, set: Default::default() }
|
||||
}
|
||||
|
||||
/// Adds a predicate to the set.
///
/// Returns whether the predicate was newly inserted. That is:
/// - If the set did not previously contain this predicate, `true` is returned.
/// - If the set already contained this predicate, `false` is returned,
/// and the set is not modified: original predicate is not replaced,
/// and the predicate passed as argument is dropped.
pub fn insert(&mut self, pred: ty::Predicate<'tcx>) -> bool {
// We have to be careful here because we want
//

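As an aside (not part of this patch), the documented `insert` contract mirrors standard-library set semantics; a minimal sketch with `HashSet` standing in for `PredicateSet<'tcx>`:

```rust
use std::collections::HashSet;

fn main() {
    let mut set = HashSet::new();
    // Newly inserted -> `true`.
    assert!(set.insert("predicate"));
    // Already present -> `false`; the set keeps the original entry unchanged.
    assert!(!set.insert("predicate"));
    assert_eq!(set.len(), 1);
}
```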
@ -28,6 +28,7 @@ use rustc_span::edition::{Edition, DEFAULT_EDITION};
|
||||
use rustc_span::symbol::sym;
|
||||
use rustc_span::FileName;
|
||||
use rustc_span::SourceFileHashAlgorithm;
|
||||
use rustc_target::abi::ReferenceNichePolicy;
|
||||
use rustc_target::spec::{CodeModel, LinkerFlavorCli, MergeFunctions, PanicStrategy, RelocModel};
|
||||
use rustc_target::spec::{RelroLevel, SanitizerSet, SplitDebuginfo, StackProtector, TlsModel};
|
||||
|
||||
@ -715,6 +716,7 @@ fn test_unstable_options_tracking_hash() {
|
||||
untracked!(perf_stats, true);
|
||||
// `pre_link_arg` is omitted because it just forwards to `pre_link_args`.
|
||||
untracked!(pre_link_args, vec![String::from("abc"), String::from("def")]);
|
||||
untracked!(print_codegen_stats, true);
|
||||
untracked!(print_llvm_passes, true);
|
||||
untracked!(print_mono_items, Some(String::from("abc")));
|
||||
untracked!(print_type_sizes, true);
|
||||
@ -819,6 +821,7 @@ fn test_unstable_options_tracking_hash() {
|
||||
tracked!(profile_emit, Some(PathBuf::from("abc")));
|
||||
tracked!(profile_sample_use, Some(PathBuf::from("abc")));
|
||||
tracked!(profiler_runtime, "abc".to_string());
|
||||
tracked!(reference_niches, Some(ReferenceNichePolicy { size: true, align: false }));
|
||||
tracked!(relax_elf_relocations, Some(true));
|
||||
tracked!(relro_level, Some(RelroLevel::Full));
|
||||
tracked!(remap_cwd_prefix, Some(PathBuf::from("abc")));
|
||||
|
@ -1,5 +1,6 @@
|
||||
#include <stdio.h>
|
||||
|
||||
#include <iomanip>
|
||||
#include <vector>
|
||||
#include <set>
|
||||
|
||||
@ -306,44 +307,55 @@ static size_t getLongestEntryLength(ArrayRef<KV> Table) {
|
||||
return MaxLen;
|
||||
}
|
||||
|
||||
extern "C" void LLVMRustPrintTargetCPUs(LLVMTargetMachineRef TM, const char* TargetCPU) {
|
||||
using PrintBackendInfo = void(void*, const char* Data, size_t Len);
|
||||
|
||||
extern "C" void LLVMRustPrintTargetCPUs(LLVMTargetMachineRef TM,
|
||||
const char* TargetCPU,
|
||||
PrintBackendInfo Print,
|
||||
void* Out) {
|
||||
const TargetMachine *Target = unwrap(TM);
|
||||
const MCSubtargetInfo *MCInfo = Target->getMCSubtargetInfo();
|
||||
const Triple::ArchType HostArch = Triple(sys::getDefaultTargetTriple()).getArch();
|
||||
const Triple::ArchType TargetArch = Target->getTargetTriple().getArch();
|
||||
|
||||
std::ostringstream Buf;
|
||||
|
||||
#if LLVM_VERSION_GE(17, 0)
|
||||
const ArrayRef<SubtargetSubTypeKV> CPUTable = MCInfo->getAllProcessorDescriptions();
|
||||
#elif defined(LLVM_RUSTLLVM)
|
||||
const ArrayRef<SubtargetSubTypeKV> CPUTable = MCInfo->getCPUTable();
|
||||
#else
|
||||
printf("Full target CPU help is not supported by this LLVM version.\n\n");
|
||||
Buf << "Full target CPU help is not supported by this LLVM version.\n\n";
|
||||
SubtargetSubTypeKV TargetCPUKV = { TargetCPU, {{}}, {{}} };
|
||||
const ArrayRef<SubtargetSubTypeKV> CPUTable = TargetCPUKV;
|
||||
#endif
|
||||
unsigned MaxCPULen = getLongestEntryLength(CPUTable);
|
||||
|
||||
printf("Available CPUs for this target:\n");
|
||||
Buf << "Available CPUs for this target:\n";
|
||||
// Don't print the "native" entry when the user specifies --target with a
|
||||
// different arch since that could be wrong or misleading.
|
||||
if (HostArch == TargetArch) {
|
||||
MaxCPULen = std::max(MaxCPULen, (unsigned) std::strlen("native"));
|
||||
const StringRef HostCPU = sys::getHostCPUName();
|
||||
printf(" %-*s - Select the CPU of the current host (currently %.*s).\n",
|
||||
MaxCPULen, "native", (int)HostCPU.size(), HostCPU.data());
|
||||
Buf << " " << std::left << std::setw(MaxCPULen) << "native"
|
||||
<< " - Select the CPU of the current host "
|
||||
"(currently " << HostCPU.str() << ").\n";
|
||||
}
|
||||
for (auto &CPU : CPUTable) {
|
||||
// Compare cpu against current target to label the default
|
||||
if (strcmp(CPU.Key, TargetCPU) == 0) {
|
||||
printf(" %-*s - This is the default target CPU"
|
||||
" for the current build target (currently %s).",
|
||||
MaxCPULen, CPU.Key, Target->getTargetTriple().str().c_str());
|
||||
Buf << " " << std::left << std::setw(MaxCPULen) << CPU.Key
|
||||
<< " - This is the default target CPU for the current build target "
|
||||
"(currently " << Target->getTargetTriple().str() << ").";
|
||||
}
|
||||
else {
|
||||
printf(" %-*s", MaxCPULen, CPU.Key);
|
||||
Buf << " " << CPU.Key;
|
||||
}
|
||||
printf("\n");
|
||||
Buf << "\n";
|
||||
}
|
||||
|
||||
const auto &BufString = Buf.str();
|
||||
Print(Out, BufString.data(), BufString.size());
|
||||
}
|
||||
|
||||
extern "C" size_t LLVMRustGetTargetFeaturesCount(LLVMTargetMachineRef TM) {
|
||||
@ -1354,6 +1366,11 @@ LLVMRustPrepareThinLTOImport(const LLVMRustThinLTOData *Data, LLVMModuleRef M,
|
||||
if (WasmCustomSections)
|
||||
WasmCustomSections->eraseFromParent();
|
||||
|
||||
// `llvm.ident` named metadata also gets duplicated.
|
||||
auto *llvmIdent = (*MOrErr)->getNamedMetadata("llvm.ident");
|
||||
if (llvmIdent)
|
||||
llvmIdent->eraseFromParent();
|
||||
|
||||
return MOrErr;
|
||||
};
|
||||
bool ClearDSOLocal = clearDSOLocalOnDeclarations(Mod, Target);
|
||||
|
@ -1,4 +1,5 @@
|
||||
#include "LLVMWrapper.h"
|
||||
#include "llvm/ADT/Statistic.h"
|
||||
#include "llvm/IR/DebugInfoMetadata.h"
|
||||
#include "llvm/IR/DiagnosticHandler.h"
|
||||
#include "llvm/IR/DiagnosticInfo.h"
|
||||
@ -111,9 +112,26 @@ extern "C" void LLVMRustSetNormalizedTarget(LLVMModuleRef M,
|
||||
unwrap(M)->setTargetTriple(Triple::normalize(Triple));
|
||||
}
|
||||
|
||||
extern "C" void LLVMRustPrintPassTimings() {
|
||||
raw_fd_ostream OS(2, false); // stderr.
|
||||
TimerGroup::printAll(OS);
|
||||
extern "C" const char *LLVMRustPrintPassTimings(size_t *Len) {
|
||||
std::string buf;
|
||||
raw_string_ostream SS(buf);
|
||||
TimerGroup::printAll(SS);
|
||||
SS.flush();
|
||||
*Len = buf.length();
|
||||
char *CStr = (char *)malloc(*Len);
|
||||
memcpy(CStr, buf.c_str(), *Len);
|
||||
return CStr;
|
||||
}
|
||||
|
||||
extern "C" const char *LLVMRustPrintStatistics(size_t *Len) {
|
||||
std::string buf;
|
||||
raw_string_ostream SS(buf);
|
||||
llvm::PrintStatistics(SS);
|
||||
SS.flush();
|
||||
*Len = buf.length();
|
||||
char *CStr = (char *)malloc(*Len);
|
||||
memcpy(CStr, buf.c_str(), *Len);
|
||||
return CStr;
|
||||
}
|
||||
|
||||
extern "C" LLVMValueRef LLVMRustGetNamedValue(LLVMModuleRef M, const char *Name,
|
||||
|
@ -511,7 +511,7 @@ impl<'a> CrateLocator<'a> {
|
||||
rlib: self.extract_one(rlibs, CrateFlavor::Rlib, &mut slot)?,
|
||||
dylib: self.extract_one(dylibs, CrateFlavor::Dylib, &mut slot)?,
|
||||
};
|
||||
Ok(slot.map(|(svh, metadata)| (svh, Library { source, metadata })))
|
||||
Ok(slot.map(|(svh, metadata, _)| (svh, Library { source, metadata })))
|
||||
}
|
||||
|
||||
fn needs_crate_flavor(&self, flavor: CrateFlavor) -> bool {
|
||||
@ -535,11 +535,13 @@ impl<'a> CrateLocator<'a> {
|
||||
// read the metadata from it if `*slot` is `None`. If the metadata couldn't
|
||||
// be read, it is assumed that the file isn't a valid rust library (no
|
||||
// errors are emitted).
|
||||
//
|
||||
// The `PathBuf` in `slot` will only be used for diagnostic purposes.
|
||||
fn extract_one(
|
||||
&mut self,
|
||||
m: FxHashMap<PathBuf, PathKind>,
|
||||
flavor: CrateFlavor,
|
||||
slot: &mut Option<(Svh, MetadataBlob)>,
|
||||
slot: &mut Option<(Svh, MetadataBlob, PathBuf)>,
|
||||
) -> Result<Option<(PathBuf, PathKind)>, CrateError> {
|
||||
// If we are producing an rlib, and we've already loaded metadata, then
|
||||
// we should not attempt to discover further crate sources (unless we're
|
||||
@ -550,16 +552,9 @@ impl<'a> CrateLocator<'a> {
|
||||
//
|
||||
// See also #68149 which provides more detail on why emitting the
|
||||
// dependency on the rlib is a bad thing.
|
||||
//
|
||||
// We currently do not verify that these other sources are even in sync,
|
||||
// and this is arguably a bug (see #10786), but because reading metadata
|
||||
// is quite slow (especially from dylibs) we currently do not read it
|
||||
// from the other crate sources.
|
||||
if slot.is_some() {
|
||||
if m.is_empty() || !self.needs_crate_flavor(flavor) {
|
||||
return Ok(None);
|
||||
} else if m.len() == 1 {
|
||||
return Ok(Some(m.into_iter().next().unwrap()));
|
||||
}
|
||||
}
|
||||
|
||||
@ -610,8 +605,7 @@ impl<'a> CrateLocator<'a> {
|
||||
candidates,
|
||||
));
|
||||
}
|
||||
err_data = Some(vec![ret.as_ref().unwrap().0.clone()]);
|
||||
*slot = None;
|
||||
err_data = Some(vec![slot.take().unwrap().2]);
|
||||
}
|
||||
if let Some(candidates) = &mut err_data {
|
||||
candidates.push(lib);
|
||||
@ -644,7 +638,7 @@ impl<'a> CrateLocator<'a> {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
*slot = Some((hash, metadata));
|
||||
*slot = Some((hash, metadata, lib.clone()));
|
||||
ret = Some((lib, kind));
|
||||
}
|
||||
|
||||
@ -814,19 +808,26 @@ fn get_metadata_section<'p>(
|
||||
let compressed_len = u32::from_be_bytes(len_bytes) as usize;
|
||||
|
||||
// Header is okay -> inflate the actual metadata
|
||||
let compressed_bytes = &buf[data_start..(data_start + compressed_len)];
|
||||
debug!("inflating {} bytes of compressed metadata", compressed_bytes.len());
|
||||
// Assume the decompressed data will be at least the size of the compressed data, so we
|
||||
// don't have to grow the buffer as much.
|
||||
let mut inflated = Vec::with_capacity(compressed_bytes.len());
|
||||
FrameDecoder::new(compressed_bytes).read_to_end(&mut inflated).map_err(|_| {
|
||||
MetadataError::LoadFailure(format!(
|
||||
"failed to decompress metadata: {}",
|
||||
filename.display()
|
||||
))
|
||||
})?;
|
||||
let compressed_bytes = buf.slice(|buf| &buf[data_start..(data_start + compressed_len)]);
|
||||
if &compressed_bytes[..cmp::min(METADATA_HEADER.len(), compressed_bytes.len())]
|
||||
== METADATA_HEADER
|
||||
{
|
||||
// The metadata was not actually compressed.
|
||||
compressed_bytes
|
||||
} else {
|
||||
debug!("inflating {} bytes of compressed metadata", compressed_bytes.len());
|
||||
// Assume the decompressed data will be at least the size of the compressed data, so we
|
||||
// don't have to grow the buffer as much.
|
||||
let mut inflated = Vec::with_capacity(compressed_bytes.len());
|
||||
FrameDecoder::new(&*compressed_bytes).read_to_end(&mut inflated).map_err(|_| {
|
||||
MetadataError::LoadFailure(format!(
|
||||
"failed to decompress metadata: {}",
|
||||
filename.display()
|
||||
))
|
||||
})?;
|
||||
|
||||
slice_owned(inflated, Deref::deref)
|
||||
slice_owned(inflated, Deref::deref)
|
||||
}
|
||||
}
|
||||
CrateFlavor::Rmeta => {
|
||||
// mmap the file, because only a small fraction of it is read.
|
||||
|
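A sketch (not part of this patch) of the header check introduced above: if the payload already starts with the metadata magic bytes it is used as-is, otherwise it is run through a decompressor. The `decompress` closure is a stand-in for snap's `FrameDecoder`, and the magic constant is hypothetical.

```rust
// Returns the raw bytes when they already carry the metadata header, otherwise
// the decompressed bytes.
fn maybe_decompress(
    bytes: &[u8],
    magic: &[u8],
    decompress: impl FnOnce(&[u8]) -> Vec<u8>,
) -> Vec<u8> {
    let prefix_len = magic.len().min(bytes.len());
    if &bytes[..prefix_len] == magic {
        // The metadata was not actually compressed.
        bytes.to_vec()
    } else {
        decompress(bytes)
    }
}

fn main() {
    let magic = b"rust";
    let plain = b"rust metadata ...".to_vec();
    // Already carries the header: returned unchanged, no decompression attempted.
    assert_eq!(maybe_decompress(&plain, magic, |b| b.to_vec()), plain);
}
```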
@ -301,6 +301,7 @@ provide! { tcx, def_id, other, cdata,
|
||||
is_profiler_runtime => { cdata.root.profiler_runtime }
|
||||
required_panic_strategy => { cdata.root.required_panic_strategy }
|
||||
panic_in_drop_strategy => { cdata.root.panic_in_drop_strategy }
|
||||
reference_niches_policy => { cdata.root.reference_niches_policy }
|
||||
extern_crate => {
|
||||
let r = *cdata.extern_crate.lock();
|
||||
r.map(|c| &*tcx.arena.alloc(c))
|
||||
|
@ -673,6 +673,7 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
|
||||
stable_crate_id: tcx.def_path_hash(LOCAL_CRATE.as_def_id()).stable_crate_id(),
|
||||
required_panic_strategy: tcx.required_panic_strategy(LOCAL_CRATE),
|
||||
panic_in_drop_strategy: tcx.sess.opts.unstable_opts.panic_in_drop,
|
||||
reference_niches_policy: tcx.reference_niches_policy(LOCAL_CRATE),
|
||||
edition: tcx.sess.edition(),
|
||||
has_global_allocator: tcx.has_global_allocator(LOCAL_CRATE),
|
||||
has_alloc_error_handler: tcx.has_alloc_error_handler(LOCAL_CRATE),
|
||||
|
@ -32,7 +32,7 @@ use rustc_span::edition::Edition;
|
||||
use rustc_span::hygiene::{ExpnIndex, MacroKind};
|
||||
use rustc_span::symbol::{Ident, Symbol};
|
||||
use rustc_span::{self, ExpnData, ExpnHash, ExpnId, Span};
|
||||
use rustc_target::abi::{FieldIdx, VariantIdx};
|
||||
use rustc_target::abi::{FieldIdx, ReferenceNichePolicy, VariantIdx};
|
||||
use rustc_target::spec::{PanicStrategy, TargetTriple};
|
||||
|
||||
use std::marker::PhantomData;
|
||||
@ -251,6 +251,7 @@ pub(crate) struct CrateRoot {
|
||||
stable_crate_id: StableCrateId,
|
||||
required_panic_strategy: Option<PanicStrategy>,
|
||||
panic_in_drop_strategy: PanicStrategy,
|
||||
reference_niches_policy: ReferenceNichePolicy,
|
||||
edition: Edition,
|
||||
has_global_allocator: bool,
|
||||
has_alloc_error_handler: bool,
|
||||
|
@ -388,7 +388,6 @@ pub enum ValidationErrorKind<'tcx> {
|
||||
MutableRefInConst,
|
||||
NullFnPtr,
|
||||
NeverVal,
|
||||
NullablePtrOutOfRange { range: WrappingRange, max_value: u128 },
|
||||
PtrOutOfRange { range: WrappingRange, max_value: u128 },
|
||||
OutOfRange { value: String, range: WrappingRange, max_value: u128 },
|
||||
UnsafeCell,
|
||||
|
@ -19,33 +19,19 @@ pub trait PointerArithmetic: HasDataLayout {
|
||||
|
||||
#[inline(always)]
|
||||
fn max_size_of_val(&self) -> Size {
|
||||
Size::from_bytes(self.target_isize_max())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn target_usize_max(&self) -> u64 {
|
||||
self.pointer_size().unsigned_int_max().try_into().unwrap()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn target_isize_min(&self) -> i64 {
|
||||
self.pointer_size().signed_int_min().try_into().unwrap()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn target_isize_max(&self) -> i64 {
|
||||
self.pointer_size().signed_int_max().try_into().unwrap()
|
||||
Size::from_bytes(self.data_layout().target_isize_max())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn target_usize_to_isize(&self, val: u64) -> i64 {
let dl = self.data_layout();
let val = val as i64;
// Now wrap-around into the machine_isize range.
if val > self.target_isize_max() {
if val > dl.target_isize_max() {
// This can only happen if the ptr size is < 64, so we know max_usize_plus_1 fits into
// i64.
debug_assert!(self.pointer_size().bits() < 64);
let max_usize_plus_1 = 1u128 << self.pointer_size().bits();
debug_assert!(dl.pointer_size.bits() < 64);
let max_usize_plus_1 = 1u128 << dl.pointer_size.bits();
val - i64::try_from(max_usize_plus_1).unwrap()
} else {
val
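A standalone sketch (not part of this patch) of the wrap-around performed by `target_usize_to_isize`: a target-width address is reinterpreted as a signed value of the same width. `ptr_bits` stands in for `dl.pointer_size.bits()`.

```rust
fn target_usize_to_isize(val: u64, ptr_bits: u32) -> i64 {
    assert!((1..=64).contains(&ptr_bits));
    let val = val as i64;
    let isize_max: i128 = (1i128 << (ptr_bits - 1)) - 1;
    if i128::from(val) > isize_max {
        // Only reachable when ptr_bits < 64, so `max_usize_plus_1` fits in i128.
        let max_usize_plus_1 = 1i128 << ptr_bits;
        (i128::from(val) - max_usize_plus_1) as i64
    } else {
        val
    }
}

fn main() {
    // On a 32-bit target, address 0xFFFF_FFFF is isize -1.
    assert_eq!(target_usize_to_isize(0xFFFF_FFFF, 32), -1);
    assert_eq!(target_usize_to_isize(5, 32), 5);
}
```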
@ -58,7 +44,7 @@ pub trait PointerArithmetic: HasDataLayout {
|
||||
#[inline]
|
||||
fn truncate_to_ptr(&self, (val, over): (u64, bool)) -> (u64, bool) {
|
||||
let val = u128::from(val);
|
||||
let max_ptr_plus_1 = 1u128 << self.pointer_size().bits();
|
||||
let max_ptr_plus_1 = 1u128 << self.data_layout().pointer_size.bits();
|
||||
(u64::try_from(val % max_ptr_plus_1).unwrap(), over || val >= max_ptr_plus_1)
|
||||
}
|
||||
|
||||
@ -76,11 +62,11 @@ pub trait PointerArithmetic: HasDataLayout {
|
||||
let n = i.unsigned_abs();
|
||||
if i >= 0 {
|
||||
let (val, over) = self.overflowing_offset(val, n);
|
||||
(val, over || i > self.target_isize_max())
|
||||
(val, over || i > self.data_layout().target_isize_max())
|
||||
} else {
|
||||
let res = val.overflowing_sub(n);
|
||||
let (val, over) = self.truncate_to_ptr(res);
|
||||
(val, over || i < self.target_isize_min())
|
||||
(val, over || i < self.data_layout().target_isize_min())
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -111,6 +111,11 @@ impl EraseType
|
||||
>()];
|
||||
}
|
||||
|
||||
impl EraseType for Result<ty::layout::TyAndNaiveLayout<'_>, &ty::layout::LayoutError<'_>> {
|
||||
type Result =
|
||||
[u8; size_of::<Result<ty::layout::TyAndNaiveLayout<'_>, &ty::layout::LayoutError<'_>>>()];
|
||||
}
|
||||
|
||||
impl EraseType for Result<ty::Const<'_>, mir::interpret::LitToConstError> {
|
||||
type Result = [u8; size_of::<Result<ty::Const<'static>, mir::interpret::LitToConstError>>()];
|
||||
}
|
||||
@ -291,6 +296,7 @@ trivial! {
|
||||
rustc_span::Symbol,
|
||||
rustc_span::symbol::Ident,
|
||||
rustc_target::spec::PanicStrategy,
|
||||
rustc_target::abi::ReferenceNichePolicy,
|
||||
rustc_type_ir::Variance,
|
||||
u32,
|
||||
usize,
|
||||
|
@ -898,6 +898,10 @@ rustc_queries! {
|
||||
desc { |tcx| "linting {}", describe_as_module(key, tcx) }
|
||||
}
|
||||
|
||||
query check_unused_traits(_: ()) -> () {
|
||||
desc { "checking unused trait imports in crate" }
|
||||
}
|
||||
|
||||
/// Checks the attributes in the module.
|
||||
query check_mod_attrs(key: LocalDefId) -> () {
|
||||
desc { |tcx| "checking attributes in {}", describe_as_module(key, tcx) }
|
||||
@ -1390,6 +1394,18 @@ rustc_queries! {
|
||||
desc { "computing layout of `{}`", key.value }
|
||||
}
|
||||
|
||||
/// Computes the naive layout approximation of a type. Note that this implicitly
/// executes in "reveal all" mode, and will normalize the input type.
///
/// Unlike `layout_of`, this doesn't look past references (beyond the `Pointee::Metadata`
/// projection), and as such can be called on generic types like `Option<&T>`.
query naive_layout_of(
key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>
) -> Result<ty::layout::TyAndNaiveLayout<'tcx>, &'tcx ty::layout::LayoutError<'tcx>> {
depth_limit
desc { "computing layout (naive) of `{}`", key.value }
}
|
||||
|
||||
/// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
|
||||
///
|
||||
/// NB: this doesn't handle virtual calls - those should use `fn_abi_of_instance`
|
||||
@ -1465,6 +1481,11 @@ rustc_queries! {
|
||||
desc { "getting a crate's configured panic-in-drop strategy" }
|
||||
separate_provide_extern
|
||||
}
|
||||
query reference_niches_policy(_: CrateNum) -> abi::ReferenceNichePolicy {
|
||||
fatal_cycle
|
||||
desc { "getting a crate's policy for size and alignment niches of references" }
|
||||
separate_provide_extern
|
||||
}
|
||||
query is_no_builtins(_: CrateNum) -> bool {
|
||||
fatal_cycle
|
||||
desc { "getting whether a crate has `#![no_builtins]`" }
|
||||
|
@ -569,6 +569,7 @@ pub struct GlobalCtxt<'tcx> {
|
||||
|
||||
/// Caches the results of goal evaluation in the new solver.
|
||||
pub new_solver_evaluation_cache: solve::EvaluationCache<'tcx>,
|
||||
pub new_solver_coherence_evaluation_cache: solve::EvaluationCache<'tcx>,
|
||||
|
||||
/// Data layout specification for the current target.
|
||||
pub data_layout: TargetDataLayout,
|
||||
@ -680,10 +681,12 @@ impl<'tcx> TyCtxt<'tcx> {
|
||||
value.lift_to_tcx(self)
|
||||
}
|
||||
|
||||
/// Creates a type context and call the closure with a `TyCtxt` reference
|
||||
/// to the context. The closure enforces that the type context and any interned
|
||||
/// value (types, args, etc.) can only be used while `ty::tls` has a valid
|
||||
/// reference to the context, to allow formatting values that need it.
|
||||
/// Creates a type context. To use the context call `fn enter` which
|
||||
/// provides a `TyCtxt`.
|
||||
///
|
||||
/// By only providing the `TyCtxt` inside of the closure we enforce that the type
/// context and any interned value (types, args, etc.) can only be used while `ty::tls`
/// has a valid reference to the context, to allow formatting values that need it.
pub fn create_global_ctxt(
|
||||
s: &'tcx Session,
|
||||
lint_store: Lrc<dyn Any + sync::DynSend + sync::DynSync>,
|
||||
@ -721,6 +724,7 @@ impl<'tcx> TyCtxt<'tcx> {
|
||||
selection_cache: Default::default(),
|
||||
evaluation_cache: Default::default(),
|
||||
new_solver_evaluation_cache: Default::default(),
|
||||
new_solver_coherence_evaluation_cache: Default::default(),
|
||||
data_layout,
|
||||
alloc_map: Lock::new(interpret::AllocMap::new()),
|
||||
}
|
||||
|
@ -6,35 +6,33 @@ use std::fmt::Debug;
|
||||
use std::hash::Hash;
|
||||
use std::iter;
|
||||
|
||||
use self::SimplifiedType::*;
|
||||
|
||||
/// See `simplify_type`.
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, TyEncodable, TyDecodable, HashStable)]
|
||||
pub enum SimplifiedType {
|
||||
BoolSimplifiedType,
|
||||
CharSimplifiedType,
|
||||
IntSimplifiedType(ty::IntTy),
|
||||
UintSimplifiedType(ty::UintTy),
|
||||
FloatSimplifiedType(ty::FloatTy),
|
||||
AdtSimplifiedType(DefId),
|
||||
ForeignSimplifiedType(DefId),
|
||||
StrSimplifiedType,
|
||||
ArraySimplifiedType,
|
||||
SliceSimplifiedType,
|
||||
RefSimplifiedType(Mutability),
|
||||
PtrSimplifiedType(Mutability),
|
||||
NeverSimplifiedType,
|
||||
TupleSimplifiedType(usize),
|
||||
Bool,
|
||||
Char,
|
||||
Int(ty::IntTy),
|
||||
Uint(ty::UintTy),
|
||||
Float(ty::FloatTy),
|
||||
Adt(DefId),
|
||||
Foreign(DefId),
|
||||
Str,
|
||||
Array,
|
||||
Slice,
|
||||
Ref(Mutability),
|
||||
Ptr(Mutability),
|
||||
Never,
|
||||
Tuple(usize),
|
||||
/// A trait object, all of whose components are markers
|
||||
/// (e.g., `dyn Send + Sync`).
|
||||
MarkerTraitObjectSimplifiedType,
|
||||
TraitSimplifiedType(DefId),
|
||||
ClosureSimplifiedType(DefId),
|
||||
GeneratorSimplifiedType(DefId),
|
||||
GeneratorWitnessSimplifiedType(usize),
|
||||
GeneratorWitnessMIRSimplifiedType(DefId),
|
||||
FunctionSimplifiedType(usize),
|
||||
PlaceholderSimplifiedType,
|
||||
MarkerTraitObject,
|
||||
Trait(DefId),
|
||||
Closure(DefId),
|
||||
Generator(DefId),
|
||||
GeneratorWitness(usize),
|
||||
GeneratorWitnessMIR(DefId),
|
||||
Function(usize),
|
||||
Placeholder,
|
||||
}
|
||||
|
||||
/// Generic parameters are pretty much just bound variables, e.g.
|
||||
@ -64,6 +62,9 @@ pub enum TreatParams {
|
||||
/// correct mode for *lookup*, as during candidate selection.
|
||||
///
|
||||
/// N.B. during deep rejection, this acts identically to `ForLookup`.
|
||||
///
|
||||
/// FIXME(-Ztrait-solver=next): Remove this variant and cleanup
|
||||
/// the code.
|
||||
NextSolverLookup,
|
||||
}
|
||||
|
||||
@ -110,34 +111,36 @@ pub fn simplify_type<'tcx>(
|
||||
treat_params: TreatParams,
|
||||
) -> Option<SimplifiedType> {
|
||||
match *ty.kind() {
|
||||
ty::Bool => Some(BoolSimplifiedType),
|
||||
ty::Char => Some(CharSimplifiedType),
|
||||
ty::Int(int_type) => Some(IntSimplifiedType(int_type)),
|
||||
ty::Uint(uint_type) => Some(UintSimplifiedType(uint_type)),
|
||||
ty::Float(float_type) => Some(FloatSimplifiedType(float_type)),
|
||||
ty::Adt(def, _) => Some(AdtSimplifiedType(def.did())),
|
||||
ty::Str => Some(StrSimplifiedType),
|
||||
ty::Array(..) => Some(ArraySimplifiedType),
|
||||
ty::Slice(..) => Some(SliceSimplifiedType),
|
||||
ty::RawPtr(ptr) => Some(PtrSimplifiedType(ptr.mutbl)),
|
||||
ty::Bool => Some(SimplifiedType::Bool),
|
||||
ty::Char => Some(SimplifiedType::Char),
|
||||
ty::Int(int_type) => Some(SimplifiedType::Int(int_type)),
|
||||
ty::Uint(uint_type) => Some(SimplifiedType::Uint(uint_type)),
|
||||
ty::Float(float_type) => Some(SimplifiedType::Float(float_type)),
|
||||
ty::Adt(def, _) => Some(SimplifiedType::Adt(def.did())),
|
||||
ty::Str => Some(SimplifiedType::Str),
|
||||
ty::Array(..) => Some(SimplifiedType::Array),
|
||||
ty::Slice(..) => Some(SimplifiedType::Slice),
|
||||
ty::RawPtr(ptr) => Some(SimplifiedType::Ptr(ptr.mutbl)),
|
||||
ty::Dynamic(trait_info, ..) => match trait_info.principal_def_id() {
|
||||
Some(principal_def_id) if !tcx.trait_is_auto(principal_def_id) => {
|
||||
Some(TraitSimplifiedType(principal_def_id))
|
||||
Some(SimplifiedType::Trait(principal_def_id))
|
||||
}
|
||||
_ => Some(MarkerTraitObjectSimplifiedType),
|
||||
_ => Some(SimplifiedType::MarkerTraitObject),
|
||||
},
|
||||
ty::Ref(_, _, mutbl) => Some(RefSimplifiedType(mutbl)),
|
||||
ty::FnDef(def_id, _) | ty::Closure(def_id, _) => Some(ClosureSimplifiedType(def_id)),
|
||||
ty::Generator(def_id, _, _) => Some(GeneratorSimplifiedType(def_id)),
|
||||
ty::GeneratorWitness(tys) => Some(GeneratorWitnessSimplifiedType(tys.skip_binder().len())),
|
||||
ty::GeneratorWitnessMIR(def_id, _) => Some(GeneratorWitnessMIRSimplifiedType(def_id)),
|
||||
ty::Never => Some(NeverSimplifiedType),
|
||||
ty::Tuple(tys) => Some(TupleSimplifiedType(tys.len())),
|
||||
ty::FnPtr(f) => Some(FunctionSimplifiedType(f.skip_binder().inputs().len())),
|
||||
ty::Placeholder(..) => Some(PlaceholderSimplifiedType),
|
||||
ty::Ref(_, _, mutbl) => Some(SimplifiedType::Ref(mutbl)),
|
||||
ty::FnDef(def_id, _) | ty::Closure(def_id, _) => Some(SimplifiedType::Closure(def_id)),
|
||||
ty::Generator(def_id, _, _) => Some(SimplifiedType::Generator(def_id)),
|
||||
ty::GeneratorWitness(tys) => {
|
||||
Some(SimplifiedType::GeneratorWitness(tys.skip_binder().len()))
|
||||
}
|
||||
ty::GeneratorWitnessMIR(def_id, _) => Some(SimplifiedType::GeneratorWitnessMIR(def_id)),
|
||||
ty::Never => Some(SimplifiedType::Never),
|
||||
ty::Tuple(tys) => Some(SimplifiedType::Tuple(tys.len())),
|
||||
ty::FnPtr(f) => Some(SimplifiedType::Function(f.skip_binder().inputs().len())),
|
||||
ty::Placeholder(..) => Some(SimplifiedType::Placeholder),
|
||||
ty::Param(_) => match treat_params {
|
||||
TreatParams::ForLookup | TreatParams::NextSolverLookup => {
|
||||
Some(PlaceholderSimplifiedType)
|
||||
Some(SimplifiedType::Placeholder)
|
||||
}
|
||||
TreatParams::AsCandidateKey => None,
|
||||
},
|
||||
@ -147,11 +150,13 @@ pub fn simplify_type<'tcx>(
|
||||
//
|
||||
// We will have to be careful with lazy normalization here.
|
||||
// FIXME(lazy_normalization): This is probably not right...
|
||||
TreatParams::ForLookup if !ty.has_non_region_infer() => Some(PlaceholderSimplifiedType),
|
||||
TreatParams::NextSolverLookup => Some(PlaceholderSimplifiedType),
|
||||
TreatParams::ForLookup if !ty.has_non_region_infer() => {
|
||||
Some(SimplifiedType::Placeholder)
|
||||
}
|
||||
TreatParams::NextSolverLookup => Some(SimplifiedType::Placeholder),
|
||||
TreatParams::ForLookup | TreatParams::AsCandidateKey => None,
|
||||
},
|
||||
ty::Foreign(def_id) => Some(ForeignSimplifiedType(def_id)),
|
||||
ty::Foreign(def_id) => Some(SimplifiedType::Foreign(def_id)),
|
||||
ty::Bound(..) | ty::Infer(_) | ty::Error(_) => None,
|
||||
}
|
||||
}
|
||||
@ -159,12 +164,12 @@ pub fn simplify_type<'tcx>(
|
||||
impl SimplifiedType {
|
||||
pub fn def(self) -> Option<DefId> {
|
||||
match self {
|
||||
AdtSimplifiedType(d)
|
||||
| ForeignSimplifiedType(d)
|
||||
| TraitSimplifiedType(d)
|
||||
| ClosureSimplifiedType(d)
|
||||
| GeneratorSimplifiedType(d)
|
||||
| GeneratorWitnessMIRSimplifiedType(d) => Some(d),
|
||||
SimplifiedType::Adt(d)
|
||||
| SimplifiedType::Foreign(d)
|
||||
| SimplifiedType::Trait(d)
|
||||
| SimplifiedType::Closure(d)
|
||||
| SimplifiedType::Generator(d)
|
||||
| SimplifiedType::GeneratorWitnessMIR(d) => Some(d),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
@ -313,7 +313,16 @@ impl<'tcx> SizeSkeleton<'tcx> {
|
||||
) -> Result<SizeSkeleton<'tcx>, &'tcx LayoutError<'tcx>> {
|
||||
debug_assert!(!ty.has_non_region_infer());
|
||||
|
||||
// First try computing a static layout.
|
||||
// First, try computing an exact naive layout (this covers simple types with generic
|
||||
// references, where a full static layout would fail).
|
||||
if let Ok(layout) = tcx.naive_layout_of(param_env.and(ty)) {
|
||||
if layout.exact {
|
||||
return Ok(SizeSkeleton::Known(layout.size));
|
||||
}
|
||||
}
|
||||
|
||||
// Second, try computing a full static layout (this covers cases when the naive layout
|
||||
// wasn't smart enough, but cannot deal with generic references).
|
||||
let err = match tcx.layout_of(param_env.and(ty)) {
|
||||
Ok(layout) => {
|
||||
return Ok(SizeSkeleton::Known(layout.size));
|
||||
@ -327,6 +336,7 @@ impl<'tcx> SizeSkeleton<'tcx> {
|
||||
) => return Err(e),
|
||||
};
|
||||
|
||||
// Third, fall back to ad-hoc cases.
|
||||
match *ty.kind() {
|
||||
ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
|
||||
let non_zero = !ty.is_unsafe_ptr();
|
||||
@ -621,6 +631,219 @@ impl<T, E> MaybeResult<T> for Result<T, E> {
|
||||
|
||||
pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
|
||||
|
||||
#[derive(Copy, Clone, Debug, HashStable)]
|
||||
pub struct TyAndNaiveLayout<'tcx> {
|
||||
pub ty: Ty<'tcx>,
|
||||
pub layout: NaiveLayout,
|
||||
}
|
||||
|
||||
impl std::ops::Deref for TyAndNaiveLayout<'_> {
|
||||
type Target = NaiveLayout;
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.layout
|
||||
}
|
||||
}
|
||||
|
||||
impl std::ops::DerefMut for TyAndNaiveLayout<'_> {
|
||||
fn deref_mut(&mut self) -> &mut Self::Target {
|
||||
&mut self.layout
|
||||
}
|
||||
}
|
||||
|
||||
/// Extremely simplified approximation of a type's layout returned by the
/// `naive_layout_of` query.
#[derive(Copy, Clone, Debug, HashStable)]
pub struct NaiveLayout {
pub abi: NaiveAbi,
/// Niche information, required for tracking non-null enum optimizations.
pub niches: NaiveNiches,
/// An underestimate of the layout's size.
pub size: Size,
/// An underestimate of the layout's required alignment.
pub align: Align,
/// If `true`, `size` and `align` must be exact values.
pub exact: bool,
}
|
||||
|
||||
#[derive(Copy, Clone, Debug, Eq, PartialEq, HashStable)]
|
||||
pub enum NaiveNiches {
|
||||
None,
|
||||
Some,
|
||||
Maybe,
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Debug, Eq, PartialEq, HashStable)]
|
||||
pub enum NaiveAbi {
|
||||
/// A scalar layout, always implies `exact` and a non-zero `size`.
|
||||
Scalar(Primitive),
|
||||
/// An uninhabited layout. (needed to properly track `Scalar` and niches)
|
||||
Uninhabited,
|
||||
/// An unsized aggregate. (needed to properly track `Scalar` and niches)
|
||||
Unsized,
|
||||
/// Any other sized layout.
|
||||
Sized,
|
||||
}
|
||||
|
||||
impl NaiveAbi {
|
||||
#[inline]
|
||||
pub fn as_aggregate(self) -> Self {
|
||||
match self {
|
||||
NaiveAbi::Scalar(_) => NaiveAbi::Sized,
|
||||
_ => self,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl NaiveLayout {
|
||||
/// The layout of an empty aggregate, e.g. `()`.
|
||||
pub const EMPTY: Self = Self {
|
||||
size: Size::ZERO,
|
||||
align: Align::ONE,
|
||||
exact: true,
|
||||
abi: NaiveAbi::Sized,
|
||||
niches: NaiveNiches::None,
|
||||
};
|
||||
|
||||
/// Returns whether `self` is a valid approximation of the given full `layout`.
|
||||
///
|
||||
/// This should always return `true` when both layouts are computed from the same type.
|
||||
pub fn is_refined_by(&self, layout: Layout<'_>) -> bool {
|
||||
if self.size > layout.size() || self.align > layout.align().abi {
|
||||
return false;
|
||||
}
|
||||
|
||||
if let NaiveAbi::Scalar(prim) = self.abi {
|
||||
if !self.exact
|
||||
|| self.size == Size::ZERO
|
||||
|| !matches!(layout.abi(), Abi::Scalar(s) if s.primitive() == prim)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
match (self.niches, layout.largest_niche()) {
|
||||
(NaiveNiches::None, Some(_)) => return false,
|
||||
(NaiveNiches::Some, None) => return false,
|
||||
_ => (),
|
||||
}
|
||||
|
||||
!self.exact || (self.size, self.align) == (layout.size(), layout.align().abi)
|
||||
}
|
||||
|
||||
/// Returns if this layout is known to be pointer-like (`None` if uncertain)
|
||||
///
|
||||
/// See the corresponding `Layout::is_pointer_like` method.
|
||||
pub fn is_pointer_like(&self, dl: &TargetDataLayout) -> Option<bool> {
|
||||
match self.abi {
|
||||
NaiveAbi::Scalar(_) => {
|
||||
assert!(self.exact);
|
||||
Some(self.size == dl.pointer_size && self.align == dl.pointer_align.abi)
|
||||
}
|
||||
NaiveAbi::Uninhabited | NaiveAbi::Unsized => Some(false),
|
||||
NaiveAbi::Sized if self.exact => Some(false),
|
||||
NaiveAbi::Sized => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Artificially lowers the alignment of this layout.
|
||||
#[must_use]
|
||||
#[inline]
|
||||
pub fn packed(mut self, align: Align) -> Self {
|
||||
if self.align > align {
|
||||
self.align = align;
|
||||
self.abi = self.abi.as_aggregate();
|
||||
}
|
||||
self
|
||||
}
|
||||
|
||||
/// Artificially raises the alignment of this layout.
|
||||
#[must_use]
|
||||
#[inline]
|
||||
pub fn align_to(mut self, align: Align) -> Self {
|
||||
if align > self.align {
|
||||
self.align = align;
|
||||
self.abi = self.abi.as_aggregate();
|
||||
}
|
||||
self
|
||||
}
|
||||
|
||||
/// Artificially makes this layout inexact.
|
||||
#[must_use]
|
||||
#[inline]
|
||||
pub fn inexact(mut self) -> Self {
|
||||
self.abi = self.abi.as_aggregate();
|
||||
self.exact = false;
|
||||
self
|
||||
}
|
||||
|
||||
/// Pads this layout so that its size is a multiple of `align`.
|
||||
#[must_use]
|
||||
#[inline]
|
||||
pub fn pad_to_align(mut self, align: Align) -> Self {
|
||||
let new_size = self.size.align_to(align);
|
||||
if new_size > self.size {
|
||||
self.abi = self.abi.as_aggregate();
|
||||
self.size = new_size;
|
||||
}
|
||||
self
|
||||
}
|
||||
|
||||
/// Returns the layout of `self` immediately followed by `other`, without any
|
||||
/// padding between them, as in a packed `struct` or tuple.
|
||||
#[must_use]
|
||||
#[inline]
|
||||
pub fn concat(&self, other: &Self, dl: &TargetDataLayout) -> Option<Self> {
|
||||
use NaiveAbi::*;
|
||||
|
||||
let size = self.size.checked_add(other.size, dl)?;
|
||||
let align = cmp::max(self.align, other.align);
|
||||
let exact = self.exact && other.exact;
|
||||
let abi = match (self.abi, other.abi) {
|
||||
// The uninhabited and unsized ABIs override everything.
|
||||
(Uninhabited, _) | (_, Uninhabited) => Uninhabited,
|
||||
(Unsized, _) | (_, Unsized) => Unsized,
|
||||
// A scalar struct must have a single non-ZST field.
|
||||
(_, s @ Scalar(_)) if exact && self.size == Size::ZERO => s,
|
||||
(s @ Scalar(_), _) if exact && other.size == Size::ZERO => s,
|
||||
// Default case.
|
||||
(_, _) => Sized,
|
||||
};
|
||||
let niches = match (self.niches, other.niches) {
|
||||
(NaiveNiches::Some, _) | (_, NaiveNiches::Some) => NaiveNiches::Some,
|
||||
(NaiveNiches::None, NaiveNiches::None) => NaiveNiches::None,
|
||||
(_, _) => NaiveNiches::Maybe,
|
||||
};
|
||||
Some(Self { abi, size, align, exact, niches })
|
||||
}
|
||||
|
||||
/// Returns the layout of `self` superposed with `other`, as in an `enum`
/// or a `union`.
///
/// Note: this always ignores niche information from `other`.
#[must_use]
|
||||
#[inline]
|
||||
pub fn union(&self, other: &Self) -> Self {
|
||||
use NaiveAbi::*;
|
||||
|
||||
let size = cmp::max(self.size, other.size);
|
||||
let align = cmp::max(self.align, other.align);
|
||||
let exact = self.exact && other.exact;
|
||||
let abi = match (self.abi, other.abi) {
|
||||
// The unsized ABI overrides everything.
|
||||
(Unsized, _) | (_, Unsized) => Unsized,
|
||||
// A scalar union must have a single non-ZST field...
|
||||
(_, s @ Scalar(_)) if exact && self.size == Size::ZERO => s,
|
||||
(s @ Scalar(_), _) if exact && other.size == Size::ZERO => s,
|
||||
// ...or identical scalar fields.
|
||||
(Scalar(s1), Scalar(s2)) if s1 == s2 => Scalar(s1),
|
||||
// Default cases.
|
||||
(Uninhabited, Uninhabited) => Uninhabited,
|
||||
(_, _) => Sized,
|
||||
};
|
||||
Self { abi, size, align, exact, niches: self.niches }
|
||||
}
|
||||
}
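
For illustration only (not part of this patch): the core size/align arithmetic behind `NaiveLayout::concat` and `NaiveLayout::union`, with a tiny standalone type in place of the real struct. Concatenation adds the sizes and takes the maximum alignment; a union/enum superposition takes the maximum of both. ABI, niche, and exactness tracking are deliberately omitted.

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
struct SizeAlign {
    size: u64,  // in bytes
    align: u64, // in bytes, power of two
}

// Fields laid out back to back, as in a packed struct or tuple.
fn concat(a: SizeAlign, b: SizeAlign) -> Option<SizeAlign> {
    Some(SizeAlign { size: a.size.checked_add(b.size)?, align: a.align.max(b.align) })
}

// Fields superposed at offset zero, as in a union or enum.
fn union(a: SizeAlign, b: SizeAlign) -> SizeAlign {
    SizeAlign { size: a.size.max(b.size), align: a.align.max(b.align) }
}

fn main() {
    let a = SizeAlign { size: 4, align: 4 }; // e.g. u32
    let b = SizeAlign { size: 1, align: 1 }; // e.g. u8
    assert_eq!(concat(a, b), Some(SizeAlign { size: 5, align: 4 }));
    assert_eq!(union(a, b), SizeAlign { size: 4, align: 4 });
}
```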
|
||||
|
||||
/// Trait for contexts that want to be able to compute layouts of types.
|
||||
/// This automatically gives access to `LayoutOf`, through a blanket `impl`.
|
||||
pub trait LayoutOfHelpers<'tcx>: HasDataLayout + HasTyCtxt<'tcx> + HasParamEnv<'tcx> {
|
||||
@ -673,6 +896,19 @@ pub trait LayoutOf<'tcx>: LayoutOfHelpers<'tcx> {
|
||||
.map_err(|err| self.handle_layout_err(*err, span, ty)),
|
||||
)
|
||||
}
|
||||
|
||||
/// Computes the naive layout estimate of a type. Note that this implicitly
|
||||
/// executes in "reveal all" mode, and will normalize the input type.
|
||||
///
|
||||
/// Unlike `layout_of`, this doesn't look past references (beyond the `Pointee::Metadata`
|
||||
/// projection), and as such can be called on generic types like `Option<&T>`.
|
||||
#[inline]
|
||||
fn naive_layout_of(
|
||||
&self,
|
||||
ty: Ty<'tcx>,
|
||||
) -> Result<TyAndNaiveLayout<'tcx>, &'tcx LayoutError<'tcx>> {
|
||||
self.tcx().naive_layout_of(self.param_env().and(ty))
|
||||
}
|
||||
}
|
||||
|
||||
impl<'tcx, C: LayoutOfHelpers<'tcx>> LayoutOf<'tcx> for C {}
|
||||
@ -969,6 +1205,9 @@ where
|
||||
this: TyAndLayout<'tcx>,
|
||||
cx: &C,
|
||||
offset: Size,
|
||||
// If true, assume that pointers are either null or valid (according to their type),
|
||||
// enabling extra optimizations.
|
||||
mut assume_valid_ptr: bool,
|
||||
) -> Option<PointeeInfo> {
|
||||
let tcx = cx.tcx();
|
||||
let param_env = cx.param_env();
|
||||
@ -991,19 +1230,19 @@ where
|
||||
// Freeze/Unpin queries, and can save time in the codegen backend (noalias
|
||||
// attributes in LLVM have compile-time cost even in unoptimized builds).
|
||||
let optimize = tcx.sess.opts.optimize != OptLevel::No;
|
||||
let kind = match mt {
|
||||
hir::Mutability::Not => PointerKind::SharedRef {
|
||||
let safe = match (assume_valid_ptr, mt) {
|
||||
(true, hir::Mutability::Not) => Some(PointerKind::SharedRef {
|
||||
frozen: optimize && ty.is_freeze(tcx, cx.param_env()),
|
||||
},
|
||||
hir::Mutability::Mut => PointerKind::MutableRef {
|
||||
}),
|
||||
(true, hir::Mutability::Mut) => Some(PointerKind::MutableRef {
|
||||
unpin: optimize && ty.is_unpin(tcx, cx.param_env()),
|
||||
},
|
||||
}),
|
||||
(false, _) => None,
|
||||
};
|
||||
|
||||
tcx.layout_of(param_env.and(ty)).ok().map(|layout| PointeeInfo {
|
||||
size: layout.size,
|
||||
align: layout.align.abi,
|
||||
safe: Some(kind),
|
||||
safe,
|
||||
})
|
||||
}
|
||||
|
||||
@ -1012,19 +1251,21 @@ where
|
||||
// Within the discriminant field, only the niche itself is
|
||||
// always initialized, so we only check for a pointer at its
|
||||
// offset.
|
||||
//
|
||||
// If the niche is a pointer, it's either valid (according
|
||||
// to its type), or null (which the niche field's scalar
|
||||
// validity range encodes). This allows using
|
||||
// `dereferenceable_or_null` for e.g., `Option<&T>`, and
|
||||
// this will continue to work as long as we don't start
|
||||
// using more niches than just null (e.g., the first page of
|
||||
// the address space, or unaligned pointers).
|
||||
Variants::Multiple {
|
||||
tag_encoding: TagEncoding::Niche { untagged_variant, .. },
|
||||
tag_encoding:
|
||||
TagEncoding::Niche {
|
||||
untagged_variant,
|
||||
niche_variants: ref variants,
|
||||
niche_start,
|
||||
},
|
||||
tag_field,
|
||||
..
|
||||
} if this.fields.offset(tag_field) == offset => {
|
||||
// We can only continue assuming pointer validity if the only possible
// discriminant value is null. The null special-case is permitted by LLVM's
// `dereferenceable_or_null`, and allows types like `Option<&T>` to benefit
// from optimizations.
assume_valid_ptr &= niche_start == 0 && variants.start() == variants.end();
|
||||
Some(this.for_variant(cx, untagged_variant))
|
||||
}
|
||||
_ => Some(this),
|
||||
@ -1050,9 +1291,12 @@ where
|
||||
result = field.to_result().ok().and_then(|field| {
|
||||
if ptr_end <= field_start + field.size {
|
||||
// We found the right field, look inside it.
|
||||
let field_info =
|
||||
field.pointee_info_at(cx, offset - field_start);
|
||||
field_info
|
||||
Self::ty_and_layout_pointee_info_at(
|
||||
field,
|
||||
cx,
|
||||
offset - field_start,
|
||||
assume_valid_ptr,
|
||||
)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
@ -1067,7 +1311,7 @@ where
|
||||
// FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
|
||||
if let Some(ref mut pointee) = result {
|
||||
if let ty::Adt(def, _) = this.ty.kind() {
|
||||
if def.is_box() && offset.bytes() == 0 {
|
||||
if assume_valid_ptr && def.is_box() && offset.bytes() == 0 {
|
||||
let optimize = tcx.sess.opts.optimize != OptLevel::No;
|
||||
pointee.safe = Some(PointerKind::Box {
|
||||
unpin: optimize && this.ty.boxed_ty().is_unpin(tcx, cx.param_env()),
|
||||
|
@ -88,7 +88,7 @@ impl<'tcx> GenKillAnalysis<'tcx> for MaybeLiveLocals {
|
||||
}
|
||||
}
|
||||
|
||||
struct TransferFunction<'a, T>(&'a mut T);
|
||||
pub struct TransferFunction<'a, T>(pub &'a mut T);
|
||||
|
||||
impl<'tcx, T> Visitor<'tcx> for TransferFunction<'_, T>
|
||||
where
|
||||
|
@ -26,6 +26,7 @@ pub use self::borrowed_locals::borrowed_locals;
|
||||
pub use self::borrowed_locals::MaybeBorrowedLocals;
|
||||
pub use self::liveness::MaybeLiveLocals;
|
||||
pub use self::liveness::MaybeTransitiveLiveLocals;
|
||||
pub use self::liveness::TransferFunction as LivenessTransferFunction;
|
||||
pub use self::storage_liveness::{MaybeRequiresStorage, MaybeStorageDead, MaybeStorageLive};
|
||||
|
||||
/// `MaybeInitializedPlaces` tracks all places that might be
|
||||
|
@ -14,8 +14,7 @@ use rustc_middle::mir::visit::{
|
||||
};
|
||||
use rustc_middle::mir::*;
|
||||
use rustc_middle::ty::layout::{LayoutError, LayoutOf, LayoutOfHelpers, TyAndLayout};
|
||||
use rustc_middle::ty::GenericArgs;
|
||||
use rustc_middle::ty::{self, ConstKind, Instance, ParamEnv, Ty, TyCtxt, TypeVisitableExt};
|
||||
use rustc_middle::ty::{self, GenericArgs, Instance, ParamEnv, Ty, TyCtxt, TypeVisitableExt};
|
||||
use rustc_span::{def_id::DefId, Span, DUMMY_SP};
|
||||
use rustc_target::abi::{self, Align, HasDataLayout, Size, TargetDataLayout};
|
||||
use rustc_target::spec::abi::Abi as CallAbi;
|
||||
@ -407,51 +406,9 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
|
||||
ecx.machine.written_only_inside_own_block_locals.remove(&local);
|
||||
}
|
||||
|
||||
/// Returns the value, if any, of evaluating `c`.
|
||||
fn eval_constant(&mut self, c: &Constant<'tcx>) -> Option<OpTy<'tcx>> {
|
||||
// FIXME we need to revisit this for #67176
|
||||
if c.has_param() {
|
||||
return None;
|
||||
}
|
||||
|
||||
// No span, we don't want errors to be shown.
|
||||
self.ecx.eval_mir_constant(&c.literal, None, None).ok()
|
||||
}
|
||||
|
||||
/// Returns the value, if any, of evaluating `place`.
|
||||
fn eval_place(&mut self, place: Place<'tcx>) -> Option<OpTy<'tcx>> {
|
||||
trace!("eval_place(place={:?})", place);
|
||||
self.ecx.eval_place_to_op(place, None).ok()
|
||||
}
|
||||
|
||||
/// Returns the value, if any, of evaluating `op`. Calls upon `eval_constant`
|
||||
/// or `eval_place`, depending on the variant of `Operand` used.
|
||||
fn eval_operand(&mut self, op: &Operand<'tcx>) -> Option<OpTy<'tcx>> {
|
||||
match *op {
|
||||
Operand::Constant(ref c) => self.eval_constant(c),
|
||||
Operand::Move(place) | Operand::Copy(place) => self.eval_place(place),
|
||||
}
|
||||
}
|
||||
|
||||
fn propagate_operand(&mut self, operand: &mut Operand<'tcx>) {
|
||||
match *operand {
|
||||
Operand::Copy(l) | Operand::Move(l) => {
|
||||
if let Some(value) = self.get_const(l) && self.should_const_prop(&value) {
|
||||
// FIXME(felix91gr): this code only handles `Scalar` cases.
|
||||
// For now, we're not handling `ScalarPair` cases because
|
||||
// doing so here would require a lot of code duplication.
|
||||
// We should hopefully generalize `Operand` handling into a fn,
|
||||
// and use it to do const-prop here and everywhere else
|
||||
// where it makes sense.
|
||||
if let interpret::Operand::Immediate(interpret::Immediate::Scalar(
|
||||
scalar,
|
||||
)) = *value
|
||||
{
|
||||
*operand = self.operand_from_scalar(scalar, value.layout.ty);
|
||||
}
|
||||
}
|
||||
}
|
||||
Operand::Constant(_) => (),
|
||||
if let Some(place) = operand.place() && let Some(op) = self.replace_with_const(place) {
|
||||
*operand = op;
|
||||
}
|
||||
}
|
||||
|
||||
@ -579,93 +536,45 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
|
||||
}))
|
||||
}
|
||||
|
||||
fn replace_with_const(&mut self, place: Place<'tcx>, rval: &mut Rvalue<'tcx>) {
|
||||
fn replace_with_const(&mut self, place: Place<'tcx>) -> Option<Operand<'tcx>> {
|
||||
// This will return None if the above `const_prop` invocation only "wrote" a
|
||||
// type whose creation requires no write. E.g. a generator whose initial state
|
||||
// consists solely of uninitialized memory (so it doesn't capture any locals).
|
||||
let Some(ref value) = self.get_const(place) else { return };
|
||||
if !self.should_const_prop(value) {
|
||||
return;
|
||||
let value = self.get_const(place)?;
|
||||
if !self.tcx.consider_optimizing(|| format!("ConstantPropagation - {value:?}")) {
|
||||
return None;
|
||||
}
|
||||
trace!("replacing {:?}={:?} with {:?}", place, rval, value);
|
||||
trace!("replacing {:?} with {:?}", place, value);
|
||||
|
||||
if let Rvalue::Use(Operand::Constant(c)) = rval {
|
||||
match c.literal {
|
||||
ConstantKind::Ty(c) if matches!(c.kind(), ConstKind::Unevaluated(..)) => {}
|
||||
_ => {
|
||||
trace!("skipping replace of Rvalue::Use({:?} because it is already a const", c);
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
trace!("attempting to replace {:?} with {:?}", rval, value);
|
||||
// FIXME: figure out what to do when read_immediate_raw fails
|
||||
let imm = self.ecx.read_immediate_raw(value).ok();
|
||||
let imm = self.ecx.read_immediate_raw(&value).ok()?;
|
||||
|
||||
if let Some(Right(imm)) = imm {
|
||||
match *imm {
|
||||
interpret::Immediate::Scalar(scalar) => {
|
||||
*rval = Rvalue::Use(self.operand_from_scalar(scalar, value.layout.ty));
|
||||
}
|
||||
Immediate::ScalarPair(..) => {
|
||||
// Found a value represented as a pair. For now only do const-prop if the type
|
||||
// of `rvalue` is also a tuple with two scalars.
|
||||
// FIXME: enable the general case stated above ^.
|
||||
let ty = value.layout.ty;
|
||||
// Only do it for tuples
|
||||
if let ty::Tuple(types) = ty.kind() {
|
||||
// Only do it if tuple is also a pair with two scalars
|
||||
if let [ty1, ty2] = types[..] {
|
||||
let ty_is_scalar = |ty| {
|
||||
self.ecx.layout_of(ty).ok().map(|layout| layout.abi.is_scalar())
|
||||
== Some(true)
|
||||
};
|
||||
let alloc = if ty_is_scalar(ty1) && ty_is_scalar(ty2) {
|
||||
let alloc = self
|
||||
.ecx
|
||||
.intern_with_temp_alloc(value.layout, |ecx, dest| {
|
||||
ecx.write_immediate(*imm, dest)
|
||||
})
|
||||
.unwrap();
|
||||
Some(alloc)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
if let Some(alloc) = alloc {
|
||||
// Assign entire constant in a single statement.
|
||||
// We can't use aggregates, as we run after the aggregate-lowering `MirPhase`.
|
||||
let const_val = ConstValue::ByRef { alloc, offset: Size::ZERO };
|
||||
let literal = ConstantKind::Val(const_val, ty);
|
||||
*rval = Rvalue::Use(Operand::Constant(Box::new(Constant {
|
||||
span: DUMMY_SP,
|
||||
user_ty: None,
|
||||
literal,
|
||||
})));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// Scalars or scalar pairs that contain undef values are assumed to not have
|
||||
// successfully evaluated and are thus not propagated.
|
||||
_ => {}
|
||||
let Right(imm) = imm else { return None };
|
||||
match *imm {
|
||||
Immediate::Scalar(scalar) if scalar.try_to_int().is_ok() => {
|
||||
Some(self.operand_from_scalar(scalar, value.layout.ty))
|
||||
}
|
||||
}
|
||||
}
|
||||
Immediate::ScalarPair(l, r) if l.try_to_int().is_ok() && r.try_to_int().is_ok() => {
|
||||
let alloc = self
|
||||
.ecx
|
||||
.intern_with_temp_alloc(value.layout, |ecx, dest| {
|
||||
ecx.write_immediate(*imm, dest)
|
||||
})
|
||||
.ok()?;
|
||||
|
||||
/// Returns `true` if and only if this `op` should be const-propagated into.
|
||||
fn should_const_prop(&mut self, op: &OpTy<'tcx>) -> bool {
|
||||
if !self.tcx.consider_optimizing(|| format!("ConstantPropagation - OpTy: {:?}", op)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
match **op {
|
||||
interpret::Operand::Immediate(Immediate::Scalar(s)) => s.try_to_int().is_ok(),
|
||||
interpret::Operand::Immediate(Immediate::ScalarPair(l, r)) => {
|
||||
l.try_to_int().is_ok() && r.try_to_int().is_ok()
|
||||
let literal = ConstantKind::Val(
|
||||
ConstValue::ByRef { alloc, offset: Size::ZERO },
|
||||
value.layout.ty,
|
||||
);
|
||||
Some(Operand::Constant(Box::new(Constant {
|
||||
span: DUMMY_SP,
|
||||
user_ty: None,
|
||||
literal,
|
||||
})))
|
||||
}
|
||||
_ => false,
|
||||
// Scalars or scalar pairs that contain undef values are assumed to not have
|
||||
// successfully evaluated and are thus not propagated.
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
@ -810,12 +719,7 @@ impl<'tcx> MutVisitor<'tcx> for ConstPropagator<'_, 'tcx> {
|
||||
|
||||
fn visit_operand(&mut self, operand: &mut Operand<'tcx>, location: Location) {
|
||||
self.super_operand(operand, location);
|
||||
|
||||
// Only const prop copies and moves on `mir_opt_level=3` as doing so
|
||||
// currently slightly increases compile time in some cases.
|
||||
if self.tcx.sess.mir_opt_level() >= 3 {
|
||||
self.propagate_operand(operand)
|
||||
}
|
||||
self.propagate_operand(operand)
|
||||
}
|
||||
|
||||
fn process_projection_elem(
|
||||
@ -825,8 +729,7 @@ impl<'tcx> MutVisitor<'tcx> for ConstPropagator<'_, 'tcx> {
|
||||
) -> Option<PlaceElem<'tcx>> {
|
||||
if let PlaceElem::Index(local) = elem
|
||||
&& let Some(value) = self.get_const(local.into())
|
||||
&& self.should_const_prop(&value)
|
||||
&& let interpret::Operand::Immediate(interpret::Immediate::Scalar(scalar)) = *value
|
||||
&& let interpret::Operand::Immediate(Immediate::Scalar(scalar)) = *value
|
||||
&& let Ok(offset) = scalar.to_target_usize(&self.tcx)
|
||||
&& let Some(min_length) = offset.checked_add(1)
|
||||
{
|
||||
@ -852,7 +755,14 @@ impl<'tcx> MutVisitor<'tcx> for ConstPropagator<'_, 'tcx> {
|
||||
ConstPropMode::NoPropagation => self.ensure_not_propagated(place.local),
|
||||
ConstPropMode::OnlyInsideOwnBlock | ConstPropMode::FullConstProp => {
|
||||
if let Some(()) = self.eval_rvalue_with_identities(rvalue, *place) {
|
||||
self.replace_with_const(*place, rvalue);
|
||||
// If this was already an evaluated constant, keep it.
|
||||
if let Rvalue::Use(Operand::Constant(c)) = rvalue
|
||||
&& let ConstantKind::Val(..) = c.literal
|
||||
{
|
||||
trace!("skipping replace of Rvalue::Use({:?} because it is already a const", c);
|
||||
} else if let Some(operand) = self.replace_with_const(*place) {
|
||||
*rvalue = Rvalue::Use(operand);
|
||||
}
|
||||
} else {
|
||||
// Const prop failed, so erase the destination, ensuring that whatever happens
|
||||
// from here on, does not know about the previous value.
|
||||
@ -919,45 +829,6 @@ impl<'tcx> MutVisitor<'tcx> for ConstPropagator<'_, 'tcx> {
|
||||
}
|
||||
}
|
||||
|
||||
fn visit_terminator(&mut self, terminator: &mut Terminator<'tcx>, location: Location) {
|
||||
self.super_terminator(terminator, location);
|
||||
|
||||
match &mut terminator.kind {
|
||||
TerminatorKind::Assert { expected, ref mut cond, .. } => {
|
||||
if let Some(ref value) = self.eval_operand(&cond)
|
||||
&& let Ok(value_const) = self.ecx.read_scalar(&value)
|
||||
&& self.should_const_prop(value)
|
||||
{
|
||||
trace!("assertion on {:?} should be {:?}", value, expected);
|
||||
*cond = self.operand_from_scalar(value_const, self.tcx.types.bool);
|
||||
}
|
||||
}
|
||||
TerminatorKind::SwitchInt { ref mut discr, .. } => {
|
||||
// FIXME: This is currently redundant with `visit_operand`, but sadly
|
||||
// always visiting operands currently causes a perf regression in LLVM codegen, so
|
||||
// `visit_operand` currently only runs for propagates places for `mir_opt_level=4`.
|
||||
self.propagate_operand(discr)
|
||||
}
|
||||
// None of these have Operands to const-propagate.
|
||||
TerminatorKind::Goto { .. }
|
||||
| TerminatorKind::Resume
|
||||
| TerminatorKind::Terminate
|
||||
| TerminatorKind::Return
|
||||
| TerminatorKind::Unreachable
|
||||
| TerminatorKind::Drop { .. }
|
||||
| TerminatorKind::Yield { .. }
|
||||
| TerminatorKind::GeneratorDrop
|
||||
| TerminatorKind::FalseEdge { .. }
|
||||
| TerminatorKind::FalseUnwind { .. }
|
||||
| TerminatorKind::InlineAsm { .. } => {}
|
||||
// Every argument in our function calls have already been propagated in `visit_operand`.
|
||||
//
|
||||
// NOTE: because LLVM codegen gives slight performance regressions with it, so this is
|
||||
// gated on `mir_opt_level=3`.
|
||||
TerminatorKind::Call { .. } => {}
|
||||
}
|
||||
}
|
||||
|
||||
fn visit_basic_block_data(&mut self, block: BasicBlock, data: &mut BasicBlockData<'tcx>) {
|
||||
self.super_basic_block_data(block, data);
|
||||
|
||||
|
@ -13,9 +13,12 @@
|
||||
//!
|
||||
|
||||
use rustc_index::bit_set::BitSet;
|
||||
use rustc_middle::mir::visit::Visitor;
|
||||
use rustc_middle::mir::*;
|
||||
use rustc_middle::ty::TyCtxt;
|
||||
use rustc_mir_dataflow::impls::{borrowed_locals, MaybeTransitiveLiveLocals};
|
||||
use rustc_mir_dataflow::impls::{
|
||||
borrowed_locals, LivenessTransferFunction, MaybeTransitiveLiveLocals,
|
||||
};
|
||||
use rustc_mir_dataflow::Analysis;
|
||||
|
||||
/// Performs the optimization on the body
|
||||
@ -28,8 +31,33 @@ pub fn eliminate<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>, borrowed: &BitS
|
||||
.iterate_to_fixpoint()
|
||||
.into_results_cursor(body);
|
||||
|
||||
// For blocks with a call terminator, if an argument copy can be turned into a move,
// record it as (block, argument index).
let mut call_operands_to_move = Vec::new();
let mut patch = Vec::new();

for (bb, bb_data) in traversal::preorder(body) {
if let TerminatorKind::Call { ref args, .. } = bb_data.terminator().kind {
let loc = Location { block: bb, statement_index: bb_data.statements.len() };

// Position ourselves between the evaluation of `args` and the write to `destination`.
live.seek_to_block_end(bb);
let mut state = live.get().clone();

for (index, arg) in args.iter().enumerate().rev() {
if let Operand::Copy(place) = *arg
&& !place.is_indirect()
&& !borrowed.contains(place.local)
&& !state.contains(place.local)
{
call_operands_to_move.push((bb, index));
}

// Account that `arg` is read from, so we don't promote another argument to a move.
LivenessTransferFunction(&mut state).visit_operand(arg, loc);
}
}
|
||||
|
||||
for (statement_index, statement) in bb_data.statements.iter().enumerate().rev() {
|
||||
let loc = Location { block: bb, statement_index };
|
||||
if let StatementKind::Assign(assign) = &statement.kind {
|
||||
@ -64,7 +92,7 @@ pub fn eliminate<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>, borrowed: &BitS
|
||||
}
|
||||
}
|
||||
|
||||
if patch.is_empty() {
if patch.is_empty() && call_operands_to_move.is_empty() {
return;
}

@ -72,6 +100,14 @@ pub fn eliminate<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>, borrowed: &BitS
for Location { block, statement_index } in patch {
bbs[block].statements[statement_index].make_nop();
}
for (block, argument_index) in call_operands_to_move {
let TerminatorKind::Call { ref mut args, .. } = bbs[block].terminator_mut().kind else {
bug!()
};
let arg = &mut args[argument_index];
let Operand::Copy(place) = *arg else { bug!() };
*arg = Operand::Move(place);
}

crate::simplify::simplify_locals(body, tcx)
}
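To make the new argument-promotion step concrete, here is a hedged sketch (not part of the diff; the MIR lines are schematic and `consume` is an invented name) of the situation it targets:

    // Suppose a call argument reaches this pass as a copy operand (schematic MIR,
    // not actual rustc output):
    //     _0 = consume(copy _1) -> [return: bb1, unwind continue];
    // If `_1` is not indirect, not borrowed anywhere in the body, and not live
    // after the call terminator, the loop above records (block, argument index)
    // and the patching loop rewrites the operand to:
    //     _0 = consume(move _1) -> [return: bb1, unwind continue];
    // so codegen no longer has to keep the original copy of `_1` alive.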
|
||||
|
@ -440,6 +440,10 @@ impl<'tcx> Inliner<'tcx> {
|
||||
validation: Ok(()),
|
||||
};
|
||||
|
||||
for var_debug_info in callee_body.var_debug_info.iter() {
|
||||
checker.visit_var_debug_info(var_debug_info);
|
||||
}
|
||||
|
||||
// Traverse the MIR manually so we can account for the effects of inlining on the CFG.
|
||||
let mut work_list = vec![START_BLOCK];
|
||||
let mut visited = BitSet::new_empty(callee_body.basic_blocks.len());
|
||||
@ -847,7 +851,16 @@ impl<'tcx> Visitor<'tcx> for CostChecker<'_, 'tcx> {
|
||||
if let ProjectionElem::Field(f, ty) = elem {
|
||||
let parent_ty = place_ref.ty(&self.callee_body.local_decls, self.tcx);
|
||||
let check_equal = |this: &mut Self, f_ty| {
|
||||
if !util::is_equal_up_to_subtyping(this.tcx, this.param_env, ty, f_ty) {
|
||||
// Fast path if there is nothing to substitute.
|
||||
if ty == f_ty {
|
||||
return;
|
||||
}
|
||||
let ty = this.instance.subst_mir(this.tcx, ty::EarlyBinder::bind(&ty));
|
||||
let f_ty = this.instance.subst_mir(this.tcx, ty::EarlyBinder::bind(&f_ty));
|
||||
if ty == f_ty {
|
||||
return;
|
||||
}
|
||||
if !util::is_subtype(this.tcx, this.param_env, ty, f_ty) {
|
||||
trace!(?ty, ?f_ty);
|
||||
this.validation = Err("failed to normalize projection type");
|
||||
return;
|
||||
|
@ -176,7 +176,8 @@ impl QueryJobId {
while let Some(id) = current_id {
let info = query_map.get(&id).unwrap();
// FIXME: This string comparison should probably not be done.
if format!("{:?}", info.query.dep_kind) == "layout_of" {
let query_name = format!("{:?}", info.query.dep_kind);
if query_name == "layout_of" || query_name == "naive_layout_of" {
depth += 1;
last_layout = Some((info.clone(), depth));
}
|
||||
|
@ -26,6 +26,8 @@ session_feature_gate_error = {$explain}

session_file_is_not_writeable = output file {$file} is not writeable -- check its permissions

session_file_write_fail = failed to write `{$path}` due to error `{$err}`

session_hexadecimal_float_literal_not_supported = hexadecimal float literal is not supported

session_incompatible_linker_flavor = linker flavor `{$flavor}` is incompatible with the current target
|
||||
|
@ -3,6 +3,7 @@
|
||||
|
||||
pub use crate::options::*;
|
||||
|
||||
use crate::errors::FileWriteFail;
|
||||
use crate::search_paths::SearchPath;
|
||||
use crate::utils::{CanonicalizedPath, NativeLib, NativeLibKind};
|
||||
use crate::{lint, HashStableContext};
|
||||
@ -31,6 +32,7 @@ use std::collections::btree_map::{
|
||||
use std::collections::{BTreeMap, BTreeSet};
|
||||
use std::ffi::OsStr;
|
||||
use std::fmt;
|
||||
use std::fs;
|
||||
use std::hash::Hash;
|
||||
use std::iter;
|
||||
use std::path::{Path, PathBuf};
|
||||
@ -710,8 +712,14 @@ impl ExternEntry {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, PartialEq, Debug)]
pub struct PrintRequest {
pub kind: PrintKind,
pub out: OutFileName,
}

#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum PrintRequest {
pub enum PrintKind {
FileNames,
Sysroot,
TargetLibdir,
|
||||
@ -855,6 +863,17 @@ impl OutFileName {
|
||||
OutFileName::Stdout => outputs.temp_path(flavor, codegen_unit_name),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn overwrite(&self, content: &str, sess: &Session) {
match self {
OutFileName::Stdout => print!("{content}"),
OutFileName::Real(path) => {
if let Err(e) = fs::write(path, content) {
sess.emit_fatal(FileWriteFail { path, err: e.to_string() });
}
}
}
}
}
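A minimal sketch (not part of this diff; `emit_print` and `rendered` are invented) of how a print handler could route its output through the new `out` field:

    fn emit_print(req: &PrintRequest, sess: &Session, rendered: String) {
        // `--print kind=path` writes (and overwrites) the file; `--print kind`
        // and `--print kind=-` both resolve to `OutFileName::Stdout`.
        req.out.overwrite(&rendered, sess);
    }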
|
||||
|
||||
#[derive(Clone, Hash, Debug, HashStable_Generic)]
|
||||
@ -2005,13 +2024,7 @@ fn parse_output_types(
|
||||
if !unstable_opts.parse_only {
for list in matches.opt_strs("emit") {
for output_type in list.split(',') {
let (shorthand, path) = match output_type.split_once('=') {
None => (output_type, None),
Some((shorthand, "-")) => (shorthand, Some(OutFileName::Stdout)),
Some((shorthand, path)) => {
(shorthand, Some(OutFileName::Real(PathBuf::from(path))))
}
};
let (shorthand, path) = split_out_file_name(output_type);
let output_type = OutputType::from_shorthand(shorthand).unwrap_or_else(|| {
handler.early_error(format!(
"unknown emission type: `{shorthand}` - expected one of: {display}",
@ -2028,6 +2041,14 @@ fn parse_output_types(
OutputTypes(output_types)
}

fn split_out_file_name(arg: &str) -> (&str, Option<OutFileName>) {
match arg.split_once('=') {
None => (arg, None),
Some((kind, "-")) => (kind, Some(OutFileName::Stdout)),
Some((kind, path)) => (kind, Some(OutFileName::Real(PathBuf::from(path)))),
}
}
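A hypothetical illustration (not from the rustc test suite) of the splitting rules implemented by the helper above:

    // `--emit`/`--print` values may carry an `=path` suffix; `-` selects stdout.
    let (kind, out) = split_out_file_name("llvm-ir=-");
    assert_eq!(kind, "llvm-ir");
    assert!(matches!(out, Some(OutFileName::Stdout)));

    let (kind, out) = split_out_file_name("metadata=out/libfoo.rmeta");
    assert_eq!(kind, "metadata");
    assert!(matches!(out, Some(OutFileName::Real(_))));

    // Without an `=`, no explicit destination is requested.
    assert!(matches!(split_out_file_name("asm"), ("asm", None)));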
|
||||
|
||||
fn should_override_cgus_and_disable_thinlto(
|
||||
handler: &EarlyErrorHandler,
|
||||
output_types: &OutputTypes,
|
||||
@ -2091,41 +2112,49 @@ fn collect_print_requests(
|
||||
) -> Vec<PrintRequest> {
|
||||
let mut prints = Vec::<PrintRequest>::new();
|
||||
if cg.target_cpu.as_ref().is_some_and(|s| s == "help") {
|
||||
prints.push(PrintRequest::TargetCPUs);
|
||||
prints.push(PrintRequest { kind: PrintKind::TargetCPUs, out: OutFileName::Stdout });
|
||||
cg.target_cpu = None;
|
||||
};
|
||||
if cg.target_feature == "help" {
|
||||
prints.push(PrintRequest::TargetFeatures);
|
||||
prints.push(PrintRequest { kind: PrintKind::TargetFeatures, out: OutFileName::Stdout });
|
||||
cg.target_feature = String::new();
|
||||
}
|
||||
|
||||
const PRINT_REQUESTS: &[(&str, PrintRequest)] = &[
|
||||
("crate-name", PrintRequest::CrateName),
|
||||
("file-names", PrintRequest::FileNames),
|
||||
("sysroot", PrintRequest::Sysroot),
|
||||
("target-libdir", PrintRequest::TargetLibdir),
|
||||
("cfg", PrintRequest::Cfg),
|
||||
("calling-conventions", PrintRequest::CallingConventions),
|
||||
("target-list", PrintRequest::TargetList),
|
||||
("target-cpus", PrintRequest::TargetCPUs),
|
||||
("target-features", PrintRequest::TargetFeatures),
|
||||
("relocation-models", PrintRequest::RelocationModels),
|
||||
("code-models", PrintRequest::CodeModels),
|
||||
("tls-models", PrintRequest::TlsModels),
|
||||
("native-static-libs", PrintRequest::NativeStaticLibs),
|
||||
("stack-protector-strategies", PrintRequest::StackProtectorStrategies),
|
||||
("target-spec-json", PrintRequest::TargetSpec),
|
||||
("all-target-specs-json", PrintRequest::AllTargetSpecs),
|
||||
("link-args", PrintRequest::LinkArgs),
|
||||
("split-debuginfo", PrintRequest::SplitDebuginfo),
|
||||
("deployment-target", PrintRequest::DeploymentTarget),
|
||||
const PRINT_KINDS: &[(&str, PrintKind)] = &[
|
||||
("crate-name", PrintKind::CrateName),
|
||||
("file-names", PrintKind::FileNames),
|
||||
("sysroot", PrintKind::Sysroot),
|
||||
("target-libdir", PrintKind::TargetLibdir),
|
||||
("cfg", PrintKind::Cfg),
|
||||
("calling-conventions", PrintKind::CallingConventions),
|
||||
("target-list", PrintKind::TargetList),
|
||||
("target-cpus", PrintKind::TargetCPUs),
|
||||
("target-features", PrintKind::TargetFeatures),
|
||||
("relocation-models", PrintKind::RelocationModels),
|
||||
("code-models", PrintKind::CodeModels),
|
||||
("tls-models", PrintKind::TlsModels),
|
||||
("native-static-libs", PrintKind::NativeStaticLibs),
|
||||
("stack-protector-strategies", PrintKind::StackProtectorStrategies),
|
||||
("target-spec-json", PrintKind::TargetSpec),
|
||||
("all-target-specs-json", PrintKind::AllTargetSpecs),
|
||||
("link-args", PrintKind::LinkArgs),
|
||||
("split-debuginfo", PrintKind::SplitDebuginfo),
|
||||
("deployment-target", PrintKind::DeploymentTarget),
|
||||
];
|
||||
|
||||
// We disallow reusing the same path in multiple prints, such as `--print
|
||||
// cfg=output.txt --print link-args=output.txt`, because outputs are printed
|
||||
// by disparate pieces of the compiler, and keeping track of which files
|
||||
// need to be overwritten vs appended to is annoying.
|
||||
let mut printed_paths = FxHashSet::default();
|
||||
|
||||
prints.extend(matches.opt_strs("print").into_iter().map(|req| {
|
||||
match PRINT_REQUESTS.iter().find(|&&(name, _)| name == req) {
|
||||
Some((_, PrintRequest::TargetSpec)) => {
|
||||
let (req, out) = split_out_file_name(&req);
|
||||
|
||||
let kind = match PRINT_KINDS.iter().find(|&&(name, _)| name == req) {
|
||||
Some((_, PrintKind::TargetSpec)) => {
|
||||
if unstable_opts.unstable_options {
|
||||
PrintRequest::TargetSpec
|
||||
PrintKind::TargetSpec
|
||||
} else {
|
||||
handler.early_error(
|
||||
"the `-Z unstable-options` flag must also be passed to \
|
||||
@ -2133,9 +2162,9 @@ fn collect_print_requests(
|
||||
);
|
||||
}
|
||||
}
|
||||
Some((_, PrintRequest::AllTargetSpecs)) => {
|
||||
Some((_, PrintKind::AllTargetSpecs)) => {
|
||||
if unstable_opts.unstable_options {
|
||||
PrintRequest::AllTargetSpecs
|
||||
PrintKind::AllTargetSpecs
|
||||
} else {
|
||||
handler.early_error(
|
||||
"the `-Z unstable-options` flag must also be passed to \
|
||||
@ -2143,16 +2172,28 @@ fn collect_print_requests(
|
||||
);
|
||||
}
|
||||
}
|
||||
Some(&(_, print_request)) => print_request,
|
||||
Some(&(_, print_kind)) => print_kind,
|
||||
None => {
|
||||
let prints =
|
||||
PRINT_REQUESTS.iter().map(|(name, _)| format!("`{name}`")).collect::<Vec<_>>();
|
||||
PRINT_KINDS.iter().map(|(name, _)| format!("`{name}`")).collect::<Vec<_>>();
|
||||
let prints = prints.join(", ");
|
||||
handler.early_error(format!(
|
||||
"unknown print request `{req}`. Valid print requests are: {prints}"
|
||||
));
|
||||
}
|
||||
};
|
||||
|
||||
let out = out.unwrap_or(OutFileName::Stdout);
|
||||
if let OutFileName::Real(path) = &out {
|
||||
if !printed_paths.insert(path.clone()) {
|
||||
handler.early_error(format!(
|
||||
"cannot print multiple outputs to the same path: {}",
|
||||
path.display(),
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
PrintRequest { kind, out }
|
||||
}));
|
||||
|
||||
prints
|
||||
@ -3076,6 +3117,7 @@ pub(crate) mod dep_tracking {
|
||||
use rustc_feature::UnstableFeatures;
|
||||
use rustc_span::edition::Edition;
|
||||
use rustc_span::RealFileName;
|
||||
use rustc_target::abi::ReferenceNichePolicy;
|
||||
use rustc_target::spec::{CodeModel, MergeFunctions, PanicStrategy, RelocModel};
|
||||
use rustc_target::spec::{
|
||||
RelroLevel, SanitizerSet, SplitDebuginfo, StackProtector, TargetTriple, TlsModel,
|
||||
@ -3171,6 +3213,7 @@ pub(crate) mod dep_tracking {
|
||||
OomStrategy,
|
||||
LanguageIdentifier,
|
||||
TraitSolver,
|
||||
ReferenceNichePolicy,
|
||||
);
|
||||
|
||||
impl<T1, T2> DepTrackingHash for (T1, T2)
|
||||
|
@ -163,6 +163,13 @@ pub struct FileIsNotWriteable<'a> {
pub file: &'a std::path::Path,
}

#[derive(Diagnostic)]
#[diag(session_file_write_fail)]
pub(crate) struct FileWriteFail<'a> {
pub path: &'a std::path::Path,
pub err: String,
}

#[derive(Diagnostic)]
#[diag(session_crate_name_does_not_match)]
pub struct CrateNameDoesNotMatch {
|
||||
|
@ -6,6 +6,7 @@ use crate::{lint, EarlyErrorHandler};
|
||||
use rustc_data_structures::profiling::TimePassesFormat;
|
||||
use rustc_errors::ColorConfig;
|
||||
use rustc_errors::{LanguageIdentifier, TerminalUrl};
|
||||
use rustc_target::abi::ReferenceNichePolicy;
|
||||
use rustc_target::spec::{CodeModel, LinkerFlavorCli, MergeFunctions, PanicStrategy, SanitizerSet};
|
||||
use rustc_target::spec::{
|
||||
RelocModel, RelroLevel, SplitDebuginfo, StackProtector, TargetTriple, TlsModel,
|
||||
@ -421,6 +422,8 @@ mod desc {
pub const parse_proc_macro_execution_strategy: &str =
"one of supported execution strategies (`same-thread`, or `cross-thread`)";
pub const parse_dump_solver_proof_tree: &str = "one of: `always`, `on-request`, `on-error`";
pub const parse_opt_reference_niches: &str =
"`null`, or a `,` separated combination of `size` or `align`";
}
|
||||
|
||||
mod parse {
|
||||
@ -1253,6 +1256,31 @@ mod parse {
|
||||
};
|
||||
true
|
||||
}
|
||||
|
||||
pub(crate) fn parse_opt_reference_niches(
slot: &mut Option<ReferenceNichePolicy>,
v: Option<&str>,
) -> bool {
let Some(s) = v else {
return false;
};

let slot = slot.get_or_insert_default();

if s == "null" {
return true;
}

for opt in s.split(",") {
match opt {
"size" => slot.size = true,
"align" => slot.align = true,
_ => return false,
}
}

true
}
}
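A hypothetical illustration (not a rustc unit test) of how `-Zreference-niches=...` values are interpreted by `parse_opt_reference_niches` above:

    let mut slot = None;
    assert!(parse::parse_opt_reference_niches(&mut slot, Some("null")));
    // `null` keeps the default policy: only the null niche is exposed.
    assert_eq!(slot, Some(ReferenceNichePolicy { size: false, align: false }));

    let mut slot = None;
    assert!(parse::parse_opt_reference_niches(&mut slot, Some("size,align")));
    assert_eq!(slot, Some(ReferenceNichePolicy { size: true, align: true }));

    // Unknown tokens reject the flag value entirely.
    assert!(!parse::parse_opt_reference_niches(&mut None, Some("bogus")));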
|
||||
|
||||
options! {
|
||||
@ -1668,6 +1696,9 @@ options! {
|
||||
"use a more precise version of drop elaboration for matches on enums (default: yes). \
|
||||
This results in better codegen, but has caused miscompilations on some tier 2 platforms. \
|
||||
See #77382 and #74551."),
|
||||
#[rustc_lint_opt_deny_field_access("use `Session::print_codegen_stats` instead of this field")]
|
||||
print_codegen_stats: bool = (false, parse_bool, [UNTRACKED],
|
||||
"print codegen statistics (default: no)"),
|
||||
print_fuel: Option<String> = (None, parse_opt_string, [TRACKED],
|
||||
"make rustc print the total optimization fuel used by a crate"),
|
||||
print_llvm_passes: bool = (false, parse_bool, [UNTRACKED],
|
||||
@ -1698,6 +1729,8 @@ options! {
|
||||
"enable queries of the dependency graph for regression testing (default: no)"),
|
||||
randomize_layout: bool = (false, parse_bool, [TRACKED],
|
||||
"randomize the layout of types (default: no)"),
|
||||
reference_niches: Option<ReferenceNichePolicy> = (None, parse_opt_reference_niches, [TRACKED],
|
||||
"override the set of discriminant niches that may be exposed by references"),
|
||||
relax_elf_relocations: Option<bool> = (None, parse_opt_bool, [TRACKED],
|
||||
"whether ELF relocations can be relaxed"),
|
||||
relro_level: Option<RelroLevel> = (None, parse_relro_level, [TRACKED],
|
||||
|
@ -1057,6 +1057,10 @@ impl Session {
|
||||
self.opts.unstable_opts.verbose
|
||||
}
|
||||
|
||||
pub fn print_llvm_stats(&self) -> bool {
|
||||
self.opts.unstable_opts.print_codegen_stats
|
||||
}
|
||||
|
||||
pub fn verify_llvm_ir(&self) -> bool {
|
||||
self.opts.unstable_opts.verify_llvm_ir || option_env!("RUSTC_VERIFY_LLVM_IR").is_some()
|
||||
}
|
||||
@ -1421,7 +1425,7 @@ pub fn build_session(
|
||||
let loader = file_loader.unwrap_or_else(|| Box::new(RealFileLoader));
|
||||
let hash_kind = sopts.unstable_opts.src_hash_algorithm.unwrap_or_else(|| {
|
||||
if target_cfg.is_like_msvc {
|
||||
SourceFileHashAlgorithm::Sha1
|
||||
SourceFileHashAlgorithm::Sha256
|
||||
} else {
|
||||
SourceFileHashAlgorithm::Md5
|
||||
}
|
||||
|
@ -31,6 +31,34 @@ pub fn adt_def(did: DefId) -> stable_mir::ty::AdtDef {
|
||||
with_tables(|t| t.adt_def(did))
|
||||
}
|
||||
|
||||
pub fn foreign_def(did: DefId) -> stable_mir::ty::ForeignDef {
|
||||
with_tables(|t| t.foreign_def(did))
|
||||
}
|
||||
|
||||
pub fn fn_def(did: DefId) -> stable_mir::ty::FnDef {
|
||||
with_tables(|t| t.fn_def(did))
|
||||
}
|
||||
|
||||
pub fn closure_def(did: DefId) -> stable_mir::ty::ClosureDef {
|
||||
with_tables(|t| t.closure_def(did))
|
||||
}
|
||||
|
||||
pub fn generator_def(did: DefId) -> stable_mir::ty::GeneratorDef {
|
||||
with_tables(|t| t.generator_def(did))
|
||||
}
|
||||
|
||||
pub fn alias_def(did: DefId) -> stable_mir::ty::AliasDef {
|
||||
with_tables(|t| t.alias_def(did))
|
||||
}
|
||||
|
||||
pub fn param_def(did: DefId) -> stable_mir::ty::ParamDef {
|
||||
with_tables(|t| t.param_def(did))
|
||||
}
|
||||
|
||||
pub fn br_named_def(did: DefId) -> stable_mir::ty::BrNamedDef {
|
||||
with_tables(|t| t.br_named_def(did))
|
||||
}
|
||||
|
||||
impl<'tcx> Tables<'tcx> {
|
||||
pub fn item_def_id(&self, item: &stable_mir::CrateItem) -> DefId {
|
||||
self.def_ids[item.0]
|
||||
@ -44,6 +72,34 @@ impl<'tcx> Tables<'tcx> {
|
||||
stable_mir::ty::AdtDef(self.create_def_id(did))
|
||||
}
|
||||
|
||||
pub fn foreign_def(&mut self, did: DefId) -> stable_mir::ty::ForeignDef {
|
||||
stable_mir::ty::ForeignDef(self.create_def_id(did))
|
||||
}
|
||||
|
||||
pub fn fn_def(&mut self, did: DefId) -> stable_mir::ty::FnDef {
|
||||
stable_mir::ty::FnDef(self.create_def_id(did))
|
||||
}
|
||||
|
||||
pub fn closure_def(&mut self, did: DefId) -> stable_mir::ty::ClosureDef {
|
||||
stable_mir::ty::ClosureDef(self.create_def_id(did))
|
||||
}
|
||||
|
||||
pub fn generator_def(&mut self, did: DefId) -> stable_mir::ty::GeneratorDef {
|
||||
stable_mir::ty::GeneratorDef(self.create_def_id(did))
|
||||
}
|
||||
|
||||
pub fn alias_def(&mut self, did: DefId) -> stable_mir::ty::AliasDef {
|
||||
stable_mir::ty::AliasDef(self.create_def_id(did))
|
||||
}
|
||||
|
||||
pub fn param_def(&mut self, did: DefId) -> stable_mir::ty::ParamDef {
|
||||
stable_mir::ty::ParamDef(self.create_def_id(did))
|
||||
}
|
||||
|
||||
pub fn br_named_def(&mut self, did: DefId) -> stable_mir::ty::BrNamedDef {
|
||||
stable_mir::ty::BrNamedDef(self.create_def_id(did))
|
||||
}
|
||||
|
||||
fn create_def_id(&mut self, did: DefId) -> stable_mir::DefId {
|
||||
// FIXME: this becomes inefficient when we have too many ids
|
||||
for (i, &d) in self.def_ids.iter().enumerate() {
|
||||
|
@ -8,8 +8,9 @@
|
||||
//! For now, we are developing everything inside `rustc`, thus, we keep this module private.
|
||||
|
||||
use crate::rustc_internal::{self, opaque};
|
||||
use crate::stable_mir::ty::{AdtSubsts, FloatTy, GenericArgKind, IntTy, RigidTy, TyKind, UintTy};
|
||||
use crate::stable_mir::ty::{FloatTy, IntTy, Movability, RigidTy, TyKind, UintTy};
|
||||
use crate::stable_mir::{self, Context};
|
||||
use rustc_hir as hir;
|
||||
use rustc_middle::mir;
|
||||
use rustc_middle::ty::{self, Ty, TyCtxt};
|
||||
use rustc_span::def_id::{CrateNum, DefId, LOCAL_CRATE};
|
||||
@ -46,8 +47,12 @@ impl<'tcx> Context for Tables<'tcx> {
|
||||
.basic_blocks
|
||||
.iter()
|
||||
.map(|block| stable_mir::mir::BasicBlock {
|
||||
terminator: block.terminator().stable(),
|
||||
statements: block.statements.iter().map(mir::Statement::stable).collect(),
|
||||
terminator: block.terminator().stable(self),
|
||||
statements: block
|
||||
.statements
|
||||
.iter()
|
||||
.map(|statement| statement.stable(self))
|
||||
.collect(),
|
||||
})
|
||||
.collect(),
|
||||
locals: mir.local_decls.iter().map(|decl| self.intern_ty(decl.ty)).collect(),
|
||||
@ -59,7 +64,8 @@ impl<'tcx> Context for Tables<'tcx> {
|
||||
}
|
||||
|
||||
fn ty_kind(&mut self, ty: crate::stable_mir::ty::Ty) -> TyKind {
|
||||
self.rustc_ty_to_ty(self.types[ty.0])
|
||||
let ty = self.types[ty.0];
|
||||
ty.stable(self)
|
||||
}
|
||||
}
|
||||
|
||||
@ -70,83 +76,6 @@ pub struct Tables<'tcx> {
|
||||
}
|
||||
|
||||
impl<'tcx> Tables<'tcx> {
|
||||
fn rustc_ty_to_ty(&mut self, ty: Ty<'tcx>) -> TyKind {
|
||||
match ty.kind() {
|
||||
ty::Bool => TyKind::RigidTy(RigidTy::Bool),
|
||||
ty::Char => TyKind::RigidTy(RigidTy::Char),
|
||||
ty::Int(int_ty) => match int_ty {
|
||||
ty::IntTy::Isize => TyKind::RigidTy(RigidTy::Int(IntTy::Isize)),
|
||||
ty::IntTy::I8 => TyKind::RigidTy(RigidTy::Int(IntTy::I8)),
|
||||
ty::IntTy::I16 => TyKind::RigidTy(RigidTy::Int(IntTy::I16)),
|
||||
ty::IntTy::I32 => TyKind::RigidTy(RigidTy::Int(IntTy::I32)),
|
||||
ty::IntTy::I64 => TyKind::RigidTy(RigidTy::Int(IntTy::I64)),
|
||||
ty::IntTy::I128 => TyKind::RigidTy(RigidTy::Int(IntTy::I128)),
|
||||
},
|
||||
ty::Uint(uint_ty) => match uint_ty {
|
||||
ty::UintTy::Usize => TyKind::RigidTy(RigidTy::Uint(UintTy::Usize)),
|
||||
ty::UintTy::U8 => TyKind::RigidTy(RigidTy::Uint(UintTy::U8)),
|
||||
ty::UintTy::U16 => TyKind::RigidTy(RigidTy::Uint(UintTy::U16)),
|
||||
ty::UintTy::U32 => TyKind::RigidTy(RigidTy::Uint(UintTy::U32)),
|
||||
ty::UintTy::U64 => TyKind::RigidTy(RigidTy::Uint(UintTy::U64)),
|
||||
ty::UintTy::U128 => TyKind::RigidTy(RigidTy::Uint(UintTy::U128)),
|
||||
},
|
||||
ty::Float(float_ty) => match float_ty {
|
||||
ty::FloatTy::F32 => TyKind::RigidTy(RigidTy::Float(FloatTy::F32)),
|
||||
ty::FloatTy::F64 => TyKind::RigidTy(RigidTy::Float(FloatTy::F64)),
|
||||
},
|
||||
ty::Adt(adt_def, substs) => TyKind::RigidTy(RigidTy::Adt(
|
||||
rustc_internal::adt_def(adt_def.did()),
|
||||
AdtSubsts(
|
||||
substs
|
||||
.iter()
|
||||
.map(|arg| match arg.unpack() {
|
||||
ty::GenericArgKind::Lifetime(region) => {
|
||||
GenericArgKind::Lifetime(opaque(®ion))
|
||||
}
|
||||
ty::GenericArgKind::Type(ty) => {
|
||||
GenericArgKind::Type(self.intern_ty(ty))
|
||||
}
|
||||
ty::GenericArgKind::Const(const_) => {
|
||||
GenericArgKind::Const(opaque(&const_))
|
||||
}
|
||||
})
|
||||
.collect(),
|
||||
),
|
||||
)),
|
||||
ty::Foreign(_) => todo!(),
|
||||
ty::Str => TyKind::RigidTy(RigidTy::Str),
|
||||
ty::Array(ty, constant) => {
|
||||
TyKind::RigidTy(RigidTy::Array(self.intern_ty(*ty), opaque(constant)))
|
||||
}
|
||||
ty::Slice(ty) => TyKind::RigidTy(RigidTy::Slice(self.intern_ty(*ty))),
|
||||
ty::RawPtr(ty::TypeAndMut { ty, mutbl }) => {
|
||||
TyKind::RigidTy(RigidTy::RawPtr(self.intern_ty(*ty), mutbl.stable()))
|
||||
}
|
||||
ty::Ref(region, ty, mutbl) => {
|
||||
TyKind::RigidTy(RigidTy::Ref(opaque(region), self.intern_ty(*ty), mutbl.stable()))
|
||||
}
|
||||
ty::FnDef(_, _) => todo!(),
|
||||
ty::FnPtr(_) => todo!(),
|
||||
ty::Dynamic(_, _, _) => todo!(),
|
||||
ty::Closure(_, _) => todo!(),
|
||||
ty::Generator(_, _, _) => todo!(),
|
||||
ty::Never => todo!(),
|
||||
ty::Tuple(fields) => TyKind::RigidTy(RigidTy::Tuple(
|
||||
fields.iter().map(|ty| self.intern_ty(ty)).collect(),
|
||||
)),
|
||||
ty::Alias(_, _) => todo!(),
|
||||
ty::Param(_) => todo!(),
|
||||
ty::Bound(_, _) => todo!(),
|
||||
ty::Placeholder(..)
|
||||
| ty::GeneratorWitness(_)
|
||||
| ty::GeneratorWitnessMIR(_, _)
|
||||
| ty::Infer(_)
|
||||
| ty::Error(_) => {
|
||||
unreachable!();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn intern_ty(&mut self, ty: Ty<'tcx>) -> stable_mir::ty::Ty {
|
||||
if let Some(id) = self.types.iter().position(|&t| t == ty) {
|
||||
return stable_mir::ty::Ty(id);
|
||||
@ -166,20 +95,20 @@ fn smir_crate(tcx: TyCtxt<'_>, crate_num: CrateNum) -> stable_mir::Crate {
|
||||
}
|
||||
|
||||
/// Trait used to convert between an internal MIR type to a Stable MIR type.
pub(crate) trait Stable {
pub(crate) trait Stable<'tcx> {
/// The stable representation of the type implementing Stable.
type T;
/// Converts an object to the equivalent Stable MIR representation.
fn stable(&self) -> Self::T;
fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T;
}
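A hypothetical impl (not part of this diff) showing the new trait shape: conversions now receive `&mut Tables<'tcx>` so they can intern types and def ids, and implementations that need neither simply ignore it:

    impl<'tcx> Stable<'tcx> for mir::BasicBlock {
        type T = usize;
        fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
            // Nothing to intern; just expose the block index.
            self.as_usize()
        }
    }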
|
||||
|
||||
impl<'tcx> Stable for mir::Statement<'tcx> {
|
||||
impl<'tcx> Stable<'tcx> for mir::Statement<'tcx> {
|
||||
type T = stable_mir::mir::Statement;
|
||||
fn stable(&self) -> Self::T {
|
||||
fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
|
||||
use rustc_middle::mir::StatementKind::*;
|
||||
match &self.kind {
|
||||
Assign(assign) => {
|
||||
stable_mir::mir::Statement::Assign(assign.0.stable(), assign.1.stable())
|
||||
stable_mir::mir::Statement::Assign(assign.0.stable(tables), assign.1.stable(tables))
|
||||
}
|
||||
FakeRead(_) => todo!(),
|
||||
SetDiscriminant { .. } => todo!(),
|
||||
@ -197,45 +126,51 @@ impl<'tcx> Stable for mir::Statement<'tcx> {
|
||||
}
|
||||
}
|
||||
|
||||
impl<'tcx> Stable for mir::Rvalue<'tcx> {
|
||||
impl<'tcx> Stable<'tcx> for mir::Rvalue<'tcx> {
|
||||
type T = stable_mir::mir::Rvalue;
|
||||
fn stable(&self) -> Self::T {
|
||||
fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
|
||||
use mir::Rvalue::*;
|
||||
match self {
|
||||
Use(op) => stable_mir::mir::Rvalue::Use(op.stable()),
|
||||
Use(op) => stable_mir::mir::Rvalue::Use(op.stable(tables)),
|
||||
Repeat(_, _) => todo!(),
|
||||
Ref(region, kind, place) => {
|
||||
stable_mir::mir::Rvalue::Ref(opaque(region), kind.stable(), place.stable())
|
||||
}
|
||||
Ref(region, kind, place) => stable_mir::mir::Rvalue::Ref(
|
||||
opaque(region),
|
||||
kind.stable(tables),
|
||||
place.stable(tables),
|
||||
),
|
||||
ThreadLocalRef(def_id) => {
|
||||
stable_mir::mir::Rvalue::ThreadLocalRef(rustc_internal::crate_item(*def_id))
|
||||
}
|
||||
AddressOf(mutability, place) => {
|
||||
stable_mir::mir::Rvalue::AddressOf(mutability.stable(), place.stable())
|
||||
stable_mir::mir::Rvalue::AddressOf(mutability.stable(tables), place.stable(tables))
|
||||
}
|
||||
Len(place) => stable_mir::mir::Rvalue::Len(place.stable()),
|
||||
Len(place) => stable_mir::mir::Rvalue::Len(place.stable(tables)),
|
||||
Cast(_, _, _) => todo!(),
|
||||
BinaryOp(bin_op, ops) => {
|
||||
stable_mir::mir::Rvalue::BinaryOp(bin_op.stable(), ops.0.stable(), ops.1.stable())
|
||||
}
|
||||
BinaryOp(bin_op, ops) => stable_mir::mir::Rvalue::BinaryOp(
|
||||
bin_op.stable(tables),
|
||||
ops.0.stable(tables),
|
||||
ops.1.stable(tables),
|
||||
),
|
||||
CheckedBinaryOp(bin_op, ops) => stable_mir::mir::Rvalue::CheckedBinaryOp(
|
||||
bin_op.stable(),
|
||||
ops.0.stable(),
|
||||
ops.1.stable(),
|
||||
bin_op.stable(tables),
|
||||
ops.0.stable(tables),
|
||||
ops.1.stable(tables),
|
||||
),
|
||||
NullaryOp(_, _) => todo!(),
|
||||
UnaryOp(un_op, op) => stable_mir::mir::Rvalue::UnaryOp(un_op.stable(), op.stable()),
|
||||
Discriminant(place) => stable_mir::mir::Rvalue::Discriminant(place.stable()),
|
||||
UnaryOp(un_op, op) => {
|
||||
stable_mir::mir::Rvalue::UnaryOp(un_op.stable(tables), op.stable(tables))
|
||||
}
|
||||
Discriminant(place) => stable_mir::mir::Rvalue::Discriminant(place.stable(tables)),
|
||||
Aggregate(_, _) => todo!(),
|
||||
ShallowInitBox(_, _) => todo!(),
|
||||
CopyForDeref(place) => stable_mir::mir::Rvalue::CopyForDeref(place.stable()),
|
||||
CopyForDeref(place) => stable_mir::mir::Rvalue::CopyForDeref(place.stable(tables)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Stable for mir::Mutability {
|
||||
impl<'tcx> Stable<'tcx> for mir::Mutability {
|
||||
type T = stable_mir::mir::Mutability;
|
||||
fn stable(&self) -> Self::T {
|
||||
fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
|
||||
use mir::Mutability::*;
|
||||
match *self {
|
||||
Not => stable_mir::mir::Mutability::Not,
|
||||
@ -244,21 +179,21 @@ impl Stable for mir::Mutability {
|
||||
}
|
||||
}
|
||||
|
||||
impl Stable for mir::BorrowKind {
|
||||
impl<'tcx> Stable<'tcx> for mir::BorrowKind {
|
||||
type T = stable_mir::mir::BorrowKind;
|
||||
fn stable(&self) -> Self::T {
|
||||
fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
|
||||
use mir::BorrowKind::*;
|
||||
match *self {
|
||||
Shared => stable_mir::mir::BorrowKind::Shared,
|
||||
Shallow => stable_mir::mir::BorrowKind::Shallow,
|
||||
Mut { kind } => stable_mir::mir::BorrowKind::Mut { kind: kind.stable() },
|
||||
Mut { kind } => stable_mir::mir::BorrowKind::Mut { kind: kind.stable(tables) },
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Stable for mir::MutBorrowKind {
|
||||
impl<'tcx> Stable<'tcx> for mir::MutBorrowKind {
|
||||
type T = stable_mir::mir::MutBorrowKind;
|
||||
fn stable(&self) -> Self::T {
|
||||
fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
|
||||
use mir::MutBorrowKind::*;
|
||||
match *self {
|
||||
Default => stable_mir::mir::MutBorrowKind::Default,
|
||||
@ -268,28 +203,28 @@ impl Stable for mir::MutBorrowKind {
|
||||
}
|
||||
}
|
||||
|
||||
impl<'tcx> Stable for mir::NullOp<'tcx> {
|
||||
impl<'tcx> Stable<'tcx> for mir::NullOp<'tcx> {
|
||||
type T = stable_mir::mir::NullOp;
|
||||
fn stable(&self) -> Self::T {
|
||||
fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
|
||||
use mir::NullOp::*;
|
||||
match self {
|
||||
SizeOf => stable_mir::mir::NullOp::SizeOf,
|
||||
AlignOf => stable_mir::mir::NullOp::AlignOf,
|
||||
OffsetOf(indices) => {
|
||||
stable_mir::mir::NullOp::OffsetOf(indices.iter().map(|idx| idx.stable()).collect())
|
||||
}
|
||||
OffsetOf(indices) => stable_mir::mir::NullOp::OffsetOf(
|
||||
indices.iter().map(|idx| idx.stable(tables)).collect(),
|
||||
),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Stable for mir::CastKind {
|
||||
impl<'tcx> Stable<'tcx> for mir::CastKind {
|
||||
type T = stable_mir::mir::CastKind;
|
||||
fn stable(&self) -> Self::T {
|
||||
fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
|
||||
use mir::CastKind::*;
|
||||
match self {
|
||||
PointerExposeAddress => stable_mir::mir::CastKind::PointerExposeAddress,
|
||||
PointerFromExposedAddress => stable_mir::mir::CastKind::PointerFromExposedAddress,
|
||||
PointerCoercion(c) => stable_mir::mir::CastKind::PointerCoercion(c.stable()),
|
||||
PointerCoercion(c) => stable_mir::mir::CastKind::PointerCoercion(c.stable(tables)),
|
||||
DynStar => stable_mir::mir::CastKind::DynStar,
|
||||
IntToInt => stable_mir::mir::CastKind::IntToInt,
|
||||
FloatToInt => stable_mir::mir::CastKind::FloatToInt,
|
||||
@ -302,15 +237,36 @@ impl Stable for mir::CastKind {
|
||||
}
|
||||
}
|
||||
|
||||
impl Stable for ty::adjustment::PointerCoercion {
|
||||
impl<'tcx> Stable<'tcx> for ty::AliasKind {
|
||||
type T = stable_mir::ty::AliasKind;
|
||||
fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
|
||||
use ty::AliasKind::*;
|
||||
match self {
|
||||
Projection => stable_mir::ty::AliasKind::Projection,
|
||||
Inherent => stable_mir::ty::AliasKind::Inherent,
|
||||
Opaque => stable_mir::ty::AliasKind::Opaque,
|
||||
Weak => stable_mir::ty::AliasKind::Weak,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'tcx> Stable<'tcx> for ty::AliasTy<'tcx> {
|
||||
type T = stable_mir::ty::AliasTy;
|
||||
fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
|
||||
let ty::AliasTy { args, def_id, .. } = self;
|
||||
stable_mir::ty::AliasTy { def_id: tables.alias_def(*def_id), args: args.stable(tables) }
|
||||
}
|
||||
}
|
||||
|
||||
impl<'tcx> Stable<'tcx> for ty::adjustment::PointerCoercion {
|
||||
type T = stable_mir::mir::PointerCoercion;
|
||||
fn stable(&self) -> Self::T {
|
||||
fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
|
||||
use ty::adjustment::PointerCoercion;
|
||||
match self {
|
||||
PointerCoercion::ReifyFnPointer => stable_mir::mir::PointerCoercion::ReifyFnPointer,
|
||||
PointerCoercion::UnsafeFnPointer => stable_mir::mir::PointerCoercion::UnsafeFnPointer,
|
||||
PointerCoercion::ClosureFnPointer(unsafety) => {
|
||||
stable_mir::mir::PointerCoercion::ClosureFnPointer(unsafety.stable())
|
||||
stable_mir::mir::PointerCoercion::ClosureFnPointer(unsafety.stable(tables))
|
||||
}
|
||||
PointerCoercion::MutToConstPointer => {
|
||||
stable_mir::mir::PointerCoercion::MutToConstPointer
|
||||
@ -321,9 +277,9 @@ impl Stable for ty::adjustment::PointerCoercion {
|
||||
}
|
||||
}
|
||||
|
||||
impl Stable for rustc_hir::Unsafety {
|
||||
impl<'tcx> Stable<'tcx> for rustc_hir::Unsafety {
|
||||
type T = stable_mir::mir::Safety;
|
||||
fn stable(&self) -> Self::T {
|
||||
fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
|
||||
match self {
|
||||
rustc_hir::Unsafety::Unsafe => stable_mir::mir::Safety::Unsafe,
|
||||
rustc_hir::Unsafety::Normal => stable_mir::mir::Safety::Normal,
|
||||
@ -331,28 +287,28 @@ impl Stable for rustc_hir::Unsafety {
|
||||
}
|
||||
}
|
||||
|
||||
impl Stable for FieldIdx {
|
||||
impl<'tcx> Stable<'tcx> for FieldIdx {
|
||||
type T = usize;
|
||||
fn stable(&self) -> Self::T {
|
||||
fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
|
||||
self.as_usize()
|
||||
}
|
||||
}
|
||||
|
||||
impl<'tcx> Stable for mir::Operand<'tcx> {
|
||||
impl<'tcx> Stable<'tcx> for mir::Operand<'tcx> {
|
||||
type T = stable_mir::mir::Operand;
|
||||
fn stable(&self) -> Self::T {
|
||||
fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
|
||||
use mir::Operand::*;
|
||||
match self {
|
||||
Copy(place) => stable_mir::mir::Operand::Copy(place.stable()),
|
||||
Move(place) => stable_mir::mir::Operand::Move(place.stable()),
|
||||
Copy(place) => stable_mir::mir::Operand::Copy(place.stable(tables)),
|
||||
Move(place) => stable_mir::mir::Operand::Move(place.stable(tables)),
|
||||
Constant(c) => stable_mir::mir::Operand::Constant(c.to_string()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'tcx> Stable for mir::Place<'tcx> {
|
||||
impl<'tcx> Stable<'tcx> for mir::Place<'tcx> {
|
||||
type T = stable_mir::mir::Place;
|
||||
fn stable(&self) -> Self::T {
|
||||
fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
|
||||
stable_mir::mir::Place {
|
||||
local: self.local.as_usize(),
|
||||
projection: format!("{:?}", self.projection),
|
||||
@ -360,9 +316,9 @@ impl<'tcx> Stable for mir::Place<'tcx> {
|
||||
}
|
||||
}
|
||||
|
||||
impl Stable for mir::UnwindAction {
|
||||
impl<'tcx> Stable<'tcx> for mir::UnwindAction {
|
||||
type T = stable_mir::mir::UnwindAction;
|
||||
fn stable(&self) -> Self::T {
|
||||
fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
|
||||
use rustc_middle::mir::UnwindAction;
|
||||
match self {
|
||||
UnwindAction::Continue => stable_mir::mir::UnwindAction::Continue,
|
||||
@ -373,46 +329,48 @@ impl Stable for mir::UnwindAction {
|
||||
}
|
||||
}
|
||||
|
||||
impl<'tcx> Stable for mir::AssertMessage<'tcx> {
|
||||
impl<'tcx> Stable<'tcx> for mir::AssertMessage<'tcx> {
|
||||
type T = stable_mir::mir::AssertMessage;
|
||||
fn stable(&self) -> Self::T {
|
||||
fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
|
||||
use rustc_middle::mir::AssertKind;
|
||||
match self {
|
||||
AssertKind::BoundsCheck { len, index } => stable_mir::mir::AssertMessage::BoundsCheck {
|
||||
len: len.stable(),
|
||||
index: index.stable(),
|
||||
len: len.stable(tables),
|
||||
index: index.stable(tables),
|
||||
},
|
||||
AssertKind::Overflow(bin_op, op1, op2) => stable_mir::mir::AssertMessage::Overflow(
|
||||
bin_op.stable(),
|
||||
op1.stable(),
|
||||
op2.stable(),
|
||||
bin_op.stable(tables),
|
||||
op1.stable(tables),
|
||||
op2.stable(tables),
|
||||
),
|
||||
AssertKind::OverflowNeg(op) => stable_mir::mir::AssertMessage::OverflowNeg(op.stable()),
|
||||
AssertKind::OverflowNeg(op) => {
|
||||
stable_mir::mir::AssertMessage::OverflowNeg(op.stable(tables))
|
||||
}
|
||||
AssertKind::DivisionByZero(op) => {
|
||||
stable_mir::mir::AssertMessage::DivisionByZero(op.stable())
|
||||
stable_mir::mir::AssertMessage::DivisionByZero(op.stable(tables))
|
||||
}
|
||||
AssertKind::RemainderByZero(op) => {
|
||||
stable_mir::mir::AssertMessage::RemainderByZero(op.stable())
|
||||
stable_mir::mir::AssertMessage::RemainderByZero(op.stable(tables))
|
||||
}
|
||||
AssertKind::ResumedAfterReturn(generator) => {
|
||||
stable_mir::mir::AssertMessage::ResumedAfterReturn(generator.stable())
|
||||
stable_mir::mir::AssertMessage::ResumedAfterReturn(generator.stable(tables))
|
||||
}
|
||||
AssertKind::ResumedAfterPanic(generator) => {
|
||||
stable_mir::mir::AssertMessage::ResumedAfterPanic(generator.stable())
|
||||
stable_mir::mir::AssertMessage::ResumedAfterPanic(generator.stable(tables))
|
||||
}
|
||||
AssertKind::MisalignedPointerDereference { required, found } => {
|
||||
stable_mir::mir::AssertMessage::MisalignedPointerDereference {
|
||||
required: required.stable(),
|
||||
found: found.stable(),
|
||||
required: required.stable(tables),
|
||||
found: found.stable(tables),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Stable for mir::BinOp {
|
||||
impl<'tcx> Stable<'tcx> for mir::BinOp {
|
||||
type T = stable_mir::mir::BinOp;
|
||||
fn stable(&self) -> Self::T {
|
||||
fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
|
||||
use mir::BinOp;
|
||||
match self {
|
||||
BinOp::Add => stable_mir::mir::BinOp::Add,
|
||||
@ -441,9 +399,9 @@ impl Stable for mir::BinOp {
|
||||
}
|
||||
}
|
||||
|
||||
impl Stable for mir::UnOp {
|
||||
impl<'tcx> Stable<'tcx> for mir::UnOp {
|
||||
type T = stable_mir::mir::UnOp;
|
||||
fn stable(&self) -> Self::T {
|
||||
fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
|
||||
use mir::UnOp;
|
||||
match self {
|
||||
UnOp::Not => stable_mir::mir::UnOp::Not,
|
||||
@ -452,9 +410,9 @@ impl Stable for mir::UnOp {
|
||||
}
|
||||
}
|
||||
|
||||
impl Stable for rustc_hir::GeneratorKind {
|
||||
impl<'tcx> Stable<'tcx> for rustc_hir::GeneratorKind {
|
||||
type T = stable_mir::mir::GeneratorKind;
|
||||
fn stable(&self) -> Self::T {
|
||||
fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
|
||||
use rustc_hir::{AsyncGeneratorKind, GeneratorKind};
|
||||
match self {
|
||||
GeneratorKind::Async(async_gen) => {
|
||||
@ -470,16 +428,16 @@ impl Stable for rustc_hir::GeneratorKind {
|
||||
}
|
||||
}
|
||||
|
||||
impl<'tcx> Stable for mir::InlineAsmOperand<'tcx> {
|
||||
impl<'tcx> Stable<'tcx> for mir::InlineAsmOperand<'tcx> {
|
||||
type T = stable_mir::mir::InlineAsmOperand;
|
||||
fn stable(&self) -> Self::T {
|
||||
fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
|
||||
use rustc_middle::mir::InlineAsmOperand;
|
||||
|
||||
let (in_value, out_place) = match self {
|
||||
InlineAsmOperand::In { value, .. } => (Some(value.stable()), None),
|
||||
InlineAsmOperand::Out { place, .. } => (None, place.map(|place| place.stable())),
|
||||
InlineAsmOperand::In { value, .. } => (Some(value.stable(tables)), None),
|
||||
InlineAsmOperand::Out { place, .. } => (None, place.map(|place| place.stable(tables))),
|
||||
InlineAsmOperand::InOut { in_value, out_place, .. } => {
|
||||
(Some(in_value.stable()), out_place.map(|place| place.stable()))
|
||||
(Some(in_value.stable(tables)), out_place.map(|place| place.stable(tables)))
|
||||
}
|
||||
InlineAsmOperand::Const { .. }
|
||||
| InlineAsmOperand::SymFn { .. }
|
||||
@ -490,15 +448,15 @@ impl<'tcx> Stable for mir::InlineAsmOperand<'tcx> {
|
||||
}
|
||||
}
|
||||
|
||||
impl<'tcx> Stable for mir::Terminator<'tcx> {
|
||||
impl<'tcx> Stable<'tcx> for mir::Terminator<'tcx> {
|
||||
type T = stable_mir::mir::Terminator;
|
||||
fn stable(&self) -> Self::T {
|
||||
fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
|
||||
use rustc_middle::mir::TerminatorKind::*;
|
||||
use stable_mir::mir::Terminator;
|
||||
match &self.kind {
|
||||
Goto { target } => Terminator::Goto { target: target.as_usize() },
|
||||
SwitchInt { discr, targets } => Terminator::SwitchInt {
|
||||
discr: discr.stable(),
|
||||
discr: discr.stable(tables),
|
||||
targets: targets
|
||||
.iter()
|
||||
.map(|(value, target)| stable_mir::mir::SwitchTarget {
|
||||
@ -513,37 +471,235 @@ impl<'tcx> Stable for mir::Terminator<'tcx> {
|
||||
Return => Terminator::Return,
|
||||
Unreachable => Terminator::Unreachable,
|
||||
Drop { place, target, unwind, replace: _ } => Terminator::Drop {
|
||||
place: place.stable(),
|
||||
place: place.stable(tables),
|
||||
target: target.as_usize(),
|
||||
unwind: unwind.stable(),
|
||||
unwind: unwind.stable(tables),
|
||||
},
|
||||
Call { func, args, destination, target, unwind, call_source: _, fn_span: _ } => {
|
||||
Terminator::Call {
|
||||
func: func.stable(),
|
||||
args: args.iter().map(|arg| arg.stable()).collect(),
|
||||
destination: destination.stable(),
|
||||
func: func.stable(tables),
|
||||
args: args.iter().map(|arg| arg.stable(tables)).collect(),
|
||||
destination: destination.stable(tables),
|
||||
target: target.map(|t| t.as_usize()),
|
||||
unwind: unwind.stable(),
|
||||
unwind: unwind.stable(tables),
|
||||
}
|
||||
}
|
||||
Assert { cond, expected, msg, target, unwind } => Terminator::Assert {
|
||||
cond: cond.stable(),
|
||||
cond: cond.stable(tables),
|
||||
expected: *expected,
|
||||
msg: msg.stable(),
|
||||
msg: msg.stable(tables),
|
||||
target: target.as_usize(),
|
||||
unwind: unwind.stable(),
|
||||
unwind: unwind.stable(tables),
|
||||
},
|
||||
InlineAsm { template, operands, options, line_spans, destination, unwind } => {
|
||||
Terminator::InlineAsm {
|
||||
template: format!("{:?}", template),
|
||||
operands: operands.iter().map(|operand| operand.stable()).collect(),
|
||||
operands: operands.iter().map(|operand| operand.stable(tables)).collect(),
|
||||
options: format!("{:?}", options),
|
||||
line_spans: format!("{:?}", line_spans),
|
||||
destination: destination.map(|d| d.as_usize()),
|
||||
unwind: unwind.stable(),
|
||||
unwind: unwind.stable(tables),
|
||||
}
|
||||
}
|
||||
Yield { .. } | GeneratorDrop | FalseEdge { .. } | FalseUnwind { .. } => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'tcx> Stable<'tcx> for ty::GenericArgs<'tcx> {
|
||||
type T = stable_mir::ty::GenericArgs;
|
||||
fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
|
||||
use stable_mir::ty::{GenericArgKind, GenericArgs};
|
||||
|
||||
GenericArgs(
|
||||
self.iter()
|
||||
.map(|arg| match arg.unpack() {
|
||||
ty::GenericArgKind::Lifetime(region) => {
|
||||
GenericArgKind::Lifetime(opaque(®ion))
|
||||
}
|
||||
ty::GenericArgKind::Type(ty) => GenericArgKind::Type(tables.intern_ty(ty)),
|
||||
ty::GenericArgKind::Const(const_) => GenericArgKind::Const(opaque(&const_)),
|
||||
})
|
||||
.collect(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'tcx> Stable<'tcx> for ty::PolyFnSig<'tcx> {
|
||||
type T = stable_mir::ty::PolyFnSig;
|
||||
fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
|
||||
use stable_mir::ty::Binder;
|
||||
|
||||
Binder {
|
||||
value: self.skip_binder().stable(tables),
|
||||
bound_vars: self
|
||||
.bound_vars()
|
||||
.iter()
|
||||
.map(|bound_var| bound_var.stable(tables))
|
||||
.collect(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'tcx> Stable<'tcx> for ty::FnSig<'tcx> {
|
||||
type T = stable_mir::ty::FnSig;
|
||||
fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
|
||||
use rustc_target::spec::abi;
|
||||
use stable_mir::ty::{Abi, FnSig, Unsafety};
|
||||
|
||||
FnSig {
|
||||
inputs_and_output: self
|
||||
.inputs_and_output
|
||||
.iter()
|
||||
.map(|ty| tables.intern_ty(ty))
|
||||
.collect(),
|
||||
c_variadic: self.c_variadic,
|
||||
unsafety: match self.unsafety {
|
||||
hir::Unsafety::Normal => Unsafety::Normal,
|
||||
hir::Unsafety::Unsafe => Unsafety::Unsafe,
|
||||
},
|
||||
abi: match self.abi {
|
||||
abi::Abi::Rust => Abi::Rust,
|
||||
abi::Abi::C { unwind } => Abi::C { unwind },
|
||||
abi::Abi::Cdecl { unwind } => Abi::Cdecl { unwind },
|
||||
abi::Abi::Stdcall { unwind } => Abi::Stdcall { unwind },
|
||||
abi::Abi::Fastcall { unwind } => Abi::Fastcall { unwind },
|
||||
abi::Abi::Vectorcall { unwind } => Abi::Vectorcall { unwind },
|
||||
abi::Abi::Thiscall { unwind } => Abi::Thiscall { unwind },
|
||||
abi::Abi::Aapcs { unwind } => Abi::Aapcs { unwind },
|
||||
abi::Abi::Win64 { unwind } => Abi::Win64 { unwind },
|
||||
abi::Abi::SysV64 { unwind } => Abi::SysV64 { unwind },
|
||||
abi::Abi::PtxKernel => Abi::PtxKernel,
|
||||
abi::Abi::Msp430Interrupt => Abi::Msp430Interrupt,
|
||||
abi::Abi::X86Interrupt => Abi::X86Interrupt,
|
||||
abi::Abi::AmdGpuKernel => Abi::AmdGpuKernel,
|
||||
abi::Abi::EfiApi => Abi::EfiApi,
|
||||
abi::Abi::AvrInterrupt => Abi::AvrInterrupt,
|
||||
abi::Abi::AvrNonBlockingInterrupt => Abi::AvrNonBlockingInterrupt,
|
||||
abi::Abi::CCmseNonSecureCall => Abi::CCmseNonSecureCall,
|
||||
abi::Abi::Wasm => Abi::Wasm,
|
||||
abi::Abi::System { unwind } => Abi::System { unwind },
|
||||
abi::Abi::RustIntrinsic => Abi::RustIntrinsic,
|
||||
abi::Abi::RustCall => Abi::RustCall,
|
||||
abi::Abi::PlatformIntrinsic => Abi::PlatformIntrinsic,
|
||||
abi::Abi::Unadjusted => Abi::Unadjusted,
|
||||
abi::Abi::RustCold => Abi::RustCold,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'tcx> Stable<'tcx> for ty::BoundVariableKind {
|
||||
type T = stable_mir::ty::BoundVariableKind;
|
||||
fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
|
||||
use stable_mir::ty::{BoundRegionKind, BoundTyKind, BoundVariableKind};
|
||||
|
||||
match self {
|
||||
ty::BoundVariableKind::Ty(bound_ty_kind) => {
|
||||
BoundVariableKind::Ty(match bound_ty_kind {
|
||||
ty::BoundTyKind::Anon => BoundTyKind::Anon,
|
||||
ty::BoundTyKind::Param(def_id, symbol) => {
|
||||
BoundTyKind::Param(rustc_internal::param_def(*def_id), symbol.to_string())
|
||||
}
|
||||
})
|
||||
}
|
||||
ty::BoundVariableKind::Region(bound_region_kind) => {
|
||||
BoundVariableKind::Region(match bound_region_kind {
|
||||
ty::BoundRegionKind::BrAnon(option_span) => {
|
||||
BoundRegionKind::BrAnon(option_span.map(|span| opaque(&span)))
|
||||
}
|
||||
ty::BoundRegionKind::BrNamed(def_id, symbol) => BoundRegionKind::BrNamed(
|
||||
rustc_internal::br_named_def(*def_id),
|
||||
symbol.to_string(),
|
||||
),
|
||||
ty::BoundRegionKind::BrEnv => BoundRegionKind::BrEnv,
|
||||
})
|
||||
}
|
||||
ty::BoundVariableKind::Const => BoundVariableKind::Const,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'tcx> Stable<'tcx> for Ty<'tcx> {
|
||||
type T = stable_mir::ty::TyKind;
|
||||
fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
|
||||
match self.kind() {
|
||||
ty::Bool => TyKind::RigidTy(RigidTy::Bool),
|
||||
ty::Char => TyKind::RigidTy(RigidTy::Char),
|
||||
ty::Int(int_ty) => match int_ty {
|
||||
ty::IntTy::Isize => TyKind::RigidTy(RigidTy::Int(IntTy::Isize)),
|
||||
ty::IntTy::I8 => TyKind::RigidTy(RigidTy::Int(IntTy::I8)),
|
||||
ty::IntTy::I16 => TyKind::RigidTy(RigidTy::Int(IntTy::I16)),
|
||||
ty::IntTy::I32 => TyKind::RigidTy(RigidTy::Int(IntTy::I32)),
|
||||
ty::IntTy::I64 => TyKind::RigidTy(RigidTy::Int(IntTy::I64)),
|
||||
ty::IntTy::I128 => TyKind::RigidTy(RigidTy::Int(IntTy::I128)),
|
||||
},
|
||||
ty::Uint(uint_ty) => match uint_ty {
|
||||
ty::UintTy::Usize => TyKind::RigidTy(RigidTy::Uint(UintTy::Usize)),
|
||||
ty::UintTy::U8 => TyKind::RigidTy(RigidTy::Uint(UintTy::U8)),
|
||||
ty::UintTy::U16 => TyKind::RigidTy(RigidTy::Uint(UintTy::U16)),
|
||||
ty::UintTy::U32 => TyKind::RigidTy(RigidTy::Uint(UintTy::U32)),
|
||||
ty::UintTy::U64 => TyKind::RigidTy(RigidTy::Uint(UintTy::U64)),
|
||||
ty::UintTy::U128 => TyKind::RigidTy(RigidTy::Uint(UintTy::U128)),
|
||||
},
|
||||
ty::Float(float_ty) => match float_ty {
|
||||
ty::FloatTy::F32 => TyKind::RigidTy(RigidTy::Float(FloatTy::F32)),
|
||||
ty::FloatTy::F64 => TyKind::RigidTy(RigidTy::Float(FloatTy::F64)),
|
||||
},
|
||||
ty::Adt(adt_def, generic_args) => TyKind::RigidTy(RigidTy::Adt(
|
||||
rustc_internal::adt_def(adt_def.did()),
|
||||
generic_args.stable(tables),
|
||||
)),
|
||||
ty::Foreign(def_id) => {
|
||||
TyKind::RigidTy(RigidTy::Foreign(rustc_internal::foreign_def(*def_id)))
|
||||
}
|
||||
ty::Str => TyKind::RigidTy(RigidTy::Str),
|
||||
ty::Array(ty, constant) => {
|
||||
TyKind::RigidTy(RigidTy::Array(tables.intern_ty(*ty), opaque(constant)))
|
||||
}
|
||||
ty::Slice(ty) => TyKind::RigidTy(RigidTy::Slice(tables.intern_ty(*ty))),
|
||||
ty::RawPtr(ty::TypeAndMut { ty, mutbl }) => {
|
||||
TyKind::RigidTy(RigidTy::RawPtr(tables.intern_ty(*ty), mutbl.stable(tables)))
|
||||
}
|
||||
ty::Ref(region, ty, mutbl) => TyKind::RigidTy(RigidTy::Ref(
|
||||
opaque(region),
|
||||
tables.intern_ty(*ty),
|
||||
mutbl.stable(tables),
|
||||
)),
|
||||
ty::FnDef(def_id, generic_args) => TyKind::RigidTy(RigidTy::FnDef(
|
||||
rustc_internal::fn_def(*def_id),
|
||||
generic_args.stable(tables),
|
||||
)),
|
||||
ty::FnPtr(poly_fn_sig) => TyKind::RigidTy(RigidTy::FnPtr(poly_fn_sig.stable(tables))),
|
||||
ty::Dynamic(_, _, _) => todo!(),
|
||||
ty::Closure(def_id, generic_args) => TyKind::RigidTy(RigidTy::Closure(
|
||||
rustc_internal::closure_def(*def_id),
|
||||
generic_args.stable(tables),
|
||||
)),
|
||||
ty::Generator(def_id, generic_args, movability) => TyKind::RigidTy(RigidTy::Generator(
|
||||
rustc_internal::generator_def(*def_id),
|
||||
generic_args.stable(tables),
|
||||
match movability {
|
||||
hir::Movability::Static => Movability::Static,
|
||||
hir::Movability::Movable => Movability::Movable,
|
||||
},
|
||||
)),
|
||||
ty::Never => TyKind::RigidTy(RigidTy::Never),
|
||||
ty::Tuple(fields) => TyKind::RigidTy(RigidTy::Tuple(
|
||||
fields.iter().map(|ty| tables.intern_ty(ty)).collect(),
|
||||
)),
|
||||
ty::Alias(alias_kind, alias_ty) => {
|
||||
TyKind::Alias(alias_kind.stable(tables), alias_ty.stable(tables))
|
||||
}
|
||||
ty::Param(_) => todo!(),
|
||||
ty::Bound(_, _) => todo!(),
|
||||
ty::Placeholder(..)
|
||||
| ty::GeneratorWitness(_)
|
||||
| ty::GeneratorWitnessMIR(_, _)
|
||||
| ty::Infer(_)
|
||||
| ty::Error(_) => {
|
||||
unreachable!();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -12,10 +12,12 @@ impl Ty {
|
||||
|
||||
type Const = Opaque;
|
||||
pub(crate) type Region = Opaque;
|
||||
type Span = Opaque;
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub enum TyKind {
|
||||
RigidTy(RigidTy),
|
||||
Alias(AliasKind, AliasTy),
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
@ -25,12 +27,18 @@ pub enum RigidTy {
|
||||
Int(IntTy),
|
||||
Uint(UintTy),
|
||||
Float(FloatTy),
|
||||
Adt(AdtDef, AdtSubsts),
|
||||
Adt(AdtDef, GenericArgs),
|
||||
Foreign(ForeignDef),
|
||||
Str,
|
||||
Array(Ty, Const),
|
||||
Slice(Ty),
|
||||
RawPtr(Ty, Mutability),
|
||||
Ref(Region, Ty, Mutability),
|
||||
FnDef(FnDef, GenericArgs),
|
||||
FnPtr(PolyFnSig),
|
||||
Closure(ClosureDef, GenericArgs),
|
||||
Generator(GeneratorDef, GenericArgs, Movability),
|
||||
Never,
|
||||
Tuple(Vec<Ty>),
|
||||
}
|
||||
|
||||
@ -60,17 +68,127 @@ pub enum FloatTy {
|
||||
F64,
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
|
||||
pub enum Movability {
|
||||
Static,
|
||||
Movable,
|
||||
}
|
||||
|
||||
#[derive(Clone, PartialEq, Eq, Debug)]
|
||||
pub struct ForeignDef(pub(crate) DefId);
|
||||
|
||||
#[derive(Clone, PartialEq, Eq, Debug)]
|
||||
pub struct FnDef(pub(crate) DefId);
|
||||
|
||||
#[derive(Clone, PartialEq, Eq, Debug)]
|
||||
pub struct ClosureDef(pub(crate) DefId);
|
||||
|
||||
#[derive(Clone, PartialEq, Eq, Debug)]
|
||||
pub struct GeneratorDef(pub(crate) DefId);
|
||||
|
||||
#[derive(Clone, PartialEq, Eq, Debug)]
|
||||
pub struct ParamDef(pub(crate) DefId);
|
||||
|
||||
#[derive(Clone, PartialEq, Eq, Debug)]
|
||||
pub struct BrNamedDef(pub(crate) DefId);
|
||||
|
||||
#[derive(Clone, PartialEq, Eq, Debug)]
|
||||
pub struct AdtDef(pub(crate) DefId);
|
||||
|
||||
#[derive(Clone, PartialEq, Eq, Debug)]
|
||||
pub struct AliasDef(pub(crate) DefId);
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct AdtSubsts(pub Vec<GenericArgKind>);
|
||||
pub struct GenericArgs(pub Vec<GenericArgKind>);
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub enum GenericArgKind {
|
||||
// FIXME add proper region
|
||||
Lifetime(Region),
|
||||
Type(Ty),
|
||||
// FIXME add proper const
|
||||
Const(Const),
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub enum AliasKind {
|
||||
Projection,
|
||||
Inherent,
|
||||
Opaque,
|
||||
Weak,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct AliasTy {
|
||||
pub def_id: AliasDef,
|
||||
pub args: GenericArgs,
|
||||
}
|
||||
|
||||
pub type PolyFnSig = Binder<FnSig>;
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct FnSig {
|
||||
pub inputs_and_output: Vec<Ty>,
|
||||
pub c_variadic: bool,
|
||||
pub unsafety: Unsafety,
|
||||
pub abi: Abi,
|
||||
}
|
||||
|
||||
#[derive(Clone, PartialEq, Eq, Debug)]
|
||||
pub enum Unsafety {
|
||||
Unsafe,
|
||||
Normal,
|
||||
}
|
||||
|
||||
#[derive(Clone, PartialEq, Eq, Debug)]
|
||||
pub enum Abi {
|
||||
Rust,
|
||||
C { unwind: bool },
|
||||
Cdecl { unwind: bool },
|
||||
Stdcall { unwind: bool },
|
||||
Fastcall { unwind: bool },
|
||||
Vectorcall { unwind: bool },
|
||||
Thiscall { unwind: bool },
|
||||
Aapcs { unwind: bool },
|
||||
Win64 { unwind: bool },
|
||||
SysV64 { unwind: bool },
|
||||
PtxKernel,
|
||||
Msp430Interrupt,
|
||||
X86Interrupt,
|
||||
AmdGpuKernel,
|
||||
EfiApi,
|
||||
AvrInterrupt,
|
||||
AvrNonBlockingInterrupt,
|
||||
CCmseNonSecureCall,
|
||||
Wasm,
|
||||
System { unwind: bool },
|
||||
RustIntrinsic,
|
||||
RustCall,
|
||||
PlatformIntrinsic,
|
||||
Unadjusted,
|
||||
RustCold,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct Binder<T> {
|
||||
pub value: T,
|
||||
pub bound_vars: Vec<BoundVariableKind>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub enum BoundVariableKind {
|
||||
Ty(BoundTyKind),
|
||||
Region(BoundRegionKind),
|
||||
Const,
|
||||
}
|
||||
|
||||
#[derive(Clone, PartialEq, Eq, Debug)]
|
||||
pub enum BoundTyKind {
|
||||
Anon,
|
||||
Param(ParamDef, String),
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub enum BoundRegionKind {
|
||||
BrAnon(Option<Span>),
|
||||
BrNamed(BrNamedDef, String),
|
||||
BrEnv,
|
||||
}
|
||||
|
@ -50,6 +50,9 @@ pub trait TyAbiInterface<'a, C>: Sized {
this: TyAndLayout<'a, Self>,
cx: &C,
offset: Size,
// If true, assume that pointers are either null or valid (according to their type),
// enabling extra optimizations.
assume_valid_ptr: bool,
) -> Option<PointeeInfo>;
fn is_adt(this: TyAndLayout<'a, Self>) -> bool;
fn is_never(this: TyAndLayout<'a, Self>) -> bool;
@ -76,7 +79,8 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
where
Ty: TyAbiInterface<'a, C>,
{
Ty::ty_and_layout_pointee_info_at(self, cx, offset)
let assume_valid_ptr = true;
Ty::ty_and_layout_pointee_info_at(self, cx, offset, assume_valid_ptr)
}
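A hedged sketch (not in this diff; the wrapper name is invented) of how a caller that cannot assume pointers are valid might opt out, passing `false` instead of the default used above:

    impl<'a, Ty> TyAndLayout<'a, Ty> {
        // Hypothetical helper for analyses of possibly-dangling pointers.
        pub fn pointee_info_at_unchecked<C>(self, cx: &C, offset: Size) -> Option<PointeeInfo>
        where
            Ty: TyAbiInterface<'a, C>,
        {
            let assume_valid_ptr = false;
            Ty::ty_and_layout_pointee_info_at(self, cx, offset, assume_valid_ptr)
        }
    }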
|
||||
|
||||
pub fn is_single_fp_element<C>(self, cx: &C) -> bool
|
||||
@ -140,24 +144,3 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
|
||||
offset
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, Ty> TyAndLayout<'a, Ty> {
|
||||
/// Returns `true` if the layout corresponds to an unsized type.
|
||||
pub fn is_unsized(&self) -> bool {
|
||||
self.abi.is_unsized()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn is_sized(&self) -> bool {
|
||||
self.abi.is_sized()
|
||||
}
|
||||
|
||||
/// Returns `true` if the type is a ZST and not unsized.
|
||||
pub fn is_zst(&self) -> bool {
|
||||
match self.abi {
|
||||
Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
|
||||
Abi::Uninhabited => self.size.bytes() == 0,
|
||||
Abi::Aggregate { sized } => sized && self.size.bytes() == 0,
|
||||
}
|
||||
}
|
||||
}
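As a standalone aside (not part of the diff), the distinction `is_zst` draws is zero size combined with being sized or uninhabited; the snippet below checks a few sizes that are stable facts of the language.

use std::mem::size_of;

fn main() {
    // Classic zero-sized types: unit and empty arrays.
    assert_eq!(size_of::<()>(), 0);
    assert_eq!(size_of::<[u64; 0]>(), 0);
    // Scalars such as `bool` have nonzero size, so they are never ZSTs.
    assert_ne!(size_of::<bool>(), 0);
}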
|
||||
|
@ -10,9 +10,11 @@ use rustc_infer::traits::util::elaborate;
|
||||
use rustc_infer::traits::Reveal;
|
||||
use rustc_middle::traits::solve::inspect::CandidateKind;
|
||||
use rustc_middle::traits::solve::{CanonicalResponse, Certainty, Goal, MaybeCause, QueryResult};
|
||||
use rustc_middle::ty::fast_reject::TreatProjections;
|
||||
use rustc_middle::ty::TypeFoldable;
|
||||
use rustc_middle::ty::fast_reject::{SimplifiedType, TreatParams};
|
||||
use rustc_middle::ty::TypeVisitableExt;
|
||||
use rustc_middle::ty::{self, Ty, TyCtxt};
|
||||
use rustc_middle::ty::{fast_reject, TypeFoldable};
|
||||
use rustc_span::ErrorGuaranteed;
|
||||
use std::fmt::Debug;
|
||||
|
||||
pub(super) mod structural_traits;
|
||||
@ -109,10 +111,10 @@ pub(super) trait GoalKind<'tcx>:
|
||||
|
||||
fn trait_def_id(self, tcx: TyCtxt<'tcx>) -> DefId;
|
||||
|
||||
// Try equating an assumption predicate against a goal's predicate. If it
|
||||
// holds, then execute the `then` callback, which should do any additional
|
||||
// work, then produce a response (typically by executing
|
||||
// [`EvalCtxt::evaluate_added_goals_and_make_canonical_response`]).
|
||||
/// Try equating an assumption predicate against a goal's predicate. If it
|
||||
/// holds, then execute the `then` callback, which should do any additional
|
||||
/// work, then produce a response (typically by executing
|
||||
/// [`EvalCtxt::evaluate_added_goals_and_make_canonical_response`]).
|
||||
fn probe_and_match_goal_against_assumption(
|
||||
ecx: &mut EvalCtxt<'_, 'tcx>,
|
||||
goal: Goal<'tcx, Self>,
|
||||
@ -120,9 +122,9 @@ pub(super) trait GoalKind<'tcx>:
|
||||
then: impl FnOnce(&mut EvalCtxt<'_, 'tcx>) -> QueryResult<'tcx>,
|
||||
) -> QueryResult<'tcx>;
|
||||
|
||||
// Consider a clause, which consists of an "assumption" and some "requirements",
|
||||
// to satisfy a goal. If the requirements hold, then attempt to satisfy our
|
||||
// goal by equating it with the assumption.
|
||||
/// Consider a clause, which consists of an "assumption" and some "requirements",
|
||||
/// to satisfy a goal. If the requirements hold, then attempt to satisfy our
|
||||
/// goal by equating it with the assumption.
|
||||
fn consider_implied_clause(
|
||||
ecx: &mut EvalCtxt<'_, 'tcx>,
|
||||
goal: Goal<'tcx, Self>,
|
||||
@ -149,9 +151,9 @@ pub(super) trait GoalKind<'tcx>:
|
||||
})
|
||||
}
|
||||
|
||||
// Consider a clause specifically for a `dyn Trait` self type. This requires
|
||||
// additionally checking all of the supertraits and object bounds to hold,
|
||||
// since they're not implied by the well-formedness of the object type.
|
||||
/// Consider a clause specifically for a `dyn Trait` self type. This requires
|
||||
/// additionally checking all of the supertraits and object bounds to hold,
|
||||
/// since they're not implied by the well-formedness of the object type.
|
||||
fn consider_object_bound_candidate(
|
||||
ecx: &mut EvalCtxt<'_, 'tcx>,
|
||||
goal: Goal<'tcx, Self>,
|
||||
@ -182,96 +184,113 @@ pub(super) trait GoalKind<'tcx>:
|
||||
impl_def_id: DefId,
|
||||
) -> QueryResult<'tcx>;
|
||||
|
||||
// A type implements an `auto trait` if its components do as well. These components
|
||||
// are given by built-in rules from [`instantiate_constituent_tys_for_auto_trait`].
|
||||
/// If the predicate contained an error, we want to avoid emitting unnecessary trait
|
||||
/// errors but still want to emit errors for other trait goals. We have some special
|
||||
/// handling for this case.
|
||||
///
|
||||
/// Trait goals always hold while projection goals never do. This is a bit arbitrary
|
||||
/// but prevents incorrect normalization while hiding any trait errors.
|
||||
fn consider_error_guaranteed_candidate(
|
||||
ecx: &mut EvalCtxt<'_, 'tcx>,
|
||||
guar: ErrorGuaranteed,
|
||||
) -> QueryResult<'tcx>;
|
||||
|
||||
/// A type implements an `auto trait` if its components do as well.
|
||||
///
|
||||
/// These components are given by built-in rules from
|
||||
/// [`structural_traits::instantiate_constituent_tys_for_auto_trait`].
|
||||
fn consider_auto_trait_candidate(
|
||||
ecx: &mut EvalCtxt<'_, 'tcx>,
|
||||
goal: Goal<'tcx, Self>,
|
||||
) -> QueryResult<'tcx>;
|
||||
|
||||
// A trait alias holds if the RHS traits and `where` clauses hold.
|
||||
/// A trait alias holds if the RHS traits and `where` clauses hold.
|
||||
fn consider_trait_alias_candidate(
|
||||
ecx: &mut EvalCtxt<'_, 'tcx>,
|
||||
goal: Goal<'tcx, Self>,
|
||||
) -> QueryResult<'tcx>;
|
||||
|
||||
// A type is `Sized` if its components are `Sized`. These components
|
||||
// are given by built-in rules from [`instantiate_constituent_tys_for_sized_trait`].
|
||||
/// A type is `Sized` if its components are `Sized`.
|
||||
///
|
||||
/// These components are given by built-in rules from
|
||||
/// [`structural_traits::instantiate_constituent_tys_for_sized_trait`].
|
||||
fn consider_builtin_sized_candidate(
|
||||
ecx: &mut EvalCtxt<'_, 'tcx>,
|
||||
goal: Goal<'tcx, Self>,
|
||||
) -> QueryResult<'tcx>;
|
||||
|
||||
// A type is `Copy` or `Clone` if its components are `Copy` or `Clone`. These
|
||||
// components are given by built-in rules from [`instantiate_constituent_tys_for_copy_clone_trait`].
|
||||
/// A type is `Copy` or `Clone` if its components are `Copy` or `Clone`.
|
||||
///
|
||||
/// These components are given by built-in rules from
|
||||
/// [`structural_traits::instantiate_constituent_tys_for_copy_clone_trait`].
|
||||
fn consider_builtin_copy_clone_candidate(
|
||||
ecx: &mut EvalCtxt<'_, 'tcx>,
|
||||
goal: Goal<'tcx, Self>,
|
||||
) -> QueryResult<'tcx>;
|
||||
|
||||
// A type is `PointerLike` if we can compute its layout, and that layout
|
||||
// matches the layout of `usize`.
|
||||
/// A type is `PointerLike` if we can compute its layout, and that layout
|
||||
/// matches the layout of `usize`.
|
||||
fn consider_builtin_pointer_like_candidate(
|
||||
ecx: &mut EvalCtxt<'_, 'tcx>,
|
||||
goal: Goal<'tcx, Self>,
|
||||
) -> QueryResult<'tcx>;
|
||||
|
||||
// A type is a `FnPtr` if it is of `FnPtr` type.
|
||||
/// A type is a `FnPtr` if it is of `FnPtr` type.
|
||||
fn consider_builtin_fn_ptr_trait_candidate(
|
||||
ecx: &mut EvalCtxt<'_, 'tcx>,
|
||||
goal: Goal<'tcx, Self>,
|
||||
) -> QueryResult<'tcx>;
|
||||
|
||||
// A callable type (a closure, fn def, or fn ptr) is known to implement the `Fn<A>`
|
||||
// family of traits where `A` is given by the signature of the type.
|
||||
/// A callable type (a closure, fn def, or fn ptr) is known to implement the `Fn<A>`
|
||||
/// family of traits where `A` is given by the signature of the type.
|
||||
fn consider_builtin_fn_trait_candidates(
|
||||
ecx: &mut EvalCtxt<'_, 'tcx>,
|
||||
goal: Goal<'tcx, Self>,
|
||||
kind: ty::ClosureKind,
|
||||
) -> QueryResult<'tcx>;
|
||||
|
||||
// `Tuple` is implemented if the `Self` type is a tuple.
|
||||
/// `Tuple` is implemented if the `Self` type is a tuple.
|
||||
fn consider_builtin_tuple_candidate(
|
||||
ecx: &mut EvalCtxt<'_, 'tcx>,
|
||||
goal: Goal<'tcx, Self>,
|
||||
) -> QueryResult<'tcx>;
|
||||
|
||||
// `Pointee` is always implemented.
|
||||
//
|
||||
// See the projection implementation for the `Metadata` types for all of
|
||||
// the built-in types. For structs, the metadata type is given by the struct
|
||||
// tail.
|
||||
/// `Pointee` is always implemented.
|
||||
///
|
||||
/// See the projection implementation for the `Metadata` types for all of
|
||||
/// the built-in types. For structs, the metadata type is given by the struct
|
||||
/// tail.
|
||||
fn consider_builtin_pointee_candidate(
|
||||
ecx: &mut EvalCtxt<'_, 'tcx>,
|
||||
goal: Goal<'tcx, Self>,
|
||||
) -> QueryResult<'tcx>;
|
||||
|
||||
// A generator (that comes from an `async` desugaring) is known to implement
|
||||
// `Future<Output = O>`, where `O` is given by the generator's return type
|
||||
// that was computed during type-checking.
|
||||
/// A generator (that comes from an `async` desugaring) is known to implement
|
||||
/// `Future<Output = O>`, where `O` is given by the generator's return type
|
||||
/// that was computed during type-checking.
|
||||
fn consider_builtin_future_candidate(
|
||||
ecx: &mut EvalCtxt<'_, 'tcx>,
|
||||
goal: Goal<'tcx, Self>,
|
||||
) -> QueryResult<'tcx>;
|
||||
|
||||
// A generator (that doesn't come from an `async` desugaring) is known to
|
||||
// implement `Generator<R, Yield = Y, Return = O>`, given the resume, yield,
|
||||
// and return types of the generator computed during type-checking.
|
||||
/// A generator (that doesn't come from an `async` desugaring) is known to
|
||||
/// implement `Generator<R, Yield = Y, Return = O>`, given the resume, yield,
|
||||
/// and return types of the generator computed during type-checking.
|
||||
fn consider_builtin_generator_candidate(
|
||||
ecx: &mut EvalCtxt<'_, 'tcx>,
|
||||
goal: Goal<'tcx, Self>,
|
||||
) -> QueryResult<'tcx>;
|
||||
|
||||
// The most common forms of unsizing are array to slice, and concrete (Sized)
|
||||
// type into a `dyn Trait`. ADTs and Tuples can also have their final field
|
||||
// unsized if it's generic.
|
||||
/// The most common forms of unsizing are array to slice, and concrete (Sized)
|
||||
/// type into a `dyn Trait`. ADTs and Tuples can also have their final field
|
||||
/// unsized if it's generic.
|
||||
fn consider_builtin_unsize_candidate(
|
||||
ecx: &mut EvalCtxt<'_, 'tcx>,
|
||||
goal: Goal<'tcx, Self>,
|
||||
) -> QueryResult<'tcx>;
|
||||
|
||||
// `dyn Trait1` can be unsized to `dyn Trait2` if they are the same trait, or
|
||||
// if `Trait2` is a (transitive) supertrait of `Trait1`.
|
||||
/// `dyn Trait1` can be unsized to `dyn Trait2` if they are the same trait, or
|
||||
/// if `Trait2` is a (transitive) supertrait of `Trait1`.
|
||||
fn consider_builtin_dyn_upcast_candidates(
|
||||
ecx: &mut EvalCtxt<'_, 'tcx>,
|
||||
goal: Goal<'tcx, Self>,
|
||||
@ -299,35 +318,66 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
|
||||
goal: Goal<'tcx, G>,
|
||||
) -> Vec<Candidate<'tcx>> {
|
||||
debug_assert_eq!(goal, self.resolve_vars_if_possible(goal));
|
||||
if let Some(ambig) = self.assemble_self_ty_infer_ambiguity_response(goal) {
|
||||
return ambig;
|
||||
}
|
||||
|
||||
// HACK: `_: Trait` is ambiguous, because it may be satisfied via a builtin rule,
|
||||
// object bound, alias bound, etc. We are unable to determine this until we can at
|
||||
// least structurally resolve the type one layer.
|
||||
if goal.predicate.self_ty().is_ty_var() {
|
||||
return vec![Candidate {
|
||||
let mut candidates = self.assemble_candidates_via_self_ty(goal);
|
||||
|
||||
self.assemble_blanket_impl_candidates(goal, &mut candidates);
|
||||
|
||||
self.assemble_param_env_candidates(goal, &mut candidates);
|
||||
|
||||
candidates
|
||||
}
|
||||
|
||||
/// `?0: Trait` is ambiguous, because it may be satisfied via a builtin rule,
|
||||
/// object bound, alias bound, etc. We are unable to determine this until we can at
|
||||
/// least structurally resolve the type one layer.
|
||||
///
|
||||
/// It would also require us to consider all impls of the trait, which is both pretty
|
||||
/// bad for perf and would also constrain the self type if there is just a single impl.
|
||||
fn assemble_self_ty_infer_ambiguity_response<G: GoalKind<'tcx>>(
|
||||
&mut self,
|
||||
goal: Goal<'tcx, G>,
|
||||
) -> Option<Vec<Candidate<'tcx>>> {
|
||||
goal.predicate.self_ty().is_ty_var().then(|| {
|
||||
vec![Candidate {
|
||||
source: CandidateSource::BuiltinImpl(BuiltinImplSource::Ambiguity),
|
||||
result: self
|
||||
.evaluate_added_goals_and_make_canonical_response(Certainty::AMBIGUOUS)
|
||||
.unwrap(),
|
||||
}];
|
||||
}]
|
||||
})
|
||||
}
|
||||
|
||||
/// Assemble candidates which apply to the self type. This only looks at candidate which
|
||||
/// apply to the specific self type and ignores all others.
|
||||
///
|
||||
/// Returns `None` if the self type is still ambiguous.
|
||||
fn assemble_candidates_via_self_ty<G: GoalKind<'tcx>>(
|
||||
&mut self,
|
||||
goal: Goal<'tcx, G>,
|
||||
) -> Vec<Candidate<'tcx>> {
|
||||
debug_assert_eq!(goal, self.resolve_vars_if_possible(goal));
|
||||
if let Some(ambig) = self.assemble_self_ty_infer_ambiguity_response(goal) {
|
||||
return ambig;
|
||||
}
|
||||
|
||||
let mut candidates = Vec::new();
|
||||
|
||||
self.assemble_candidates_after_normalizing_self_ty(goal, &mut candidates);
|
||||
|
||||
self.assemble_impl_candidates(goal, &mut candidates);
|
||||
self.assemble_non_blanket_impl_candidates(goal, &mut candidates);
|
||||
|
||||
self.assemble_builtin_impl_candidates(goal, &mut candidates);
|
||||
|
||||
self.assemble_param_env_candidates(goal, &mut candidates);
|
||||
|
||||
self.assemble_alias_bound_candidates(goal, &mut candidates);
|
||||
|
||||
self.assemble_object_bound_candidates(goal, &mut candidates);
|
||||
|
||||
self.assemble_coherence_unknowable_candidates(goal, &mut candidates);
|
||||
|
||||
self.assemble_candidates_after_normalizing_self_ty(goal, &mut candidates);
|
||||
|
||||
candidates
|
||||
}
|
||||
|
||||
@ -385,7 +435,7 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
|
||||
// have a `Normalized` candidate. This doesn't work as long as we
|
||||
// use `CandidateSource` in winnowing.
|
||||
let goal = goal.with(tcx, goal.predicate.with_self_ty(tcx, normalized_ty));
|
||||
Ok(ecx.assemble_and_evaluate_candidates(goal))
|
||||
Ok(ecx.assemble_candidates_via_self_ty(goal))
|
||||
},
|
||||
)
|
||||
});
|
||||
@ -396,22 +446,125 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip_all)]
|
||||
fn assemble_impl_candidates<G: GoalKind<'tcx>>(
|
||||
fn assemble_non_blanket_impl_candidates<G: GoalKind<'tcx>>(
|
||||
&mut self,
|
||||
goal: Goal<'tcx, G>,
|
||||
candidates: &mut Vec<Candidate<'tcx>>,
|
||||
) {
|
||||
let tcx = self.tcx();
|
||||
tcx.for_each_relevant_impl_treating_projections(
|
||||
goal.predicate.trait_def_id(tcx),
|
||||
goal.predicate.self_ty(),
|
||||
TreatProjections::NextSolverLookup,
|
||||
|impl_def_id| match G::consider_impl_candidate(self, goal, impl_def_id) {
|
||||
let self_ty = goal.predicate.self_ty();
|
||||
let trait_impls = tcx.trait_impls_of(goal.predicate.trait_def_id(tcx));
|
||||
let mut consider_impls_for_simplified_type = |simp| {
|
||||
if let Some(impls_for_type) = trait_impls.non_blanket_impls().get(&simp) {
|
||||
for &impl_def_id in impls_for_type {
|
||||
match G::consider_impl_candidate(self, goal, impl_def_id) {
|
||||
Ok(result) => candidates
|
||||
.push(Candidate { source: CandidateSource::Impl(impl_def_id), result }),
|
||||
Err(NoSolution) => (),
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
match self_ty.kind() {
|
||||
ty::Bool
|
||||
| ty::Char
|
||||
| ty::Int(_)
|
||||
| ty::Uint(_)
|
||||
| ty::Float(_)
|
||||
| ty::Adt(_, _)
|
||||
| ty::Foreign(_)
|
||||
| ty::Str
|
||||
| ty::Array(_, _)
|
||||
| ty::Slice(_)
|
||||
| ty::RawPtr(_)
|
||||
| ty::Ref(_, _, _)
|
||||
| ty::FnDef(_, _)
|
||||
| ty::FnPtr(_)
|
||||
| ty::Dynamic(_, _, _)
|
||||
| ty::Closure(_, _)
|
||||
| ty::Generator(_, _, _)
|
||||
| ty::Never
|
||||
| ty::Tuple(_) => {
|
||||
let simp =
|
||||
fast_reject::simplify_type(tcx, self_ty, TreatParams::ForLookup).unwrap();
|
||||
consider_impls_for_simplified_type(simp);
|
||||
}
|
||||
|
||||
// HACK: For integer and float variables we have to manually look at all impls
|
||||
// which have some integer or float as a self type.
|
||||
ty::Infer(ty::IntVar(_)) => {
|
||||
use ty::IntTy::*;
|
||||
use ty::UintTy::*;
|
||||
// This causes a compiler error if any new integer kinds are added.
|
||||
let (I8 | I16 | I32 | I64 | I128 | Isize): ty::IntTy;
|
||||
let (U8 | U16 | U32 | U64 | U128 | Usize): ty::UintTy;
|
||||
let possible_integers = [
|
||||
// signed integers
|
||||
SimplifiedType::Int(I8),
|
||||
SimplifiedType::Int(I16),
|
||||
SimplifiedType::Int(I32),
|
||||
SimplifiedType::Int(I64),
|
||||
SimplifiedType::Int(I128),
|
||||
SimplifiedType::Int(Isize),
|
||||
// unsigned integers
|
||||
SimplifiedType::Uint(U8),
|
||||
SimplifiedType::Uint(U16),
|
||||
SimplifiedType::Uint(U32),
|
||||
SimplifiedType::Uint(U64),
|
||||
SimplifiedType::Uint(U128),
|
||||
SimplifiedType::Uint(Usize),
|
||||
];
|
||||
for simp in possible_integers {
|
||||
consider_impls_for_simplified_type(simp);
|
||||
}
|
||||
}
|
||||
|
||||
ty::Infer(ty::FloatVar(_)) => {
|
||||
// This causes a compiler error if any new float kinds are added.
|
||||
let (ty::FloatTy::F32 | ty::FloatTy::F64);
|
||||
let possible_floats = [
|
||||
SimplifiedType::Float(ty::FloatTy::F32),
|
||||
SimplifiedType::Float(ty::FloatTy::F64),
|
||||
];
|
||||
|
||||
for simp in possible_floats {
|
||||
consider_impls_for_simplified_type(simp);
|
||||
}
|
||||
}
|
||||
|
||||
// The only traits applying to aliases and placeholders are blanket impls.
|
||||
//
|
||||
// Impls which apply to an alias after normalization are handled by
|
||||
// `assemble_candidates_after_normalizing_self_ty`.
|
||||
ty::Alias(_, _) | ty::Placeholder(..) | ty::Error(_) => (),
|
||||
|
||||
// FIXME: These should ideally not exist as a self type. It would be nice if
|
||||
// the builtin auto trait impls of generators directly recursed
|
||||
// into the witness instead.
|
||||
ty::GeneratorWitness(_) | ty::GeneratorWitnessMIR(_, _) => (),
|
||||
|
||||
// These variants should not exist as a self type.
|
||||
ty::Infer(ty::TyVar(_) | ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_))
|
||||
| ty::Param(_)
|
||||
| ty::Bound(_, _) => bug!("unexpected self type: {self_ty}"),
|
||||
}
|
||||
}
|
||||
|
||||
fn assemble_blanket_impl_candidates<G: GoalKind<'tcx>>(
|
||||
&mut self,
|
||||
goal: Goal<'tcx, G>,
|
||||
candidates: &mut Vec<Candidate<'tcx>>,
|
||||
) {
|
||||
let tcx = self.tcx();
|
||||
let trait_impls = tcx.trait_impls_of(goal.predicate.trait_def_id(tcx));
|
||||
for &impl_def_id in trait_impls.blanket_impls() {
|
||||
match G::consider_impl_candidate(self, goal, impl_def_id) {
|
||||
Ok(result) => candidates
|
||||
.push(Candidate { source: CandidateSource::Impl(impl_def_id), result }),
|
||||
Err(NoSolution) => (),
|
||||
},
|
||||
);
|
||||
}
|
||||
}
|
||||
}
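A minimal standalone sketch of the lookup split introduced above, assuming non-blanket impls are indexed by a simplified self type while blanket impls must always be scanned; `SimpleTy`, `ImplIndex`, and `candidate_impls` are illustrative names, not compiler APIs.

use std::collections::HashMap;

// Toy stand-in for `SimplifiedType`.
#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
enum SimpleTy { Bool, Uint, Adt(u32) }

// Hypothetical index: impls keyed by their simplified self type, plus the
// blanket impls that can apply to any self type.
struct ImplIndex {
    non_blanket: HashMap<SimpleTy, Vec<u32>>, // impl ids
    blanket: Vec<u32>,
}

// Mirrors the split above: only impls registered for the goal's simplified
// self type are considered, and blanket impls are appended separately.
fn candidate_impls(index: &ImplIndex, self_ty: SimpleTy) -> Vec<u32> {
    let mut out = Vec::new();
    if let Some(ids) = index.non_blanket.get(&self_ty) {
        out.extend_from_slice(ids);
    }
    out.extend_from_slice(&index.blanket);
    out
}

fn main() {
    let mut non_blanket = HashMap::new();
    non_blanket.insert(SimpleTy::Bool, vec![1]);
    non_blanket.insert(SimpleTy::Adt(0), vec![2, 3]);
    let index = ImplIndex { non_blanket, blanket: vec![9] };
    assert_eq!(candidate_impls(&index, SimpleTy::Adt(0)), vec![2, 3, 9]);
    assert_eq!(candidate_impls(&index, SimpleTy::Uint), vec![9]);
}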
|
||||
|
||||
#[instrument(level = "debug", skip_all)]
|
||||
@ -420,8 +573,9 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
|
||||
goal: Goal<'tcx, G>,
|
||||
candidates: &mut Vec<Candidate<'tcx>>,
|
||||
) {
|
||||
let lang_items = self.tcx().lang_items();
|
||||
let trait_def_id = goal.predicate.trait_def_id(self.tcx());
|
||||
let tcx = self.tcx();
|
||||
let lang_items = tcx.lang_items();
|
||||
let trait_def_id = goal.predicate.trait_def_id(tcx);
|
||||
|
||||
// N.B. When assembling built-in candidates for lang items that are also
|
||||
// `auto` traits, then the auto trait candidate that is assembled in
|
||||
@ -430,9 +584,11 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
|
||||
// Instead of adding the logic here, it's a better idea to add it in
|
||||
// `EvalCtxt::disqualify_auto_trait_candidate_due_to_possible_impl` in
|
||||
// `solve::trait_goals` instead.
|
||||
let result = if self.tcx().trait_is_auto(trait_def_id) {
|
||||
let result = if let Err(guar) = goal.predicate.error_reported() {
|
||||
G::consider_error_guaranteed_candidate(self, guar)
|
||||
} else if tcx.trait_is_auto(trait_def_id) {
|
||||
G::consider_auto_trait_candidate(self, goal)
|
||||
} else if self.tcx().trait_is_alias(trait_def_id) {
|
||||
} else if tcx.trait_is_alias(trait_def_id) {
|
||||
G::consider_trait_alias_candidate(self, goal)
|
||||
} else if lang_items.sized_trait() == Some(trait_def_id) {
|
||||
G::consider_builtin_sized_candidate(self, goal)
|
||||
|
@ -2,7 +2,6 @@ use crate::traits::specialization_graph;
|
||||
|
||||
use super::assembly::{self, structural_traits};
|
||||
use super::EvalCtxt;
|
||||
use rustc_errors::ErrorGuaranteed;
|
||||
use rustc_hir::def::DefKind;
|
||||
use rustc_hir::def_id::DefId;
|
||||
use rustc_hir::LangItem;
|
||||
@ -15,7 +14,7 @@ use rustc_middle::ty::fast_reject::{DeepRejectCtxt, TreatParams};
|
||||
use rustc_middle::ty::ProjectionPredicate;
|
||||
use rustc_middle::ty::{self, Ty, TyCtxt};
|
||||
use rustc_middle::ty::{ToPredicate, TypeVisitableExt};
|
||||
use rustc_span::{sym, DUMMY_SP};
|
||||
use rustc_span::{sym, ErrorGuaranteed, DUMMY_SP};
|
||||
|
||||
impl<'tcx> EvalCtxt<'_, 'tcx> {
|
||||
#[instrument(level = "debug", skip(self), ret)]
|
||||
@ -246,6 +245,15 @@ impl<'tcx> assembly::GoalKind<'tcx> for ProjectionPredicate<'tcx> {
|
||||
})
|
||||
}
|
||||
|
||||
/// Fail to normalize if the predicate contains an error. Alternatively, we could normalize to `ty::Error`
|
||||
/// and succeed; we can experiment with this to figure out which approach results in better error messages.
|
||||
fn consider_error_guaranteed_candidate(
|
||||
_ecx: &mut EvalCtxt<'_, 'tcx>,
|
||||
_guar: ErrorGuaranteed,
|
||||
) -> QueryResult<'tcx> {
|
||||
Err(NoSolution)
|
||||
}
|
||||
|
||||
fn consider_auto_trait_candidate(
|
||||
ecx: &mut EvalCtxt<'_, 'tcx>,
|
||||
goal: Goal<'tcx, Self>,
|
||||
|
@ -9,7 +9,9 @@ use cache::ProvisionalCache;
|
||||
use overflow::OverflowData;
|
||||
use rustc_index::IndexVec;
|
||||
use rustc_middle::dep_graph::DepKind;
|
||||
use rustc_middle::traits::solve::{CanonicalInput, Certainty, MaybeCause, QueryResult};
|
||||
use rustc_middle::traits::solve::{
|
||||
CanonicalInput, Certainty, EvaluationCache, MaybeCause, QueryResult,
|
||||
};
|
||||
use rustc_middle::ty::TyCtxt;
|
||||
use std::{collections::hash_map::Entry, mem};
|
||||
|
||||
@ -58,10 +60,10 @@ impl<'tcx> SearchGraph<'tcx> {
|
||||
///
|
||||
/// We could add another global cache for coherence instead,
|
||||
/// but that's effort so let's only do it if necessary.
|
||||
pub(super) fn should_use_global_cache(&self) -> bool {
|
||||
pub(super) fn global_cache(&self, tcx: TyCtxt<'tcx>) -> &'tcx EvaluationCache<'tcx> {
|
||||
match self.mode {
|
||||
SolverMode::Normal => true,
|
||||
SolverMode::Coherence => false,
|
||||
SolverMode::Normal => &tcx.new_solver_evaluation_cache,
|
||||
SolverMode::Coherence => &tcx.new_solver_coherence_evaluation_cache,
|
||||
}
|
||||
}
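The hunk above swaps a boolean "should we cache" check for a per-mode cache lookup. A rough standalone sketch of that shape, with a toy map standing in for `EvaluationCache<'tcx>`:

use std::collections::HashMap;

enum SolverMode { Normal, Coherence }

// Toy caches; in the compiler these live on `TyCtxt` as `EvaluationCache`s.
struct Caches {
    normal: HashMap<u64, bool>,
    coherence: HashMap<u64, bool>,
}

impl Caches {
    // Select the cache by mode instead of deciding whether to cache at all.
    fn for_mode(&mut self, mode: &SolverMode) -> &mut HashMap<u64, bool> {
        match mode {
            SolverMode::Normal => &mut self.normal,
            SolverMode::Coherence => &mut self.coherence,
        }
    }
}

fn main() {
    let mut caches = Caches { normal: HashMap::new(), coherence: HashMap::new() };
    caches.for_mode(&SolverMode::Coherence).insert(42, true);
    assert!(caches.normal.is_empty());
    assert_eq!(caches.coherence.get(&42), Some(&true));
}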
|
||||
|
||||
@ -213,8 +215,8 @@ impl<'tcx> SearchGraph<'tcx> {
|
||||
inspect: &mut ProofTreeBuilder<'tcx>,
|
||||
mut loop_body: impl FnMut(&mut Self, &mut ProofTreeBuilder<'tcx>) -> QueryResult<'tcx>,
|
||||
) -> QueryResult<'tcx> {
|
||||
if self.should_use_global_cache() && inspect.use_global_cache() {
|
||||
if let Some(result) = tcx.new_solver_evaluation_cache.get(&canonical_input, tcx) {
|
||||
if inspect.use_global_cache() {
|
||||
if let Some(result) = self.global_cache(tcx).get(&canonical_input, tcx) {
|
||||
debug!(?canonical_input, ?result, "cache hit");
|
||||
inspect.cache_hit(CacheHit::Global);
|
||||
return result;
|
||||
@ -278,13 +280,10 @@ impl<'tcx> SearchGraph<'tcx> {
|
||||
// dependencies, our non-root goal may no longer appear as child of the root goal.
|
||||
//
|
||||
// See https://github.com/rust-lang/rust/pull/108071 for some additional context.
|
||||
let can_cache = !self.overflow_data.did_overflow() || self.stack.is_empty();
|
||||
if self.should_use_global_cache() && can_cache {
|
||||
tcx.new_solver_evaluation_cache.insert(
|
||||
current_goal.input,
|
||||
dep_node,
|
||||
current_goal.response,
|
||||
);
|
||||
let can_cache = inspect.use_global_cache()
|
||||
&& (!self.overflow_data.did_overflow() || self.stack.is_empty());
|
||||
if can_cache {
|
||||
self.global_cache(tcx).insert(current_goal.input, dep_node, current_goal.response)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -11,7 +11,7 @@ use rustc_middle::traits::Reveal;
|
||||
use rustc_middle::ty::fast_reject::{DeepRejectCtxt, TreatParams, TreatProjections};
|
||||
use rustc_middle::ty::{self, ToPredicate, Ty, TyCtxt};
|
||||
use rustc_middle::ty::{TraitPredicate, TypeVisitableExt};
|
||||
use rustc_span::DUMMY_SP;
|
||||
use rustc_span::{ErrorGuaranteed, DUMMY_SP};
|
||||
|
||||
impl<'tcx> assembly::GoalKind<'tcx> for TraitPredicate<'tcx> {
|
||||
fn self_ty(self) -> Ty<'tcx> {
|
||||
@ -78,6 +78,13 @@ impl<'tcx> assembly::GoalKind<'tcx> for TraitPredicate<'tcx> {
|
||||
})
|
||||
}
|
||||
|
||||
fn consider_error_guaranteed_candidate(
|
||||
ecx: &mut EvalCtxt<'_, 'tcx>,
|
||||
_guar: ErrorGuaranteed,
|
||||
) -> QueryResult<'tcx> {
|
||||
ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
|
||||
}
|
||||
|
||||
fn probe_and_match_goal_against_assumption(
|
||||
ecx: &mut EvalCtxt<'_, 'tcx>,
|
||||
goal: Goal<'tcx, Self>,
|
||||
@ -216,9 +223,20 @@ impl<'tcx> assembly::GoalKind<'tcx> for TraitPredicate<'tcx> {
|
||||
return ecx.evaluate_added_goals_and_make_canonical_response(Certainty::AMBIGUOUS);
|
||||
}
|
||||
|
||||
if let Ok(layout) = tcx.layout_of(key)
|
||||
&& layout.layout.is_pointer_like(&tcx.data_layout)
|
||||
{
|
||||
// First, try computing an exact naive layout in case the type is generic.
|
||||
let is_pointer_like = if let Ok(layout) = tcx.naive_layout_of(key) {
|
||||
layout.is_pointer_like(&tcx.data_layout).unwrap_or_else(|| {
|
||||
// Second, we fall back to full layout computation.
|
||||
tcx.layout_of(key)
|
||||
.ok()
|
||||
.filter(|l| l.layout.is_pointer_like(&tcx.data_layout))
|
||||
.is_some()
|
||||
})
|
||||
} else {
|
||||
false
|
||||
};
|
||||
|
||||
if is_pointer_like {
|
||||
// FIXME: We could make this faster by making a no-constraints response
|
||||
ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
|
||||
} else {
|
||||
@ -686,7 +704,7 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
|
||||
| ty::Tuple(_)
|
||||
| ty::Adt(_, _)
|
||||
// FIXME: Handling opaques here is kinda sus. Especially because we
|
||||
// simplify them to PlaceholderSimplifiedType.
|
||||
// simplify them to SimplifiedType::Placeholder.
|
||||
| ty::Alias(ty::Opaque, _) => {
|
||||
let mut disqualifying_impl = None;
|
||||
self.tcx().for_each_relevant_impl_treating_projections(
|
||||
|
@ -979,9 +979,20 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
|
||||
return;
|
||||
}
|
||||
|
||||
if let Ok(layout) = tcx.layout_of(key)
|
||||
&& layout.layout.is_pointer_like(&tcx.data_layout)
|
||||
{
|
||||
// First, try computing an exact naive layout in case the type is generic.
|
||||
let is_pointer_like = if let Ok(layout) = tcx.naive_layout_of(key) {
|
||||
layout.is_pointer_like(&tcx.data_layout).unwrap_or_else(|| {
|
||||
// Second, we fall back to full layout computation.
|
||||
tcx.layout_of(key)
|
||||
.ok()
|
||||
.filter(|l| l.layout.is_pointer_like(&tcx.data_layout))
|
||||
.is_some()
|
||||
})
|
||||
} else {
|
||||
false
|
||||
};
|
||||
|
||||
if is_pointer_like {
|
||||
candidates.vec.push(BuiltinCandidate { has_nested: false });
|
||||
}
|
||||
}
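Both occurrences of this pattern (here and in the solver candidate above) share the same two-stage shape: a cheap naive-layout check that may already be conclusive, with the full `layout_of` query as a fallback. A standalone sketch with hypothetical closure parameters:

// `cheap` returns Some(answer) when the naive layout already decides the
// question, or None when the full layout computation is needed.
fn is_pointer_like_with_fallback(
    cheap: impl FnOnce() -> Option<bool>,
    full: impl FnOnce() -> bool,
) -> bool {
    cheap().unwrap_or_else(full)
}

fn main() {
    // The expensive path is only taken when the cheap check is inconclusive.
    assert!(is_pointer_like_with_fallback(|| Some(true), || false));
    assert!(is_pointer_like_with_fallback(|| None, || true));
}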
|
||||
|
@ -24,8 +24,18 @@ pub enum VtblSegment<'tcx> {
|
||||
pub fn prepare_vtable_segments<'tcx, T>(
|
||||
tcx: TyCtxt<'tcx>,
|
||||
trait_ref: ty::PolyTraitRef<'tcx>,
|
||||
mut segment_visitor: impl FnMut(VtblSegment<'tcx>) -> ControlFlow<T>,
|
||||
segment_visitor: impl FnMut(VtblSegment<'tcx>) -> ControlFlow<T>,
|
||||
) -> Option<T> {
|
||||
prepare_vtable_segments_inner(tcx, trait_ref, segment_visitor).break_value()
|
||||
}
|
||||
|
||||
/// Helper for [`prepare_vtable_segments`] that returns `ControlFlow`,
|
||||
/// such that we can use `?` in the body.
|
||||
fn prepare_vtable_segments_inner<'tcx, T>(
|
||||
tcx: TyCtxt<'tcx>,
|
||||
trait_ref: ty::PolyTraitRef<'tcx>,
|
||||
mut segment_visitor: impl FnMut(VtblSegment<'tcx>) -> ControlFlow<T>,
|
||||
) -> ControlFlow<T> {
|
||||
// The following constraints hold for the final arrangement.
|
||||
// 1. The whole virtual table of the first direct super trait is included as the
|
||||
// prefix. If this trait doesn't have any super traits, then this step
|
||||
@ -71,20 +81,18 @@ pub fn prepare_vtable_segments<'tcx, T>(
|
||||
// N, N-vptr, O
|
||||
|
||||
// emit dsa segment first.
|
||||
if let ControlFlow::Break(v) = (segment_visitor)(VtblSegment::MetadataDSA) {
|
||||
return Some(v);
|
||||
}
|
||||
segment_visitor(VtblSegment::MetadataDSA)?;
|
||||
|
||||
let mut emit_vptr_on_new_entry = false;
|
||||
let mut visited = PredicateSet::new(tcx);
|
||||
let predicate = trait_ref.without_const().to_predicate(tcx);
|
||||
let mut stack: SmallVec<[(ty::PolyTraitRef<'tcx>, _, _); 5]> =
|
||||
smallvec![(trait_ref, emit_vptr_on_new_entry, None)];
|
||||
smallvec![(trait_ref, emit_vptr_on_new_entry, maybe_iter(None))];
|
||||
visited.insert(predicate);
|
||||
|
||||
// the main traversal loop:
|
||||
// basically we want to cut the inheritance directed graph into a few non-overlapping slices of nodes
|
||||
// that each node is emitted after all its descendents have been emitted.
|
||||
// such that each node is emitted after all its descendants have been emitted.
|
||||
// so we convert the directed graph into a tree by skipping all previously visited nodes using a visited set.
|
||||
// this is done on the fly.
|
||||
// Each loop run emits a slice - it starts by finding a "childless" unvisited node, backtracking upwards, and it
|
||||
@ -105,80 +113,81 @@ pub fn prepare_vtable_segments<'tcx, T>(
|
||||
// Loop run #1: Emitting the slice [D C] (in reverse order). No one has a next-sibling node.
|
||||
// Loop run #1: Stack after exiting out is []. Now the function exits.
|
||||
|
||||
loop {
|
||||
'outer: loop {
|
||||
// dive deeper into the stack, recording the path
|
||||
'diving_in: loop {
|
||||
if let Some((inner_most_trait_ref, _, _)) = stack.last() {
|
||||
let inner_most_trait_ref = *inner_most_trait_ref;
|
||||
let mut direct_super_traits_iter = tcx
|
||||
.super_predicates_of(inner_most_trait_ref.def_id())
|
||||
.predicates
|
||||
.into_iter()
|
||||
.filter_map(move |(pred, _)| {
|
||||
pred.subst_supertrait(tcx, &inner_most_trait_ref).as_trait_clause()
|
||||
});
|
||||
let &(inner_most_trait_ref, _, _) = stack.last().unwrap();
|
||||
|
||||
'diving_in_skip_visited_traits: loop {
|
||||
if let Some(next_super_trait) = direct_super_traits_iter.next() {
|
||||
if visited.insert(next_super_trait.to_predicate(tcx)) {
|
||||
// We're throwing away potential constness of super traits here.
|
||||
// FIXME: handle ~const super traits
|
||||
let next_super_trait = next_super_trait.map_bound(|t| t.trait_ref);
|
||||
stack.push((
|
||||
next_super_trait,
|
||||
emit_vptr_on_new_entry,
|
||||
Some(direct_super_traits_iter),
|
||||
));
|
||||
break 'diving_in_skip_visited_traits;
|
||||
} else {
|
||||
continue 'diving_in_skip_visited_traits;
|
||||
}
|
||||
} else {
|
||||
break 'diving_in;
|
||||
}
|
||||
let mut direct_super_traits_iter = tcx
|
||||
.super_predicates_of(inner_most_trait_ref.def_id())
|
||||
.predicates
|
||||
.into_iter()
|
||||
.filter_map(move |(pred, _)| {
|
||||
pred.subst_supertrait(tcx, &inner_most_trait_ref).as_trait_clause()
|
||||
});
|
||||
|
||||
// Find an unvisited supertrait
|
||||
match direct_super_traits_iter
|
||||
.find(|&super_trait| visited.insert(super_trait.to_predicate(tcx)))
|
||||
{
|
||||
// Push it to the stack for the next iteration of 'diving_in to pick up
|
||||
Some(unvisited_super_trait) => {
|
||||
// We're throwing away potential constness of super traits here.
|
||||
// FIXME: handle ~const super traits
|
||||
let next_super_trait = unvisited_super_trait.map_bound(|t| t.trait_ref);
|
||||
stack.push((
|
||||
next_super_trait,
|
||||
emit_vptr_on_new_entry,
|
||||
maybe_iter(Some(direct_super_traits_iter)),
|
||||
))
|
||||
}
|
||||
|
||||
// There are no more unvisited direct super traits, dive-in finished
|
||||
None => break 'diving_in,
|
||||
}
|
||||
}
|
||||
|
||||
// Other than the left-most path, vptr should be emitted for each trait.
|
||||
emit_vptr_on_new_entry = true;
|
||||
|
||||
// emit innermost item, move to next sibling and stop there if possible, otherwise jump to outer level.
|
||||
'exiting_out: loop {
|
||||
if let Some((inner_most_trait_ref, emit_vptr, siblings_opt)) = stack.last_mut() {
|
||||
if let ControlFlow::Break(v) = (segment_visitor)(VtblSegment::TraitOwnEntries {
|
||||
trait_ref: *inner_most_trait_ref,
|
||||
emit_vptr: *emit_vptr,
|
||||
}) {
|
||||
return Some(v);
|
||||
}
|
||||
while let Some((inner_most_trait_ref, emit_vptr, mut siblings)) = stack.pop() {
|
||||
segment_visitor(VtblSegment::TraitOwnEntries {
|
||||
trait_ref: inner_most_trait_ref,
|
||||
emit_vptr,
|
||||
})?;
|
||||
|
||||
'exiting_out_skip_visited_traits: loop {
|
||||
if let Some(siblings) = siblings_opt {
|
||||
if let Some(next_inner_most_trait_ref) = siblings.next() {
|
||||
if visited.insert(next_inner_most_trait_ref.to_predicate(tcx)) {
|
||||
// We're throwing away potential constness of super traits here.
|
||||
// FIXME: handle ~const super traits
|
||||
let next_inner_most_trait_ref =
|
||||
next_inner_most_trait_ref.map_bound(|t| t.trait_ref);
|
||||
*inner_most_trait_ref = next_inner_most_trait_ref;
|
||||
*emit_vptr = emit_vptr_on_new_entry;
|
||||
break 'exiting_out;
|
||||
} else {
|
||||
continue 'exiting_out_skip_visited_traits;
|
||||
}
|
||||
}
|
||||
}
|
||||
stack.pop();
|
||||
continue 'exiting_out;
|
||||
}
|
||||
// If we've emitted (fed to `segment_visitor`) a trait that has methods present in the vtable,
|
||||
// we'll need to emit vptrs from now on.
|
||||
if !emit_vptr_on_new_entry
|
||||
&& has_own_existential_vtable_entries(tcx, inner_most_trait_ref.def_id())
|
||||
{
|
||||
emit_vptr_on_new_entry = true;
|
||||
}
|
||||
|
||||
if let Some(next_inner_most_trait_ref) =
|
||||
siblings.find(|&sibling| visited.insert(sibling.to_predicate(tcx)))
|
||||
{
|
||||
// We're throwing away potential constness of super traits here.
|
||||
// FIXME: handle ~const super traits
|
||||
let next_inner_most_trait_ref =
|
||||
next_inner_most_trait_ref.map_bound(|t| t.trait_ref);
|
||||
|
||||
stack.push((next_inner_most_trait_ref, emit_vptr_on_new_entry, siblings));
|
||||
|
||||
// just pushed a new trait onto the stack, so we need to go through its super traits
|
||||
continue 'outer;
|
||||
}
|
||||
// all done
|
||||
return None;
|
||||
}
|
||||
|
||||
// the stack is empty, all done
|
||||
return ControlFlow::Continue(());
|
||||
}
|
||||
}
|
||||
|
||||
/// Turns option of iterator into an iterator (this is just flatten)
|
||||
fn maybe_iter<I: Iterator>(i: Option<I>) -> impl Iterator<Item = I::Item> {
|
||||
// Flatten is bad perf-wise; we could probably implement a special case here that is better
|
||||
i.into_iter().flatten()
|
||||
}
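A quick standalone usage note for `maybe_iter`: it behaves like `Option::into_iter().flatten()`, yielding nothing for `None` and the inner iterator's items otherwise.

fn maybe_iter<I: Iterator>(i: Option<I>) -> impl Iterator<Item = I::Item> {
    i.into_iter().flatten()
}

fn main() {
    let none: Option<std::ops::Range<i32>> = None;
    assert_eq!(maybe_iter(none).count(), 0);
    assert_eq!(maybe_iter(Some(0..3)).collect::<Vec<_>>(), vec![0, 1, 2]);
}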
|
||||
|
||||
fn dump_vtable_entries<'tcx>(
|
||||
tcx: TyCtxt<'tcx>,
|
||||
sp: Span,
|
||||
@ -192,11 +201,23 @@ fn dump_vtable_entries<'tcx>(
|
||||
});
|
||||
}
|
||||
|
||||
fn has_own_existential_vtable_entries(tcx: TyCtxt<'_>, trait_def_id: DefId) -> bool {
|
||||
own_existential_vtable_entries_iter(tcx, trait_def_id).next().is_some()
|
||||
}
|
||||
|
||||
fn own_existential_vtable_entries(tcx: TyCtxt<'_>, trait_def_id: DefId) -> &[DefId] {
|
||||
tcx.arena.alloc_from_iter(own_existential_vtable_entries_iter(tcx, trait_def_id))
|
||||
}
|
||||
|
||||
fn own_existential_vtable_entries_iter(
|
||||
tcx: TyCtxt<'_>,
|
||||
trait_def_id: DefId,
|
||||
) -> impl Iterator<Item = DefId> + '_ {
|
||||
let trait_methods = tcx
|
||||
.associated_items(trait_def_id)
|
||||
.in_definition_order()
|
||||
.filter(|item| item.kind == ty::AssocKind::Fn);
|
||||
|
||||
// Now list each method's DefId (within its trait).
|
||||
let own_entries = trait_methods.filter_map(move |&trait_method| {
|
||||
debug!("own_existential_vtable_entry: trait_method={:?}", trait_method);
|
||||
@ -211,7 +232,7 @@ fn own_existential_vtable_entries(tcx: TyCtxt<'_>, trait_def_id: DefId) -> &[Def
|
||||
Some(def_id)
|
||||
});
|
||||
|
||||
tcx.arena.alloc_from_iter(own_entries.into_iter())
|
||||
own_entries
|
||||
}
|
||||
|
||||
/// Given a trait `trait_ref`, iterates the vtable entries
|
||||
|
@ -3,7 +3,7 @@ use rustc_hir as hir;
|
||||
use rustc_index::bit_set::BitSet;
|
||||
use rustc_index::{IndexSlice, IndexVec};
|
||||
use rustc_middle::mir::{GeneratorLayout, GeneratorSavedLocal};
|
||||
use rustc_middle::query::Providers;
|
||||
use rustc_middle::query::{LocalCrate, Providers};
|
||||
use rustc_middle::ty::layout::{
|
||||
IntegerExt, LayoutCx, LayoutError, LayoutOf, TyAndLayout, MAX_SIMD_LANES,
|
||||
};
|
||||
@ -24,32 +24,28 @@ use crate::errors::{
|
||||
use crate::layout_sanity_check::sanity_check_layout;
|
||||
|
||||
pub fn provide(providers: &mut Providers) {
|
||||
*providers = Providers { layout_of, ..*providers };
|
||||
*providers = Providers { layout_of, reference_niches_policy, ..*providers };
|
||||
}
|
||||
|
||||
#[instrument(skip(tcx), level = "debug")]
|
||||
fn reference_niches_policy<'tcx>(tcx: TyCtxt<'tcx>, _: LocalCrate) -> ReferenceNichePolicy {
|
||||
tcx.sess.opts.unstable_opts.reference_niches.unwrap_or(DEFAULT_REF_NICHES)
|
||||
}
|
||||
|
||||
/// The reference niche policy for builtin types, and for types in
|
||||
/// crates not specifying `-Z reference-niches`.
|
||||
const DEFAULT_REF_NICHES: ReferenceNichePolicy = ReferenceNichePolicy { size: false, align: false };
|
||||
|
||||
#[instrument(skip(tcx, query), level = "debug")]
|
||||
fn layout_of<'tcx>(
|
||||
tcx: TyCtxt<'tcx>,
|
||||
query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
|
||||
) -> Result<TyAndLayout<'tcx>, &'tcx LayoutError<'tcx>> {
|
||||
let (param_env, ty) = query.into_parts();
|
||||
debug!(?ty);
|
||||
|
||||
let (param_env, unnormalized_ty) = query.into_parts();
|
||||
let param_env = param_env.with_reveal_all_normalized(tcx);
|
||||
let unnormalized_ty = ty;
|
||||
|
||||
// FIXME: We might want to have two different versions of `layout_of`:
|
||||
// One that can be called after typecheck has completed and can use
|
||||
// `normalize_erasing_regions` here and another one that can be called
|
||||
// before typecheck has completed and uses `try_normalize_erasing_regions`.
|
||||
let ty = match tcx.try_normalize_erasing_regions(param_env, ty) {
|
||||
Ok(t) => t,
|
||||
Err(normalization_error) => {
|
||||
return Err(tcx
|
||||
.arena
|
||||
.alloc(LayoutError::NormalizationFailure(ty, normalization_error)));
|
||||
}
|
||||
};
|
||||
// `naive_layout_of` takes care of normalizing the type.
|
||||
let naive = tcx.naive_layout_of(query)?;
|
||||
let ty = naive.ty;
|
||||
|
||||
if ty != unnormalized_ty {
|
||||
// Ensure this layout is also cached for the normalized type.
|
||||
@ -57,13 +53,11 @@ fn layout_of<'tcx>(
|
||||
}
|
||||
|
||||
let cx = LayoutCx { tcx, param_env };
|
||||
|
||||
let layout = layout_of_uncached(&cx, ty)?;
|
||||
|
||||
let layout = TyAndLayout { ty, layout };
|
||||
|
||||
record_layout_for_printing(&cx, layout);
|
||||
|
||||
sanity_check_layout(&cx, &layout);
|
||||
sanity_check_layout(&cx, &layout, &naive);
|
||||
|
||||
Ok(layout)
|
||||
}
|
||||
@ -83,12 +77,10 @@ fn univariant_uninterned<'tcx>(
|
||||
kind: StructKind,
|
||||
) -> Result<LayoutS, &'tcx LayoutError<'tcx>> {
|
||||
let dl = cx.data_layout();
|
||||
let pack = repr.pack;
|
||||
if pack.is_some() && repr.align.is_some() {
|
||||
cx.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned");
|
||||
return Err(cx.tcx.arena.alloc(LayoutError::Unknown(ty)));
|
||||
}
|
||||
|
||||
assert!(
|
||||
!(repr.pack.is_some() && repr.align.is_some()),
|
||||
"already rejected by `naive_layout_of`"
|
||||
);
|
||||
cx.univariant(dl, fields, repr, kind).ok_or_else(|| error(cx, LayoutError::SizeOverflow(ty)))
|
||||
}
|
||||
|
||||
@ -146,75 +138,35 @@ fn layout_of_uncached<'tcx>(
|
||||
ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
|
||||
let mut data_ptr = scalar_unit(Pointer(AddressSpace::DATA));
|
||||
if !ty.is_unsafe_ptr() {
|
||||
data_ptr.valid_range_mut().start = 1;
|
||||
}
|
||||
// Calling `layout_of` here would cause a query cycle for recursive types;
|
||||
// so use a conservative estimate that doesn't look past references.
|
||||
let naive = cx.naive_layout_of(pointee)?.layout;
|
||||
|
||||
let pointee = tcx.normalize_erasing_regions(param_env, pointee);
|
||||
if pointee.is_sized(tcx, param_env) {
|
||||
return Ok(tcx.mk_layout(LayoutS::scalar(cx, data_ptr)));
|
||||
}
|
||||
|
||||
let metadata = if let Some(metadata_def_id) = tcx.lang_items().metadata_type()
|
||||
// Projection eagerly bails out when the pointee references errors,
|
||||
// fall back to structurally deducing metadata.
|
||||
&& !pointee.references_error()
|
||||
{
|
||||
let pointee_metadata = Ty::new_projection(tcx, metadata_def_id, [pointee]);
|
||||
let metadata_ty = match tcx.try_normalize_erasing_regions(
|
||||
param_env,
|
||||
pointee_metadata,
|
||||
) {
|
||||
Ok(metadata_ty) => metadata_ty,
|
||||
Err(mut err) => {
|
||||
// Usually `<Ty as Pointee>::Metadata` can't be normalized because
|
||||
// its struct tail cannot be normalized either, so try to get a
|
||||
// more descriptive layout error here, which will lead to less confusing
|
||||
// diagnostics.
|
||||
match tcx.try_normalize_erasing_regions(
|
||||
param_env,
|
||||
tcx.struct_tail_without_normalization(pointee),
|
||||
) {
|
||||
Ok(_) => {},
|
||||
Err(better_err) => {
|
||||
err = better_err;
|
||||
}
|
||||
}
|
||||
return Err(error(cx, LayoutError::NormalizationFailure(pointee, err)));
|
||||
},
|
||||
let niches = match *pointee.kind() {
|
||||
ty::FnDef(def, ..)
|
||||
| ty::Foreign(def)
|
||||
| ty::Generator(def, ..)
|
||||
| ty::Closure(def, ..) => tcx.reference_niches_policy(def.krate),
|
||||
ty::Adt(def, _) => tcx.reference_niches_policy(def.did().krate),
|
||||
_ => DEFAULT_REF_NICHES,
|
||||
};
|
||||
|
||||
let metadata_layout = cx.layout_of(metadata_ty)?;
|
||||
// If the metadata is a 1-zst, then the pointer is thin.
|
||||
if metadata_layout.is_zst() && metadata_layout.align.abi.bytes() == 1 {
|
||||
return Ok(tcx.mk_layout(LayoutS::scalar(cx, data_ptr)));
|
||||
}
|
||||
let (min_addr, max_addr) = dl.address_range_for(
|
||||
if niches.size { naive.size } else { Size::ZERO },
|
||||
if niches.align { naive.align } else { Align::ONE },
|
||||
);
|
||||
|
||||
let Abi::Scalar(metadata) = metadata_layout.abi else {
|
||||
return Err(error(cx, LayoutError::Unknown(pointee)));
|
||||
};
|
||||
*data_ptr.valid_range_mut() =
|
||||
WrappingRange { start: min_addr.into(), end: max_addr.into() };
|
||||
}
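To make the niche arithmetic concrete: a standalone sketch mirroring the `address_range_for` computation used above, on an assumed 64-bit address space. The function and the constants in the asserts are illustrative, not compiler APIs; with `ReferenceNichePolicy { size: false, align: false }` the call above degenerates to size 0 and align 1, as in the second assert.

// Inclusive (min, max) valid addresses for a reference to a pointee with the
// given size and power-of-two alignment, mirroring `address_range_for`.
fn address_range_for(size: u64, align: u64) -> (u64, u64) {
    let min = align;                            // exclude null and misaligned low addresses
    let max = (u64::MAX - size) & !(align - 1); // align the upper bound downwards
    (min, max)
}

fn main() {
    // Pointee of size 8 and align 8 with both niche flags enabled:
    assert_eq!(address_range_for(8, 8), (8, u64::MAX - 15));
    // Both flags disabled: only the traditional null niche remains.
    assert_eq!(address_range_for(0, 1), (1, u64::MAX));
}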
|
||||
|
||||
metadata
|
||||
if let Some(metadata) = ptr_metadata_scalar(cx, pointee)? {
|
||||
// Effectively a (ptr, meta) tuple.
|
||||
tcx.mk_layout(cx.scalar_pair(data_ptr, metadata))
|
||||
} else {
|
||||
let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
|
||||
|
||||
match unsized_part.kind() {
|
||||
ty::Foreign(..) => {
|
||||
return Ok(tcx.mk_layout(LayoutS::scalar(cx, data_ptr)));
|
||||
}
|
||||
ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
|
||||
ty::Dynamic(..) => {
|
||||
let mut vtable = scalar_unit(Pointer(AddressSpace::DATA));
|
||||
vtable.valid_range_mut().start = 1;
|
||||
vtable
|
||||
}
|
||||
_ => {
|
||||
return Err(error(cx, LayoutError::Unknown(pointee)));
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
// Effectively a (ptr, meta) tuple.
|
||||
tcx.mk_layout(cx.scalar_pair(data_ptr, metadata))
|
||||
// No metadata, this is a thin pointer.
|
||||
tcx.mk_layout(LayoutS::scalar(cx, data_ptr))
|
||||
}
|
||||
}
|
||||
|
||||
ty::Dynamic(_, _, ty::DynStar) => {
|
||||
@ -226,16 +178,8 @@ fn layout_of_uncached<'tcx>(
|
||||
}
|
||||
|
||||
// Arrays and slices.
|
||||
ty::Array(element, mut count) => {
|
||||
if count.has_projections() {
|
||||
count = tcx.normalize_erasing_regions(param_env, count);
|
||||
if count.has_projections() {
|
||||
return Err(error(cx, LayoutError::Unknown(ty)));
|
||||
}
|
||||
}
|
||||
|
||||
let count = count
|
||||
.try_eval_target_usize(tcx, param_env)
|
||||
ty::Array(element, count) => {
|
||||
let count = compute_array_count(cx, count)
|
||||
.ok_or_else(|| error(cx, LayoutError::Unknown(ty)))?;
|
||||
let element = cx.layout_of(element)?;
|
||||
let size = element
|
||||
@ -558,22 +502,106 @@ fn layout_of_uncached<'tcx>(
|
||||
}
|
||||
|
||||
// Types with no meaningful known layout.
|
||||
ty::Alias(..) => {
|
||||
// NOTE(eddyb) `layout_of` query should've normalized these away,
|
||||
// if that was possible, so there's no reason to try again here.
|
||||
return Err(error(cx, LayoutError::Unknown(ty)));
|
||||
}
|
||||
|
||||
ty::Bound(..) | ty::GeneratorWitness(..) | ty::GeneratorWitnessMIR(..) | ty::Infer(_) => {
|
||||
bug!("Layout::compute: unexpected type `{}`", ty)
|
||||
}
|
||||
|
||||
ty::Placeholder(..) | ty::Param(_) | ty::Error(_) => {
|
||||
return Err(error(cx, LayoutError::Unknown(ty)));
|
||||
ty::Alias(..)
|
||||
| ty::Bound(..)
|
||||
| ty::GeneratorWitness(..)
|
||||
| ty::GeneratorWitnessMIR(..)
|
||||
| ty::Infer(_)
|
||||
| ty::Placeholder(..)
|
||||
| ty::Param(_)
|
||||
| ty::Error(_) => {
|
||||
unreachable!("already rejected by `naive_layout_of`");
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) fn compute_array_count<'tcx>(
|
||||
cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
|
||||
mut count: ty::Const<'tcx>,
|
||||
) -> Option<u64> {
|
||||
let LayoutCx { tcx, param_env } = *cx;
|
||||
if count.has_projections() {
|
||||
count = tcx.normalize_erasing_regions(param_env, count);
|
||||
if count.has_projections() {
|
||||
return None;
|
||||
}
|
||||
}
|
||||
|
||||
count.try_eval_target_usize(tcx, param_env)
|
||||
}
|
||||
|
||||
pub(crate) fn ptr_metadata_scalar<'tcx>(
|
||||
cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
|
||||
pointee: Ty<'tcx>,
|
||||
) -> Result<Option<Scalar>, &'tcx LayoutError<'tcx>> {
|
||||
let dl = cx.data_layout();
|
||||
let scalar_unit = |value: Primitive| {
|
||||
let size = value.size(dl);
|
||||
assert!(size.bits() <= 128);
|
||||
Scalar::Initialized { value, valid_range: WrappingRange::full(size) }
|
||||
};
|
||||
|
||||
let LayoutCx { tcx, param_env } = *cx;
|
||||
|
||||
let pointee = tcx.normalize_erasing_regions(param_env, pointee);
|
||||
if pointee.is_sized(tcx, param_env) {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
if let Some(metadata_def_id) = tcx.lang_items().metadata_type()
|
||||
// Projection eagerly bails out when the pointee references errors,
|
||||
// fall back to structurally deducing metadata.
|
||||
&& !pointee.references_error()
|
||||
{
|
||||
let pointee_metadata = Ty::new_projection(tcx, metadata_def_id, [pointee]);
|
||||
let metadata_ty = match tcx.try_normalize_erasing_regions(
|
||||
param_env,
|
||||
pointee_metadata,
|
||||
) {
|
||||
Ok(metadata_ty) => metadata_ty,
|
||||
Err(mut err) => {
|
||||
// Usually `<Ty as Pointee>::Metadata` can't be normalized because
|
||||
// its struct tail cannot be normalized either, so try to get a
|
||||
// more descriptive layout error here, which will lead to less confusing
|
||||
// diagnostics.
|
||||
match tcx.try_normalize_erasing_regions(
|
||||
param_env,
|
||||
tcx.struct_tail_without_normalization(pointee),
|
||||
) {
|
||||
Ok(_) => {},
|
||||
Err(better_err) => {
|
||||
err = better_err;
|
||||
}
|
||||
}
|
||||
return Err(error(cx, LayoutError::NormalizationFailure(pointee, err)));
|
||||
},
|
||||
};
|
||||
|
||||
let metadata_layout = cx.layout_of(metadata_ty)?;
|
||||
|
||||
if metadata_layout.is_zst() && metadata_layout.align.abi.bytes() == 1 {
|
||||
Ok(None) // If the metadata is a 1-zst, then the pointer is thin.
|
||||
} else if let Abi::Scalar(metadata) = metadata_layout.abi {
|
||||
Ok(Some(metadata))
|
||||
} else {
|
||||
Err(error(cx, LayoutError::Unknown(pointee)))
|
||||
}
|
||||
} else {
|
||||
let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
|
||||
|
||||
match unsized_part.kind() {
|
||||
ty::Foreign(..) => Ok(None),
|
||||
ty::Slice(_) | ty::Str => Ok(Some(scalar_unit(Int(dl.ptr_sized_integer(), false)))),
|
||||
ty::Dynamic(..) => {
|
||||
let mut vtable = scalar_unit(Pointer(AddressSpace::DATA));
|
||||
vtable.valid_range_mut().start = 1;
|
||||
Ok(Some(vtable))
|
||||
}
|
||||
_ => Err(error(cx, LayoutError::Unknown(pointee))),
|
||||
}
|
||||
}
|
||||
}
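The metadata scalar computed here is what makes slice and trait-object pointers a word wider than thin pointers. A standalone check of that observable effect, which holds on the targets rustc currently supports:

use std::mem::size_of;

fn main() {
    let word = size_of::<usize>();
    assert_eq!(size_of::<*const u8>(), word);                      // thin: no metadata
    assert_eq!(size_of::<*const [u8]>(), 2 * word);                // pointer + length
    assert_eq!(size_of::<*const dyn std::fmt::Debug>(), 2 * word); // pointer + vtable
}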
|
||||
|
||||
/// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
enum SavedLocalEligibility {
|
||||
|
322
compiler/rustc_ty_utils/src/layout_naive.rs
Normal file
@ -0,0 +1,322 @@
|
||||
use rustc_middle::query::Providers;
|
||||
use rustc_middle::ty::layout::{
|
||||
IntegerExt, LayoutCx, LayoutError, LayoutOf, NaiveAbi, NaiveLayout, NaiveNiches,
|
||||
TyAndNaiveLayout,
|
||||
};
|
||||
use rustc_middle::ty::{self, ReprOptions, Ty, TyCtxt, TypeVisitableExt};
|
||||
use rustc_span::DUMMY_SP;
|
||||
use rustc_target::abi::*;
|
||||
|
||||
use std::ops::Bound;
|
||||
|
||||
use crate::layout::{compute_array_count, ptr_metadata_scalar};
|
||||
|
||||
pub fn provide(providers: &mut Providers) {
|
||||
*providers = Providers { naive_layout_of, ..*providers };
|
||||
}
|
||||
|
||||
#[instrument(skip(tcx, query), level = "debug")]
|
||||
fn naive_layout_of<'tcx>(
|
||||
tcx: TyCtxt<'tcx>,
|
||||
query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
|
||||
) -> Result<TyAndNaiveLayout<'tcx>, &'tcx LayoutError<'tcx>> {
|
||||
let (param_env, ty) = query.into_parts();
|
||||
debug!(?ty);
|
||||
|
||||
let param_env = param_env.with_reveal_all_normalized(tcx);
|
||||
let unnormalized_ty = ty;
|
||||
|
||||
// FIXME: We might want to have two different versions of `layout_of`:
|
||||
// One that can be called after typecheck has completed and can use
|
||||
// `normalize_erasing_regions` here and another one that can be called
|
||||
// before typecheck has completed and uses `try_normalize_erasing_regions`.
|
||||
let ty = match tcx.try_normalize_erasing_regions(param_env, ty) {
|
||||
Ok(t) => t,
|
||||
Err(normalization_error) => {
|
||||
return Err(tcx
|
||||
.arena
|
||||
.alloc(LayoutError::NormalizationFailure(ty, normalization_error)));
|
||||
}
|
||||
};
|
||||
|
||||
if ty != unnormalized_ty {
|
||||
// Ensure this layout is also cached for the normalized type.
|
||||
return tcx.naive_layout_of(param_env.and(ty));
|
||||
}
|
||||
|
||||
let cx = LayoutCx { tcx, param_env };
|
||||
let layout = naive_layout_of_uncached(&cx, ty)?;
|
||||
Ok(TyAndNaiveLayout { ty, layout })
|
||||
}
|
||||
|
||||
fn error<'tcx>(
|
||||
cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
|
||||
err: LayoutError<'tcx>,
|
||||
) -> &'tcx LayoutError<'tcx> {
|
||||
cx.tcx.arena.alloc(err)
|
||||
}
|
||||
|
||||
fn naive_layout_of_uncached<'tcx>(
|
||||
cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
|
||||
ty: Ty<'tcx>,
|
||||
) -> Result<NaiveLayout, &'tcx LayoutError<'tcx>> {
|
||||
let tcx = cx.tcx;
|
||||
let dl = cx.data_layout();
|
||||
|
||||
let scalar = |niched: bool, value: Primitive| NaiveLayout {
|
||||
abi: NaiveAbi::Scalar(value),
|
||||
niches: if niched { NaiveNiches::Some } else { NaiveNiches::None },
|
||||
size: value.size(dl),
|
||||
align: value.align(dl).abi,
|
||||
exact: true,
|
||||
};
|
||||
|
||||
let univariant = |fields: &mut dyn Iterator<Item = Ty<'tcx>>,
|
||||
repr: &ReprOptions|
|
||||
-> Result<NaiveLayout, &'tcx LayoutError<'tcx>> {
|
||||
if repr.pack.is_some() && repr.align.is_some() {
|
||||
cx.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned");
|
||||
return Err(error(cx, LayoutError::Unknown(ty)));
|
||||
}
|
||||
|
||||
let linear = repr.inhibit_struct_field_reordering_opt();
|
||||
let pack = repr.pack.unwrap_or(Align::MAX);
|
||||
let mut layout = NaiveLayout::EMPTY;
|
||||
|
||||
for field in fields {
|
||||
let field = cx.naive_layout_of(field)?.packed(pack);
|
||||
if linear {
|
||||
layout = layout.pad_to_align(field.align);
|
||||
}
|
||||
layout = layout
|
||||
.concat(&field, dl)
|
||||
.ok_or_else(|| error(cx, LayoutError::SizeOverflow(ty)))?;
|
||||
}
|
||||
|
||||
if let Some(align) = repr.align {
|
||||
layout = layout.align_to(align);
|
||||
}
|
||||
|
||||
if linear {
|
||||
layout.abi = layout.abi.as_aggregate();
|
||||
}
|
||||
|
||||
Ok(layout.pad_to_align(layout.align))
|
||||
};
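The `concat`/`pad_to_align` steps above are the usual align-up arithmetic for laying out fields in order. A standalone illustration of that arithmetic for a `repr(C)`-style struct (declaration order kept, as when `linear` is true):

// Round `offset` up to a power-of-two `align`.
fn align_up(offset: u64, align: u64) -> u64 {
    (offset + align - 1) & !(align - 1)
}

fn main() {
    // Fields of a repr(C) struct { a: u8, b: u32, c: u16 } as (size, align) pairs.
    let (mut size, mut align) = (0u64, 1u64);
    for (field_size, field_align) in [(1, 1), (4, 4), (2, 2)] {
        size = align_up(size, field_align) + field_size; // place the field
        align = align.max(field_align);                  // struct alignment
    }
    size = align_up(size, align); // trailing padding
    assert_eq!((size, align), (12, 4));
}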
|
||||
|
||||
debug_assert!(!ty.has_non_region_infer());
|
||||
|
||||
Ok(match *ty.kind() {
|
||||
// Basic scalars
|
||||
ty::Bool => scalar(true, Int(I8, false)),
|
||||
ty::Char => scalar(true, Int(I32, false)),
|
||||
ty::Int(ity) => scalar(false, Int(Integer::from_int_ty(dl, ity), true)),
|
||||
ty::Uint(ity) => scalar(false, Int(Integer::from_uint_ty(dl, ity), false)),
|
||||
ty::Float(fty) => scalar(
|
||||
false,
|
||||
match fty {
|
||||
ty::FloatTy::F32 => F32,
|
||||
ty::FloatTy::F64 => F64,
|
||||
},
|
||||
),
|
||||
ty::FnPtr(_) => scalar(true, Pointer(dl.instruction_address_space)),
|
||||
|
||||
// The never type.
|
||||
ty::Never => NaiveLayout { abi: NaiveAbi::Uninhabited, ..NaiveLayout::EMPTY },
|
||||
|
||||
// Potentially-wide pointers.
|
||||
ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
|
||||
let data_ptr = scalar(!ty.is_unsafe_ptr(), Pointer(AddressSpace::DATA));
|
||||
if let Some(metadata) = ptr_metadata_scalar(cx, pointee)? {
|
||||
// Effectively a (ptr, meta) tuple.
|
||||
let meta = scalar(!metadata.is_always_valid(dl), metadata.primitive());
|
||||
let l = data_ptr
|
||||
.concat(&meta, dl)
|
||||
.ok_or_else(|| error(cx, LayoutError::SizeOverflow(ty)))?;
|
||||
l.pad_to_align(l.align)
|
||||
} else {
|
||||
// No metadata, this is a thin pointer.
|
||||
data_ptr
|
||||
}
|
||||
}
|
||||
|
||||
ty::Dynamic(_, _, ty::DynStar) => {
|
||||
let ptr = scalar(false, Pointer(AddressSpace::DATA));
|
||||
let vtable = scalar(true, Pointer(AddressSpace::DATA));
|
||||
ptr.concat(&vtable, dl).ok_or_else(|| error(cx, LayoutError::SizeOverflow(ty)))?
|
||||
}
|
||||
|
||||
// Arrays and slices.
|
||||
ty::Array(element, count) => {
|
||||
let count = compute_array_count(cx, count)
|
||||
.ok_or_else(|| error(cx, LayoutError::Unknown(ty)))?;
|
||||
let element = cx.naive_layout_of(element)?;
|
||||
NaiveLayout {
|
||||
abi: element.abi.as_aggregate(),
|
||||
size: element
|
||||
.size
|
||||
.checked_mul(count, cx)
|
||||
.ok_or_else(|| error(cx, LayoutError::SizeOverflow(ty)))?,
|
||||
niches: if count == 0 { NaiveNiches::None } else { element.niches },
|
||||
..*element
|
||||
}
|
||||
}
|
||||
ty::Slice(element) => NaiveLayout {
|
||||
abi: NaiveAbi::Unsized,
|
||||
size: Size::ZERO,
|
||||
niches: NaiveNiches::None,
|
||||
..*cx.naive_layout_of(element)?
|
||||
},
|
||||
|
||||
ty::FnDef(..) => NaiveLayout::EMPTY,
|
||||
|
||||
// Unsized types.
|
||||
ty::Str | ty::Dynamic(_, _, ty::Dyn) | ty::Foreign(..) => {
|
||||
NaiveLayout { abi: NaiveAbi::Unsized, ..NaiveLayout::EMPTY }
|
||||
}
|
||||
|
||||
// FIXME(reference_niches): try to actually compute a reasonable layout estimate,
|
||||
// without duplicating too much code from `generator_layout`.
|
||||
ty::Generator(..) => {
|
||||
NaiveLayout { exact: false, niches: NaiveNiches::Maybe, ..NaiveLayout::EMPTY }
|
||||
}
|
||||
|
||||
ty::Closure(_, ref substs) => {
|
||||
univariant(&mut substs.as_closure().upvar_tys(), &ReprOptions::default())?
|
||||
}
|
||||
|
||||
ty::Tuple(tys) => univariant(&mut tys.iter(), &ReprOptions::default())?,
|
||||
|
||||
ty::Adt(def, substs) if def.is_union() => {
|
||||
assert_eq!(def.variants().len(), 1, "union should have a single variant");
|
||||
let repr = def.repr();
|
||||
let pack = repr.pack.unwrap_or(Align::MAX);
|
||||
if repr.pack.is_some() && repr.align.is_some() {
|
||||
cx.tcx.sess.delay_span_bug(DUMMY_SP, "union cannot be packed and aligned");
|
||||
return Err(error(cx, LayoutError::Unknown(ty)));
|
||||
}
|
||||
|
||||
let mut layout = NaiveLayout {
|
||||
// Unions never have niches.
|
||||
niches: NaiveNiches::None,
|
||||
..NaiveLayout::EMPTY
|
||||
};
|
||||
|
||||
for f in &def.variants()[FIRST_VARIANT].fields {
|
||||
let field = cx.naive_layout_of(f.ty(tcx, substs))?;
|
||||
layout = layout.union(&field.packed(pack));
|
||||
}
|
||||
|
||||
// Unions are always inhabited, and never scalar if `repr(C)`.
|
||||
if !matches!(layout.abi, NaiveAbi::Scalar(_)) || repr.inhibit_enum_layout_opt() {
|
||||
layout.abi = NaiveAbi::Sized;
|
||||
}
|
||||
|
||||
if let Some(align) = repr.align {
|
||||
layout = layout.align_to(align);
|
||||
}
|
||||
layout.pad_to_align(layout.align)
|
||||
}
|
||||
|
||||
ty::Adt(def, substs) => {
|
||||
let repr = def.repr();
|
||||
let mut layout = NaiveLayout {
|
||||
// An ADT with no inhabited variants should have an uninhabited ABI.
|
||||
abi: NaiveAbi::Uninhabited,
|
||||
..NaiveLayout::EMPTY
|
||||
};
|
||||
|
||||
let mut empty_variants = 0;
|
||||
for v in def.variants() {
|
||||
let mut fields = v.fields.iter().map(|f| f.ty(tcx, substs));
|
||||
let vlayout = univariant(&mut fields, &repr)?;
|
||||
|
||||
if vlayout.size == Size::ZERO && vlayout.exact {
|
||||
empty_variants += 1;
|
||||
} else {
|
||||
// Remember the niches of the last seen variant.
|
||||
layout.niches = vlayout.niches;
|
||||
}
|
||||
|
||||
layout = layout.union(&vlayout);
|
||||
}
|
||||
|
||||
if def.is_enum() {
|
||||
let may_need_discr = match def.variants().len() {
|
||||
0 | 1 => false,
|
||||
// Simple Option-like niche optimization.
|
||||
// Handling this special case allows enums like `Option<&T>`
|
||||
// to be recognized as `PointerLike` and to be transmutable
|
||||
// in generic contexts.
|
||||
2 if empty_variants == 1 && layout.niches == NaiveNiches::Some => {
|
||||
layout.niches = NaiveNiches::Maybe; // fill up the niche.
|
||||
false
|
||||
}
|
||||
_ => true,
|
||||
};
|
||||
|
||||
if may_need_discr || repr.inhibit_enum_layout_opt() {
|
||||
// For simplicity, assume that the discriminant always gets niched.
|
||||
// This will be wrong in many cases, which will cause the size (and
|
||||
// sometimes the alignment) to be underestimated.
|
||||
// FIXME(reference_niches): Be smarter here.
|
||||
layout.niches = NaiveNiches::Maybe;
|
||||
layout = layout.inexact();
|
||||
}
|
||||
} else {
|
||||
assert_eq!(def.variants().len(), 1, "struct should have a single variant");
|
||||
|
||||
// We don't compute exact alignment for SIMD structs.
|
||||
if repr.simd() {
|
||||
layout = layout.inexact();
|
||||
}
|
||||
|
||||
// `UnsafeCell` hides all niches.
|
||||
if def.is_unsafe_cell() {
|
||||
layout.niches = NaiveNiches::None;
|
||||
}
|
||||
}
|
||||
|
||||
let valid_range = tcx.layout_scalar_valid_range(def.did());
|
||||
if valid_range != (Bound::Unbounded, Bound::Unbounded) {
|
||||
let get = |bound, default| match bound {
|
||||
Bound::Unbounded => default,
|
||||
Bound::Included(v) => v,
|
||||
Bound::Excluded(_) => bug!("exclusive `layout_scalar_valid_range` bound"),
|
||||
};
|
||||
|
||||
let valid_range = WrappingRange {
|
||||
start: get(valid_range.0, 0),
|
||||
// FIXME: this is wrong for scalar-pair ABIs. Fortunately, the
|
||||
// only type this could currently affect is `NonNull<T: !Sized>`,
|
||||
// and the `NaiveNiches` result still ends up correct.
|
||||
end: get(valid_range.1, layout.size.unsigned_int_max()),
|
||||
};
|
||||
assert!(
|
||||
valid_range.is_in_range_for(layout.size),
|
||||
"`layout_scalar_valid_range` values are out of bounds",
|
||||
);
|
||||
if !valid_range.is_full_for(layout.size) {
|
||||
layout.niches = NaiveNiches::Some;
|
||||
}
|
||||
}
|
||||
|
||||
layout.pad_to_align(layout.align)
|
||||
}
|
||||
|
||||
// Types with no meaningful known layout.
|
||||
ty::Alias(..) => {
|
||||
// NOTE(eddyb) `layout_of` query should've normalized these away,
|
||||
// if that was possible, so there's no reason to try again here.
|
||||
return Err(error(cx, LayoutError::Unknown(ty)));
|
||||
}
|
||||
|
||||
ty::Bound(..) | ty::GeneratorWitness(..) | ty::GeneratorWitnessMIR(..) | ty::Infer(_) => {
|
||||
bug!("Layout::compute: unexpected type `{}`", ty)
|
||||
}
|
||||
|
||||
ty::Placeholder(..) | ty::Param(_) | ty::Error(_) => {
|
||||
return Err(error(cx, LayoutError::Unknown(ty)));
|
||||
}
|
||||
})
|
||||
}
|
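The Option-like special case called out in the comments above models a layout guarantee that is observable from safe code. A minimal check of that guarantee, using only the standard library and independent of these compiler internals:

```rust
use std::mem::size_of;
use std::ptr::NonNull;

fn main() {
    // `Option<&T>` and `Option<NonNull<T>>` reuse the null niche of the pointer,
    // so the `None` case costs no extra space.
    assert_eq!(size_of::<Option<&u32>>(), size_of::<&u32>());
    assert_eq!(size_of::<Option<NonNull<u32>>>(), size_of::<NonNull<u32>>());
}
```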
@ -1,5 +1,5 @@
|
||||
use rustc_middle::ty::{
|
||||
layout::{LayoutCx, TyAndLayout},
|
||||
layout::{LayoutCx, NaiveLayout, TyAndLayout},
|
||||
TyCtxt,
|
||||
};
|
||||
use rustc_target::abi::*;
|
||||
@ -10,6 +10,7 @@ use std::assert_matches::assert_matches;
|
||||
pub(super) fn sanity_check_layout<'tcx>(
|
||||
cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
|
||||
layout: &TyAndLayout<'tcx>,
|
||||
naive: &NaiveLayout,
|
||||
) {
|
||||
// Type-level uninhabitedness should always imply ABI uninhabitedness.
|
||||
if layout.ty.is_privately_uninhabited(cx.tcx, cx.param_env) {
|
||||
@ -20,6 +21,10 @@ pub(super) fn sanity_check_layout<'tcx>(
|
||||
bug!("size is not a multiple of align, in the following layout:\n{layout:#?}");
|
||||
}
|
||||
|
||||
if !naive.is_refined_by(layout.layout) {
|
||||
bug!("the naive layout isn't refined by the actual layout:\n{:#?}\n{:#?}", naive, layout);
|
||||
}
|
||||
|
||||
if !cfg!(debug_assertions) {
|
||||
// Stop here, the rest is kind of expensive.
|
||||
return;
|
||||
|
@ -31,6 +31,7 @@ mod errors;
|
||||
mod implied_bounds;
|
||||
pub mod instance;
|
||||
mod layout;
|
||||
mod layout_naive;
|
||||
mod layout_sanity_check;
|
||||
mod needs_drop;
|
||||
mod opaque_types;
|
||||
@ -47,6 +48,7 @@ pub fn provide(providers: &mut Providers) {
|
||||
consts::provide(providers);
|
||||
implied_bounds::provide(providers);
|
||||
layout::provide(providers);
|
||||
layout_naive::provide(providers);
|
||||
needs_drop::provide(providers);
|
||||
opaque_types::provide(providers);
|
||||
representability::provide(providers);
|
||||
|
@ -39,6 +39,7 @@ pub enum AliasKind {
|
||||
/// A projection `<Type as Trait>::AssocType`.
|
||||
/// Can get normalized away if monomorphic enough.
|
||||
Projection,
|
||||
/// An associated type in an inherent `impl`
|
||||
Inherent,
|
||||
/// An opaque type (usually from `impl Trait` in type aliases or function return types)
|
||||
/// Can only be normalized away in RevealAll mode
|
||||
|
@ -661,10 +661,14 @@ impl<T> Rc<T> {
|
||||
|
||||
impl<T, A: Allocator> Rc<T, A> {
|
||||
/// Returns a reference to the underlying allocator.
|
||||
///
|
||||
/// Note: this is an associated function, which means that you have
|
||||
/// to call it as `Rc::allocator(&r)` instead of `r.allocator()`. This
|
||||
/// is so that there is no conflict with a method on the inner type.
|
||||
#[inline]
|
||||
#[unstable(feature = "allocator_api", issue = "32838")]
|
||||
pub fn allocator(&self) -> &A {
|
||||
&self.alloc
|
||||
pub fn allocator(this: &Self) -> &A {
|
||||
&this.alloc
|
||||
}
|
||||
/// Constructs a new `Rc` in the provided allocator.
|
||||
///
|
||||
|
@ -678,10 +678,14 @@ impl<T> Arc<T> {
|
||||
|
||||
impl<T, A: Allocator> Arc<T, A> {
|
||||
/// Returns a reference to the underlying allocator.
|
||||
///
|
||||
/// Note: this is an associated function, which means that you have
|
||||
/// to call it as `Arc::allocator(&a)` instead of `a.allocator()`. This
|
||||
/// is so that there is no conflict with a method on the inner type.
|
||||
#[inline]
|
||||
#[unstable(feature = "allocator_api", issue = "32838")]
|
||||
pub fn allocator(&self) -> &A {
|
||||
&self.alloc
|
||||
pub fn allocator(this: &Self) -> &A {
|
||||
&this.alloc
|
||||
}
|
||||
/// Constructs a new `Arc<T>` in the provided allocator.
|
||||
///
|
||||
|
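A hedged usage sketch of the associated-function call form described in the doc comments above; it requires the unstable `allocator_api` feature on a nightly toolchain:

```rust
#![feature(allocator_api)]

use std::alloc::Global;
use std::rc::Rc;
use std::sync::Arc;

fn main() {
    let rc = Rc::new_in(1u8, Global);
    let arc = Arc::new_in(2u8, Global);
    // Associated-function form: avoids clashing with any `allocator` method
    // that the inner type might define.
    let _: &Global = Rc::allocator(&rc);
    let _: &Global = Arc::allocator(&arc);
}
```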
@ -462,6 +462,30 @@ impl<T: ?Sized> NonNull<T> {
|
||||
// And the caller promised the `delta` is sound to add.
|
||||
unsafe { NonNull { pointer: self.pointer.add(delta) } }
|
||||
}
|
||||
|
||||
/// See [`pointer::sub`] for semantics and safety requirements.
|
||||
#[inline]
|
||||
pub(crate) const unsafe fn sub(self, delta: usize) -> Self
|
||||
where
|
||||
T: Sized,
|
||||
{
|
||||
// SAFETY: We require that the delta stays in-bounds of the object, and
|
||||
// thus it cannot become null, as no legal objects can be allocated
|
||||
// in such a way that the null address is part of them.
|
||||
// And the caller promised the `delta` is sound to subtract.
|
||||
unsafe { NonNull { pointer: self.pointer.sub(delta) } }
|
||||
}
|
||||
|
||||
/// See [`pointer::sub_ptr`] for semantics and safety requirements.
|
||||
#[inline]
|
||||
pub(crate) const unsafe fn sub_ptr(self, subtrahend: Self) -> usize
|
||||
where
|
||||
T: Sized,
|
||||
{
|
||||
// SAFETY: The caller promised that this is safe to do, and
|
||||
// the non-nullness is irrelevant to the operation.
|
||||
unsafe { self.pointer.sub_ptr(subtrahend.pointer) }
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> NonNull<[T]> {
|
||||
|
@ -13,7 +13,7 @@ use crate::iter::{
|
||||
use crate::marker::{PhantomData, Send, Sized, Sync};
|
||||
use crate::mem::{self, SizedTypeProperties};
|
||||
use crate::num::NonZeroUsize;
|
||||
use crate::ptr::{invalid, invalid_mut, NonNull};
|
||||
use crate::ptr::{self, invalid, invalid_mut, NonNull};
|
||||
|
||||
use super::{from_raw_parts, from_raw_parts_mut};
|
||||
|
||||
@ -68,7 +68,7 @@ pub struct Iter<'a, T: 'a> {
|
||||
/// For non-ZSTs, the non-null pointer to the past-the-end element.
|
||||
///
|
||||
/// For ZSTs, this is `ptr::invalid(len)`.
|
||||
end: *const T,
|
||||
end_or_len: *const T,
|
||||
_marker: PhantomData<&'a T>,
|
||||
}
|
||||
|
||||
@ -90,9 +90,9 @@ impl<'a, T> Iter<'a, T> {
|
||||
let ptr = slice.as_ptr();
|
||||
// SAFETY: Similar to `IterMut::new`.
|
||||
unsafe {
|
||||
let end = if T::IS_ZST { invalid(slice.len()) } else { ptr.add(slice.len()) };
|
||||
let end_or_len = if T::IS_ZST { invalid(slice.len()) } else { ptr.add(slice.len()) };
|
||||
|
||||
Self { ptr: NonNull::new_unchecked(ptr as *mut T), end, _marker: PhantomData }
|
||||
Self { ptr: NonNull::new_unchecked(ptr as *mut T), end_or_len, _marker: PhantomData }
|
||||
}
|
||||
}
|
||||
|
||||
@ -128,7 +128,7 @@ impl<'a, T> Iter<'a, T> {
|
||||
}
|
||||
}
|
||||
|
||||
iterator! {struct Iter -> *const T, &'a T, const, {/* no mut */}, {
|
||||
iterator! {struct Iter -> *const T, &'a T, const, {/* no mut */}, as_ref, {
|
||||
fn is_sorted_by<F>(self, mut compare: F) -> bool
|
||||
where
|
||||
Self: Sized,
|
||||
@ -142,7 +142,7 @@ iterator! {struct Iter -> *const T, &'a T, const, {/* no mut */}, {
|
||||
impl<T> Clone for Iter<'_, T> {
|
||||
#[inline]
|
||||
fn clone(&self) -> Self {
|
||||
Iter { ptr: self.ptr, end: self.end, _marker: self._marker }
|
||||
Iter { ptr: self.ptr, end_or_len: self.end_or_len, _marker: self._marker }
|
||||
}
|
||||
}
|
||||
|
||||
@ -189,7 +189,7 @@ pub struct IterMut<'a, T: 'a> {
|
||||
/// For non-ZSTs, the non-null pointer to the past-the-end element.
|
||||
///
|
||||
/// For ZSTs, this is `ptr::invalid_mut(len)`.
|
||||
end: *mut T,
|
||||
end_or_len: *mut T,
|
||||
_marker: PhantomData<&'a mut T>,
|
||||
}
|
||||
|
||||
@ -220,15 +220,16 @@ impl<'a, T> IterMut<'a, T> {
|
||||
// for direct pointer equality with `ptr` to check if the iterator is
|
||||
// done.
|
||||
//
|
||||
// In the case of a ZST, the end pointer is just the start pointer plus
|
||||
// the length, to also allows for the fast `ptr == end` check.
|
||||
// In the case of a ZST, the end pointer is just the length. It's never
|
||||
// used as a pointer at all, and thus it's fine to have no provenance.
|
||||
//
|
||||
// See the `next_unchecked!` and `is_empty!` macros as well as the
|
||||
// `post_inc_start` method for more information.
|
||||
unsafe {
|
||||
let end = if T::IS_ZST { invalid_mut(slice.len()) } else { ptr.add(slice.len()) };
|
||||
let end_or_len =
|
||||
if T::IS_ZST { invalid_mut(slice.len()) } else { ptr.add(slice.len()) };
|
||||
|
||||
Self { ptr: NonNull::new_unchecked(ptr), end, _marker: PhantomData }
|
||||
Self { ptr: NonNull::new_unchecked(ptr), end_or_len, _marker: PhantomData }
|
||||
}
|
||||
}
|
||||
|
||||
@ -360,7 +361,7 @@ impl<T> AsRef<[T]> for IterMut<'_, T> {
|
||||
// }
|
||||
// }
|
||||
|
||||
iterator! {struct IterMut -> *mut T, &'a mut T, mut, {mut}, {}}
|
||||
iterator! {struct IterMut -> *mut T, &'a mut T, mut, {mut}, as_mut, {}}
|
||||
|
||||
/// An internal abstraction over the splitting iterators, so that
|
||||
/// splitn, splitn_mut etc can be implemented once.
|
||||
|
@ -1,45 +1,62 @@
|
||||
//! Macros used by iterators of slice.
|
||||
|
||||
// Shrinks the iterator when T is a ZST, setting the length to `new_len`.
|
||||
// `new_len` must not exceed `self.len()`.
|
||||
macro_rules! zst_set_len {
|
||||
($self: ident, $new_len: expr) => {{
|
||||
/// Convenience & performance macro for consuming the `end_or_len` field, by
|
||||
/// giving a `(&mut) usize` or `(&mut) NonNull<T>` depending on whether `T` is
|
||||
/// or is not a ZST respectively.
|
||||
///
|
||||
/// Internally, this reads the `end` through a pointer-to-`NonNull` so that
|
||||
/// it'll get the appropriate non-null metadata in the backend without needing
|
||||
/// to call `assume` manually.
|
||||
macro_rules! if_zst {
|
||||
(mut $this:ident, $len:ident => $zst_body:expr, $end:ident => $other_body:expr,) => {{
|
||||
#![allow(unused_unsafe)] // we're sometimes used within an unsafe block
|
||||
|
||||
// SAFETY: same as `invalid(_mut)`, but the macro doesn't know
|
||||
// which versions of that function to call, so open-code it.
|
||||
$self.end = unsafe { mem::transmute::<usize, _>($new_len) };
|
||||
if T::IS_ZST {
|
||||
// SAFETY: for ZSTs, the pointer is storing a provenance-free length,
|
||||
// so consuming and updating it as a `usize` is fine.
|
||||
let $len = unsafe { &mut *ptr::addr_of_mut!($this.end_or_len).cast::<usize>() };
|
||||
$zst_body
|
||||
} else {
|
||||
// SAFETY: for non-ZSTs, the type invariant ensures it cannot be null
|
||||
let $end = unsafe { &mut *ptr::addr_of_mut!($this.end_or_len).cast::<NonNull<T>>() };
|
||||
$other_body
|
||||
}
|
||||
}};
|
||||
}
|
||||
($this:ident, $len:ident => $zst_body:expr, $end:ident => $other_body:expr,) => {{
|
||||
#![allow(unused_unsafe)] // we're sometimes used within an unsafe block
|
||||
|
||||
// Shrinks the iterator when T is a ZST, reducing the length by `n`.
|
||||
// `n` must not exceed `self.len()`.
|
||||
macro_rules! zst_shrink {
|
||||
($self: ident, $n: ident) => {
|
||||
let new_len = $self.end.addr() - $n;
|
||||
zst_set_len!($self, new_len);
|
||||
};
|
||||
if T::IS_ZST {
|
||||
let $len = $this.end_or_len.addr();
|
||||
$zst_body
|
||||
} else {
|
||||
// SAFETY: for non-ZSTs, the type invariant ensures it cannot be null
|
||||
let $end = unsafe { *ptr::addr_of!($this.end_or_len).cast::<NonNull<T>>() };
|
||||
$other_body
|
||||
}
|
||||
}};
|
||||
}
|
||||
|
||||
// Inlining is_empty and len makes a huge performance difference
|
||||
macro_rules! is_empty {
|
||||
($self: ident) => {
|
||||
if T::IS_ZST { $self.end.addr() == 0 } else { $self.ptr.as_ptr() as *const _ == $self.end }
|
||||
if_zst!($self,
|
||||
len => len == 0,
|
||||
end => $self.ptr == end,
|
||||
)
|
||||
};
|
||||
}
|
||||
|
||||
macro_rules! len {
|
||||
($self: ident) => {{
|
||||
#![allow(unused_unsafe)] // we're sometimes used within an unsafe block
|
||||
|
||||
if T::IS_ZST {
|
||||
$self.end.addr()
|
||||
} else {
|
||||
// To get rid of some bounds checks (see `position`), we use ptr_sub instead of
|
||||
// offset_from (Tested by `codegen/slice-position-bounds-check`.)
|
||||
// SAFETY: by the type invariant pointers are aligned and `start <= end`
|
||||
unsafe { $self.end.sub_ptr($self.ptr.as_ptr()) }
|
||||
}
|
||||
if_zst!($self,
|
||||
len => len,
|
||||
end => {
|
||||
// To get rid of some bounds checks (see `position`), we use ptr_sub instead of
|
||||
// offset_from (Tested by `codegen/slice-position-bounds-check`.)
|
||||
// SAFETY: by the type invariant pointers are aligned and `start <= end`
|
||||
unsafe { end.sub_ptr($self.ptr) }
|
||||
},
|
||||
)
|
||||
}};
|
||||
}
|
||||
|
||||
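A minimal standalone sketch of the `end_or_len` encoding these macros work with. The `RawIter` type here is made up for illustration only; the real iterators use `NonNull`, strict-provenance helpers, and the `if_zst!` macro above:

```rust
// For zero-sized `T` the field stores a plain length; otherwise it stores the
// one-past-the-end pointer, so `len` never dereferences anything.
struct RawIter<T> {
    ptr: *const T,
    end_or_len: *const T,
}

impl<T> RawIter<T> {
    fn new(slice: &[T]) -> Self {
        let ptr = slice.as_ptr();
        let end_or_len = if std::mem::size_of::<T>() == 0 {
            slice.len() as *const T // provenance-free: just a length in disguise
        } else {
            // SAFETY: one-past-the-end of a slice is in bounds for `add`.
            unsafe { ptr.add(slice.len()) }
        };
        Self { ptr, end_or_len }
    }

    fn len(&self) -> usize {
        if std::mem::size_of::<T>() == 0 {
            self.end_or_len as usize
        } else {
            // SAFETY: both pointers are derived from the same slice.
            unsafe { self.end_or_len.offset_from(self.ptr) as usize }
        }
    }
}

fn main() {
    assert_eq!(RawIter::new(&[1u32, 2, 3]).len(), 3);
    assert_eq!(RawIter::new(&[(), (), (), ()]).len(), 4);
}
```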
@ -50,20 +67,21 @@ macro_rules! iterator {
|
||||
$elem:ty,
|
||||
$raw_mut:tt,
|
||||
{$( $mut_:tt )?},
|
||||
$into_ref:ident,
|
||||
{$($extra:tt)*}
|
||||
) => {
|
||||
// Returns the first element and moves the start of the iterator forwards by 1.
|
||||
// Greatly improves performance compared to an inlined function. The iterator
|
||||
// must not be empty.
|
||||
macro_rules! next_unchecked {
|
||||
($self: ident) => {& $( $mut_ )? *$self.post_inc_start(1)}
|
||||
($self: ident) => { $self.post_inc_start(1).$into_ref() }
|
||||
}
|
||||
|
||||
// Returns the last element and moves the end of the iterator backwards by 1.
|
||||
// Greatly improves performance compared to an inlined function. The iterator
|
||||
// must not be empty.
|
||||
macro_rules! next_back_unchecked {
|
||||
($self: ident) => {& $( $mut_ )? *$self.pre_dec_end(1)}
|
||||
($self: ident) => { $self.pre_dec_end(1).$into_ref() }
|
||||
}
|
||||
|
||||
impl<'a, T> $name<'a, T> {
|
||||
@ -80,33 +98,40 @@ macro_rules! iterator {
|
||||
// returning the old start.
|
||||
// Unsafe because the offset must not exceed `self.len()`.
|
||||
#[inline(always)]
|
||||
unsafe fn post_inc_start(&mut self, offset: usize) -> * $raw_mut T {
|
||||
unsafe fn post_inc_start(&mut self, offset: usize) -> NonNull<T> {
|
||||
let old = self.ptr;
|
||||
if T::IS_ZST {
|
||||
zst_shrink!(self, offset);
|
||||
} else {
|
||||
// SAFETY: the caller guarantees that `offset` doesn't exceed `self.len()`,
|
||||
// so this new pointer is inside `self` and thus guaranteed to be non-null.
|
||||
self.ptr = unsafe { self.ptr.add(offset) };
|
||||
|
||||
// SAFETY: the caller guarantees that `offset` doesn't exceed `self.len()`,
|
||||
// so this new pointer is inside `self` and thus guaranteed to be non-null.
|
||||
unsafe {
|
||||
if_zst!(mut self,
|
||||
len => *len = len.unchecked_sub(offset),
|
||||
_end => self.ptr = self.ptr.add(offset),
|
||||
);
|
||||
}
|
||||
old.as_ptr()
|
||||
old
|
||||
}
|
||||
|
||||
// Helper function for moving the end of the iterator backwards by `offset` elements,
|
||||
// returning the new end.
|
||||
// Unsafe because the offset must not exceed `self.len()`.
|
||||
#[inline(always)]
|
||||
unsafe fn pre_dec_end(&mut self, offset: usize) -> * $raw_mut T {
|
||||
if T::IS_ZST {
|
||||
zst_shrink!(self, offset);
|
||||
self.ptr.as_ptr()
|
||||
} else {
|
||||
unsafe fn pre_dec_end(&mut self, offset: usize) -> NonNull<T> {
|
||||
if_zst!(mut self,
|
||||
// SAFETY: By our precondition, `offset` can be at most the
|
||||
// current length, so the subtraction can never overflow.
|
||||
len => unsafe {
|
||||
*len = len.unchecked_sub(offset);
|
||||
self.ptr
|
||||
},
|
||||
// SAFETY: the caller guarantees that `offset` doesn't exceed `self.len()`,
|
||||
// which is guaranteed to not overflow an `isize`. Also, the resulting pointer
|
||||
// is in bounds of `slice`, which fulfills the other requirements for `offset`.
|
||||
self.end = unsafe { self.end.sub(offset) };
|
||||
self.end
|
||||
}
|
||||
end => unsafe {
|
||||
*end = end.sub(offset);
|
||||
*end
|
||||
},
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@ -131,13 +156,9 @@ macro_rules! iterator {
|
||||
fn next(&mut self) -> Option<$elem> {
|
||||
// could be implemented with slices, but this avoids bounds checks
|
||||
|
||||
// SAFETY: `assume` call is safe because slices over non-ZSTs must
|
||||
// have a non-null end pointer. The call to `next_unchecked!` is
|
||||
// SAFETY: The call to `next_unchecked!` is
|
||||
// safe since we check if the iterator is empty first.
|
||||
unsafe {
|
||||
if !<T>::IS_ZST {
|
||||
assume(!self.end.is_null());
|
||||
}
|
||||
if is_empty!(self) {
|
||||
None
|
||||
} else {
|
||||
@ -161,14 +182,10 @@ macro_rules! iterator {
|
||||
fn nth(&mut self, n: usize) -> Option<$elem> {
|
||||
if n >= len!(self) {
|
||||
// This iterator is now empty.
|
||||
if T::IS_ZST {
|
||||
zst_set_len!(self, 0);
|
||||
} else {
|
||||
// SAFETY: end can't be 0 if T isn't ZST because ptr isn't 0 and end >= ptr
|
||||
unsafe {
|
||||
self.ptr = NonNull::new_unchecked(self.end as *mut T);
|
||||
}
|
||||
}
|
||||
if_zst!(mut self,
|
||||
len => *len = 0,
|
||||
end => self.ptr = *end,
|
||||
);
|
||||
return None;
|
||||
}
|
||||
// SAFETY: We are in bounds. `post_inc_start` does the right thing even for ZSTs.
|
||||
@ -375,13 +392,9 @@ macro_rules! iterator {
|
||||
fn next_back(&mut self) -> Option<$elem> {
|
||||
// could be implemented with slices, but this avoids bounds checks
|
||||
|
||||
// SAFETY: `assume` call is safe because slices over non-ZSTs must
|
||||
// have a non-null end pointer. The call to `next_back_unchecked!`
|
||||
// SAFETY: The call to `next_back_unchecked!`
|
||||
// is safe since we check if the iterator is empty first.
|
||||
unsafe {
|
||||
if !<T>::IS_ZST {
|
||||
assume(!self.end.is_null());
|
||||
}
|
||||
if is_empty!(self) {
|
||||
None
|
||||
} else {
|
||||
@ -394,11 +407,10 @@ macro_rules! iterator {
|
||||
fn nth_back(&mut self, n: usize) -> Option<$elem> {
|
||||
if n >= len!(self) {
|
||||
// This iterator is now empty.
|
||||
if T::IS_ZST {
|
||||
zst_set_len!(self, 0);
|
||||
} else {
|
||||
self.end = self.ptr.as_ptr();
|
||||
}
|
||||
if_zst!(mut self,
|
||||
len => *len = 0,
|
||||
end => *end = self.ptr,
|
||||
);
|
||||
return None;
|
||||
}
|
||||
// SAFETY: We are in bounds. `pre_dec_end` does the right thing even for ZSTs.
|
||||
|
@ -1958,14 +1958,12 @@ macro_rules! atomic_int {
|
||||
$stable_from:meta,
|
||||
$stable_nand:meta,
|
||||
$const_stable:meta,
|
||||
$stable_init_const:meta,
|
||||
$diagnostic_item:meta,
|
||||
$s_int_type:literal,
|
||||
$extra_feature:expr,
|
||||
$min_fn:ident, $max_fn:ident,
|
||||
$align:expr,
|
||||
$atomic_new:expr,
|
||||
$int_type:ident $atomic_type:ident $atomic_init:ident) => {
|
||||
$int_type:ident $atomic_type:ident) => {
|
||||
/// An integer type which can be safely shared between threads.
|
||||
///
|
||||
/// This type has the same in-memory representation as the underlying
|
||||
@ -1988,15 +1986,6 @@ macro_rules! atomic_int {
|
||||
v: UnsafeCell<$int_type>,
|
||||
}
|
||||
|
||||
/// An atomic integer initialized to `0`.
|
||||
#[$stable_init_const]
|
||||
#[deprecated(
|
||||
since = "1.34.0",
|
||||
note = "the `new` function is now preferred",
|
||||
suggestion = $atomic_new,
|
||||
)]
|
||||
pub const $atomic_init: $atomic_type = $atomic_type::new(0);
|
||||
|
||||
#[$stable]
|
||||
impl Default for $atomic_type {
|
||||
#[inline]
|
||||
@ -2874,14 +2863,12 @@ atomic_int! {
|
||||
stable(feature = "integer_atomics_stable", since = "1.34.0"),
|
||||
stable(feature = "integer_atomics_stable", since = "1.34.0"),
|
||||
rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
|
||||
unstable(feature = "integer_atomics", issue = "99069"),
|
||||
cfg_attr(not(test), rustc_diagnostic_item = "AtomicI8"),
|
||||
"i8",
|
||||
"",
|
||||
atomic_min, atomic_max,
|
||||
1,
|
||||
"AtomicI8::new(0)",
|
||||
i8 AtomicI8 ATOMIC_I8_INIT
|
||||
i8 AtomicI8
|
||||
}
|
||||
#[cfg(target_has_atomic_load_store = "8")]
|
||||
atomic_int! {
|
||||
@ -2894,14 +2881,12 @@ atomic_int! {
|
||||
stable(feature = "integer_atomics_stable", since = "1.34.0"),
|
||||
stable(feature = "integer_atomics_stable", since = "1.34.0"),
|
||||
rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
|
||||
unstable(feature = "integer_atomics", issue = "99069"),
|
||||
cfg_attr(not(test), rustc_diagnostic_item = "AtomicU8"),
|
||||
"u8",
|
||||
"",
|
||||
atomic_umin, atomic_umax,
|
||||
1,
|
||||
"AtomicU8::new(0)",
|
||||
u8 AtomicU8 ATOMIC_U8_INIT
|
||||
u8 AtomicU8
|
||||
}
|
||||
#[cfg(target_has_atomic_load_store = "16")]
|
||||
atomic_int! {
|
||||
@ -2914,14 +2899,12 @@ atomic_int! {
|
||||
stable(feature = "integer_atomics_stable", since = "1.34.0"),
|
||||
stable(feature = "integer_atomics_stable", since = "1.34.0"),
|
||||
rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
|
||||
unstable(feature = "integer_atomics", issue = "99069"),
|
||||
cfg_attr(not(test), rustc_diagnostic_item = "AtomicI16"),
|
||||
"i16",
|
||||
"",
|
||||
atomic_min, atomic_max,
|
||||
2,
|
||||
"AtomicI16::new(0)",
|
||||
i16 AtomicI16 ATOMIC_I16_INIT
|
||||
i16 AtomicI16
|
||||
}
|
||||
#[cfg(target_has_atomic_load_store = "16")]
|
||||
atomic_int! {
|
||||
@ -2934,14 +2917,12 @@ atomic_int! {
|
||||
stable(feature = "integer_atomics_stable", since = "1.34.0"),
|
||||
stable(feature = "integer_atomics_stable", since = "1.34.0"),
|
||||
rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
|
||||
unstable(feature = "integer_atomics", issue = "99069"),
|
||||
cfg_attr(not(test), rustc_diagnostic_item = "AtomicU16"),
|
||||
"u16",
|
||||
"",
|
||||
atomic_umin, atomic_umax,
|
||||
2,
|
||||
"AtomicU16::new(0)",
|
||||
u16 AtomicU16 ATOMIC_U16_INIT
|
||||
u16 AtomicU16
|
||||
}
|
||||
#[cfg(target_has_atomic_load_store = "32")]
|
||||
atomic_int! {
|
||||
@ -2954,14 +2935,12 @@ atomic_int! {
|
||||
stable(feature = "integer_atomics_stable", since = "1.34.0"),
|
||||
stable(feature = "integer_atomics_stable", since = "1.34.0"),
|
||||
rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
|
||||
unstable(feature = "integer_atomics", issue = "99069"),
|
||||
cfg_attr(not(test), rustc_diagnostic_item = "AtomicI32"),
|
||||
"i32",
|
||||
"",
|
||||
atomic_min, atomic_max,
|
||||
4,
|
||||
"AtomicI32::new(0)",
|
||||
i32 AtomicI32 ATOMIC_I32_INIT
|
||||
i32 AtomicI32
|
||||
}
|
||||
#[cfg(target_has_atomic_load_store = "32")]
|
||||
atomic_int! {
|
||||
@ -2974,14 +2953,12 @@ atomic_int! {
|
||||
stable(feature = "integer_atomics_stable", since = "1.34.0"),
|
||||
stable(feature = "integer_atomics_stable", since = "1.34.0"),
|
||||
rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
|
||||
unstable(feature = "integer_atomics", issue = "99069"),
|
||||
cfg_attr(not(test), rustc_diagnostic_item = "AtomicU32"),
|
||||
"u32",
|
||||
"",
|
||||
atomic_umin, atomic_umax,
|
||||
4,
|
||||
"AtomicU32::new(0)",
|
||||
u32 AtomicU32 ATOMIC_U32_INIT
|
||||
u32 AtomicU32
|
||||
}
|
||||
#[cfg(target_has_atomic_load_store = "64")]
|
||||
atomic_int! {
|
||||
@ -2994,14 +2971,12 @@ atomic_int! {
|
||||
stable(feature = "integer_atomics_stable", since = "1.34.0"),
|
||||
stable(feature = "integer_atomics_stable", since = "1.34.0"),
|
||||
rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
|
||||
unstable(feature = "integer_atomics", issue = "99069"),
|
||||
cfg_attr(not(test), rustc_diagnostic_item = "AtomicI64"),
|
||||
"i64",
|
||||
"",
|
||||
atomic_min, atomic_max,
|
||||
8,
|
||||
"AtomicI64::new(0)",
|
||||
i64 AtomicI64 ATOMIC_I64_INIT
|
||||
i64 AtomicI64
|
||||
}
|
||||
#[cfg(target_has_atomic_load_store = "64")]
|
||||
atomic_int! {
|
||||
@ -3014,14 +2989,12 @@ atomic_int! {
|
||||
stable(feature = "integer_atomics_stable", since = "1.34.0"),
|
||||
stable(feature = "integer_atomics_stable", since = "1.34.0"),
|
||||
rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
|
||||
unstable(feature = "integer_atomics", issue = "99069"),
|
||||
cfg_attr(not(test), rustc_diagnostic_item = "AtomicU64"),
|
||||
"u64",
|
||||
"",
|
||||
atomic_umin, atomic_umax,
|
||||
8,
|
||||
"AtomicU64::new(0)",
|
||||
u64 AtomicU64 ATOMIC_U64_INIT
|
||||
u64 AtomicU64
|
||||
}
|
||||
#[cfg(target_has_atomic_load_store = "128")]
|
||||
atomic_int! {
|
||||
@ -3034,14 +3007,12 @@ atomic_int! {
|
||||
unstable(feature = "integer_atomics", issue = "99069"),
|
||||
unstable(feature = "integer_atomics", issue = "99069"),
|
||||
rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
|
||||
unstable(feature = "integer_atomics", issue = "99069"),
|
||||
cfg_attr(not(test), rustc_diagnostic_item = "AtomicI128"),
|
||||
"i128",
|
||||
"#![feature(integer_atomics)]\n\n",
|
||||
atomic_min, atomic_max,
|
||||
16,
|
||||
"AtomicI128::new(0)",
|
||||
i128 AtomicI128 ATOMIC_I128_INIT
|
||||
i128 AtomicI128
|
||||
}
|
||||
#[cfg(target_has_atomic_load_store = "128")]
|
||||
atomic_int! {
|
||||
@ -3054,19 +3025,17 @@ atomic_int! {
|
||||
unstable(feature = "integer_atomics", issue = "99069"),
|
||||
unstable(feature = "integer_atomics", issue = "99069"),
|
||||
rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
|
||||
unstable(feature = "integer_atomics", issue = "99069"),
|
||||
cfg_attr(not(test), rustc_diagnostic_item = "AtomicU128"),
|
||||
"u128",
|
||||
"#![feature(integer_atomics)]\n\n",
|
||||
atomic_umin, atomic_umax,
|
||||
16,
|
||||
"AtomicU128::new(0)",
|
||||
u128 AtomicU128 ATOMIC_U128_INIT
|
||||
u128 AtomicU128
|
||||
}
|
||||
|
||||
#[cfg(target_has_atomic_load_store = "ptr")]
|
||||
macro_rules! atomic_int_ptr_sized {
|
||||
( $($target_pointer_width:literal $align:literal)* ) => { $(
|
||||
#[cfg(target_has_atomic_load_store = "ptr")]
|
||||
#[cfg(target_pointer_width = $target_pointer_width)]
|
||||
atomic_int! {
|
||||
cfg(target_has_atomic = "ptr"),
|
||||
@ -3078,16 +3047,13 @@ macro_rules! atomic_int_ptr_sized {
|
||||
stable(feature = "atomic_from", since = "1.23.0"),
|
||||
stable(feature = "atomic_nand", since = "1.27.0"),
|
||||
rustc_const_stable(feature = "const_ptr_sized_atomics", since = "1.24.0"),
|
||||
stable(feature = "rust1", since = "1.0.0"),
|
||||
cfg_attr(not(test), rustc_diagnostic_item = "AtomicIsize"),
|
||||
"isize",
|
||||
"",
|
||||
atomic_min, atomic_max,
|
||||
$align,
|
||||
"AtomicIsize::new(0)",
|
||||
isize AtomicIsize ATOMIC_ISIZE_INIT
|
||||
isize AtomicIsize
|
||||
}
|
||||
#[cfg(target_has_atomic_load_store = "ptr")]
|
||||
#[cfg(target_pointer_width = $target_pointer_width)]
|
||||
atomic_int! {
|
||||
cfg(target_has_atomic = "ptr"),
|
||||
@ -3099,18 +3065,37 @@ macro_rules! atomic_int_ptr_sized {
|
||||
stable(feature = "atomic_from", since = "1.23.0"),
|
||||
stable(feature = "atomic_nand", since = "1.27.0"),
|
||||
rustc_const_stable(feature = "const_ptr_sized_atomics", since = "1.24.0"),
|
||||
stable(feature = "rust1", since = "1.0.0"),
|
||||
cfg_attr(not(test), rustc_diagnostic_item = "AtomicUsize"),
|
||||
"usize",
|
||||
"",
|
||||
atomic_umin, atomic_umax,
|
||||
$align,
|
||||
"AtomicUsize::new(0)",
|
||||
usize AtomicUsize ATOMIC_USIZE_INIT
|
||||
usize AtomicUsize
|
||||
}
|
||||
|
||||
/// An [`AtomicIsize`] initialized to `0`.
|
||||
#[cfg(target_pointer_width = $target_pointer_width)]
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
#[deprecated(
|
||||
since = "1.34.0",
|
||||
note = "the `new` function is now preferred",
|
||||
suggestion = "AtomicIsize::new(0)",
|
||||
)]
|
||||
pub const ATOMIC_ISIZE_INIT: AtomicIsize = AtomicIsize::new(0);
|
||||
|
||||
/// An [`AtomicUsize`] initialized to `0`.
|
||||
#[cfg(target_pointer_width = $target_pointer_width)]
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
#[deprecated(
|
||||
since = "1.34.0",
|
||||
note = "the `new` function is now preferred",
|
||||
suggestion = "AtomicUsize::new(0)",
|
||||
)]
|
||||
pub const ATOMIC_USIZE_INIT: AtomicUsize = AtomicUsize::new(0);
|
||||
)* };
|
||||
}
|
||||
|
||||
#[cfg(target_has_atomic_load_store = "ptr")]
|
||||
atomic_int_ptr_sized! {
|
||||
"16" 2
|
||||
"32" 4
|
||||
|
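The deprecation notes above point users at `new`; a small example of the preferred form (the `ATOMIC_*_INIT` constants remain only as deprecated aliases):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

// Preferred over the deprecated `ATOMIC_USIZE_INIT` constant.
static COUNTER: AtomicUsize = AtomicUsize::new(0);

fn main() {
    COUNTER.fetch_add(1, Ordering::Relaxed);
    assert_eq!(COUNTER.load(Ordering::Relaxed), 1);
}
```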
@ -1,3 +1,4 @@
|
||||
use core::cmp::Ordering;
|
||||
use core::num::NonZeroUsize;
|
||||
|
||||
/// A wrapper struct that implements `Eq` and `Ord` based on the wrapped
|
||||
@ -371,11 +372,39 @@ fn test_by_ref() {
|
||||
|
||||
#[test]
|
||||
fn test_is_sorted() {
|
||||
// Tests on integers
|
||||
assert!([1, 2, 2, 9].iter().is_sorted());
|
||||
assert!(![1, 3, 2].iter().is_sorted());
|
||||
assert!([0].iter().is_sorted());
|
||||
assert!(std::iter::empty::<i32>().is_sorted());
|
||||
assert!([0, 0].iter().is_sorted());
|
||||
assert!(core::iter::empty::<i32>().is_sorted());
|
||||
|
||||
// Tests on floats
|
||||
assert!([1.0f32, 2.0, 2.0, 9.0].iter().is_sorted());
|
||||
assert!(![1.0f32, 3.0f32, 2.0f32].iter().is_sorted());
|
||||
assert!([0.0f32].iter().is_sorted());
|
||||
assert!([0.0f32, 0.0f32].iter().is_sorted());
|
||||
// Test cases with NaNs
|
||||
assert!([f32::NAN].iter().is_sorted());
|
||||
assert!(![f32::NAN, f32::NAN].iter().is_sorted());
|
||||
assert!(![0.0, 1.0, f32::NAN].iter().is_sorted());
|
||||
// Tests from <https://github.com/rust-lang/rust/pull/55045#discussion_r229689884>
|
||||
assert!(![f32::NAN, f32::NAN, f32::NAN].iter().is_sorted());
|
||||
assert!(![1.0, f32::NAN, 2.0].iter().is_sorted());
|
||||
assert!(![2.0, f32::NAN, 1.0].iter().is_sorted());
|
||||
assert!(![2.0, f32::NAN, 1.0, 7.0].iter().is_sorted());
|
||||
assert!(![2.0, f32::NAN, 1.0, 0.0].iter().is_sorted());
|
||||
assert!(![-f32::NAN, -1.0, 0.0, 1.0, f32::NAN].iter().is_sorted());
|
||||
assert!(![f32::NAN, -f32::NAN, -1.0, 0.0, 1.0].iter().is_sorted());
|
||||
assert!(![1.0, f32::NAN, -f32::NAN, -1.0, 0.0].iter().is_sorted());
|
||||
assert!(![0.0, 1.0, f32::NAN, -f32::NAN, -1.0].iter().is_sorted());
|
||||
assert!(![-1.0, 0.0, 1.0, f32::NAN, -f32::NAN].iter().is_sorted());
|
||||
|
||||
// Tests for is_sorted_by
|
||||
assert!(![6, 2, 8, 5, 1, -60, 1337].iter().is_sorted());
|
||||
assert!([6, 2, 8, 5, 1, -60, 1337].iter().is_sorted_by(|_, _| Some(Ordering::Less)));
|
||||
|
||||
// Tests for is_sorted_by_key
|
||||
assert!([-2, -1, 0, 3].iter().is_sorted());
|
||||
assert!(![-2i32, -1, 0, 3].iter().is_sorted_by_key(|n| n.abs()));
|
||||
assert!(!["c", "bb", "aaa"].iter().is_sorted());
|
||||
|
@ -93,7 +93,7 @@
|
||||
#![feature(const_option)]
|
||||
#![feature(const_option_ext)]
|
||||
#![feature(const_result)]
|
||||
#![feature(integer_atomics)]
|
||||
#![cfg_attr(target_has_atomic = "128", feature(integer_atomics))]
|
||||
#![feature(int_roundings)]
|
||||
#![feature(slice_group_by)]
|
||||
#![feature(split_array)]
|
||||
|
@ -2278,11 +2278,39 @@ fn test_copy_within_panics_src_out_of_bounds() {
|
||||
fn test_is_sorted() {
|
||||
let empty: [i32; 0] = [];
|
||||
|
||||
// Tests on integers
|
||||
assert!([1, 2, 2, 9].is_sorted());
|
||||
assert!(![1, 3, 2].is_sorted());
|
||||
assert!([0].is_sorted());
|
||||
assert!([0, 0].is_sorted());
|
||||
assert!(empty.is_sorted());
|
||||
|
||||
// Tests on floats
|
||||
assert!([1.0f32, 2.0, 2.0, 9.0].is_sorted());
|
||||
assert!(![1.0f32, 3.0f32, 2.0f32].is_sorted());
|
||||
assert!([0.0f32].is_sorted());
|
||||
assert!([0.0f32, 0.0f32].is_sorted());
|
||||
// Test cases with NaNs
|
||||
assert!([f32::NAN].is_sorted());
|
||||
assert!(![f32::NAN, f32::NAN].is_sorted());
|
||||
assert!(![0.0, 1.0, f32::NAN].is_sorted());
|
||||
// Tests from <https://github.com/rust-lang/rust/pull/55045#discussion_r229689884>
|
||||
assert!(![f32::NAN, f32::NAN, f32::NAN].is_sorted());
|
||||
assert!(![1.0, f32::NAN, 2.0].is_sorted());
|
||||
assert!(![2.0, f32::NAN, 1.0].is_sorted());
|
||||
assert!(![2.0, f32::NAN, 1.0, 7.0].is_sorted());
|
||||
assert!(![2.0, f32::NAN, 1.0, 0.0].is_sorted());
|
||||
assert!(![-f32::NAN, -1.0, 0.0, 1.0, f32::NAN].is_sorted());
|
||||
assert!(![f32::NAN, -f32::NAN, -1.0, 0.0, 1.0].is_sorted());
|
||||
assert!(![1.0, f32::NAN, -f32::NAN, -1.0, 0.0].is_sorted());
|
||||
assert!(![0.0, 1.0, f32::NAN, -f32::NAN, -1.0].is_sorted());
|
||||
assert!(![-1.0, 0.0, 1.0, f32::NAN, -f32::NAN].is_sorted());
|
||||
|
||||
// Tests for is_sorted_by
|
||||
assert!(![6, 2, 8, 5, 1, -60, 1337].is_sorted());
|
||||
assert!([6, 2, 8, 5, 1, -60, 1337].is_sorted_by(|_, _| Some(Ordering::Less)));
|
||||
|
||||
// Tests for is_sorted_by_key
|
||||
assert!([-2, -1, 0, 3].is_sorted());
|
||||
assert!(![-2i32, -1, 0, 3].is_sorted_by_key(|n| n.abs()));
|
||||
assert!(!["c", "bb", "aaa"].is_sorted());
|
||||
|
@ -2608,9 +2608,27 @@ impl Path {
|
||||
}
|
||||
|
||||
fn _with_extension(&self, extension: &OsStr) -> PathBuf {
|
||||
let mut buf = self.to_path_buf();
|
||||
buf.set_extension(extension);
|
||||
buf
|
||||
let self_len = self.as_os_str().len();
|
||||
let self_bytes = self.as_os_str().as_os_str_bytes();
|
||||
|
||||
let (new_capacity, slice_to_copy) = match self.extension() {
|
||||
None => {
|
||||
// Enough capacity for the extension and the dot
|
||||
let capacity = self_len + extension.len() + 1;
|
||||
let whole_path = self_bytes.iter();
|
||||
(capacity, whole_path)
|
||||
}
|
||||
Some(previous_extension) => {
|
||||
let capacity = self_len + extension.len() - previous_extension.len();
|
||||
let path_till_dot = self_bytes[..self_len - previous_extension.len()].iter();
|
||||
(capacity, path_till_dot)
|
||||
}
|
||||
};
|
||||
|
||||
let mut new_path = PathBuf::with_capacity(new_capacity);
|
||||
new_path.as_mut_vec().extend(slice_to_copy);
|
||||
new_path.set_extension(extension);
|
||||
new_path
|
||||
}
|
||||
|
||||
/// Produces an iterator over the [`Component`]s of the path.
|
||||
|
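The reworked `_with_extension` only changes how the result buffer is sized; the observable behaviour matches the tests added further below, for example:

```rust
use std::path::{Path, PathBuf};

fn main() {
    // Only the final extension is replaced; earlier dots stay untouched.
    assert_eq!(Path::new("foo.tar.gz").with_extension("zst"), PathBuf::from("foo.tar.zst"));
    assert_eq!(Path::new("foo").with_extension("txt"), PathBuf::from("foo.txt"));
    assert_eq!(Path::new("foo.txt").with_extension(""), PathBuf::from("foo"));
}
```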
@ -1183,7 +1183,7 @@ pub fn test_prefix_ext() {
|
||||
#[test]
|
||||
pub fn test_push() {
|
||||
macro_rules! tp (
|
||||
($path:expr, $push:expr, $expected:expr) => ( {
|
||||
($path:expr, $push:expr, $expected:expr) => ({
|
||||
let mut actual = PathBuf::from($path);
|
||||
actual.push($push);
|
||||
assert!(actual.to_str() == Some($expected),
|
||||
@ -1281,7 +1281,7 @@ pub fn test_push() {
|
||||
#[test]
|
||||
pub fn test_pop() {
|
||||
macro_rules! tp (
|
||||
($path:expr, $expected:expr, $output:expr) => ( {
|
||||
($path:expr, $expected:expr, $output:expr) => ({
|
||||
let mut actual = PathBuf::from($path);
|
||||
let output = actual.pop();
|
||||
assert!(actual.to_str() == Some($expected) && output == $output,
|
||||
@ -1335,7 +1335,7 @@ pub fn test_pop() {
|
||||
#[test]
|
||||
pub fn test_set_file_name() {
|
||||
macro_rules! tfn (
|
||||
($path:expr, $file:expr, $expected:expr) => ( {
|
||||
($path:expr, $file:expr, $expected:expr) => ({
|
||||
let mut p = PathBuf::from($path);
|
||||
p.set_file_name($file);
|
||||
assert!(p.to_str() == Some($expected),
|
||||
@ -1369,7 +1369,7 @@ pub fn test_set_file_name() {
|
||||
#[test]
|
||||
pub fn test_set_extension() {
|
||||
macro_rules! tfe (
|
||||
($path:expr, $ext:expr, $expected:expr, $output:expr) => ( {
|
||||
($path:expr, $ext:expr, $expected:expr, $output:expr) => ({
|
||||
let mut p = PathBuf::from($path);
|
||||
let output = p.set_extension($ext);
|
||||
assert!(p.to_str() == Some($expected) && output == $output,
|
||||
@ -1394,6 +1394,46 @@ pub fn test_set_extension() {
|
||||
tfe!("/", "foo", "/", false);
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_with_extension() {
|
||||
macro_rules! twe (
|
||||
($input:expr, $extension:expr, $expected:expr) => ({
|
||||
let input = Path::new($input);
|
||||
let output = input.with_extension($extension);
|
||||
|
||||
assert!(
|
||||
output.to_str() == Some($expected),
|
||||
"calling Path::new({:?}).with_extension({:?}): Expected {:?}, got {:?}",
|
||||
$input, $extension, $expected, output,
|
||||
);
|
||||
});
|
||||
);
|
||||
|
||||
twe!("foo", "txt", "foo.txt");
|
||||
twe!("foo.bar", "txt", "foo.txt");
|
||||
twe!("foo.bar.baz", "txt", "foo.bar.txt");
|
||||
twe!(".test", "txt", ".test.txt");
|
||||
twe!("foo.txt", "", "foo");
|
||||
twe!("foo", "", "foo");
|
||||
twe!("", "foo", "");
|
||||
twe!(".", "foo", ".");
|
||||
twe!("foo/", "bar", "foo.bar");
|
||||
twe!("foo/.", "bar", "foo.bar");
|
||||
twe!("..", "foo", "..");
|
||||
twe!("foo/..", "bar", "foo/..");
|
||||
twe!("/", "foo", "/");
|
||||
|
||||
// New extension is smaller than file name
|
||||
twe!("aaa_aaa_aaa", "bbb_bbb", "aaa_aaa_aaa.bbb_bbb");
|
||||
// New extension is greater than file name
|
||||
twe!("bbb_bbb", "aaa_aaa_aaa", "bbb_bbb.aaa_aaa_aaa");
|
||||
|
||||
// New extension is smaller than previous extension
|
||||
twe!("ccc.aaa_aaa_aaa", "bbb_bbb", "ccc.bbb_bbb");
|
||||
// New extension is greater than previous extension
|
||||
twe!("ccc.bbb_bbb", "aaa_aaa_aaa", "ccc.aaa_aaa_aaa");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_eq_receivers() {
|
||||
use crate::borrow::Cow;
|
||||
@ -1669,7 +1709,7 @@ fn into_rc() {
|
||||
#[test]
|
||||
fn test_ord() {
|
||||
macro_rules! ord(
|
||||
($ord:ident, $left:expr, $right:expr) => ( {
|
||||
($ord:ident, $left:expr, $right:expr) => ({
|
||||
use core::cmp::Ordering;
|
||||
|
||||
let left = Path::new($left);
|
||||
|
@ -66,26 +66,32 @@ impl Waker {
|
||||
/// Attempts to find another thread's entry, select the operation, and wake it up.
|
||||
#[inline]
|
||||
pub(crate) fn try_select(&mut self) -> Option<Entry> {
|
||||
self.selectors
|
||||
.iter()
|
||||
.position(|selector| {
|
||||
// Does the entry belong to a different thread?
|
||||
selector.cx.thread_id() != current_thread_id()
|
||||
&& selector // Try selecting this operation.
|
||||
.cx
|
||||
.try_select(Selected::Operation(selector.oper))
|
||||
.is_ok()
|
||||
&& {
|
||||
// Provide the packet.
|
||||
selector.cx.store_packet(selector.packet);
|
||||
// Wake the thread up.
|
||||
selector.cx.unpark();
|
||||
true
|
||||
}
|
||||
})
|
||||
// Remove the entry from the queue to keep it clean and improve
|
||||
// performance.
|
||||
.map(|pos| self.selectors.remove(pos))
|
||||
if self.selectors.is_empty() {
|
||||
None
|
||||
} else {
|
||||
let thread_id = current_thread_id();
|
||||
|
||||
self.selectors
|
||||
.iter()
|
||||
.position(|selector| {
|
||||
// Does the entry belong to a different thread?
|
||||
selector.cx.thread_id() != thread_id
|
||||
&& selector // Try selecting this operation.
|
||||
.cx
|
||||
.try_select(Selected::Operation(selector.oper))
|
||||
.is_ok()
|
||||
&& {
|
||||
// Provide the packet.
|
||||
selector.cx.store_packet(selector.packet);
|
||||
// Wake the thread up.
|
||||
selector.cx.unpark();
|
||||
true
|
||||
}
|
||||
})
|
||||
// Remove the entry from the queue to keep it clean and improve
|
||||
// performance.
|
||||
.map(|pos| self.selectors.remove(pos))
|
||||
}
|
||||
}
|
||||
|
||||
/// Notifies all operations waiting to be ready.
|
||||
|
@ -4,29 +4,4 @@
|
||||
#![unstable(feature = "thread_local_internals", issue = "none")]
|
||||
#![cfg(target_thread_local)]
|
||||
|
||||
// Using a per-thread list avoids the problems in synchronizing global state.
|
||||
#[thread_local]
|
||||
static mut DESTRUCTORS: Vec<(*mut u8, unsafe extern "C" fn(*mut u8))> = Vec::new();
|
||||
|
||||
// Ensure this can never be inlined because otherwise this may break in dylibs.
|
||||
// See #44391.
|
||||
#[inline(never)]
|
||||
pub unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) {
|
||||
DESTRUCTORS.push((t, dtor));
|
||||
}
|
||||
|
||||
#[inline(never)] // See comment above
|
||||
/// Runs destructors. This should not be called until thread exit.
|
||||
pub unsafe fn run_keyless_dtors() {
|
||||
// Drop all the destructors.
|
||||
//
|
||||
// Note: While this is potentially an infinite loop, it *should* be
|
||||
// the case that this loop always terminates because we provide the
|
||||
// guarantee that a TLS key cannot be set after it is flagged for
|
||||
// destruction.
|
||||
while let Some((ptr, dtor)) = DESTRUCTORS.pop() {
|
||||
(dtor)(ptr);
|
||||
}
|
||||
// We're done so free the memory.
|
||||
DESTRUCTORS = Vec::new();
|
||||
}
|
||||
pub use super::thread_local_key::register_keyless_dtor as register_dtor;
|
||||
|
@ -1,7 +1,7 @@
|
||||
use crate::cell::UnsafeCell;
|
||||
use crate::ptr;
|
||||
use crate::sync::atomic::{
|
||||
AtomicPtr, AtomicU32,
|
||||
AtomicBool, AtomicPtr, AtomicU32,
|
||||
Ordering::{AcqRel, Acquire, Relaxed, Release},
|
||||
};
|
||||
use crate::sys::c;
|
||||
@ -9,6 +9,41 @@ use crate::sys::c;
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
|
||||
/// An optimization hint. The compiler is often smart enough to know if an atomic
|
||||
/// is never set and can remove dead code based on that fact.
|
||||
static HAS_DTORS: AtomicBool = AtomicBool::new(false);
|
||||
|
||||
// Using a per-thread list avoids the problems in synchronizing global state.
|
||||
#[thread_local]
|
||||
#[cfg(target_thread_local)]
|
||||
static mut DESTRUCTORS: Vec<(*mut u8, unsafe extern "C" fn(*mut u8))> = Vec::new();
|
||||
|
||||
// Ensure this can never be inlined because otherwise this may break in dylibs.
|
||||
// See #44391.
|
||||
#[inline(never)]
|
||||
#[cfg(target_thread_local)]
|
||||
pub unsafe fn register_keyless_dtor(t: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) {
|
||||
DESTRUCTORS.push((t, dtor));
|
||||
HAS_DTORS.store(true, Relaxed);
|
||||
}
|
||||
|
||||
#[inline(never)] // See comment above
|
||||
#[cfg(target_thread_local)]
|
||||
/// Runs destructors. This should not be called until thread exit.
|
||||
unsafe fn run_keyless_dtors() {
|
||||
// Drop all the destructors.
|
||||
//
|
||||
// Note: While this is potentially an infinite loop, it *should* be
|
||||
// the case that this loop always terminates because we provide the
|
||||
// guarantee that a TLS key cannot be set after it is flagged for
|
||||
// destruction.
|
||||
while let Some((ptr, dtor)) = DESTRUCTORS.pop() {
|
||||
(dtor)(ptr);
|
||||
}
|
||||
// We're done so free the memory.
|
||||
DESTRUCTORS = Vec::new();
|
||||
}
|
||||
|
||||
type Key = c::DWORD;
|
||||
type Dtor = unsafe extern "C" fn(*mut u8);
|
||||
|
||||
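A hedged sketch of the gating pattern that `HAS_DTORS` above implements, using made-up names (`HAS_WORK`, `register`, `run_if_any`) and a `Mutex` in place of the real thread-local list:

```rust
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Mutex;

// Cheap atomic gate in front of a rarely-populated registry, so the common
// "nothing was ever registered" path does no real work.
static HAS_WORK: AtomicBool = AtomicBool::new(false);
static WORK: Mutex<Vec<fn()>> = Mutex::new(Vec::new());

fn register(f: fn()) {
    WORK.lock().unwrap().push(f);
    HAS_WORK.store(true, Ordering::Release);
}

fn run_if_any() {
    if !HAS_WORK.load(Ordering::Acquire) {
        return; // fast path: the gate is still closed
    }
    while let Some(f) = WORK.lock().unwrap().pop() {
        f();
    }
}

fn main() {
    run_if_any(); // no-op
    register(|| println!("callback ran"));
    run_if_any();
}
```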
@ -156,6 +191,8 @@ static DTORS: AtomicPtr<StaticKey> = AtomicPtr::new(ptr::null_mut());
|
||||
/// Should only be called once per key, otherwise loops or breaks may occur in
|
||||
/// the linked list.
|
||||
unsafe fn register_dtor(key: &'static StaticKey) {
|
||||
// Ensure this is never run when native thread locals are available.
|
||||
assert_eq!(false, cfg!(target_thread_local));
|
||||
let this = <*const StaticKey>::cast_mut(key);
|
||||
// Use acquire ordering to pass along the changes done by the previously
|
||||
// registered keys when we store the new head with release ordering.
|
||||
@ -167,6 +204,7 @@ unsafe fn register_dtor(key: &'static StaticKey) {
|
||||
Err(new) => head = new,
|
||||
}
|
||||
}
|
||||
HAS_DTORS.store(true, Release);
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
@ -240,10 +278,14 @@ pub static p_thread_callback: unsafe extern "system" fn(c::LPVOID, c::DWORD, c::
|
||||
|
||||
#[allow(dead_code, unused_variables)]
|
||||
unsafe extern "system" fn on_tls_callback(h: c::LPVOID, dwReason: c::DWORD, pv: c::LPVOID) {
|
||||
if !HAS_DTORS.load(Acquire) {
|
||||
return;
|
||||
}
|
||||
if dwReason == c::DLL_THREAD_DETACH || dwReason == c::DLL_PROCESS_DETACH {
|
||||
#[cfg(not(target_thread_local))]
|
||||
run_dtors();
|
||||
#[cfg(target_thread_local)]
|
||||
super::thread_local_dtor::run_keyless_dtors();
|
||||
run_keyless_dtors();
|
||||
}
|
||||
|
||||
// See comments above for what this is doing. Note that we don't need this
|
||||
|
@ -1,3 +1,7 @@
|
||||
// This file only tests the thread local key fallback.
|
||||
// Windows targets with native thread local support do not use this.
|
||||
#![cfg(not(target_thread_local))]
|
||||
|
||||
use super::StaticKey;
|
||||
use crate::ptr;
|
||||
|
||||
|
@ -2087,10 +2087,11 @@ impl Step for ErrorIndex {
|
||||
let mut tool = tool::ErrorIndex::command(builder);
|
||||
tool.arg("markdown").arg(&output);
|
||||
|
||||
let _guard =
|
||||
let guard =
|
||||
builder.msg(Kind::Test, compiler.stage, "error-index", compiler.host, compiler.host);
|
||||
let _time = util::timeit(&builder);
|
||||
builder.run_quiet(&mut tool);
|
||||
drop(guard);
|
||||
// The tests themselves need to link to std, so make sure it is
|
||||
// available.
|
||||
builder.ensure(compile::Std::new(compiler, compiler.host));
|
||||
|
@ -260,6 +260,10 @@ The valid types of print values are:
|
||||
This returns rustc's minimum supported deployment target if no `*_DEPLOYMENT_TARGET` variable
|
||||
is present in the environment, or otherwise returns the variable's parsed value.
|
||||
|
||||
A filepath may optionally be specified for each requested information kind, in
|
||||
the format `--print KIND=PATH`, just like for `--emit`. When a path is
|
||||
specified, information will be written there instead of to stdout.
|
||||
|
||||
[conditional compilation]: ../reference/conditional-compilation.html
|
||||
[deployment target]: https://developer.apple.com/library/archive/documentation/DeveloperTools/Conceptual/cross_development/Configuring/configuring.html
|
||||
|
||||
|
@ -37,10 +37,9 @@ options.
|
||||
### Indentation and line width
|
||||
|
||||
* Use spaces, not tabs.
|
||||
* Each level of indentation must be four spaces (that is, all indentation
|
||||
outside of string literals and comments must be a multiple of four).
|
||||
* Each level of indentation must be 4 spaces (that is, all indentation
|
||||
outside of string literals and comments must be a multiple of 4).
|
||||
* The maximum width for a line is 100 characters.
|
||||
* A tool may choose to make some of these configurable.
|
||||
|
||||
#### Block indent
|
||||
|
||||
@ -63,7 +62,8 @@ example) and less rightward drift.
|
||||
|
||||
### Trailing commas
|
||||
|
||||
Lists should have a trailing comma when followed by a newline:
|
||||
In comma-separated lists of any kind, use a trailing comma when followed by a
|
||||
newline:
|
||||
|
||||
```rust
|
||||
function_call(
|
||||
@ -99,8 +99,6 @@ fn bar() {}
|
||||
fn baz() {}
|
||||
```
|
||||
|
||||
Formatting tools may wish to make the bounds on blank lines configurable.
|
||||
|
||||
### [Module-level items](items.md)
|
||||
### [Statements](statements.md)
|
||||
### [Expressions](expressions.md)
|
||||
@ -114,17 +112,17 @@ formatter might skip formatting of comments.
|
||||
|
||||
Prefer line comments (`//`) to block comments (`/* ... */`).
|
||||
|
||||
When using line comments there should be a single space after the opening sigil.
|
||||
When using line comments, put a single space after the opening sigil.
|
||||
|
||||
When using single-line block comments there should be a single space after the
|
||||
opening sigil and before the closing sigil. Multi-line block comments should
|
||||
have a newline after the opening sigil and before the closing sigil.
|
||||
When using single-line block comments, put a single space after the opening
|
||||
sigil and before the closing sigil. For multi-line block comments, put a
|
||||
newline after the opening sigil, and a newline before the closing sigil.
|
||||
|
||||
Prefer to put a comment on its own line. Where a comment follows code, there
|
||||
should be a single space before it. Where a block comment is inline, there
|
||||
should be surrounding whitespace as if it were an identifier or keyword. There
|
||||
should be no trailing whitespace after a comment or at the end of any line in a
|
||||
multi-line comment. Examples:
|
||||
Prefer to put a comment on its own line. Where a comment follows code, put a
|
||||
single space before it. Where a block comment appears inline, use surrounding
|
||||
whitespace as if it were an identifier or keyword. Do not include trailing
|
||||
whitespace after a comment or at the end of any line in a multi-line comment.
|
||||
Examples:
|
||||
|
||||
```rust
|
||||
// A comment on an item.
|
||||
@ -173,7 +171,7 @@ Prefer line comments (`///`) to block comments (`/** ... */`).
|
||||
Prefer outer doc comments (`///` or `/** ... */`), only use inner doc comments
|
||||
(`//!` and `/*! ... */`) to write module-level or crate-level documentation.
|
||||
|
||||
Doc comments should come before attributes.
|
||||
Put doc comments before attributes.
|
||||
|
||||
### Attributes
|
||||
|
||||
@ -198,18 +196,20 @@ struct CRepr {
|
||||
}
|
||||
```
|
||||
|
||||
For attributes with an equal sign, there should be a single space before and
|
||||
after the `=`, e.g., `#[foo = 42]`.
|
||||
For attributes with an equal sign, put a single space before and after the `=`,
|
||||
e.g., `#[foo = 42]`.
|
||||
|
||||
There must only be a single `derive` attribute. Note for tool authors: if
|
||||
combining multiple `derive` attributes into a single attribute, the ordering of
|
||||
the derived names should be preserved. E.g., `#[derive(bar)] #[derive(foo)]
|
||||
struct Baz;` should be formatted to `#[derive(bar, foo)] struct Baz;`.
|
||||
the derived names must generally be preserved for correctness:
|
||||
`#[derive(Foo)] #[derive(Bar)] struct Baz;` must be formatted to
|
||||
`#[derive(Foo, Bar)] struct Baz;`.
|
||||
|
||||
### *small* items
|
||||
|
||||
In many places in this guide we specify that a formatter may format an item
|
||||
differently if it is *small*, for example struct literals:
|
||||
In many places in this guide we specify formatting that depends on a code
|
||||
construct being *small*. For example, single-line vs multi-line struct
|
||||
literals:
|
||||
|
||||
```rust
|
||||
// Normal formatting
|
||||
@ -218,7 +218,7 @@ Foo {
|
||||
f2: another_expression(),
|
||||
}
|
||||
|
||||
// *small* formatting
|
||||
// "small" formatting
|
||||
Foo { f1, f2 }
|
||||
```
|
||||
|
||||
@ -231,10 +231,6 @@ complexity of an item (for example, that all components must be simple names,
|
||||
not more complex sub-expressions). For more discussion on suitable heuristics,
|
||||
see [this issue](https://github.com/rust-lang-nursery/fmt-rfcs/issues/47).
|
||||
|
||||
Tools should give the user an option to ignore such heuristics and always use
|
||||
the normal formatting.
|
||||
|
||||
|
||||
## [Non-formatting conventions](advice.md)
|
||||
|
||||
## [Cargo.toml conventions](cargo.md)
|
||||
|
@ -2,10 +2,13 @@
|
||||
|
||||
### Blocks
|
||||
|
||||
A block expression should have a newline after the initial `{` and before the
|
||||
terminal `}`. Any qualifier before the block (e.g., `unsafe`) should always be
|
||||
on the same line as the opening brace, and separated with a single space. The
|
||||
contents of the block should be block indented:
|
||||
A block expression must have a newline after the initial `{` and before the
|
||||
terminal `}`, unless it qualifies to be written as a single line based on
|
||||
another style rule.
|
||||
|
||||
A keyword before the block (such as `unsafe` or `async`) must be on the same
|
||||
line as the opening brace, with a single space between the keyword and the
|
||||
opening brace. Indent the contents of the block.
|
||||
|
||||
```rust
|
||||
fn block_as_stmt() {
|
||||
@ -40,7 +43,7 @@ fn unsafe_block_as_stmt() {
|
||||
}
|
||||
```
|
||||
|
||||
If a block has an attribute, it should be on its own line:
|
||||
If a block has an attribute, put it on its own line before the block:
|
||||
|
||||
```rust
|
||||
fn block_as_stmt() {
|
||||
@ -54,18 +57,18 @@ fn block_as_stmt() {
|
||||
}
|
||||
```
|
||||
|
||||
Avoid writing comments on the same line as the braces.
|
||||
Avoid writing comments on the same lines as either of the braces.
|
||||
|
||||
An empty block should be written as `{}`.
|
||||
Write an empty block as `{}`.
|
||||
|
||||
A block may be written on a single line if:
|
||||
Write a block on a single line if:
|
||||
|
||||
* it is either used in expression position (not statement position) or is an
|
||||
unsafe block in statement position
|
||||
* contains a single-line expression and no statements
|
||||
* contains no comments
|
||||
unsafe block in statement position,
|
||||
* it contains a single-line expression and no statements, and
|
||||
* it contains no comments
|
||||
|
||||
A single line block should have spaces after the opening brace and before the
|
||||
For a single-line block, put spaces after the opening brace and before the
|
||||
closing brace.
|
||||
|
||||
Examples:
|
||||
@ -117,14 +120,14 @@ fn main() {
|
||||
### Closures
|
||||
|
||||
Don't put any extra spaces before the first `|` (unless the closure is prefixed
|
||||
by `move`); put a space between the second `|` and the expression of the
|
||||
closure. Between the `|`s, you should use function definition syntax, however,
|
||||
elide types where possible.
|
||||
by a keyword such as `move`); put a space between the second `|` and the
|
||||
expression of the closure. Between the `|`s, use function definition syntax,
|
||||
but elide types where possible.
|
||||
|
||||
Use closures without the enclosing `{}`, if possible. Add the `{}` when you have
|
||||
a return type, when there are statements, there are comments in the body, or the
|
||||
body expression spans multiple lines and is a control-flow expression. If using
|
||||
braces, follow the rules above for blocks. Examples:
|
||||
a return type, when there are statements, when there are comments inside the
|
||||
closure, or when the body expression is a control-flow expression that spans
|
||||
multiple lines. If using braces, follow the rules above for blocks. Examples:
|
||||
|
||||
```rust
|
||||
|arg1, arg2| expr
|
||||
@ -155,13 +158,14 @@ move |arg1: i32, arg2: i32| -> i32 {

### Struct literals

If a struct literal is *small* it may be formatted on a single line. If not,
each field should be on it's own, block-indented line. There should be a
trailing comma in the multi-line form only. There should be a space after the
colon only.
If a struct literal is *small*, format it on a single line, and do not use a
trailing comma. If not, split it across multiple lines, with each field on its
own block-indented line, and use a trailing comma.

There should be a space before the opening brace. In the single-line form there
should be spaces after the opening brace and before the closing brace.
For each `field: value` entry, put a space after the colon only.

Put a space before the opening brace. In the single-line form, put spaces after
the opening brace and before the closing brace.

```rust
Foo { field1, field2: 0 }
@@ -172,19 +176,25 @@ let f = Foo {
```
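
The multi-line form is mostly elided by the hunk above; a sketch of both forms (hypothetical names, not part of the upstream diff):

```rust
// Small: single line, spaces inside the braces, no trailing comma.
let origin = Point { x: 0, y: 0 };

// Not small: one block-indented line per field, with a trailing comma.
let config = RenderConfig {
    width: 1920,
    height: 1080,
    vsync_enabled: true,
};
```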

Functional record update syntax is treated like a field, but it must never have
a trailing comma. There should be no space after `..`.
a trailing comma. Do not put a space after `..`.

```rust
let f = Foo {
    field1,
    ..an_expr
};
```


### Tuple literals

Use a single-line form where possible. There should not be spaces around the
parentheses. Where a single-line form is not possible, each element of the tuple
should be on its own block-indented line and there should be a trailing comma.
Use a single-line form where possible. Do not put spaces between the opening
parenthesis and the first element, or between the last element and the closing
parenthesis. Separate elements with a comma followed by a space.

Where a single-line form is not possible, write the tuple across
multiple lines, with each element of the tuple on its own block-indented line,
and use a trailing comma.

```rust
(a, b, c)
@@ -198,14 +208,23 @@ let x = (
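
An illustrative sketch of the multi-line form described above (hypothetical names, not part of the upstream diff):

```rust
// Each element on its own block-indented line, with a trailing comma.
let coordinates = (
    a_long_expression_for_latitude,
    a_long_expression_for_longitude,
);
```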

### Tuple struct literals

There should be no space between the identifier and the opening parenthesis.
Otherwise, follow the rules for tuple literals, e.g., `Foo(a, b)`.
Do not put space between the identifier and the opening parenthesis. Otherwise,
follow the rules for tuple literals:

```rust
Foo(a, b, c)

let x = Foo(
    a_long_expr,
    another_very_long_expr,
);
```


### Enum literals

Follow the formatting rules for the various struct literals. Prefer using the
name of the enum as a qualifying name, unless the enum is in the prelude. E.g.,
name of the enum as a qualifying name, unless the enum is in the prelude:

```rust
Foo::Bar(a, b)
@@ -219,24 +238,29 @@ Ok(an_expr)

### Array literals

For simple array literals, avoid line breaking, no spaces around square
brackets, contents of the array should be separated by commas and spaces. If
using the repeating initialiser, there should be a space after the semicolon
only. Apply the same rules if using the `vec!` or similar macros (always use
square brackets here). Examples:
Write small array literals on a single line. Do not put spaces between the opening
square bracket and the first element, or between the last element and the closing
square bracket. Separate elements with a comma followed by a space.

If using the repeating initializer, put a space after the semicolon
only.

Apply the same rules if using `vec!` or similar array-like macros; always use
square brackets with such macros. Examples:

```rust
fn main() {
    [1, 2, 3];
    vec![a, b, c, d];
    let x = [1, 2, 3];
    let y = vec![a, b, c, d];
    let a = [42; 10];
}
```

If a line must be broken, prefer breaking only after the `;`, if possible.
Otherwise, follow the rules below for function calls. In any case, the contents
of the initialiser should be block indented and there should be line breaks
after the opening bracket and before the closing bracket:
For arrays that have to be broken across lines, if using the repeating
initializer, break after the `;`, not before. Otherwise, follow the rules below
for function calls. In any case, block-indent the contents of the initializer,
and put line breaks after the opening square bracket and before the closing
square bracket:

```rust
fn main() {
@@ -255,11 +279,12 @@ fn main() {

### Array accesses, indexing, and slicing.

No spaces around the square brackets, avoid breaking lines if possible, never
break a line between the target expression and the opening bracket. If the
indexing expression covers multiple lines, then it should be block indented and
there should be newlines after the opening brackets and before the closing
bracket. However, this should be avoided where possible.
Don't put spaces around the square brackets. Avoid breaking lines if possible.
Never break a line between the target expression and the opening square
bracket. If the indexing expression must be broken onto a subsequent line, or
spans multiple lines itself, then block-indent the indexing expression, and put
newlines after the opening square bracket and before the closing square
bracket:

Examples:
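
The original examples are elided here; an illustrative sketch of the indexing rules (hypothetical names, not part of the upstream diff):

```rust
// Prefer the single-line form.
let first = values[0];
let window = &values[start..start + len];

// Only if the indexing expression cannot fit, block-indent it:
let item = lookup_table[
    compute_a_rather_long_index(seed, offset)
];
```
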

@@ -291,7 +316,7 @@ if you have `t: &T`, and `u: U`, prefer `*t op u` to `t op &u`. In general,
within expressions, prefer dereferencing to taking references, unless necessary
(e.g. to avoid an unnecessarily expensive operation).

Use parentheses liberally, do not necessarily elide them due to precedence.
Use parentheses liberally; do not necessarily elide them due to precedence.
Tools should not automatically insert or remove parentheses. Do not use spaces
to indicate precedence.
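
For instance (an illustrative sketch, not part of the upstream diff):

```rust
// Explicit parentheses make the grouping immediate to the reader,
// even though operator precedence already binds `*` tighter than `+`.
let total = (count * item_size) + header_size;

// Avoid leaning on precedence or on spacing to suggest the grouping:
// let total = count*item_size + header_size;
```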

@@ -353,10 +378,10 @@ foo(x, y, z)
#### Multi-line calls

If the function call is not *small*, it would otherwise over-run the max width,
or any argument or the callee is multi-line, then the call should be formatted
across multiple lines. In this case, each argument should be on it's own block-
indented line, there should be a newline after the opening parenthesis and
before the closing parenthesis, and there should be a trailing comma. E.g.,
or any argument or the callee is multi-line, then format the call across
multiple lines. In this case, put each argument on its own block-indented line,
break after the opening parenthesis and before the closing parenthesis,
and use a trailing comma:

```rust
a_function_call(
@@ -379,17 +404,18 @@ x.foo().bar().baz(x, y, z);
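
A sketch of a multi-line call following these rules (hypothetical names, not part of the upstream diff):

```rust
// Each argument on its own block-indented line, with a trailing comma.
let client = build_http_client(
    connection_timeout,
    read_timeout,
    a_fairly_long_retry_policy_expression,
);
```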

### Macro uses

Macros which can be parsed like other constructs should be formatted like those
If a macro can be parsed like other constructs, format it like those
constructs. For example, a macro use `foo!(a, b, c)` can be parsed like a
function call (ignoring the `!`), therefore it should be formatted following the
rules for function calls.
function call (ignoring the `!`), so format it using the rules for function
calls.

#### Special case macros

Macros which take a format string and where all other arguments are *small* may
be formatted with arguments before and after the format string on a single line
and the format string on its own line, rather than putting each argument on its
own line. For example,
For macros which take a format string, if all other arguments are *small*,
format the arguments before the format string on a single line if they fit, and
format the arguments after the format string on a single line if they fit, with
the format string on its own line. If the arguments are not small or do not
fit, put each on its own line as with a function. For example:

```rust
println!(
@@ -416,13 +442,13 @@ let cstr = "Hi\0" as *const str as *const [u8] as *const std::os::raw::c_char;
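
The full example is elided by the hunk above; a sketch of the layout described (message text and names are hypothetical, not part of the upstream diff):

```rust
// Small arguments before the format string on one line, the format string on
// its own line, and the remaining small arguments on one line.
assert_eq!(
    actual_len, expected_len,
    "frame {} decoded to the wrong length ({} bytes)",
    frame_index, actual_len,
);
```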

### Chains of fields and method calls

A chain is a sequence of field accesses and/or method calls. A chain may also
include the try operator ('?'). E.g., `a.b.c().d` or `foo?.bar().baz?`.
A chain is a sequence of field accesses, method calls, and/or uses of the try
operator `?`. E.g., `a.b.c().d` or `foo?.bar().baz?`.

Prefer formatting on one line if possible, and the chain is *small*. If
formatting on multiple lines, each field access or method call in the chain
should be on its own line with the line-break before the `.` and after any `?`.
Each line should be block-indented. E.g.,
Format the chain on one line if it is "small" and otherwise possible to do so.
If formatting on multiple lines, put each field access or method call in the
chain on its own line, with the line-break before the `.` and after any `?`.
Block-indent each subsequent line:

```rust
let foo = bar
@@ -431,13 +457,16 @@ let foo = bar
```
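
A sketch of a multi-line chain following these rules (hypothetical names, not part of the upstream diff):

```rust
// Break before each `.`, keep `?` at the end of its line, block-indent.
let report = registry
    .lookup(service_name)?
    .latest_snapshot()
    .summary;
```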

If the length of the last line of the first element plus its indentation is
less than or equal to the indentation of the second line (and there is space),
then combine the first and second lines, e.g.,
less than or equal to the indentation of the second line, then combine the
first and second lines if they fit. Apply this rule recursively.

```rust
x.baz?
    .qux()

x.y.z
    .qux()

let foo = x
    .baz?
    .qux();
@@ -489,13 +518,13 @@ self.pre_comment.as_ref().map_or(
This section covers `if`, `if let`, `loop`, `while`, `while let`, and `for`
expressions.

The keyword, any initial clauses, and the opening brace of the block should be
on a single line. The usual rules for [block formatting](#blocks) should be
applied to the block.
Put the keyword, any initial clauses, and the opening brace of the block all on
a single line, if they fit. Apply the usual rules for [block
formatting](#blocks) to the block.

If there is an `else` component, then the closing brace, `else`, any following
clause, and the opening brace should all be on the same line. There should be a
single space before and after the `else` keyword. For example:
If there is an `else` component, then put the closing brace, `else`, any
following clause, and the opening brace all on the same line, with a single
space before and after the `else` keyword:

```rust
if ... {
@@ -513,10 +542,10 @@ if let ... {
}
```

If the control line needs to be broken, then prefer to break before the `=` in
`* let` expressions and before `in` in a `for` expression; the following line
should be block indented. If the control line is broken for any reason, then the
opening brace should be on its own line and not indented. Examples:
If the control line needs to be broken, prefer to break before the `=` in `*
let` expressions and before `in` in a `for` expression; block-indent the
following line. If the control line is broken for any reason, put the opening
brace on its own line, not indented. Examples:

```rust
while let Some(foo)
@@ -539,10 +568,10 @@ if a_long_expression
}
```

Where the initial clause is multi-lined and ends with one or more closing
parentheses, square brackets, or braces, and there is nothing else on that line,
and that line is not indented beyond the indent on the first line of the control
flow expression, then the opening brace of the block should be put on the same
Where the initial clause spans multiple lines and ends with one or more closing
parentheses, square brackets, or braces, and there is nothing else on that
line, and that line is not indented beyond the indent on the first line of the
control flow expression, then put the opening brace of the block on the same
line with a preceding space. For example:

```rust
@@ -558,9 +587,9 @@ if !self.config.file_lines().intersects(

#### Single line `if else`

Formatters may place an `if else` or `if let else` on a single line if it occurs
in expression context (i.e., is not a standalone statement), it contains a
single `else` clause, and is *small*. For example:
Put an `if else` or `if let else` on a single line if it occurs in expression
context (i.e., is not a standalone statement), it contains a single `else`
clause, and is *small*:

```rust
let y = if x { 0 } else { 1 };
@@ -582,9 +611,9 @@ if x {

### Match

Prefer not to line-break inside the discriminant expression. There must always
be a line break after the opening brace and before the closing brace. The match
arms must be block indented once:
Prefer not to line-break inside the discriminant expression. Always break after
the opening brace and before the closing brace. Block-indent the match arms
once:

```rust
match foo {
@@ -598,7 +627,7 @@ let x = match foo.bar.baz() {

Use a trailing comma for a match arm if and only if not using a block.

Never start a match arm pattern with `|`, e.g.,
Never start a match arm pattern with `|`:

```rust
match foo {
@@ -608,14 +637,13 @@ match foo {
    | a_very_long_pattern
    | another_pattern
    | yet_another_pattern
    | a_forth_pattern => {
    | a_fourth_pattern => {
        ...
    }
}
```

Prefer

Prefer:

```rust
match foo {
@@ -623,7 +651,7 @@ match foo {
    a_very_long_pattern
    | another_pattern
    | yet_another_pattern
    | a_forth_pattern => {
    | a_fourth_pattern => {
        ...
    }
}
@@ -633,11 +661,11 @@ Avoid splitting the left-hand side (before the `=>`) of a match arm where
possible. If the right-hand side of the match arm is kept on the same line,
never use a block (unless the block is empty).

If the right-hand side consists of multiple statements or has line comments or
the start of the line cannot be fit on the same line as the left-hand side, use
a block.
If the right-hand side consists of multiple statements, or has line comments,
or the start of the line does not fit on the same line as the left-hand side,
use a block.

The body of a block arm should be block indented once.
Block-indent the body of a block arm.

Examples:

@@ -662,8 +690,8 @@ match foo {
```

If the body is a single expression with no line comments and not a control flow
expression, then it may be started on the same line as the right-hand side. If
not, then it must be in a block. Example,
expression, start it on the same line as the left-hand side. If not, then it
must be in a block. Example:

```rust
match foo {
@@ -687,8 +715,8 @@ match foo {

#### Line-breaking

Where it is possible to use a block form on the right-hand side and avoid
breaking the left-hand side, do that. E.g.
If using a block form on the right-hand side of a match arm makes it possible
to avoid breaking on the left-hand side, do that:

```rust
// Assuming the following line does not fit in the max width
@@ -720,7 +748,7 @@ body on a new line:

If required to break the pattern, put each clause of the pattern on its own
line with no additional indent, breaking before the `|`. If there is an `if`
clause, then you must use the above form:
clause, use the above form:

```rust
a_very_long_pattern
@@ -740,7 +768,7 @@ clause, then you must use the above form:
```

If the pattern is multi-line, and the last line is less wide than the indent, do
not put the `if` clause on a newline. E.g.,
not put the `if` clause on a new line. E.g.,

```rust
Token::Dimension {
@@ -753,8 +781,8 @@ not put the `if` clause on a newline. E.g.,
```

If every clause in a pattern is *small*, but the whole pattern does not fit on
one line, then the pattern may be formatted across multiple lines with as many
clauses per line as possible. Again break before a `|`:
one line, then format the pattern across multiple lines with as many clauses
per line as possible. Again, break before a `|`:

```rust
foo | bar | baz
@@ -783,8 +811,8 @@ E.g., `&&Some(foo)` matches, `Foo(4, Bar)` does not.
### Combinable expressions

Where a function call has a single argument, and that argument is formatted
across multiple-lines, the outer call may be formatted as if it were a single-
line call. The same combining behaviour may be applied to any similar
across multiple-lines, format the outer call as if it were a single-line call,
if the result fits. Apply the same combining behaviour to any similar
expressions which have multi-line, block-indented lists of sub-expressions
delimited by parentheses (e.g., macros or tuple struct literals). E.g.,

@@ -814,13 +842,12 @@ let arr = [combinable(
)];
```
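
An illustrative sketch of the combining rule (hypothetical names, not part of the upstream diff):

```rust
// The single multi-line argument lets the outer call be formatted as if it
// were a single-line call.
spawn_worker(WorkerConfig {
    queue_depth: 128,
    retry_budget: 3,
});
```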

Such behaviour should extend recursively, however, tools may choose to limit the
depth of nesting.
Apply this behavior recursively.

Only where the multi-line sub-expression is a closure with an explicit block,
this combining behaviour may be used where there are other arguments, as long as
all the arguments and the first line of the closure fit on the first line, the
closure is the last argument, and there is only one closure argument:
For a function with multiple arguments, if the last argument is a multi-line
closure with an explicit block, there are no other closure arguments, and all
the arguments and the first line of the closure fit on the first line, use the
same combining behavior:

```rust
foo(first_arg, x, |param| {
@@ -835,16 +862,17 @@ foo(first_arg, x, |param| {
Do not put spaces in ranges, e.g., `0..10`, `x..=y`, `..x.len()`, `foo..`.

When writing a range with both upper and lower bounds, if the line must be
broken, break before the range operator and block indent the second line:
broken within the range, break before the range operator and block indent the
second line:

```rust
a_long_expression
    ..another_long_expression
```

For the sake of indicating precedence, we recommend that if either bound is a
compound expression, then use parentheses around it, e.g., `..(x + 1)`,
`(x.f)..(x.f.len())`, or `0..(x - 10)`.
For the sake of indicating precedence, if either bound is a compound
expression, use parentheses around it, e.g., `..(x + 1)`, `(x.f)..(x.f.len())`,
or `0..(x - 10)`.


### Hexadecimal literals
@@ -852,11 +880,8 @@ compound expression, then use parentheses around it, e.g., `..(x + 1)`,
Hexadecimal literals may use upper- or lower-case letters, but they must not be
mixed within the same literal. Projects should use the same case for all
literals, but we do not make a recommendation for either lower- or upper-case.
Tools should have an option to convert mixed case literals to upper-case, and
may have an option to convert all literals to either lower- or upper-case.
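
For example (illustrative only, not part of the upstream diff):

```rust
// Either case is acceptable, but keep the choice consistent across the
// project and never mix cases within one literal.
let lower = 0xffff_ff00u32;
let upper = 0xFFFF_FF00u32;

// Avoid: 0xFfFf_Ff00
```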


## Patterns

Patterns should be formatted like their corresponding expressions. See the
section on `match` for additional formatting for patterns in match arms.
Format patterns like their corresponding expressions. See the section on
`match` for additional formatting for patterns in match arms.