Auto merge of #112127 - matthiaskrgr:rollup-77pt893, r=matthiaskrgr

Rollup of 7 pull requests

Successful merges:

 - #112031 (Migrate `item_proc_macro` to Askama)
 - #112053 (Remove `-Zcgu-partitioning-strategy`.)
 - #112069 (offset_of: don't require type to be `Sized`)
 - #112084 (enhancements on build_helper utilization and rustdoc-gui-test)
 - #112096 (Remove array_zip)
 - #112108 (Fix re-export of doc hidden item inside private item not displayed)
 - #112113 (rustdoc: simplify `clean` by removing `FnRetTy`)

r? `@ghost`
`@rustbot` modify labels: rollup
bors 2023-05-31 05:42:26 +00:00
commit 617d3d6d72
37 changed files with 1413 additions and 1558 deletions

@@ -4289,6 +4289,7 @@ dependencies = [
name = "rustdoc-gui-test"
version = "0.1.0"
dependencies = [
"build_helper",
"compiletest",
"getopts",
"walkdir",

@@ -668,11 +668,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
mir::Rvalue::NullaryOp(ref null_op, ty) => {
let ty = self.monomorphize(ty);
assert!(bx.cx().type_is_sized(ty));
let layout = bx.cx().layout_of(ty);
let val = match null_op {
mir::NullOp::SizeOf => layout.size.bytes(),
mir::NullOp::AlignOf => layout.align.abi.bytes(),
mir::NullOp::SizeOf => {
assert!(bx.cx().type_is_sized(ty));
layout.size.bytes()
}
mir::NullOp::AlignOf => {
assert!(bx.cx().type_is_sized(ty));
layout.align.abi.bytes()
}
mir::NullOp::OffsetOf(fields) => {
layout.offset_of_subfield(bx.cx(), fields.iter().map(|f| f.index())).bytes()
}
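
For context, a minimal sketch of what this relaxation is meant to allow (behind the nightly `offset_of` feature gate of the time; the type and field names here are illustrative, not from the PR): the offset of a leading field in a `?Sized` type is known even when the type's size and alignment are not, which is why `SizeOf`/`AlignOf` keep the sized assertion while `OffsetOf` drops it.

#![feature(offset_of)] // nightly-only at the time of this PR

use core::mem::offset_of;

#[repr(C)]
struct WithTail {
    len: u16,
    tail: [u8], // unsized tail: `WithTail` itself is not `Sized`
}

fn main() {
    // `len` sits at a statically known offset even though `WithTail`
    // has no statically known size or alignment.
    assert_eq!(offset_of!(WithTail, len), 0);
}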

File diff suppressed because it is too large

@@ -1,644 +0,0 @@
use std::cmp;
use std::collections::hash_map::Entry;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_hir::def::DefKind;
use rustc_hir::def_id::{DefId, LOCAL_CRATE};
use rustc_hir::definitions::DefPathDataName;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_middle::middle::exported_symbols::{SymbolExportInfo, SymbolExportLevel};
use rustc_middle::mir::mono::{CodegenUnit, CodegenUnitNameBuilder, Linkage, Visibility};
use rustc_middle::mir::mono::{InstantiationMode, MonoItem};
use rustc_middle::ty::print::characteristic_def_id_of_type;
use rustc_middle::ty::{self, visit::TypeVisitableExt, InstanceDef, TyCtxt};
use rustc_span::symbol::Symbol;
use super::PartitioningCx;
use crate::collector::InliningMap;
use crate::partitioning::{MonoItemPlacement, Partition, PlacedRootMonoItems};
pub struct DefaultPartitioning;
impl<'tcx> Partition<'tcx> for DefaultPartitioning {
fn place_root_mono_items<I>(
&mut self,
cx: &PartitioningCx<'_, 'tcx>,
mono_items: &mut I,
) -> PlacedRootMonoItems<'tcx>
where
I: Iterator<Item = MonoItem<'tcx>>,
{
let mut roots = FxHashSet::default();
let mut codegen_units = FxHashMap::default();
let is_incremental_build = cx.tcx.sess.opts.incremental.is_some();
let mut internalization_candidates = FxHashSet::default();
// Determine if monomorphizations instantiated in this crate will be made
// available to downstream crates. This depends on whether we are in
// share-generics mode and whether the current crate can even have
// downstream crates.
let export_generics =
cx.tcx.sess.opts.share_generics() && cx.tcx.local_crate_exports_generics();
let cgu_name_builder = &mut CodegenUnitNameBuilder::new(cx.tcx);
let cgu_name_cache = &mut FxHashMap::default();
for mono_item in mono_items {
match mono_item.instantiation_mode(cx.tcx) {
InstantiationMode::GloballyShared { .. } => {}
InstantiationMode::LocalCopy => continue,
}
let characteristic_def_id = characteristic_def_id_of_mono_item(cx.tcx, mono_item);
let is_volatile = is_incremental_build && mono_item.is_generic_fn();
let codegen_unit_name = match characteristic_def_id {
Some(def_id) => compute_codegen_unit_name(
cx.tcx,
cgu_name_builder,
def_id,
is_volatile,
cgu_name_cache,
),
None => fallback_cgu_name(cgu_name_builder),
};
let codegen_unit = codegen_units
.entry(codegen_unit_name)
.or_insert_with(|| CodegenUnit::new(codegen_unit_name));
let mut can_be_internalized = true;
let (linkage, visibility) = mono_item_linkage_and_visibility(
cx.tcx,
&mono_item,
&mut can_be_internalized,
export_generics,
);
if visibility == Visibility::Hidden && can_be_internalized {
internalization_candidates.insert(mono_item);
}
codegen_unit.items_mut().insert(mono_item, (linkage, visibility));
roots.insert(mono_item);
}
// Always ensure we have at least one CGU; otherwise, if we have a
// crate with just types (for example), we could wind up with no CGU.
if codegen_units.is_empty() {
let codegen_unit_name = fallback_cgu_name(cgu_name_builder);
codegen_units.insert(codegen_unit_name, CodegenUnit::new(codegen_unit_name));
}
let codegen_units = codegen_units.into_values().collect();
PlacedRootMonoItems { codegen_units, roots, internalization_candidates }
}
fn merge_codegen_units(
&mut self,
cx: &PartitioningCx<'_, 'tcx>,
codegen_units: &mut Vec<CodegenUnit<'tcx>>,
) {
assert!(cx.target_cgu_count >= 1);
// Note that at this point in time the `codegen_units` here may not be
// in a deterministic order (but we know they're deterministically the
// same set). We want this merging to produce a deterministic ordering
// of codegen units from the input.
//
// Due to basically how we've implemented the merging below (merge the
// two smallest into each other) we're sure to start off with a
// deterministic order (sorted by name). This'll mean that if two cgus
// have the same size the stable sort below will keep everything nice
// and deterministic.
codegen_units.sort_by(|a, b| a.name().as_str().cmp(b.name().as_str()));
// This map keeps track of what got merged into what.
let mut cgu_contents: FxHashMap<Symbol, Vec<Symbol>> =
codegen_units.iter().map(|cgu| (cgu.name(), vec![cgu.name()])).collect();
// Merge the two smallest codegen units until the target size is
// reached.
while codegen_units.len() > cx.target_cgu_count {
// Sort small cgus to the back
codegen_units.sort_by_cached_key(|cgu| cmp::Reverse(cgu.size_estimate()));
let mut smallest = codegen_units.pop().unwrap();
let second_smallest = codegen_units.last_mut().unwrap();
// Move the mono-items from `smallest` to `second_smallest`
second_smallest.modify_size_estimate(smallest.size_estimate());
for (k, v) in smallest.items_mut().drain() {
second_smallest.items_mut().insert(k, v);
}
// Record that `second_smallest` now contains all the stuff that was
// in `smallest` before.
let mut consumed_cgu_names = cgu_contents.remove(&smallest.name()).unwrap();
cgu_contents.get_mut(&second_smallest.name()).unwrap().append(&mut consumed_cgu_names);
debug!(
"CodegenUnit {} merged into CodegenUnit {}",
smallest.name(),
second_smallest.name()
);
}
let cgu_name_builder = &mut CodegenUnitNameBuilder::new(cx.tcx);
if cx.tcx.sess.opts.incremental.is_some() {
// If we are doing incremental compilation, we want CGU names to
// reflect the path of the source level module they correspond to.
// For CGUs that contain the code of multiple modules because of the
// merging done above, we use a concatenation of the names of all
// contained CGUs.
let new_cgu_names: FxHashMap<Symbol, String> = cgu_contents
.into_iter()
// This `filter` makes sure we only update the name of CGUs that
// were actually modified by merging.
.filter(|(_, cgu_contents)| cgu_contents.len() > 1)
.map(|(current_cgu_name, cgu_contents)| {
let mut cgu_contents: Vec<&str> =
cgu_contents.iter().map(|s| s.as_str()).collect();
// Sort the names, so things are deterministic and easy to
// predict. We are sorting primitive `&str`s here so we can
// use unstable sort.
cgu_contents.sort_unstable();
(current_cgu_name, cgu_contents.join("--"))
})
.collect();
for cgu in codegen_units.iter_mut() {
if let Some(new_cgu_name) = new_cgu_names.get(&cgu.name()) {
if cx.tcx.sess.opts.unstable_opts.human_readable_cgu_names {
cgu.set_name(Symbol::intern(&new_cgu_name));
} else {
// If we don't require CGU names to be human-readable,
// we use a fixed length hash of the composite CGU name
// instead.
let new_cgu_name = CodegenUnit::mangle_name(&new_cgu_name);
cgu.set_name(Symbol::intern(&new_cgu_name));
}
}
}
} else {
// If we are compiling non-incrementally we just generate simple CGU
// names containing an index.
for (index, cgu) in codegen_units.iter_mut().enumerate() {
let numbered_codegen_unit_name =
cgu_name_builder.build_cgu_name_no_mangle(LOCAL_CRATE, &["cgu"], Some(index));
cgu.set_name(numbered_codegen_unit_name);
}
}
}
fn place_inlined_mono_items(
&mut self,
cx: &PartitioningCx<'_, 'tcx>,
codegen_units: &mut [CodegenUnit<'tcx>],
roots: FxHashSet<MonoItem<'tcx>>,
) -> FxHashMap<MonoItem<'tcx>, MonoItemPlacement> {
let mut mono_item_placements = FxHashMap::default();
let single_codegen_unit = codegen_units.len() == 1;
for old_codegen_unit in codegen_units.iter_mut() {
// Collect all items that need to be available in this codegen unit.
let mut reachable = FxHashSet::default();
for root in old_codegen_unit.items().keys() {
follow_inlining(*root, cx.inlining_map, &mut reachable);
}
let mut new_codegen_unit = CodegenUnit::new(old_codegen_unit.name());
// Add all monomorphizations that are not already there.
for mono_item in reachable {
if let Some(linkage) = old_codegen_unit.items().get(&mono_item) {
// This is a root, just copy it over.
new_codegen_unit.items_mut().insert(mono_item, *linkage);
} else {
if roots.contains(&mono_item) {
bug!(
"GloballyShared mono-item inlined into other CGU: \
{:?}",
mono_item
);
}
// This is a CGU-private copy.
new_codegen_unit
.items_mut()
.insert(mono_item, (Linkage::Internal, Visibility::Default));
}
if !single_codegen_unit {
// If there is more than one codegen unit, we need to keep track
// in which codegen units each monomorphization is placed.
match mono_item_placements.entry(mono_item) {
Entry::Occupied(e) => {
let placement = e.into_mut();
debug_assert!(match *placement {
MonoItemPlacement::SingleCgu { cgu_name } => {
cgu_name != new_codegen_unit.name()
}
MonoItemPlacement::MultipleCgus => true,
});
*placement = MonoItemPlacement::MultipleCgus;
}
Entry::Vacant(e) => {
e.insert(MonoItemPlacement::SingleCgu {
cgu_name: new_codegen_unit.name(),
});
}
}
}
}
*old_codegen_unit = new_codegen_unit;
}
return mono_item_placements;
fn follow_inlining<'tcx>(
mono_item: MonoItem<'tcx>,
inlining_map: &InliningMap<'tcx>,
visited: &mut FxHashSet<MonoItem<'tcx>>,
) {
if !visited.insert(mono_item) {
return;
}
inlining_map.with_inlining_candidates(mono_item, |target| {
follow_inlining(target, inlining_map, visited);
});
}
}
fn internalize_symbols(
&mut self,
cx: &PartitioningCx<'_, 'tcx>,
codegen_units: &mut [CodegenUnit<'tcx>],
mono_item_placements: FxHashMap<MonoItem<'tcx>, MonoItemPlacement>,
internalization_candidates: FxHashSet<MonoItem<'tcx>>,
) {
if codegen_units.len() == 1 {
// Fast path for when there is only one codegen unit. In this case we
// can internalize all candidates, since there is nowhere else they
// could be accessed from.
for cgu in codegen_units {
for candidate in &internalization_candidates {
cgu.items_mut().insert(*candidate, (Linkage::Internal, Visibility::Default));
}
}
return;
}
// Build a map from every monomorphization to all the monomorphizations that
// reference it.
let mut accessor_map: FxHashMap<MonoItem<'tcx>, Vec<MonoItem<'tcx>>> = Default::default();
cx.inlining_map.iter_accesses(|accessor, accessees| {
for accessee in accessees {
accessor_map.entry(*accessee).or_default().push(accessor);
}
});
// For each internalization candidates in each codegen unit, check if it is
// accessed from outside its defining codegen unit.
for cgu in codegen_units {
let home_cgu = MonoItemPlacement::SingleCgu { cgu_name: cgu.name() };
for (accessee, linkage_and_visibility) in cgu.items_mut() {
if !internalization_candidates.contains(accessee) {
// This item is no candidate for internalizing, so skip it.
continue;
}
debug_assert_eq!(mono_item_placements[accessee], home_cgu);
if let Some(accessors) = accessor_map.get(accessee) {
if accessors
.iter()
.filter_map(|accessor| {
// Some accessors might not have been
// instantiated. We can safely ignore those.
mono_item_placements.get(accessor)
})
.any(|placement| *placement != home_cgu)
{
// Found an accessor from another CGU, so skip to the next
// item without marking this one as internal.
continue;
}
}
// If we got here, we did not find any accesses from other CGUs,
// so it's fine to make this monomorphization internal.
*linkage_and_visibility = (Linkage::Internal, Visibility::Default);
}
}
}
}
fn characteristic_def_id_of_mono_item<'tcx>(
tcx: TyCtxt<'tcx>,
mono_item: MonoItem<'tcx>,
) -> Option<DefId> {
match mono_item {
MonoItem::Fn(instance) => {
let def_id = match instance.def {
ty::InstanceDef::Item(def) => def,
ty::InstanceDef::VTableShim(..)
| ty::InstanceDef::ReifyShim(..)
| ty::InstanceDef::FnPtrShim(..)
| ty::InstanceDef::ClosureOnceShim { .. }
| ty::InstanceDef::Intrinsic(..)
| ty::InstanceDef::DropGlue(..)
| ty::InstanceDef::Virtual(..)
| ty::InstanceDef::CloneShim(..)
| ty::InstanceDef::ThreadLocalShim(..)
| ty::InstanceDef::FnPtrAddrShim(..) => return None,
};
// If this is a method, we want to put it into the same module as
// its self-type. If the self-type does not provide a characteristic
// DefId, we use the location of the impl after all.
if tcx.trait_of_item(def_id).is_some() {
let self_ty = instance.substs.type_at(0);
// This is a default implementation of a trait method.
return characteristic_def_id_of_type(self_ty).or(Some(def_id));
}
if let Some(impl_def_id) = tcx.impl_of_method(def_id) {
if tcx.sess.opts.incremental.is_some()
&& tcx.trait_id_of_impl(impl_def_id) == tcx.lang_items().drop_trait()
{
// Put `Drop::drop` into the same cgu as `drop_in_place`
// since `drop_in_place` is the only thing that can
// call it.
return None;
}
// When polymorphization is enabled, methods which do not depend on their generic
// parameters, but whose impl block's self-type does, will fail to normalize.
if !tcx.sess.opts.unstable_opts.polymorphize || !instance.has_param() {
// This is a method within an impl, find out what the self-type is:
let impl_self_ty = tcx.subst_and_normalize_erasing_regions(
instance.substs,
ty::ParamEnv::reveal_all(),
tcx.type_of(impl_def_id),
);
if let Some(def_id) = characteristic_def_id_of_type(impl_self_ty) {
return Some(def_id);
}
}
}
Some(def_id)
}
MonoItem::Static(def_id) => Some(def_id),
MonoItem::GlobalAsm(item_id) => Some(item_id.owner_id.to_def_id()),
}
}
fn compute_codegen_unit_name(
tcx: TyCtxt<'_>,
name_builder: &mut CodegenUnitNameBuilder<'_>,
def_id: DefId,
volatile: bool,
cache: &mut CguNameCache,
) -> Symbol {
// Find the innermost module that is not nested within a function.
let mut current_def_id = def_id;
let mut cgu_def_id = None;
// Walk backwards from the item we want to find the module for.
loop {
if current_def_id.is_crate_root() {
if cgu_def_id.is_none() {
// If we have not found a module yet, take the crate root.
cgu_def_id = Some(def_id.krate.as_def_id());
}
break;
} else if tcx.def_kind(current_def_id) == DefKind::Mod {
if cgu_def_id.is_none() {
cgu_def_id = Some(current_def_id);
}
} else {
// If we encounter something that is not a module, throw away
// any module that we've found so far because we now know that
// it is nested within something else.
cgu_def_id = None;
}
current_def_id = tcx.parent(current_def_id);
}
let cgu_def_id = cgu_def_id.unwrap();
*cache.entry((cgu_def_id, volatile)).or_insert_with(|| {
let def_path = tcx.def_path(cgu_def_id);
let components = def_path.data.iter().map(|part| match part.data.name() {
DefPathDataName::Named(name) => name,
DefPathDataName::Anon { .. } => unreachable!(),
});
let volatile_suffix = volatile.then_some("volatile");
name_builder.build_cgu_name(def_path.krate, components, volatile_suffix)
})
}
// Anything we can't find a proper codegen unit for goes into this.
fn fallback_cgu_name(name_builder: &mut CodegenUnitNameBuilder<'_>) -> Symbol {
name_builder.build_cgu_name(LOCAL_CRATE, &["fallback"], Some("cgu"))
}
fn mono_item_linkage_and_visibility<'tcx>(
tcx: TyCtxt<'tcx>,
mono_item: &MonoItem<'tcx>,
can_be_internalized: &mut bool,
export_generics: bool,
) -> (Linkage, Visibility) {
if let Some(explicit_linkage) = mono_item.explicit_linkage(tcx) {
return (explicit_linkage, Visibility::Default);
}
let vis = mono_item_visibility(tcx, mono_item, can_be_internalized, export_generics);
(Linkage::External, vis)
}
type CguNameCache = FxHashMap<(DefId, bool), Symbol>;
fn static_visibility<'tcx>(
tcx: TyCtxt<'tcx>,
can_be_internalized: &mut bool,
def_id: DefId,
) -> Visibility {
if tcx.is_reachable_non_generic(def_id) {
*can_be_internalized = false;
default_visibility(tcx, def_id, false)
} else {
Visibility::Hidden
}
}
fn mono_item_visibility<'tcx>(
tcx: TyCtxt<'tcx>,
mono_item: &MonoItem<'tcx>,
can_be_internalized: &mut bool,
export_generics: bool,
) -> Visibility {
let instance = match mono_item {
// This is pretty complicated; see below.
MonoItem::Fn(instance) => instance,
// Misc handling for generics and such, but otherwise:
MonoItem::Static(def_id) => return static_visibility(tcx, can_be_internalized, *def_id),
MonoItem::GlobalAsm(item_id) => {
return static_visibility(tcx, can_be_internalized, item_id.owner_id.to_def_id());
}
};
let def_id = match instance.def {
InstanceDef::Item(def_id) | InstanceDef::DropGlue(def_id, Some(_)) => def_id,
// We match the visibility of statics here
InstanceDef::ThreadLocalShim(def_id) => {
return static_visibility(tcx, can_be_internalized, def_id);
}
// These are all compiler glue and such, never exported, always hidden.
InstanceDef::VTableShim(..)
| InstanceDef::ReifyShim(..)
| InstanceDef::FnPtrShim(..)
| InstanceDef::Virtual(..)
| InstanceDef::Intrinsic(..)
| InstanceDef::ClosureOnceShim { .. }
| InstanceDef::DropGlue(..)
| InstanceDef::CloneShim(..)
| InstanceDef::FnPtrAddrShim(..) => return Visibility::Hidden,
};
// The `start_fn` lang item is actually a monomorphized instance of a
// function in the standard library, used for the `main` function. We don't
// want to export it so we tag it with `Hidden` visibility but this symbol
// is only referenced from the actual `main` symbol which we unfortunately
// don't know anything about during partitioning/collection. As a result we
// forcibly keep this symbol out of the `internalization_candidates` set.
//
// FIXME: eventually we don't want to always force this symbol to have
// hidden visibility, it should indeed be a candidate for
// internalization, but we have to understand that it's referenced
// from the `main` symbol we'll generate later.
//
// This may be fixable with a new `InstanceDef` perhaps? Unsure!
if tcx.lang_items().start_fn() == Some(def_id) {
*can_be_internalized = false;
return Visibility::Hidden;
}
let is_generic = instance.substs.non_erasable_generics().next().is_some();
// Upstream `DefId` instances get different handling than local ones.
let Some(def_id) = def_id.as_local() else {
return if export_generics && is_generic {
// If it is an upstream monomorphization and we export generics, we must make
// it available to downstream crates.
*can_be_internalized = false;
default_visibility(tcx, def_id, true)
} else {
Visibility::Hidden
};
};
if is_generic {
if export_generics {
if tcx.is_unreachable_local_definition(def_id) {
// This instance cannot be used from another crate.
Visibility::Hidden
} else {
// This instance might be useful in a downstream crate.
*can_be_internalized = false;
default_visibility(tcx, def_id.to_def_id(), true)
}
} else {
// We are not exporting generics, or the definition is not reachable
// from downstream crates, so we can internalize its instantiations.
Visibility::Hidden
}
} else {
// If this isn't a generic function then we mark this a `Default` if
// this is a reachable item, meaning that it's a symbol other crates may
// access when they link to us.
if tcx.is_reachable_non_generic(def_id.to_def_id()) {
*can_be_internalized = false;
debug_assert!(!is_generic);
return default_visibility(tcx, def_id.to_def_id(), false);
}
// If this isn't reachable then we're gonna tag this with `Hidden`
// visibility. In some situations though we'll want to prevent this
// symbol from being internalized.
//
// There are two categories of items here:
//
// * First is weak lang items. These are basically mechanisms for
// libcore to forward-reference symbols defined later in crates like
// the standard library or `#[panic_handler]` definitions. The
// definition of these weak lang items needs to be referencable by
// libcore, so we're no longer a candidate for internalization.
// Removal of these functions can't be done by LLVM but rather must be
// done by the linker as it's a non-local decision.
//
// * Second is "std internal symbols". Currently this is primarily used
// for allocator symbols. Allocators are a little weird in their
// implementation, but the idea is that the compiler, at the last
// minute, defines an allocator with an injected object file. The
// `alloc` crate references these symbols (`__rust_alloc`) and the
// definition doesn't get hooked up until a linked crate artifact is
// generated.
//
// The symbols synthesized by the compiler (`__rust_alloc`) are thin
// veneers around the actual implementation, some other symbol which
// implements the same ABI. These symbols (things like `__rg_alloc`,
// `__rdl_alloc`, `__rde_alloc`, etc), are all tagged with "std
// internal symbols".
//
// The std-internal symbols here **should not show up in a dll as an
// exported interface**, so they return `false` from
// `is_reachable_non_generic` above and we'll give them `Hidden`
// visibility below. Like the weak lang items, though, we can't let
// LLVM internalize them as this decision is left up to the linker to
// omit them, so prevent them from being internalized.
let attrs = tcx.codegen_fn_attrs(def_id);
if attrs.flags.contains(CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL) {
*can_be_internalized = false;
}
Visibility::Hidden
}
}
fn default_visibility(tcx: TyCtxt<'_>, id: DefId, is_generic: bool) -> Visibility {
if !tcx.sess.target.default_hidden_visibility {
return Visibility::Default;
}
// Generic functions never have export-level C.
if is_generic {
return Visibility::Hidden;
}
// Things with export level C don't get instantiated in
// downstream crates.
if !id.is_local() {
return Visibility::Hidden;
}
// C-export level items remain at `Default`, all other internal
// items become `Hidden`.
match tcx.reachable_non_generics(id.krate).get(&id) {
Some(SymbolExportInfo { level: SymbolExportLevel::C, .. }) => Visibility::Default,
_ => Visibility::Hidden,
}
}
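
The loop in `merge_codegen_units` above is the heart of the strategy being deleted; a standalone sketch of the same merge-smallest heuristic, with illustrative types rather than rustc's, for reference:

use std::cmp::Reverse;

// Fold the smallest unit into the second smallest until only `target` units
// remain, concatenating names the way the real code records merged CGUs.
fn merge_smallest(mut cgus: Vec<(String, u64)>, target: usize) -> Vec<(String, u64)> {
    assert!(target >= 1);
    cgus.sort_by(|a, b| a.0.cmp(&b.0)); // deterministic starting order
    while cgus.len() > target {
        cgus.sort_by_key(|c| Reverse(c.1)); // descending by size: smallest last
        let (name, size) = cgus.pop().unwrap();
        let second_smallest = cgus.last_mut().unwrap();
        second_smallest.0 = format!("{}--{}", second_smallest.0, name);
        second_smallest.1 += size;
    }
    cgus
}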

@@ -1,673 +0,0 @@
//! Partitioning Codegen Units for Incremental Compilation
//! ======================================================
//!
//! The task of this module is to take the complete set of monomorphizations of
//! a crate and produce a set of codegen units from it, where a codegen unit
//! is a named set of (mono-item, linkage) pairs. That is, this module
//! decides which monomorphization appears in which codegen units with which
//! linkage. The following paragraphs describe some of the background on the
//! partitioning scheme.
//!
//! The most important opportunity for saving on compilation time with
//! incremental compilation is to avoid re-codegenning and re-optimizing code.
//! Since the unit of codegen and optimization for LLVM is "modules" or, as
//! we call them, "codegen units", the particulars of how much time can be saved
//! by incremental compilation are tightly linked to how the output program is
//! partitioned into these codegen units prior to passing it to LLVM --
//! especially because we have to treat codegen units as opaque entities once
//! they are created: There is no way for us to incrementally update an existing
//! LLVM module and so we have to build any such module from scratch if it was
//! affected by some change in the source code.
//!
//! From that point of view it would make sense to maximize the number of
//! codegen units by, for example, putting each function into its own module.
//! That way only those modules that were actually affected by some change
//! would have to be re-compiled, minimizing the number of functions that
//! could have been re-used but just happened to be located in a module that
//! is re-compiled.
//!
//! However, since LLVM optimization does not work across module boundaries,
//! using such a highly granular partitioning would lead to very slow runtime
//! code since it would effectively prohibit inlining and other inter-procedure
//! optimizations. We want to avoid that as much as possible.
//!
//! Thus we end up with a trade-off: The bigger the codegen units, the better
//! LLVM's optimizer can do its work, but also the smaller the compilation time
//! reduction we get from incremental compilation.
//!
//! Ideally, we would create a partitioning such that there are few big codegen
//! units with few interdependencies between them. For now though, we use the
//! following heuristic to determine the partitioning:
//!
//! - There are two codegen units for every source-level module:
//! - One for "stable", that is non-generic, code
//! - One for more "volatile" code, i.e., monomorphized instances of functions
//! defined in that module
//!
//! In order to see why this heuristic makes sense, let's take a look at when a
//! codegen unit can get invalidated:
//!
//! 1. The most straightforward case is when the BODY of a function or global
//! changes. Then any codegen unit containing the code for that item has to be
//! re-compiled. Note that this includes all codegen units where the function
//! has been inlined.
//!
//! 2. The next case is when the SIGNATURE of a function or global changes. In
//! this case, all codegen units containing a REFERENCE to that item have to be
//! re-compiled. This is a superset of case 1.
//!
//! 3. The final and most subtle case is when a REFERENCE to a generic function
//! is added or removed somewhere. Even though the definition of the function
//! might be unchanged, a new REFERENCE might introduce a new monomorphized
//! instance of this function which has to be placed and compiled somewhere.
//! Conversely, when removing a REFERENCE, it might have been the last one with
//! that particular set of generic arguments and thus we have to remove it.
//!
//! From the above we see that just using one codegen unit per source-level
//! module is not such a good idea, since just adding a REFERENCE to some
//! generic item somewhere else would invalidate everything within the module
//! containing the generic item. The heuristic above reduces this detrimental
//! side-effect of references a little by at least not touching the non-generic
//! code of the module.
//!
//! A Note on Inlining
//! ------------------
//! As briefly mentioned above, in order for LLVM to be able to inline a
//! function call, the body of the function has to be available in the LLVM
//! module where the call is made. This has a few consequences for partitioning:
//!
//! - The partitioning algorithm has to take care of placing functions into all
//! codegen units where they should be available for inlining. It also has to
//! decide on the correct linkage for these functions.
//!
//! - The partitioning algorithm has to know which functions are likely to get
//! inlined, so it can distribute function instantiations accordingly. Since
//! there is no way of knowing for sure which functions LLVM will decide to
//! inline in the end, we apply a heuristic here: Only functions marked with
//! `#[inline]` are considered for inlining by the partitioner. The current
//! implementation will not try to determine if a function is likely to be
//! inlined by looking at the function's definition.
//!
//! Note though that as a side-effect of creating codegen units per
//! source-level module, functions from the same module will be available for
//! inlining, even when they are not marked `#[inline]`.
mod default;
use std::cmp;
use std::fs::{self, File};
use std::io::{BufWriter, Write};
use std::path::{Path, PathBuf};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::sync;
use rustc_hir::def_id::{DefIdSet, LOCAL_CRATE};
use rustc_middle::mir;
use rustc_middle::mir::mono::MonoItem;
use rustc_middle::mir::mono::{CodegenUnit, Linkage};
use rustc_middle::query::Providers;
use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_middle::ty::TyCtxt;
use rustc_session::config::{DumpMonoStatsFormat, SwitchWithOptPath};
use rustc_span::symbol::Symbol;
use crate::collector::InliningMap;
use crate::collector::{self, MonoItemCollectionMode};
use crate::errors::{
CouldntDumpMonoStats, SymbolAlreadyDefined, UnknownCguCollectionMode, UnknownPartitionStrategy,
};
enum Partitioner {
Default(default::DefaultPartitioning),
// Other partitioning strategies can go here.
Unknown,
}
impl<'tcx> Partition<'tcx> for Partitioner {
fn place_root_mono_items<I>(
&mut self,
cx: &PartitioningCx<'_, 'tcx>,
mono_items: &mut I,
) -> PlacedRootMonoItems<'tcx>
where
I: Iterator<Item = MonoItem<'tcx>>,
{
match self {
Partitioner::Default(partitioner) => partitioner.place_root_mono_items(cx, mono_items),
Partitioner::Unknown => cx.tcx.sess.emit_fatal(UnknownPartitionStrategy),
}
}
fn merge_codegen_units(
&mut self,
cx: &PartitioningCx<'_, 'tcx>,
codegen_units: &mut Vec<CodegenUnit<'tcx>>,
) {
match self {
Partitioner::Default(partitioner) => partitioner.merge_codegen_units(cx, codegen_units),
Partitioner::Unknown => cx.tcx.sess.emit_fatal(UnknownPartitionStrategy),
}
}
fn place_inlined_mono_items(
&mut self,
cx: &PartitioningCx<'_, 'tcx>,
codegen_units: &mut [CodegenUnit<'tcx>],
roots: FxHashSet<MonoItem<'tcx>>,
) -> FxHashMap<MonoItem<'tcx>, MonoItemPlacement> {
match self {
Partitioner::Default(partitioner) => {
partitioner.place_inlined_mono_items(cx, codegen_units, roots)
}
Partitioner::Unknown => cx.tcx.sess.emit_fatal(UnknownPartitionStrategy),
}
}
fn internalize_symbols(
&mut self,
cx: &PartitioningCx<'_, 'tcx>,
codegen_units: &mut [CodegenUnit<'tcx>],
mono_item_placements: FxHashMap<MonoItem<'tcx>, MonoItemPlacement>,
internalization_candidates: FxHashSet<MonoItem<'tcx>>,
) {
match self {
Partitioner::Default(partitioner) => partitioner.internalize_symbols(
cx,
codegen_units,
mono_item_placements,
internalization_candidates,
),
Partitioner::Unknown => cx.tcx.sess.emit_fatal(UnknownPartitionStrategy),
}
}
}
struct PartitioningCx<'a, 'tcx> {
tcx: TyCtxt<'tcx>,
target_cgu_count: usize,
inlining_map: &'a InliningMap<'tcx>,
}
pub struct PlacedRootMonoItems<'tcx> {
codegen_units: Vec<CodegenUnit<'tcx>>,
roots: FxHashSet<MonoItem<'tcx>>,
internalization_candidates: FxHashSet<MonoItem<'tcx>>,
}
trait Partition<'tcx> {
fn place_root_mono_items<I>(
&mut self,
cx: &PartitioningCx<'_, 'tcx>,
mono_items: &mut I,
) -> PlacedRootMonoItems<'tcx>
where
I: Iterator<Item = MonoItem<'tcx>>;
fn merge_codegen_units(
&mut self,
cx: &PartitioningCx<'_, 'tcx>,
codegen_units: &mut Vec<CodegenUnit<'tcx>>,
);
fn place_inlined_mono_items(
&mut self,
cx: &PartitioningCx<'_, 'tcx>,
codegen_units: &mut [CodegenUnit<'tcx>],
roots: FxHashSet<MonoItem<'tcx>>,
) -> FxHashMap<MonoItem<'tcx>, MonoItemPlacement>;
fn internalize_symbols(
&mut self,
cx: &PartitioningCx<'_, 'tcx>,
codegen_units: &mut [CodegenUnit<'tcx>],
mono_item_placements: FxHashMap<MonoItem<'tcx>, MonoItemPlacement>,
internalization_candidates: FxHashSet<MonoItem<'tcx>>,
);
}
fn get_partitioner(tcx: TyCtxt<'_>) -> Partitioner {
let strategy = match &tcx.sess.opts.unstable_opts.cgu_partitioning_strategy {
None => "default",
Some(s) => &s[..],
};
match strategy {
"default" => Partitioner::Default(default::DefaultPartitioning),
_ => Partitioner::Unknown,
}
}
fn partition<'tcx, I>(
tcx: TyCtxt<'tcx>,
mono_items: &mut I,
max_cgu_count: usize,
inlining_map: &InliningMap<'tcx>,
) -> Vec<CodegenUnit<'tcx>>
where
I: Iterator<Item = MonoItem<'tcx>>,
{
let _prof_timer = tcx.prof.generic_activity("cgu_partitioning");
let mut partitioner = get_partitioner(tcx);
let cx = &PartitioningCx { tcx, target_cgu_count: max_cgu_count, inlining_map };
// In the first step, we place all regular monomorphizations into their
// respective 'home' codegen unit. Regular monomorphizations are all
// functions and statics defined in the local crate.
let PlacedRootMonoItems { mut codegen_units, roots, internalization_candidates } = {
let _prof_timer = tcx.prof.generic_activity("cgu_partitioning_place_roots");
partitioner.place_root_mono_items(cx, mono_items)
};
for cgu in &mut codegen_units {
cgu.create_size_estimate(tcx);
}
debug_dump(tcx, "INITIAL PARTITIONING", &codegen_units);
// Merge until we have at most `max_cgu_count` codegen units.
// `merge_codegen_units` is responsible for updating the CGU size
// estimates.
{
let _prof_timer = tcx.prof.generic_activity("cgu_partitioning_merge_cgus");
partitioner.merge_codegen_units(cx, &mut codegen_units);
debug_dump(tcx, "POST MERGING", &codegen_units);
}
// In the next step, we use the inlining map to determine which additional
// monomorphizations have to go into each codegen unit. These additional
// monomorphizations can be drop-glue, functions from external crates, and
// local functions the definition of which is marked with `#[inline]`.
let mono_item_placements = {
let _prof_timer = tcx.prof.generic_activity("cgu_partitioning_place_inline_items");
partitioner.place_inlined_mono_items(cx, &mut codegen_units, roots)
};
for cgu in &mut codegen_units {
cgu.create_size_estimate(tcx);
}
debug_dump(tcx, "POST INLINING", &codegen_units);
// Next we try to make as many symbols "internal" as possible, so LLVM has
// more freedom to optimize.
if !tcx.sess.link_dead_code() {
let _prof_timer = tcx.prof.generic_activity("cgu_partitioning_internalize_symbols");
partitioner.internalize_symbols(
cx,
&mut codegen_units,
mono_item_placements,
internalization_candidates,
);
}
let instrument_dead_code =
tcx.sess.instrument_coverage() && !tcx.sess.instrument_coverage_except_unused_functions();
if instrument_dead_code {
assert!(
codegen_units.len() > 0,
"There must be at least one CGU that code coverage data can be generated in."
);
// Find the smallest CGU that has exported symbols and put the dead
// function stubs in that CGU. We look for exported symbols to increase
// the likelihood the linker won't throw away the dead functions.
// FIXME(#92165): In order to truly resolve this, we need to make sure
// the object file (CGU) containing the dead function stubs is included
// in the final binary. This will probably require forcing these
// function symbols to be included via `-u` or `/include` linker args.
let mut cgus: Vec<_> = codegen_units.iter_mut().collect();
cgus.sort_by_key(|cgu| cgu.size_estimate());
let dead_code_cgu =
if let Some(cgu) = cgus.into_iter().rev().find(|cgu| {
cgu.items().iter().any(|(_, (linkage, _))| *linkage == Linkage::External)
}) {
cgu
} else {
// If there are no CGUs that have externally linked items,
// then we just pick the first CGU as a fallback.
&mut codegen_units[0]
};
dead_code_cgu.make_code_coverage_dead_code_cgu();
}
// Finally, sort by codegen unit name, so that we get deterministic results.
codegen_units.sort_by(|a, b| a.name().as_str().cmp(b.name().as_str()));
debug_dump(tcx, "FINAL", &codegen_units);
codegen_units
}
/// For symbol internalization, we need to know whether a symbol/mono-item is
/// accessed from outside the codegen unit it is defined in. This type is used
/// to keep track of that.
#[derive(Clone, PartialEq, Eq, Debug)]
enum MonoItemPlacement {
SingleCgu { cgu_name: Symbol },
MultipleCgus,
}
fn debug_dump<'a, 'tcx: 'a>(tcx: TyCtxt<'tcx>, label: &str, cgus: &[CodegenUnit<'tcx>]) {
let dump = move || {
use std::fmt::Write;
let num_cgus = cgus.len();
let max = cgus.iter().map(|cgu| cgu.size_estimate()).max().unwrap();
let min = cgus.iter().map(|cgu| cgu.size_estimate()).min().unwrap();
let ratio = max as f64 / min as f64;
let s = &mut String::new();
let _ = writeln!(
s,
"{label} ({num_cgus} CodegenUnits, max={max}, min={min}, max/min={ratio:.1}):"
);
for cgu in cgus {
let _ =
writeln!(s, "CodegenUnit {} estimated size {}:", cgu.name(), cgu.size_estimate());
for (mono_item, linkage) in cgu.items() {
let symbol_name = mono_item.symbol_name(tcx).name;
let symbol_hash_start = symbol_name.rfind('h');
let symbol_hash = symbol_hash_start.map_or("<no hash>", |i| &symbol_name[i..]);
let _ = with_no_trimmed_paths!(writeln!(
s,
" - {} [{:?}] [{}] estimated size {}",
mono_item,
linkage,
symbol_hash,
mono_item.size_estimate(tcx)
));
}
let _ = writeln!(s);
}
std::mem::take(s)
};
debug!("{}", dump());
}
#[inline(never)] // give this a place in the profiler
fn assert_symbols_are_distinct<'a, 'tcx, I>(tcx: TyCtxt<'tcx>, mono_items: I)
where
I: Iterator<Item = &'a MonoItem<'tcx>>,
'tcx: 'a,
{
let _prof_timer = tcx.prof.generic_activity("assert_symbols_are_distinct");
let mut symbols: Vec<_> =
mono_items.map(|mono_item| (mono_item, mono_item.symbol_name(tcx))).collect();
symbols.sort_by_key(|sym| sym.1);
for &[(mono_item1, ref sym1), (mono_item2, ref sym2)] in symbols.array_windows() {
if sym1 == sym2 {
let span1 = mono_item1.local_span(tcx);
let span2 = mono_item2.local_span(tcx);
// Deterministically select one of the spans for error reporting
let span = match (span1, span2) {
(Some(span1), Some(span2)) => {
Some(if span1.lo().0 > span2.lo().0 { span1 } else { span2 })
}
(span1, span2) => span1.or(span2),
};
tcx.sess.emit_fatal(SymbolAlreadyDefined { span, symbol: sym1.to_string() });
}
}
}
fn collect_and_partition_mono_items(tcx: TyCtxt<'_>, (): ()) -> (&DefIdSet, &[CodegenUnit<'_>]) {
let collection_mode = match tcx.sess.opts.unstable_opts.print_mono_items {
Some(ref s) => {
let mode = s.to_lowercase();
let mode = mode.trim();
if mode == "eager" {
MonoItemCollectionMode::Eager
} else {
if mode != "lazy" {
tcx.sess.emit_warning(UnknownCguCollectionMode { mode });
}
MonoItemCollectionMode::Lazy
}
}
None => {
if tcx.sess.link_dead_code() {
MonoItemCollectionMode::Eager
} else {
MonoItemCollectionMode::Lazy
}
}
};
let (items, inlining_map) = collector::collect_crate_mono_items(tcx, collection_mode);
tcx.sess.abort_if_errors();
let (codegen_units, _) = tcx.sess.time("partition_and_assert_distinct_symbols", || {
sync::join(
|| {
let mut codegen_units = partition(
tcx,
&mut items.iter().copied(),
tcx.sess.codegen_units(),
&inlining_map,
);
codegen_units[0].make_primary();
&*tcx.arena.alloc_from_iter(codegen_units)
},
|| assert_symbols_are_distinct(tcx, items.iter()),
)
});
if tcx.prof.enabled() {
// Record CGU size estimates for self-profiling.
for cgu in codegen_units {
tcx.prof.artifact_size(
"codegen_unit_size_estimate",
cgu.name().as_str(),
cgu.size_estimate() as u64,
);
}
}
let mono_items: DefIdSet = items
.iter()
.filter_map(|mono_item| match *mono_item {
MonoItem::Fn(ref instance) => Some(instance.def_id()),
MonoItem::Static(def_id) => Some(def_id),
_ => None,
})
.collect();
// Output monomorphization stats per def_id
if let SwitchWithOptPath::Enabled(ref path) = tcx.sess.opts.unstable_opts.dump_mono_stats {
if let Err(err) =
dump_mono_items_stats(tcx, &codegen_units, path, tcx.crate_name(LOCAL_CRATE))
{
tcx.sess.emit_fatal(CouldntDumpMonoStats { error: err.to_string() });
}
}
if tcx.sess.opts.unstable_opts.print_mono_items.is_some() {
let mut item_to_cgus: FxHashMap<_, Vec<_>> = Default::default();
for cgu in codegen_units {
for (&mono_item, &linkage) in cgu.items() {
item_to_cgus.entry(mono_item).or_default().push((cgu.name(), linkage));
}
}
let mut item_keys: Vec<_> = items
.iter()
.map(|i| {
let mut output = with_no_trimmed_paths!(i.to_string());
output.push_str(" @@");
let mut empty = Vec::new();
let cgus = item_to_cgus.get_mut(i).unwrap_or(&mut empty);
cgus.sort_by_key(|(name, _)| *name);
cgus.dedup();
for &(ref cgu_name, (linkage, _)) in cgus.iter() {
output.push(' ');
output.push_str(cgu_name.as_str());
let linkage_abbrev = match linkage {
Linkage::External => "External",
Linkage::AvailableExternally => "Available",
Linkage::LinkOnceAny => "OnceAny",
Linkage::LinkOnceODR => "OnceODR",
Linkage::WeakAny => "WeakAny",
Linkage::WeakODR => "WeakODR",
Linkage::Appending => "Appending",
Linkage::Internal => "Internal",
Linkage::Private => "Private",
Linkage::ExternalWeak => "ExternalWeak",
Linkage::Common => "Common",
};
output.push('[');
output.push_str(linkage_abbrev);
output.push(']');
}
output
})
.collect();
item_keys.sort();
for item in item_keys {
println!("MONO_ITEM {item}");
}
}
(tcx.arena.alloc(mono_items), codegen_units)
}
/// Outputs stats about instantiation counts and estimated size, per `MonoItem`'s
/// def, to a file in the given output directory.
fn dump_mono_items_stats<'tcx>(
tcx: TyCtxt<'tcx>,
codegen_units: &[CodegenUnit<'tcx>],
output_directory: &Option<PathBuf>,
crate_name: Symbol,
) -> Result<(), Box<dyn std::error::Error>> {
let output_directory = if let Some(ref directory) = output_directory {
fs::create_dir_all(directory)?;
directory
} else {
Path::new(".")
};
let format = tcx.sess.opts.unstable_opts.dump_mono_stats_format;
let ext = format.extension();
let filename = format!("{crate_name}.mono_items.{ext}");
let output_path = output_directory.join(&filename);
let file = File::create(&output_path)?;
let mut file = BufWriter::new(file);
// Gather instantiated mono items grouped by def_id
let mut items_per_def_id: FxHashMap<_, Vec<_>> = Default::default();
for cgu in codegen_units {
for (&mono_item, _) in cgu.items() {
// Avoid variable-sized compiler-generated shims
if mono_item.is_user_defined() {
items_per_def_id.entry(mono_item.def_id()).or_default().push(mono_item);
}
}
}
#[derive(serde::Serialize)]
struct MonoItem {
name: String,
instantiation_count: usize,
size_estimate: usize,
total_estimate: usize,
}
// Output stats sorted by total instantiated size, from heaviest to lightest
let mut stats: Vec<_> = items_per_def_id
.into_iter()
.map(|(def_id, items)| {
let name = with_no_trimmed_paths!(tcx.def_path_str(def_id));
let instantiation_count = items.len();
let size_estimate = items[0].size_estimate(tcx);
let total_estimate = instantiation_count * size_estimate;
MonoItem { name, instantiation_count, size_estimate, total_estimate }
})
.collect();
stats.sort_unstable_by_key(|item| cmp::Reverse(item.total_estimate));
if !stats.is_empty() {
match format {
DumpMonoStatsFormat::Json => serde_json::to_writer(file, &stats)?,
DumpMonoStatsFormat::Markdown => {
writeln!(
file,
"| Item | Instantiation count | Estimated Cost Per Instantiation | Total Estimated Cost |"
)?;
writeln!(file, "| --- | ---: | ---: | ---: |")?;
for MonoItem { name, instantiation_count, size_estimate, total_estimate } in stats {
writeln!(
file,
"| `{name}` | {instantiation_count} | {size_estimate} | {total_estimate} |"
)?;
}
}
}
}
Ok(())
}
fn codegened_and_inlined_items(tcx: TyCtxt<'_>, (): ()) -> &DefIdSet {
let (items, cgus) = tcx.collect_and_partition_mono_items(());
let mut visited = DefIdSet::default();
let mut result = items.clone();
for cgu in cgus {
for (item, _) in cgu.items() {
if let MonoItem::Fn(ref instance) = item {
let did = instance.def_id();
if !visited.insert(did) {
continue;
}
let body = tcx.instance_mir(instance.def);
for block in body.basic_blocks.iter() {
for statement in &block.statements {
let mir::StatementKind::Coverage(_) = statement.kind else { continue };
let scope = statement.source_info.scope;
if let Some(inlined) = scope.inlined_instance(&body.source_scopes) {
result.insert(inlined.def_id());
}
}
}
}
}
}
tcx.arena.alloc(result)
}
pub fn provide(providers: &mut Providers) {
providers.collect_and_partition_mono_items = collect_and_partition_mono_items;
providers.codegened_and_inlined_items = codegened_and_inlined_items;
providers.is_codegened_item = |tcx, def_id| {
let (all_mono_items, _) = tcx.collect_and_partition_mono_items(());
all_mono_items.contains(&def_id)
};
providers.codegen_unit = |tcx, name| {
let (_, all) = tcx.collect_and_partition_mono_items(());
all.iter()
.find(|cgu| cgu.name() == name)
.unwrap_or_else(|| panic!("failed to find cgu with name {name:?}"))
};
}
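
The `MonoItemPlacement` bookkeeping above follows a simple pattern: the first CGU an item lands in records `SingleCgu`, and any later placement in a different CGU upgrades it to `MultipleCgus`. A small self-contained sketch (the names are ours, not rustc's):

use std::collections::HashMap;

#[derive(Clone, PartialEq, Eq, Debug)]
enum Placement {
    SingleCgu(String),
    MultipleCgus,
}

// Record that `item` was placed into `cgu`.
fn record(placements: &mut HashMap<String, Placement>, item: &str, cgu: &str) {
    placements
        .entry(item.to_string())
        .and_modify(|p| {
            if *p != Placement::SingleCgu(cgu.to_string()) {
                *p = Placement::MultipleCgus;
            }
        })
        .or_insert(Placement::SingleCgu(cgu.to_string()));
}

fn main() {
    let mut placements = HashMap::new();
    record(&mut placements, "foo::bar", "cgu0");
    record(&mut placements, "foo::bar", "cgu1");
    assert_eq!(placements["foo::bar"], Placement::MultipleCgus);
}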

@@ -1372,8 +1372,6 @@ options! {
"set options for branch target identification and pointer authentication on AArch64"),
cf_protection: CFProtection = (CFProtection::None, parse_cfprotection, [TRACKED],
"instrument control-flow architecture protection"),
cgu_partitioning_strategy: Option<String> = (None, parse_opt_string, [TRACKED],
"the codegen unit partitioning strategy to use"),
codegen_backend: Option<String> = (None, parse_opt_string, [TRACKED],
"the backend to use"),
combine_cgu: bool = (false, parse_bool, [TRACKED],

@@ -538,29 +538,6 @@ impl<T, const N: usize> [T; N] {
drain_array_with(self, |iter| try_from_trusted_iterator(iter.map(f)))
}
/// 'Zips up' two arrays into a single array of pairs.
///
/// `zip()` returns a new array where every element is a tuple where the
/// first element comes from the first array, and the second element comes
/// from the second array. In other words, it zips two arrays together,
/// into a single one.
///
/// # Examples
///
/// ```
/// #![feature(array_zip)]
/// let x = [1, 2, 3];
/// let y = [4, 5, 6];
/// let z = x.zip(y);
/// assert_eq!(z, [(1, 4), (2, 5), (3, 6)]);
/// ```
#[unstable(feature = "array_zip", issue = "80094")]
pub fn zip<U>(self, rhs: [U; N]) -> [(T, U); N] {
drain_array_with(self, |lhs| {
drain_array_with(rhs, |rhs| from_trusted_iterator(crate::iter::zip(lhs, rhs)))
})
}
/// Returns a slice containing the entire array. Equivalent to `&s[..]`.
#[stable(feature = "array_as_slice", since = "1.57.0")]
#[rustc_const_stable(feature = "array_as_slice", since = "1.57.0")]
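
For reference, the behavior of the removed `zip` is expressible on stable Rust; a hedged sketch using `core::array::from_fn` (the helper name is ours, not a standard API):

// Equivalent of the removed `[T; N]::zip` for `Copy` element types.
fn zip_arrays<T: Copy, U: Copy, const N: usize>(lhs: [T; N], rhs: [U; N]) -> [(T, U); N] {
    core::array::from_fn(|i| (lhs[i], rhs[i]))
}

fn main() {
    assert_eq!(zip_arrays([1, 2, 3], [4, 5, 6]), [(1, 4), (2, 5), (3, 6)]);
}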

@@ -381,7 +381,7 @@ impl StepDescription {
eprintln!(
"note: if you are adding a new Step to bootstrap itself, make sure you register it with `describe!`"
);
crate::detail_exit(1);
crate::detail_exit_macro!(1);
}
}
}
@@ -1355,7 +1355,7 @@ impl<'a> Builder<'a> {
"error: `x.py clippy` requires a host `rustc` toolchain with the `clippy` component"
);
eprintln!("help: try `rustup component add clippy`");
crate::detail_exit(1);
crate::detail_exit_macro!(1);
});
if !t!(std::str::from_utf8(&output.stdout)).contains("nightly") {
rustflags.arg("--cfg=bootstrap");

@@ -1686,7 +1686,7 @@ pub fn run_cargo(
});
if !ok {
crate::detail_exit(1);
crate::detail_exit_macro!(1);
}
// Ok now we need to actually find all the files listed in `toplevel`. We've

@@ -23,6 +23,7 @@ use crate::channel::{self, GitInfo};
pub use crate::flags::Subcommand;
use crate::flags::{Color, Flags, Warnings};
use crate::util::{exe, output, t};
use build_helper::detail_exit_macro;
use once_cell::sync::OnceCell;
use serde::{Deserialize, Deserializer};
use serde_derive::Deserialize;
@@ -579,7 +580,7 @@ macro_rules! define_config {
panic!("overriding existing option")
} else {
eprintln!("overriding existing option: `{}`", stringify!($field));
crate::detail_exit(2);
detail_exit_macro!(2);
}
} else {
self.$field = other.$field;
@@ -678,7 +679,7 @@ impl<T> Merge for Option<T> {
panic!("overriding existing option")
} else {
eprintln!("overriding existing option");
crate::detail_exit(2);
detail_exit_macro!(2);
}
} else {
*self = other;
@@ -944,7 +945,7 @@ impl Config {
.and_then(|table: toml::Value| TomlConfig::deserialize(table))
.unwrap_or_else(|err| {
eprintln!("failed to parse TOML configuration '{}': {err}", file.display());
crate::detail_exit(2);
detail_exit_macro!(2);
})
}
Self::parse_inner(args, get_toml)
@@ -978,7 +979,7 @@ impl Config {
eprintln!(
"Cannot use both `llvm_bolt_profile_generate` and `llvm_bolt_profile_use` at the same time"
);
crate::detail_exit(1);
detail_exit_macro!(1);
}
// Infer the rest of the configuration.
@@ -1094,7 +1095,7 @@ impl Config {
}
}
eprintln!("failed to parse override `{option}`: `{err}");
crate::detail_exit(2)
detail_exit_macro!(2)
}
toml.merge(override_toml, ReplaceOpt::Override);
@@ -1810,7 +1811,7 @@ impl Config {
println!("help: maybe your repository history is too shallow?");
println!("help: consider disabling `download-rustc`");
println!("help: or fetch enough history to include one upstream commit");
crate::detail_exit(1);
crate::detail_exit_macro!(1);
}
// Warn if there were changes to the compiler or standard library since the ancestor commit.

@@ -7,6 +7,7 @@ use std::{
process::{Command, Stdio},
};
use build_helper::util::try_run;
use once_cell::sync::OnceCell;
use xz2::bufread::XzDecoder;
@@ -14,7 +15,7 @@ use crate::{
config::RustfmtMetadata,
llvm::detect_llvm_sha,
t,
util::{check_run, exe, program_out_of_date, try_run},
util::{check_run, exe, program_out_of_date},
Config,
};
@@ -245,7 +246,7 @@ impl Config {
if !help_on_error.is_empty() {
eprintln!("{}", help_on_error);
}
crate::detail_exit(1);
crate::detail_exit_macro!(1);
}
}

@@ -193,7 +193,7 @@ impl Flags {
} else {
panic!("No paths available for subcommand `{}`", subcommand.as_str());
}
crate::detail_exit(0);
crate::detail_exit_macro!(0);
}
Flags::parse_from(it)
@@ -538,7 +538,7 @@ pub fn get_completion<G: clap_complete::Generator>(shell: G, path: &Path) -> Opt
} else {
std::fs::read_to_string(path).unwrap_or_else(|_| {
eprintln!("couldn't read {}", path.display());
crate::detail_exit(1)
crate::detail_exit_macro!(1)
})
};
let mut buf = Vec::new();

@@ -40,7 +40,7 @@ fn rustfmt(src: &Path, rustfmt: &Path, paths: &[PathBuf], check: bool) -> impl F
code, run `./x.py fmt` instead.",
cmd_debug,
);
crate::detail_exit(1);
crate::detail_exit_macro!(1);
}
true
}
@@ -196,7 +196,7 @@ pub fn format(build: &Builder<'_>, check: bool, paths: &[PathBuf]) {
let rustfmt_path = build.initial_rustfmt().unwrap_or_else(|| {
eprintln!("./x.py fmt is not supported on this channel");
crate::detail_exit(1);
crate::detail_exit_macro!(1);
});
assert!(rustfmt_path.exists(), "{}", rustfmt_path.display());
let src = build.src.clone();

@@ -27,6 +27,7 @@ use std::process::{Command, Stdio};
use std::str;
use build_helper::ci::{gha, CiEnv};
use build_helper::detail_exit_macro;
use channel::GitInfo;
use config::{DryRun, Target};
use filetime::FileTime;
@@ -699,7 +700,7 @@ impl Build {
for failure in failures.iter() {
eprintln!(" - {}\n", failure);
}
detail_exit(1);
detail_exit_macro!(1);
}
#[cfg(feature = "build-metrics")]
@@ -1482,7 +1483,7 @@ impl Build {
"Error: Unable to find the stamp file {}, did you try to keep a nonexistent build stage?",
stamp.display()
);
crate::detail_exit(1);
crate::detail_exit_macro!(1);
}
let mut paths = Vec::new();
@@ -1674,7 +1675,7 @@ Alternatively, set `download-ci-llvm = true` in that `[llvm]` section
to download LLVM rather than building it.
"
);
detail_exit(1);
detail_exit_macro!(1);
}
}
@@ -1739,18 +1740,6 @@ fn chmod(path: &Path, perms: u32) {
#[cfg(windows)]
fn chmod(_path: &Path, _perms: u32) {}
/// If code is not 0 (successful exit status), the exit status is 101 (Rust's default error code).
/// If the test is running and code is an error code, it will cause a panic.
fn detail_exit(code: i32) -> ! {
// if in test and code is an error code, panic with status code provided
if cfg!(test) {
panic!("status code: {}", code);
} else {
// otherwise, exit with the provided status code
std::process::exit(code);
}
}
impl Compiler {
pub fn with_stage(mut self, stage: u32) -> Compiler {
self.stage = stage;
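
The deleted `detail_exit` above is what the new `detail_exit_macro!` calls replace throughout bootstrap. The point of making it a macro is that `cfg!(test)` gets evaluated in the calling crate rather than inside `build_helper`; a plausible reconstruction of the shared helper (our sketch, not the PR's verbatim code):

// In build_helper: exit with `code`, or panic under test so the test
// harness can record the failure instead of the process dying.
pub fn detail_exit(code: i32, is_test: bool) -> ! {
    if is_test {
        panic!("status code: {}", code);
    } else {
        std::process::exit(code);
    }
}

#[macro_export]
macro_rules! detail_exit_macro {
    ($code:expr) => {
        // `cfg!(test)` expands at the call site, in the caller's crate.
        $crate::detail_exit($code, cfg!(test))
    };
}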

@@ -30,7 +30,7 @@ pub(crate) fn try_run_tests(builder: &Builder<'_>, cmd: &mut Command) -> bool {
if !run_tests(builder, cmd) {
if builder.fail_fast {
crate::detail_exit(1);
crate::detail_exit_macro!(1);
} else {
let mut failures = builder.delayed_failures.borrow_mut();
failures.push(format!("{cmd:?}"));

@@ -104,7 +104,7 @@ You should install cmake, or set `download-ci-llvm = true` in the
than building it.
"
);
crate::detail_exit(1);
crate::detail_exit_macro!(1);
}
}

@@ -194,7 +194,7 @@ fn setup_config_toml(path: &PathBuf, profile: Profile, config: &Config) {
"note: this will use the configuration in {}",
profile.include_path(&config.src).display()
);
crate::detail_exit(1);
crate::detail_exit_macro!(1);
}
let settings = format!(
@@ -380,7 +380,7 @@ pub fn interactive_path() -> io::Result<Profile> {
io::stdin().read_line(&mut input)?;
if input.is_empty() {
eprintln!("EOF on stdin, when expecting answer to question. Giving up.");
crate::detail_exit(1);
crate::detail_exit_macro!(1);
}
break match parse_with_abbrev(&input) {
Ok(profile) => profile,

@@ -773,7 +773,7 @@ impl Step for Clippy {
}
if !builder.config.cmd.bless() {
crate::detail_exit(1);
crate::detail_exit_macro!(1);
}
let mut cargo = builder.cargo(compiler, Mode::ToolRustc, SourceType::InTree, host, "run");
@@ -1085,7 +1085,7 @@ help: to skip test's attempt to check tidiness, pass `--exclude src/tools/tidy`
PATH = inferred_rustfmt_dir.display(),
CHAN = builder.config.channel,
);
crate::detail_exit(1);
crate::detail_exit_macro!(1);
}
crate::format::format(&builder, !builder.config.cmd.bless(), &[]);
}
@@ -1108,7 +1108,7 @@ help: to skip test's attempt to check tidiness, pass `--exclude src/tools/tidy`
eprintln!(
"x.py completions were changed; run `x.py run generate-completions` to update them"
);
crate::detail_exit(1);
crate::detail_exit_macro!(1);
}
}
}
@@ -1329,7 +1329,7 @@ help: to test the compiler, use `--stage 1` instead
help: to test the standard library, use `--stage 0 library/std` instead
note: if you're sure you want to do this, please open an issue as to why. In the meantime, you can override this with `COMPILETEST_FORCE_STAGE0=1`."
);
crate::detail_exit(1);
crate::detail_exit_macro!(1);
}
let mut compiler = self.compiler;

@@ -116,7 +116,7 @@ impl Step for ToolBuild {
if !is_expected {
if !is_optional_tool {
crate::detail_exit(1);
crate::detail_exit_macro!(1);
} else {
None
}

@@ -91,7 +91,7 @@ fn print_error(tool: &str, submodule: &str) {
eprintln!("If you do NOT intend to update '{}', please ensure you did not accidentally", tool);
eprintln!("change the submodule at '{}'. You may ask your reviewer for the", submodule);
eprintln!("proper steps.");
crate::detail_exit(3);
crate::detail_exit_macro!(3);
}
fn check_changed_files(toolstates: &HashMap<Box<str>, ToolState>) {
@@ -106,7 +106,7 @@ fn check_changed_files(toolstates: &HashMap<Box<str>, ToolState>) {
Ok(o) => o,
Err(e) => {
eprintln!("Failed to get changed files: {:?}", e);
crate::detail_exit(1);
crate::detail_exit_macro!(1);
}
};
@@ -177,7 +177,7 @@ impl Step for ToolStateCheck {
}
if did_error {
crate::detail_exit(1);
crate::detail_exit_macro!(1);
}
check_changed_files(&toolstates);
@@ -223,7 +223,7 @@ impl Step for ToolStateCheck {
}
if did_error {
crate::detail_exit(1);
crate::detail_exit_macro!(1);
}
if builder.config.channel == "nightly" && env::var_os("TOOLSTATE_PUBLISH").is_some() {

@@ -3,6 +3,7 @@
//! Simple things like testing the various filesystem operations here and there,
//! not a lot of interesting happenings here unfortunately.
use build_helper::util::{fail, try_run};
use std::env;
use std::fs;
use std::io;
@@ -230,25 +231,10 @@ pub fn is_valid_test_suite_arg<'a, P: AsRef<Path>>(
pub fn run(cmd: &mut Command, print_cmd_on_fail: bool) {
if !try_run(cmd, print_cmd_on_fail) {
crate::detail_exit(1);
crate::detail_exit_macro!(1);
}
}
pub fn try_run(cmd: &mut Command, print_cmd_on_fail: bool) -> bool {
let status = match cmd.status() {
Ok(status) => status,
Err(e) => fail(&format!("failed to execute command: {:?}\nerror: {}", cmd, e)),
};
if !status.success() && print_cmd_on_fail {
println!(
"\n\ncommand did not execute successfully: {:?}\n\
expected success, got: {}\n\n",
cmd, status
);
}
status.success()
}
pub fn check_run(cmd: &mut Command, print_cmd_on_fail: bool) -> bool {
let status = match cmd.status() {
Ok(status) => status,
@@ -269,7 +255,7 @@ pub fn check_run(cmd: &mut Command, print_cmd_on_fail: bool) -> bool {
pub fn run_suppressed(cmd: &mut Command) {
if !try_run_suppressed(cmd) {
crate::detail_exit(1);
crate::detail_exit_macro!(1);
}
}
@@ -374,11 +360,6 @@ fn dir_up_to_date(src: &Path, threshold: SystemTime) -> bool {
})
}
fn fail(s: &str) -> ! {
eprintln!("\n\n{}\n\n", s);
crate::detail_exit(1);
}
/// Copied from `std::path::absolute` until it stabilizes.
///
/// FIXME: this shouldn't exist.

@@ -1111,8 +1111,8 @@ fn clean_fn_decl_with_args<'tcx>(
args: Arguments,
) -> FnDecl {
let output = match decl.output {
hir::FnRetTy::Return(typ) => Return(clean_ty(typ, cx)),
hir::FnRetTy::DefaultReturn(..) => DefaultReturn,
hir::FnRetTy::Return(typ) => clean_ty(typ, cx),
hir::FnRetTy::DefaultReturn(..) => Type::Tuple(Vec::new()),
};
FnDecl { inputs: args, output, c_variadic: decl.c_variadic }
}
@@ -1126,10 +1126,7 @@ fn clean_fn_decl_from_did_and_sig<'tcx>(
// We assume all empty tuples are default return type. This theoretically can discard `-> ()`,
// but shouldn't change any code meaning.
let output = match clean_middle_ty(sig.output(), cx, None) {
Type::Tuple(inner) if inner.is_empty() => DefaultReturn,
ty => Return(ty),
};
let output = clean_middle_ty(sig.output(), cx, None);
FnDecl {
output,

View File

@ -42,7 +42,6 @@ use crate::formats::item_type::ItemType;
use crate::html::render::Context;
use crate::passes::collect_intra_doc_links::UrlFragment;
pub(crate) use self::FnRetTy::*;
pub(crate) use self::ItemKind::*;
pub(crate) use self::SelfTy::*;
pub(crate) use self::Type::{
@ -1353,7 +1352,7 @@ pub(crate) struct Function {
#[derive(Clone, PartialEq, Eq, Debug, Hash)]
pub(crate) struct FnDecl {
pub(crate) inputs: Arguments,
pub(crate) output: FnRetTy,
pub(crate) output: Type,
pub(crate) c_variadic: bool,
}
@ -1371,18 +1370,16 @@ impl FnDecl {
///
/// This function will panic if the return type does not match the expected sugaring for async
/// functions.
pub(crate) fn sugared_async_return_type(&self) -> FnRetTy {
match &self.output {
FnRetTy::Return(Type::ImplTrait(bounds)) => match &bounds[0] {
GenericBound::TraitBound(PolyTrait { trait_, .. }, ..) => {
pub(crate) fn sugared_async_return_type(&self) -> Type {
if let Type::ImplTrait(v) = &self.output &&
let [GenericBound::TraitBound(PolyTrait { trait_, .. }, _ )] = &v[..]
{
let bindings = trait_.bindings().unwrap();
let ret_ty = bindings[0].term();
let ty = ret_ty.ty().expect("Unexpected constant return term");
FnRetTy::Return(ty.clone())
}
_ => panic!("unexpected desugaring of async function"),
},
_ => panic!("unexpected desugaring of async function"),
ty.clone()
} else {
panic!("unexpected desugaring of async function")
}
}
}
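For context, a minimal sketch of the async-fn sugaring this method unwraps (the function names below are illustrative, not taken from this diff):

// Illustrative only: an `async fn` such as
async fn fetch() -> u32 { 0 }
// is documented through its desugared return type,
fn fetch_desugared() -> impl std::future::Future<Output = u32> {
    async { 0 }
}
// and `sugared_async_return_type` recovers `u32` from the `Output = u32`
// binding on the single `Future` trait bound.
fn main() {}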
@ -1425,21 +1422,6 @@ impl Argument {
}
}
#[derive(Clone, PartialEq, Eq, Debug, Hash)]
pub(crate) enum FnRetTy {
Return(Type),
DefaultReturn,
}
impl FnRetTy {
pub(crate) fn as_return(&self) -> Option<&Type> {
match self {
Return(ret) => Some(ret),
DefaultReturn => None,
}
}
}
#[derive(Clone, Debug)]
pub(crate) struct Trait {
pub(crate) def_id: DefId,
@ -1641,6 +1623,10 @@ impl Type {
matches!(self, Type::ImplTrait(_))
}
pub(crate) fn is_unit(&self) -> bool {
matches!(self, Type::Tuple(v) if v.is_empty())
}
pub(crate) fn projection(&self) -> Option<(&Type, DefId, PathSegment)> {
if let QPath(box QPathData { self_type, trait_, assoc, .. }) = self {
Some((self_type, trait_.as_ref()?.def_id(), assoc.clone()))
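To make the `FnRetTy` removal concrete, here is a self-contained miniature of the new model (the types below are simplified stand-ins for rustdoc's `clean::Type`, not the real definitions): the default return type is now just the empty tuple, and `is_unit` is the one check renderers need.

#[derive(Debug)]
enum Type {
    Tuple(Vec<Type>),
    Named(&'static str),
}

impl Type {
    fn is_unit(&self) -> bool {
        matches!(self, Type::Tuple(v) if v.is_empty())
    }
}

// Mirrors the shape of `FnDecl::print_output`: unit returns render nothing.
fn print_output(ret: &Type) -> String {
    if ret.is_unit() { String::new() } else { format!(" -> {:?}", ret) }
}

fn main() {
    let default_ret = Type::Tuple(Vec::new()); // `fn f()` and `fn f() -> ()`
    assert_eq!(print_output(&default_ret), "");
    assert_eq!(print_output(&Type::Named("u32")), " -> Named(\"u32\")");
}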

View File

@ -1257,9 +1257,9 @@ impl clean::Impl {
};
primitive_link_fragment(f, PrimitiveType::Tuple, &format!("fn ({name}₁, {name}₂, …, {name}{ellipsis})"), "#trait-implementations-1", cx)?;
// Write output.
if let clean::FnRetTy::Return(ty) = &bare_fn.decl.output {
if !bare_fn.decl.output.is_unit() {
write!(f, " -> ")?;
fmt_type(ty, f, use_absolute, cx)?;
fmt_type(&bare_fn.decl.output, f, use_absolute, cx)?;
}
} else if let Some(ty) = self.kind.as_blanket_ty() {
fmt_type(ty, f, use_absolute, cx)?;
@ -1296,22 +1296,6 @@ impl clean::Arguments {
}
}
impl clean::FnRetTy {
pub(crate) fn print<'a, 'tcx: 'a>(
&'a self,
cx: &'a Context<'tcx>,
) -> impl fmt::Display + 'a + Captures<'tcx> {
display_fn(move |f| match self {
clean::Return(clean::Tuple(tys)) if tys.is_empty() => Ok(()),
clean::Return(ty) if f.alternate() => {
write!(f, " -> {:#}", ty.print(cx))
}
clean::Return(ty) => write!(f, " -&gt; {}", ty.print(cx)),
clean::DefaultReturn => Ok(()),
})
}
}
impl clean::BareFunctionDecl {
fn print_hrtb_with_space<'a, 'tcx: 'a>(
&'a self,
@ -1366,7 +1350,7 @@ impl clean::FnDecl {
"({args:#}{ellipsis}){arrow:#}",
args = self.inputs.print(cx),
ellipsis = ellipsis,
arrow = self.output.print(cx)
arrow = self.print_output(cx)
)
} else {
write!(
@ -1374,7 +1358,7 @@ impl clean::FnDecl {
"({args}{ellipsis}){arrow}",
args = self.inputs.print(cx),
ellipsis = ellipsis,
arrow = self.output.print(cx)
arrow = self.print_output(cx)
)
}
})
@ -1464,9 +1448,22 @@ impl clean::FnDecl {
Some(n) => write!(f, "\n{})", Indent(n))?,
};
fmt::Display::fmt(&self.output.print(cx), f)?;
fmt::Display::fmt(&self.print_output(cx), f)?;
Ok(())
}
pub(crate) fn print_output<'a, 'tcx: 'a>(
&'a self,
cx: &'a Context<'tcx>,
) -> impl fmt::Display + 'a + Captures<'tcx> {
display_fn(move |f| match &self.output {
clean::Tuple(tys) if tys.is_empty() => Ok(()),
ty if f.alternate() => {
write!(f, " -> {:#}", ty.print(cx))
}
ty => write!(f, " -&gt; {}", ty.print(cx)),
})
}
}
pub(crate) fn visibility_print_with_space<'a, 'tcx: 'a>(

View File

@ -844,7 +844,7 @@ fn assoc_method(
+ name.as_str().len()
+ generics_len;
let notable_traits = d.output.as_return().and_then(|output| notable_traits_button(output, cx));
let notable_traits = notable_traits_button(&d.output, cx);
let (indent, indent_str, end_newline) = if parent == ItemType::Trait {
header_len += 4;
@ -1282,6 +1282,11 @@ fn should_render_item(item: &clean::Item, deref_mut_: bool, tcx: TyCtxt<'_>) ->
pub(crate) fn notable_traits_button(ty: &clean::Type, cx: &mut Context<'_>) -> Option<String> {
let mut has_notable_trait = false;
if ty.is_unit() {
// Very common fast path.
return None;
}
let did = ty.def_id(cx.cache())?;
// Box has pass-through impls for Read, Write, Iterator, and Future when the

View File

@ -587,8 +587,7 @@ fn item_function(w: &mut Buffer, cx: &mut Context<'_>, it: &clean::Item, f: &cle
+ name.as_str().len()
+ generics_len;
let notable_traits =
f.decl.output.as_return().and_then(|output| notable_traits_button(output, cx));
let notable_traits = notable_traits_button(&f.decl.output, cx);
wrap_item(w, |w| {
w.reserve(header_len);
@ -1420,30 +1419,36 @@ fn item_macro(w: &mut Buffer, cx: &mut Context<'_>, it: &clean::Item, t: &clean:
write!(w, "{}", document(cx, it, None, HeadingOffset::H2))
}
fn item_proc_macro(w: &mut Buffer, cx: &mut Context<'_>, it: &clean::Item, m: &clean::ProcMacro) {
wrap_item(w, |w| {
fn item_proc_macro(
w: &mut impl fmt::Write,
cx: &mut Context<'_>,
it: &clean::Item,
m: &clean::ProcMacro,
) {
let mut buffer = Buffer::new();
wrap_item(&mut buffer, |buffer| {
let name = it.name.expect("proc-macros always have names");
match m.kind {
MacroKind::Bang => {
write!(w, "{}!() {{ /* proc-macro */ }}", name);
write!(buffer, "{}!() {{ /* proc-macro */ }}", name);
}
MacroKind::Attr => {
write!(w, "#[{}]", name);
write!(buffer, "#[{}]", name);
}
MacroKind::Derive => {
write!(w, "#[derive({})]", name);
write!(buffer, "#[derive({})]", name);
if !m.helpers.is_empty() {
w.push_str("\n{\n");
w.push_str(" // Attributes available to this derive:\n");
buffer.push_str("\n{\n");
buffer.push_str(" // Attributes available to this derive:\n");
for attr in &m.helpers {
writeln!(w, " #[{}]", attr);
writeln!(buffer, " #[{}]", attr);
}
w.push_str("}\n");
buffer.push_str("}\n");
}
}
}
});
write!(w, "{}", document(cx, it, None, HeadingOffset::H2))
write!(w, "{}{}", buffer.into_inner(), document(cx, it, None, HeadingOffset::H2)).unwrap();
}
fn item_primitive(w: &mut Buffer, cx: &mut Context<'_>, it: &clean::Item) {

View File

@ -7,7 +7,7 @@ use rustc_span::symbol::Symbol;
use serde::ser::{Serialize, SerializeStruct, Serializer};
use crate::clean;
use crate::clean::types::{FnRetTy, Function, Generics, ItemId, Type, WherePredicate};
use crate::clean::types::{Function, Generics, ItemId, Type, WherePredicate};
use crate::formats::cache::{Cache, OrphanImplItem};
use crate::formats::item_type::ItemType;
use crate::html::format::join_with_double_colon;
@ -656,22 +656,9 @@ fn get_fn_inputs_and_outputs<'tcx>(
}
let mut ret_types = Vec::new();
match decl.output {
FnRetTy::Return(ref return_type) => {
add_generics_and_bounds_as_types(
self_,
generics,
return_type,
tcx,
0,
&mut ret_types,
cache,
);
add_generics_and_bounds_as_types(self_, generics, &decl.output, tcx, 0, &mut ret_types, cache);
if ret_types.is_empty() {
ret_types.push(get_index_type(return_type, vec![]));
ret_types.push(get_index_type(&decl.output, vec![]));
}
}
_ => {}
};
(all_types, ret_types)
}

View File

@ -624,10 +624,7 @@ impl FromWithTcx<clean::FnDecl> for FnDecl {
.into_iter()
.map(|arg| (arg.name.to_string(), arg.type_.into_tcx(tcx)))
.collect(),
output: match output {
clean::FnRetTy::Return(t) => Some(t.into_tcx(tcx)),
clean::FnRetTy::DefaultReturn => None,
},
output: if output.is_unit() { None } else { Some(output.into_tcx(tcx)) },
c_variadic,
}
}

View File

@ -267,6 +267,10 @@ impl<'a, 'tcx> RustdocVisitor<'a, 'tcx> {
let is_no_inline = use_attrs.lists(sym::doc).has_word(sym::no_inline)
|| use_attrs.lists(sym::doc).has_word(sym::hidden);
if is_no_inline {
return false;
}
// For cross-crate impl inlining we need to know whether items are
// reachable in documentation -- a previously unreachable item can be
// made reachable by cross-crate inlining which we're checking here.
@ -281,15 +285,21 @@ impl<'a, 'tcx> RustdocVisitor<'a, 'tcx> {
};
let is_private = !self.cx.cache.effective_visibilities.is_directly_public(tcx, ori_res_did);
let is_hidden = inherits_doc_hidden(tcx, res_did, None);
let is_hidden = tcx.is_doc_hidden(ori_res_did);
let item = tcx.hir().get_by_def_id(res_did);
if !please_inline {
let inherits_hidden = inherits_doc_hidden(tcx, res_did, None);
// Only inline if requested or if the item would otherwise be stripped.
if (!please_inline && !is_private && !is_hidden) || is_no_inline {
//
// If it's a doc hidden module, we need to keep it in case some of its inner items
// are re-exported.
if (!is_private && !inherits_hidden) || (
is_hidden &&
!matches!(item, Node::Item(&hir::Item { kind: hir::ItemKind::Mod(_), .. }))
) {
return false;
}
if !please_inline &&
let Some(item_def_id) = reexport_chain(tcx, def_id, res_did).iter()
} else if let Some(item_def_id) = reexport_chain(tcx, def_id, res_did).iter()
.flat_map(|reexport| reexport.id()).map(|id| id.expect_local())
.chain(iter::once(res_did)).nth(1) &&
item_def_id != def_id &&
@ -303,9 +313,10 @@ impl<'a, 'tcx> RustdocVisitor<'a, 'tcx> {
// The imported item is public and not `doc(hidden)` so no need to inline it.
return false;
}
}
let is_bang_macro = matches!(
tcx.hir().get_by_def_id(res_did),
item,
Node::Item(&hir::Item { kind: hir::ItemKind::Macro(_, MacroKind::Bang), .. })
);
@ -317,12 +328,7 @@ impl<'a, 'tcx> RustdocVisitor<'a, 'tcx> {
// Bang macros are handled a bit on their own because of how the compiler
// treats them. If they have `#[doc(hidden)]` and the re-export doesn't have
// `#[doc(inline)]`, then we don't inline it.
Node::Item(_)
if is_bang_macro
&& !please_inline
&& renamed.is_some()
&& self.cx.tcx.is_doc_hidden(ori_res_did) =>
{
Node::Item(_) if is_bang_macro && !please_inline && renamed.is_some() && is_hidden => {
return false;
}
Node::Item(&hir::Item { kind: hir::ItemKind::Mod(ref m), .. }) if glob => {
@ -455,6 +461,7 @@ impl<'a, 'tcx> RustdocVisitor<'a, 'tcx> {
is_glob,
please_inline,
) {
debug!("Inlining {:?}", item.owner_id.def_id);
continue;
}
}

View File

@ -1,2 +1,3 @@
pub mod ci;
pub mod git;
pub mod util;

View File

@ -0,0 +1,41 @@
use std::process::Command;
/// Invokes `build_helper::util::detail_exit` with `cfg!(test)`
#[macro_export]
macro_rules! detail_exit_macro {
($code:expr) => {
build_helper::util::detail_exit($code, cfg!(test));
};
}
/// If `is_test` is true, this panics with the given status code instead of exiting the
/// process, so the test harness reports the failure with exit status 101 (Rust's default
/// error code). Otherwise, the process exits with the provided status code.
pub fn detail_exit(code: i32, is_test: bool) -> ! {
// in tests, panic with the provided status code so the test harness can catch it
if is_test {
panic!("status code: {}", code);
} else {
// otherwise, exit with the provided status code
std::process::exit(code);
}
}
pub fn fail(s: &str) -> ! {
eprintln!("\n\n{}\n\n", s);
detail_exit(1, cfg!(test));
}
pub fn try_run(cmd: &mut Command, print_cmd_on_fail: bool) -> bool {
let status = match cmd.status() {
Ok(status) => status,
Err(e) => fail(&format!("failed to execute command: {:?}\nerror: {}", cmd, e)),
};
if !status.success() && print_cmd_on_fail {
println!(
"\n\ncommand did not execute successfully: {:?}\n\
expected success, got: {}\n\n",
cmd, status
);
}
status.success()
}
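A hedged usage sketch of the new helpers (the call site below is hypothetical, not part of this rollup): callers go through `detail_exit_macro!`, which forwards `cfg!(test)` from the calling crate, so the same line exits the process in a normal build but panics under the test harness.

use build_helper::detail_exit_macro;

fn require(ok: bool) {
    if !ok {
        eprintln!("fatal: precondition failed");
        // Exits with status 1 in a normal build; panics when compiled for
        // tests, because `cfg!(test)` is evaluated here, in the caller.
        detail_exit_macro!(1);
    }
}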

View File

@ -4,6 +4,7 @@ version = "0.1.0"
edition = "2021"
[dependencies]
build_helper = { path = "../build_helper" }
compiletest = { path = "../compiletest" }
getopts = "0.2"
walkdir = "2"

View File

@ -1,3 +1,4 @@
use build_helper::util::try_run;
use compiletest::header::TestProps;
use config::Config;
use std::path::{Path, PathBuf};
@ -60,23 +61,6 @@ fn find_librs<P: AsRef<Path>>(path: P) -> Option<PathBuf> {
None
}
// FIXME: move `bootstrap::util::try_run` into `build_helper` crate
// and use that one instead of creating this function.
fn try_run(cmd: &mut Command, print_cmd_on_fail: bool) -> bool {
let status = match cmd.status() {
Ok(status) => status,
Err(e) => panic!("failed to execute command: {:?}\nerror: {}", cmd, e),
};
if !status.success() && print_cmd_on_fail {
println!(
"\n\ncommand did not execute successfully: {:?}\n\
expected success, got: {}\n\n",
cmd, status
);
}
status.success()
}
fn main() {
let config = Arc::new(Config::from_args(env::args().collect()));
@ -143,6 +127,16 @@ If you want to install the `browser-ui-test` dependency, run `npm install browse
}
let mut command = Command::new(&config.nodejs);
if let Ok(current_dir) = env::current_dir() {
let local_node_modules = current_dir.join("node_modules");
if local_node_modules.exists() {
// Use the local node_modules directory if it exists.
// This is useful when we run rustdoc-gui-test from outside of the source root.
env::set_var("NODE_PATH", local_node_modules);
}
}
command
.arg(config.rust_src.join("src/tools/rustdoc-gui/tester.js"))
.arg("--jobs")

View File

@ -4,7 +4,6 @@
// ignore-debug (the extra assertions get in the way)
#![crate_type = "lib"]
#![feature(array_zip)]
// CHECK-LABEL: @short_integer_map
#[no_mangle]
@ -16,16 +15,6 @@ pub fn short_integer_map(x: [u32; 8]) -> [u32; 8] {
x.map(|x| 2 * x + 1)
}
// CHECK-LABEL: @short_integer_zip_map
#[no_mangle]
pub fn short_integer_zip_map(x: [u32; 8], y: [u32; 8]) -> [u32; 8] {
// CHECK: %[[A:.+]] = load <8 x i32>
// CHECK: %[[B:.+]] = load <8 x i32>
// CHECK: sub <8 x i32> %[[B]], %[[A]]
// CHECK: store <8 x i32>
x.zip(y).map(|(x, y)| x - y)
}
// This test is checking that LLVM can SRoA away a bunch of the overhead,
// like fully moving the iterators to registers. Notably, previous implementations
// of `map` ended up `alloca`ing the whole `array::IntoIterator`, meaning both a

View File

@ -1,7 +1,6 @@
// compile-flags: -C opt-level=3 -Z merge-functions=disabled
// only-x86_64
#![crate_type = "lib"]
#![feature(array_zip)]
// CHECK-LABEL: @auto_vectorize_direct
#[no_mangle]
@ -32,12 +31,12 @@ pub fn auto_vectorize_loop(a: [f32; 4], b: [f32; 4]) -> [f32; 4] {
c
}
// CHECK-LABEL: @auto_vectorize_array_zip_map
// CHECK-LABEL: @auto_vectorize_array_from_fn
#[no_mangle]
pub fn auto_vectorize_array_zip_map(a: [f32; 4], b: [f32; 4]) -> [f32; 4] {
pub fn auto_vectorize_array_from_fn(a: [f32; 4], b: [f32; 4]) -> [f32; 4] {
// CHECK: load <4 x float>
// CHECK: load <4 x float>
// CHECK: fadd <4 x float>
// CHECK: store <4 x float>
a.zip(b).map(|(a, b)| a + b)
std::array::from_fn(|i| a[i] + b[i])
}
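With `array::zip` removed by this rollup (#112096), the test above now pins the replacement idiom; a runnable sketch of the same pattern:

// Index-based `std::array::from_fn` (stable since Rust 1.63) replaces
// the old `a.zip(b).map(..)` chain.
fn add4(a: [f32; 4], b: [f32; 4]) -> [f32; 4] {
    std::array::from_fn(|i| a[i] + b[i])
}

fn main() {
    assert_eq!(add4([1.0; 4], [2.0; 4]), [3.0; 4]);
}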

View File

@ -0,0 +1,16 @@
// This test ensures that a re-export of a `#[doc(hidden)]` item inside a private
// module will still be displayed (the re-export, not the item).
#![crate_name = "foo"]
mod private_module {
#[doc(hidden)]
pub struct Public;
}
// @has 'foo/index.html'
// @has - '//*[@id="reexport.Foo"]/code' 'pub use crate::private_module::Public as Foo;'
pub use crate::private_module::Public as Foo;
// Glob re-exports with no visible items should not be displayed.
// @count - '//*[@class="item-table"]/li' 1
pub use crate::private_module::*;

View File

@ -0,0 +1,15 @@
// build-pass
// regression test for #112051
#![feature(offset_of)]
struct S<T: ?Sized> {
a: u64,
b: T,
}
trait Tr {}
fn main() {
let _a = core::mem::offset_of!(S<dyn Tr>, a);
let _b = core::mem::offset_of!((u64, dyn Tr), 0);
}
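A hedged aside on why this compiles (mirroring the test's shape on a nightly toolchain, since `offset_of` was still feature-gated here): the offset of a field that precedes the unsized tail never depends on the tail's dynamic size, so it is an ordinary compile-time value.

#![feature(offset_of)] // nightly-only at the time of this rollup

#[allow(dead_code)]
struct S<T: ?Sized> {
    a: u64,
    b: T,
}

fn main() {
    // `a` sits at a fixed offset no matter what `T` is instantiated with,
    // so `?Sized` types are fine for every field before the unsized tail.
    let off = core::mem::offset_of!(S<[u8]>, a);
    println!("offset of `a` in S<[u8]>: {off}");
}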