use crate::code_stats::CodeStats;
pub use crate::code_stats::{DataTypeKind, FieldInfo, FieldKind, SizeKind, VariantInfo};
use crate::config::{
    self, CrateType, FunctionReturn, InstrumentCoverage, OptLevel, OutFileName, OutputType,
    RemapPathScopeComponents, SwitchWithOptPath,
};
use crate::config::{ErrorOutputType, Input};
use crate::errors;
use crate::parse::{add_feature_diagnostics, ParseSess};
use crate::search_paths::{PathKind, SearchPath};
use crate::{filesearch, lint};

use rustc_data_structures::flock;
use rustc_data_structures::fx::{FxHashMap, FxIndexSet};
use rustc_data_structures::jobserver::{self, Client};
use rustc_data_structures::profiling::{SelfProfiler, SelfProfilerRef};
use rustc_data_structures::sync::{
    AtomicU64, DynSend, DynSync, Lock, Lrc, OneThread, Ordering::SeqCst,
};
use rustc_errors::annotate_snippet_emitter_writer::AnnotateSnippetEmitterWriter;
use rustc_errors::emitter::{DynEmitter, HumanEmitter, HumanReadableErrorType};
use rustc_errors::json::JsonEmitter;
use rustc_errors::registry::Registry;
use rustc_errors::{
    error_code, fallback_fluent_bundle, DiagCtxt, DiagnosticBuilder, DiagnosticId,
    DiagnosticMessage, ErrorGuaranteed, FatalAbort, FluentBundle, IntoDiagnostic,
    LazyFallbackBundle, TerminalUrl,
};
use rustc_macros::HashStable_Generic;
pub use rustc_span::def_id::StableCrateId;
use rustc_span::edition::Edition;
use rustc_span::source_map::{FileLoader, RealFileLoader, SourceMap};
use rustc_span::{SourceFileHashAlgorithm, Span, Symbol};
use rustc_target::asm::InlineAsmArch;
use rustc_target::spec::{CodeModel, PanicStrategy, RelocModel, RelroLevel};
use rustc_target::spec::{
    DebuginfoKind, SanitizerSet, SplitDebuginfo, StackProtector, Target, TargetTriple, TlsModel,
};

use std::any::Any;
use std::cell::{self, RefCell};
use std::env;
use std::fmt;
use std::ops::{Div, Mul};
use std::path::{Path, PathBuf};
use std::str::FromStr;
use std::sync::{atomic::AtomicBool, Arc};

struct OptimizationFuel {
    /// If `-zfuel=crate=n` is specified, initially set to `n`, otherwise `0`.
    remaining: u64,
    /// We're rejecting all further optimizations.
    out_of_fuel: bool,
}

/// The behavior of the CTFE engine when an error occurs, with regard to backtraces.
#[derive(Clone, Copy)]
pub enum CtfeBacktrace {
    /// Do nothing special, return the error as usual without a backtrace.
    Disabled,
    /// Capture a backtrace at the point the error is created and return it in the error
    /// (to be printed later if/when the error ever actually gets shown to the user).
    Capture,
    /// Capture a backtrace at the point the error is created and immediately print it out.
    Immediate,
}

/// New-type wrapper around `usize` for representing limits. Ensures that comparisons against
/// limits are consistent throughout the compiler.
#[derive(Clone, Copy, Debug, HashStable_Generic)]
pub struct Limit(pub usize);

impl Limit {
    /// Create a new limit from a `usize`.
    pub fn new(value: usize) -> Self {
        Limit(value)
    }

    /// Check that `value` is within the limit. Ensures that the same comparisons are used
    /// throughout the compiler, as mismatches can cause ICEs, see #72540.
    #[inline]
    pub fn value_within_limit(&self, value: usize) -> bool {
        value <= self.0
    }
}

impl From<usize> for Limit {
    fn from(value: usize) -> Self {
        Self::new(value)
    }
}

impl fmt::Display for Limit {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.0.fmt(f)
    }
}

impl Div<usize> for Limit {
    type Output = Limit;

    fn div(self, rhs: usize) -> Self::Output {
        Limit::new(self.0 / rhs)
    }
}

impl Mul<usize> for Limit {
    type Output = Limit;

    fn mul(self, rhs: usize) -> Self::Output {
        Limit::new(self.0 * rhs)
    }
}

impl rustc_errors::IntoDiagnosticArg for Limit {
    fn into_diagnostic_arg(self) -> rustc_errors::DiagnosticArgValue<'static> {
        self.to_string().into_diagnostic_arg()
    }
}

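// Illustrative sketch (not part of the original module): a small test exercising the
// `Limit` helpers defined above. The concrete numbers are arbitrary examples.
#[cfg(test)]
mod limit_usage_example {
    use super::Limit;

    #[test]
    fn limit_comparison_and_arithmetic() {
        let limit = Limit::new(128);
        // `value_within_limit` is inclusive: the limit itself is still allowed.
        assert!(limit.value_within_limit(128));
        assert!(!limit.value_within_limit(129));
        // `Div` and `Mul` produce new `Limit` values.
        assert!(!(limit / 2).value_within_limit(65));
        assert!((limit * 2).value_within_limit(256));
    }
}
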
#[derive(Clone, Copy, Debug, HashStable_Generic)]
pub struct Limits {
    /// The maximum recursion limit for potentially infinitely recursive
    /// operations such as auto-dereference and monomorphization.
    pub recursion_limit: Limit,
    /// The size at which the `large_assignments` lint starts
    /// being emitted.
    pub move_size_limit: Limit,
    /// The maximum length of types during monomorphization.
    pub type_length_limit: Limit,
}

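// For orientation (assumption; the values below are examples only): `recursion_limit`
// and `type_length_limit` are normally raised via crate-root attributes, e.g.
//
//     #![recursion_limit = "256"]
//     #![type_length_limit = "1048576"]
//
// while `move_size_limit` is driven by the unstable `-Zmove-size-limit` flag.
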
pub struct CompilerIO {
    pub input: Input,
    pub output_dir: Option<PathBuf>,
    pub output_file: Option<OutFileName>,
    pub temps_dir: Option<PathBuf>,
}

pub trait LintStoreMarker: Any + DynSync + DynSend {}

/// Represents the data associated with a compilation
/// session for a single crate.
pub struct Session {
    pub target: Target,
    pub host: Target,
    pub opts: config::Options,
    pub host_tlib_path: Lrc<SearchPath>,
    pub target_tlib_path: Lrc<SearchPath>,
    pub parse_sess: ParseSess,
    pub sysroot: PathBuf,
    /// Input, input file path and output file path to this compilation process.
    pub io: CompilerIO,

    incr_comp_session: OneThread<RefCell<IncrCompSession>>,

    /// Used by `-Z self-profile`.
    pub prof: SelfProfilerRef,

    /// Data about code being compiled, gathered during compilation.
    pub code_stats: CodeStats,

    /// Tracks fuel info if `-zfuel=crate=n` is specified.
    optimization_fuel: Lock<OptimizationFuel>,

    /// Always set to zero and incremented so that we can print fuel expended by a crate.
    pub print_fuel: AtomicU64,

    /// Loaded up early on in the initialization of this `Session` to avoid
    /// false positives about a job server in our environment.
    pub jobserver: Client,

    /// This only ever stores a `LintStore`, but we don't want a dependency on that type here.
    pub lint_store: Option<Lrc<dyn LintStoreMarker>>,

    /// Should be set if any lints are registered in `lint_store`.
    pub registered_lints: bool,

    /// Cap on lint levels, specified by the driver.
    pub driver_lint_caps: FxHashMap<lint::LintId, lint::Level>,

    /// Tracks the current behavior of the CTFE engine when an error occurs.
    /// Options range from returning the error without a backtrace to returning an error
    /// and immediately printing the backtrace to stderr.
    /// The `Lock` is only used by miri to allow setting `ctfe_backtrace` after analysis when
    /// `MIRI_BACKTRACE` is set. This makes it only apply to miri's errors and not to all CTFE
    /// errors.
    pub ctfe_backtrace: Lock<CtfeBacktrace>,

    /// This tracks where `-Zunleash-the-miri-inside-of-you` was used to get around a
    /// const check, optionally with the relevant feature gate. We use this to
    /// warn about unleashing, but with a single diagnostic instead of dozens that
    /// drown everything else in noise.
    miri_unleashed_features: Lock<Vec<(Span, Option<Symbol>)>>,

    /// Architecture to use for interpreting `asm!`.
    pub asm_arch: Option<InlineAsmArch>,

    /// Set of enabled features for the current target.
    pub target_features: FxIndexSet<Symbol>,

    /// Set of enabled features for the current target, including unstable ones.
    pub unstable_target_features: FxIndexSet<Symbol>,

    /// The version of the rustc process, possibly including a commit hash and description.
    pub cfg_version: &'static str,

    /// The inner atomic value is set to `true` when a feature marked as `internal` is
    /// enabled. This hides the "please report a bug" note, as ICEs with internal
    /// features are wontfix, and they are usually the cause of the ICEs.
    /// `None` signifies that this is not tracked.
    pub using_internal_features: Arc<AtomicBool>,

    /// All command-line args used to invoke the compiler, with `@file` args fully expanded.
    /// This will only be used within debug info, e.g. in the PDB file on Windows.
    /// This is mainly useful for other tools that read that debuginfo to figure out
    /// how to call the compiler with the same arguments.
    pub expanded_args: Vec<String>,
}

#[derive(PartialEq, Eq, PartialOrd, Ord)]
pub enum MetadataKind {
    None,
    Uncompressed,
    Compressed,
}

#[derive(Clone, Copy)]
pub enum CodegenUnits {
    /// Specified by the user. In this case we try fairly hard to produce the
    /// number of CGUs requested.
    User(usize),

    /// A default value, i.e. not specified by the user. In this case we take
    /// more liberties about CGU formation, e.g. avoid producing very small
    /// CGUs.
    Default(usize),
}

impl CodegenUnits {
    pub fn as_usize(self) -> usize {
        match self {
            CodegenUnits::User(n) => n,
            CodegenUnits::Default(n) => n,
        }
    }
}

impl Session {
    pub fn miri_unleashed_feature(&self, span: Span, feature_gate: Option<Symbol>) {
        self.miri_unleashed_features.lock().push((span, feature_gate));
    }

    pub fn local_crate_source_file(&self) -> Option<PathBuf> {
        let path = self.io.input.opt_path()?;
        if self.should_prefer_remapped_for_codegen() {
            Some(self.opts.file_path_mapping().map_prefix(path).0.into_owned())
        } else {
            Some(path.to_path_buf())
        }
    }

    fn check_miri_unleashed_features(&self) {
        let unleashed_features = self.miri_unleashed_features.lock();
        if !unleashed_features.is_empty() {
            let mut must_err = false;
            // Create a diagnostic pointing at where things got unleashed.
            self.dcx().emit_warning(errors::SkippingConstChecks {
                unleashed_features: unleashed_features
                    .iter()
                    .map(|(span, gate)| {
                        gate.map(|gate| {
                            must_err = true;
                            errors::UnleashedFeatureHelp::Named { span: *span, gate }
                        })
                        .unwrap_or(errors::UnleashedFeatureHelp::Unnamed { span: *span })
                    })
                    .collect(),
            });

            // If we should err, make sure we did.
            if must_err && self.dcx().has_errors().is_none() {
                // We have skipped a feature gate, and not run into other errors... reject.
                self.dcx().emit_err(errors::NotCircumventFeature);
            }
        }
    }

    /// Invoked all the way at the end to finish off diagnostics printing.
    pub fn finish_diagnostics(&self, registry: &Registry) {
        self.check_miri_unleashed_features();
        self.dcx().print_error_count(registry);
        self.emit_future_breakage();
    }

    fn emit_future_breakage(&self) {
        if !self.opts.json_future_incompat {
            return;
        }

        let diags = self.dcx().take_future_breakage_diagnostics();
        if diags.is_empty() {
            return;
        }
        self.dcx().emit_future_breakage_report(diags);
    }

    /// Returns `true` if the crate is a test crate.
    pub fn is_test_crate(&self) -> bool {
        self.opts.test
    }

    #[track_caller]
    pub fn create_feature_err<'a>(
        &'a self,
        err: impl IntoDiagnostic<'a>,
        feature: Symbol,
    ) -> DiagnosticBuilder<'a> {
        let mut err = self.dcx().create_err(err);
        if err.code.is_none() {
            err.code(error_code!(E0658));
        }
        add_feature_diagnostics(&mut err, &self.parse_sess, feature);
        err
    }

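    // Illustrative call-site sketch (the diagnostic type and feature name below are
    // hypothetical, not defined in this crate):
    //
    //     sess.create_feature_err(SomeGatedDiag { span }, sym::some_unstable_feature)
    //         .emit();
    //
    // The `E0658` code is attached automatically unless the diagnostic already set one.
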
    pub fn compile_status(&self) -> Result<(), ErrorGuaranteed> {
        if let Some(reported) = self.dcx().has_errors_or_lint_errors() {
            let _ = self.dcx().emit_stashed_diagnostics();
            Err(reported)
        } else {
            Ok(())
        }
    }

    // FIXME(matthewjasper) Remove this method, it should never be needed.
    pub fn track_errors<F, T>(&self, f: F) -> Result<T, ErrorGuaranteed>
    where
        F: FnOnce() -> T,
    {
        let old_count = self.dcx().err_count();
        let result = f();
        if self.dcx().err_count() == old_count {
            Ok(result)
        } else {
            Err(self.dcx().span_delayed_bug(
                rustc_span::DUMMY_SP,
                "`self.err_count()` changed but an error was not emitted",
            ))
        }
    }

    /// Used for code paths of expensive computations that should only take place when
    /// warnings or errors are emitted. If no messages are emitted ("good path"), then
    /// it's likely a bug.
    pub fn good_path_delayed_bug(&self, msg: impl Into<DiagnosticMessage>) {
        if self.opts.unstable_opts.print_type_sizes
            || self.opts.unstable_opts.query_dep_graph
            || self.opts.unstable_opts.dump_mir.is_some()
            || self.opts.unstable_opts.unpretty.is_some()
            || self.opts.output_types.contains_key(&OutputType::Mir)
            || std::env::var_os("RUSTC_LOG").is_some()
        {
            return;
        }

        self.dcx().good_path_delayed_bug(msg)
    }

    #[inline]
    pub fn dcx(&self) -> &DiagCtxt {
        &self.parse_sess.dcx
    }

    #[inline]
    pub fn source_map(&self) -> &SourceMap {
        self.parse_sess.source_map()
    }

    /// Returns `true` if internal lints should be added to the lint store - i.e. if
    /// `-Zunstable-options` is provided and this isn't rustdoc (internal lints can trigger errors
    /// to be emitted under rustdoc).
    pub fn enable_internal_lints(&self) -> bool {
        self.unstable_options() && !self.opts.actually_rustdoc
    }

    pub fn instrument_coverage(&self) -> bool {
        self.opts.cg.instrument_coverage() != InstrumentCoverage::Off
    }

    pub fn instrument_coverage_branch(&self) -> bool {
        self.opts.cg.instrument_coverage() == InstrumentCoverage::Branch
    }

    pub fn instrument_coverage_except_unused_generics(&self) -> bool {
        self.opts.cg.instrument_coverage() == InstrumentCoverage::ExceptUnusedGenerics
    }

    pub fn instrument_coverage_except_unused_functions(&self) -> bool {
        self.opts.cg.instrument_coverage() == InstrumentCoverage::ExceptUnusedFunctions
    }

    pub fn is_sanitizer_cfi_enabled(&self) -> bool {
        self.opts.unstable_opts.sanitizer.contains(SanitizerSet::CFI)
    }

    pub fn is_sanitizer_cfi_canonical_jump_tables_disabled(&self) -> bool {
        self.opts.unstable_opts.sanitizer_cfi_canonical_jump_tables == Some(false)
    }

    pub fn is_sanitizer_cfi_canonical_jump_tables_enabled(&self) -> bool {
        self.opts.unstable_opts.sanitizer_cfi_canonical_jump_tables == Some(true)
    }

    pub fn is_sanitizer_cfi_generalize_pointers_enabled(&self) -> bool {
        self.opts.unstable_opts.sanitizer_cfi_generalize_pointers == Some(true)
    }

    pub fn is_sanitizer_cfi_normalize_integers_enabled(&self) -> bool {
        self.opts.unstable_opts.sanitizer_cfi_normalize_integers == Some(true)
    }

    pub fn is_sanitizer_kcfi_enabled(&self) -> bool {
        self.opts.unstable_opts.sanitizer.contains(SanitizerSet::KCFI)
    }

    pub fn is_split_lto_unit_enabled(&self) -> bool {
        self.opts.unstable_opts.split_lto_unit == Some(true)
    }

    /// Check whether this compile session and crate type use static crt.
    pub fn crt_static(&self, crate_type: Option<CrateType>) -> bool {
        if !self.target.crt_static_respected {
            // If the target does not opt in to crt-static support, use its default.
            return self.target.crt_static_default;
        }

        let requested_features = self.opts.cg.target_feature.split(',');
        let found_negative = requested_features.clone().any(|r| r == "-crt-static");
        let found_positive = requested_features.clone().any(|r| r == "+crt-static");

        // JUSTIFICATION: necessary use of crate_types directly (see FIXME below)
        #[allow(rustc::bad_opt_access)]
        if found_positive || found_negative {
            found_positive
        } else if crate_type == Some(CrateType::ProcMacro)
            || crate_type == None && self.opts.crate_types.contains(&CrateType::ProcMacro)
        {
            // FIXME: When crate_type is not available,
            // we use compiler options to determine the crate_type.
            // We can't check `#![crate_type = "proc-macro"]` here.
            false
        } else {
            self.target.crt_static_default
        }
    }

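    // Example of how the result above is typically influenced from the command line
    // (illustrative invocations only):
    //
    //     rustc -C target-feature=+crt-static ...   // request a statically linked CRT
    //     rustc -C target-feature=-crt-static ...   // request a dynamically linked CRT
    //
    // On targets where `crt_static_respected` is false, both are ignored and the
    // target default wins.
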
    pub fn is_wasi_reactor(&self) -> bool {
        self.target.options.os == "wasi"
            && matches!(
                self.opts.unstable_opts.wasi_exec_model,
                Some(config::WasiExecModel::Reactor)
            )
    }

    /// Returns `true` if the target can use the current split debuginfo configuration.
    pub fn target_can_use_split_dwarf(&self) -> bool {
        self.target.debuginfo_kind == DebuginfoKind::Dwarf
    }

    pub fn generate_proc_macro_decls_symbol(&self, stable_crate_id: StableCrateId) -> String {
        format!("__rustc_proc_macro_decls_{:08x}__", stable_crate_id.as_u64())
    }

    pub fn target_filesearch(&self, kind: PathKind) -> filesearch::FileSearch<'_> {
        filesearch::FileSearch::new(
            &self.sysroot,
            self.opts.target_triple.triple(),
            &self.opts.search_paths,
            &self.target_tlib_path,
            kind,
        )
    }

    pub fn host_filesearch(&self, kind: PathKind) -> filesearch::FileSearch<'_> {
        filesearch::FileSearch::new(
            &self.sysroot,
            config::host_triple(),
            &self.opts.search_paths,
            &self.host_tlib_path,
            kind,
        )
    }

    /// Returns a list of directories where target-specific tool binaries are located.
    pub fn get_tools_search_paths(&self, self_contained: bool) -> Vec<PathBuf> {
        let rustlib_path = rustc_target::target_rustlib_path(&self.sysroot, config::host_triple());
        let p = PathBuf::from_iter([
            Path::new(&self.sysroot),
            Path::new(&rustlib_path),
            Path::new("bin"),
        ]);
        if self_contained { vec![p.clone(), p.join("self-contained")] } else { vec![p] }
    }

    pub fn init_incr_comp_session(&self, session_dir: PathBuf, lock_file: flock::Lock) {
        let mut incr_comp_session = self.incr_comp_session.borrow_mut();

        if let IncrCompSession::NotInitialized = *incr_comp_session {
        } else {
            panic!("Trying to initialize IncrCompSession `{:?}`", *incr_comp_session)
        }

        *incr_comp_session =
            IncrCompSession::Active { session_directory: session_dir, _lock_file: lock_file };
    }

    pub fn finalize_incr_comp_session(&self, new_directory_path: PathBuf) {
        let mut incr_comp_session = self.incr_comp_session.borrow_mut();

        if let IncrCompSession::Active { .. } = *incr_comp_session {
        } else {
            panic!("trying to finalize `IncrCompSession` `{:?}`", *incr_comp_session);
        }

        // Note: this will also drop the lock file, thus unlocking the directory.
        *incr_comp_session = IncrCompSession::Finalized { session_directory: new_directory_path };
    }

    pub fn mark_incr_comp_session_as_invalid(&self) {
        let mut incr_comp_session = self.incr_comp_session.borrow_mut();

        let session_directory = match *incr_comp_session {
            IncrCompSession::Active { ref session_directory, .. } => session_directory.clone(),
            IncrCompSession::InvalidBecauseOfErrors { .. } => return,
            _ => panic!("trying to invalidate `IncrCompSession` `{:?}`", *incr_comp_session),
        };

        // Note: this will also drop the lock file, thus unlocking the directory.
        *incr_comp_session = IncrCompSession::InvalidBecauseOfErrors { session_directory };
    }

    pub fn incr_comp_session_dir(&self) -> cell::Ref<'_, PathBuf> {
        let incr_comp_session = self.incr_comp_session.borrow();
        cell::Ref::map(incr_comp_session, |incr_comp_session| match *incr_comp_session {
            IncrCompSession::NotInitialized => panic!(
                "trying to get session directory from `IncrCompSession`: {:?}",
                *incr_comp_session,
            ),
            IncrCompSession::Active { ref session_directory, .. }
            | IncrCompSession::Finalized { ref session_directory }
            | IncrCompSession::InvalidBecauseOfErrors { ref session_directory } => {
                session_directory
            }
        })
    }

    pub fn incr_comp_session_dir_opt(&self) -> Option<cell::Ref<'_, PathBuf>> {
        self.opts.incremental.as_ref().map(|_| self.incr_comp_session_dir())
    }

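    // Sketch of the `IncrCompSession` state machine implied by the methods above
    // (derived from the transitions and panics in this file):
    //
    //     NotInitialized --init_incr_comp_session-------------> Active
    //     Active ---------finalize_incr_comp_session----------> Finalized
    //     Active ---------mark_incr_comp_session_as_invalid---> InvalidBecauseOfErrors
    //
    // `incr_comp_session_dir` may be queried in any state except `NotInitialized`.
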
    /// We want to know if we're allowed to do an optimization for crate foo from -z fuel=foo=n.
    /// This expends fuel if applicable, and records fuel if applicable.
    pub fn consider_optimizing(
        &self,
        get_crate_name: impl Fn() -> Symbol,
        msg: impl Fn() -> String,
    ) -> bool {
        let mut ret = true;
        if let Some((ref c, _)) = self.opts.unstable_opts.fuel {
            if c == get_crate_name().as_str() {
                assert_eq!(self.threads(), 1);
                let mut fuel = self.optimization_fuel.lock();
                ret = fuel.remaining != 0;
                if fuel.remaining == 0 && !fuel.out_of_fuel {
                    if self.dcx().can_emit_warnings() {
                        // We only call `msg` in case we can actually emit warnings.
                        // Otherwise, this could cause a `good_path_delayed_bug` to
                        // trigger (issue #79546).
                        self.dcx().emit_warning(errors::OptimisationFuelExhausted { msg: msg() });
                    }
                    fuel.out_of_fuel = true;
                } else if fuel.remaining > 0 {
                    fuel.remaining -= 1;
                }
            }
        }
        if let Some(ref c) = self.opts.unstable_opts.print_fuel {
            if c == get_crate_name().as_str() {
                assert_eq!(self.threads(), 1);
                self.print_fuel.fetch_add(1, SeqCst);
            }
        }
        ret
    }

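    // How optimization fuel is typically driven from the command line (illustrative
    // invocation; the crate name is a placeholder):
    //
    //     rustc -Z fuel=my_crate=10 ...
    //
    // Each `consider_optimizing` call for `my_crate` burns one unit of fuel while any
    // remains; once it runs out the method returns `false` and a single warning is
    // emitted. `-Z print-fuel=my_crate` instead counts how much fuel a build would use.
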
    /// Is this edition 2015?
    pub fn is_rust_2015(&self) -> bool {
        self.edition().is_rust_2015()
    }

    /// Are we allowed to use features from the Rust 2018 edition?
    pub fn at_least_rust_2018(&self) -> bool {
        self.edition().at_least_rust_2018()
    }

    /// Are we allowed to use features from the Rust 2021 edition?
    pub fn at_least_rust_2021(&self) -> bool {
        self.edition().at_least_rust_2021()
    }

    /// Are we allowed to use features from the Rust 2024 edition?
    pub fn at_least_rust_2024(&self) -> bool {
        self.edition().at_least_rust_2024()
    }

    /// Returns `true` if we should use the PLT for shared library calls.
    pub fn needs_plt(&self) -> bool {
        // Check if the current target usually wants PLT to be enabled.
        // The user can use the command line flag to override it.
        let want_plt = self.target.plt_by_default;

        let dbg_opts = &self.opts.unstable_opts;

        let relro_level = dbg_opts.relro_level.unwrap_or(self.target.relro_level);

        // Only enable this optimization by default if full relro is also enabled.
        // In this case, lazy binding was already unavailable, so nothing is lost.
        // This also ensures `-Wl,-z,now` is supported by the linker.
        let full_relro = RelroLevel::Full == relro_level;

        // If the user didn't explicitly force the PLT on or off, use it unless the
        // target doesn't want it by default and full relro makes eliding it safe.
        dbg_opts.plt.unwrap_or(want_plt || !full_relro)
    }

    /// Checks if LLVM lifetime markers should be emitted.
    pub fn emit_lifetime_markers(&self) -> bool {
        self.opts.optimize != config::OptLevel::No
        // AddressSanitizer and KernelAddressSanitizer use lifetimes to detect use after scope bugs.
        // MemorySanitizer uses lifetimes to detect use of uninitialized stack variables.
        // HWAddressSanitizer will use lifetimes to detect use after scope bugs in the future.
        || self.opts.unstable_opts.sanitizer.intersects(SanitizerSet::ADDRESS | SanitizerSet::KERNELADDRESS | SanitizerSet::MEMORY | SanitizerSet::HWADDRESS)
    }

    pub fn diagnostic_width(&self) -> usize {
        let default_column_width = 140;
        if let Some(width) = self.opts.diagnostic_width {
            width
        } else if self.opts.unstable_opts.ui_testing {
            default_column_width
        } else {
            termize::dimensions().map_or(default_column_width, |(w, _)| w)
        }
    }

    /// Whether the default visibility of symbols should be "hidden" rather than "default".
    pub fn default_hidden_visibility(&self) -> bool {
        self.opts
            .unstable_opts
            .default_hidden_visibility
            .unwrap_or(self.target.options.default_hidden_visibility)
    }
}

// JUSTIFICATION: defn of the suggested wrapper fns
#[allow(rustc::bad_opt_access)]
impl Session {
    pub fn verbose_internals(&self) -> bool {
        self.opts.unstable_opts.verbose_internals
    }

    pub fn print_llvm_stats(&self) -> bool {
        self.opts.unstable_opts.print_codegen_stats
    }

    pub fn verify_llvm_ir(&self) -> bool {
        self.opts.unstable_opts.verify_llvm_ir || option_env!("RUSTC_VERIFY_LLVM_IR").is_some()
    }

    pub fn binary_dep_depinfo(&self) -> bool {
        self.opts.unstable_opts.binary_dep_depinfo
    }

    pub fn mir_opt_level(&self) -> usize {
        self.opts
            .unstable_opts
            .mir_opt_level
            .unwrap_or_else(|| if self.opts.optimize != OptLevel::No { 2 } else { 1 })
    }

    /// Calculates the flavor of LTO to use for this compilation.
    pub fn lto(&self) -> config::Lto {
        // If our target has codegen requirements, ignore the command line.
        if self.target.requires_lto {
            return config::Lto::Fat;
        }

        // If the user specified something, return that. If they only said `-C
        // lto` and we've for whatever reason forced off ThinLTO via the CLI,
        // then ensure we can't use a ThinLTO.
        match self.opts.cg.lto {
            config::LtoCli::Unspecified => {
                // The compiler was invoked without the `-Clto` flag. Fall
                // through to the default handling.
            }
            config::LtoCli::No => {
                // The user explicitly opted out of any kind of LTO.
                return config::Lto::No;
            }
            config::LtoCli::Yes | config::LtoCli::Fat | config::LtoCli::NoParam => {
                // All of these mean fat LTO.
                return config::Lto::Fat;
            }
            config::LtoCli::Thin => {
                // The user explicitly asked for ThinLTO.
                return config::Lto::Thin;
            }
        }

        // Ok, at this point the target doesn't require anything and the user
        // hasn't asked for anything. Our next decision is whether or not
        // we enable "auto" ThinLTO, where we use multiple codegen units and
        // then do ThinLTO over those codegen units. The logic below will
        // either return `No` or `ThinLocal`.

        // If processing command line options determined that we're incompatible
        // with ThinLTO (e.g., `-C lto --emit llvm-ir`) then return that option.
        if self.opts.cli_forced_local_thinlto_off {
            return config::Lto::No;
        }

        // If `-Z thinlto` was specified, process that, but note that this is mostly
        // a deprecated option now that `-C lto=thin` exists.
        if let Some(enabled) = self.opts.unstable_opts.thinlto {
            if enabled {
                return config::Lto::ThinLocal;
            } else {
                return config::Lto::No;
            }
        }

        // If there's only one codegen unit and LTO isn't enabled then there's
        // no need for ThinLTO, so just return `No`.
        if self.codegen_units().as_usize() == 1 {
            return config::Lto::No;
        }

        // Now we're in "defaults" territory. By default we enable ThinLTO for
        // optimized compiles (anything greater than O0).
        match self.opts.optimize {
            config::OptLevel::No => config::Lto::No,
            _ => config::Lto::ThinLocal,
        }
    }

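    // Summary of the decision order implemented above:
    //
    //     1. target requires LTO              -> Fat
    //     2. explicit `-C lto=...`            -> No / Fat / Thin as requested
    //     3. CLI forced local ThinLTO off     -> No
    //     4. deprecated `-Z thinlto=yes|no`   -> ThinLocal / No
    //     5. a single codegen unit            -> No
    //     6. otherwise                        -> No at -O0, ThinLocal when optimizing
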
    /// Returns the panic strategy for this compile session. If the user explicitly selected one
    /// using `-C panic`, use that, otherwise use the panic strategy defined by the target.
    pub fn panic_strategy(&self) -> PanicStrategy {
        self.opts.cg.panic.unwrap_or(self.target.panic_strategy)
    }

    pub fn fewer_names(&self) -> bool {
        if let Some(fewer_names) = self.opts.unstable_opts.fewer_names {
            fewer_names
        } else {
            let more_names = self.opts.output_types.contains_key(&OutputType::LlvmAssembly)
                || self.opts.output_types.contains_key(&OutputType::Bitcode)
                // AddressSanitizer and MemorySanitizer use alloca names when reporting an issue.
                || self.opts.unstable_opts.sanitizer.intersects(SanitizerSet::ADDRESS | SanitizerSet::MEMORY);
            !more_names
        }
    }

    pub fn unstable_options(&self) -> bool {
        self.opts.unstable_opts.unstable_options
    }

    pub fn is_nightly_build(&self) -> bool {
        self.opts.unstable_features.is_nightly_build()
    }

    pub fn overflow_checks(&self) -> bool {
        self.opts.cg.overflow_checks.unwrap_or(self.opts.debug_assertions)
    }

    pub fn relocation_model(&self) -> RelocModel {
        self.opts.cg.relocation_model.unwrap_or(self.target.relocation_model)
    }

    pub fn code_model(&self) -> Option<CodeModel> {
        self.opts.cg.code_model.or(self.target.code_model)
    }

    pub fn tls_model(&self) -> TlsModel {
        self.opts.unstable_opts.tls_model.unwrap_or(self.target.tls_model)
    }

    pub fn split_debuginfo(&self) -> SplitDebuginfo {
        self.opts.cg.split_debuginfo.unwrap_or(self.target.split_debuginfo)
    }

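    // The level consulted below comes from the unstable stack-protector flag, e.g.
    // (illustrative invocation):
    //
    //     rustc -Z stack-protector=strong ...
    //
    // mirroring clang's `-fstack-protector*` family. Targets that don't support stack
    // protectors always report `StackProtector::None`.
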
pub fn stack_protector(&self) -> StackProtector {
|
|
|
|
if self.target.options.supports_stack_protector {
|
2022-07-06 12:44:47 +00:00
|
|
|
self.opts.unstable_opts.stack_protector
|
        } else {
            StackProtector::None
        }
    }
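
    // Hedged, illustrative sketch (not used anywhere in rustc; the function name is
    // hypothetical): the same decision as `stack_protector` above, written over plain
    // inputs so the "fall back to `None` when the target has no support" behaviour is
    // easy to see in isolation.
    #[allow(dead_code)]
    fn stack_protector_for(
        requested: StackProtector,
        target_supports_stack_protector: bool,
    ) -> StackProtector {
        if target_supports_stack_protector { requested } else { StackProtector::None }
    }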
Add Option to Force Unwind Tables
When panic != unwind, `nounwind` is added to all functions for a target.
This can cause issues when a panic happens with RUST_BACKTRACE=1, as
there needs to be a way to reconstruct the backtrace. There are three
possible sources of this information: forcing frame pointers (for which
an option exists already), debug info (for which an option exists), or
unwind tables.
Especially for embedded devices, forcing frame pointers can have code
size overheads (RISC-V sees ~10% overheads, ARM sees ~2-3% overheads).
In code, it can be the case that debug info is not kept, so it is useful
to provide this third option, unwind tables, that users can use to
reconstruct the call stack. Reconstructing this stack is harder than
with frame pointers, but it is still possible.
This commit adds a compiler option which allows a user to force the
addition of unwind tables. Unwind tables cannot be disabled on targets
that require them for correctness, or when using `-C panic=unwind`.

    pub fn must_emit_unwind_tables(&self) -> bool {
        // This is used to control the emission of the `uwtable` attribute on
        // LLVM functions.
        //
        // Unwind tables are needed when compiling with `-C panic=unwind`, but
        // LLVM won't omit unwind tables unless the function is also marked as
        // `nounwind`, so users are allowed to disable `uwtable` emission.
        // Historically rustc always emits `uwtable` attributes by default, so
        // even though they can be disabled, they're still emitted by default.
        //
        // On some targets (including windows), however, exceptions include
        // other events such as illegal instructions, segfaults, etc. This means
        // that on Windows we end up still needing unwind tables even if the
        // `-C panic=abort` flag is passed.
        //
        // You can also find more info on why Windows needs unwind tables in:
        //      https://bugzilla.mozilla.org/show_bug.cgi?id=1302078
        //
        // If a target requires unwind tables, then they must be emitted.
        // Otherwise, we can defer to the `-C force-unwind-tables=<yes/no>`
        // value, if it is provided, or disable them, if not.
        self.target.requires_uwtable
            || self.opts.cg.force_unwind_tables.unwrap_or(
                self.panic_strategy() == PanicStrategy::Unwind || self.target.default_uwtable,
            )
    }
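
    // Hedged sketch (hypothetical helper, not rustc API): the same precedence as
    // `must_emit_unwind_tables` above, over plain booleans. A target that requires unwind
    // tables always wins; otherwise an explicit `-C force-unwind-tables` value wins;
    // otherwise we emit them when panics unwind or the target defaults to `uwtable`.
    #[allow(dead_code)]
    fn must_emit_unwind_tables_for(
        target_requires_uwtable: bool,
        force_unwind_tables: Option<bool>,
        panic_unwind: bool,
        target_default_uwtable: bool,
    ) -> bool {
        target_requires_uwtable
            || force_unwind_tables.unwrap_or(panic_unwind || target_default_uwtable)
    }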

    /// Returns the number of query threads that should be used for this
    /// compilation
    #[inline]
    pub fn threads(&self) -> usize {
        self.opts.unstable_opts.threads
    }

    /// Returns the number of codegen units that should be used for this
    /// compilation
Introduce a minimum CGU size in non-incremental builds.
Because tiny CGUs make compilation less efficient *and* result in worse
generated code.
We don't do this when the number of CGUs is explicitly given, because
there are times when the requested number is very important, as
described in some comments within the commit. So the commit also
introduces a `CodegenUnits` type that distinguishes between default
values and user-specified values.
This change has a roughly neutral effect on walltimes across the
rustc-perf benchmarks; there are some speedups and some slowdowns. But
it has significant wins for most other metrics on numerous benchmarks,
including instruction counts, cycles, binary size, and max-rss. It also
reduces parallelism, which is good for reducing jobserver competition
when multiple rustc processes are running at the same time. It's smaller
benchmarks that benefit the most; larger benchmarks already have CGUs
that are all larger than the minimum size.
Here are some example before/after CGU sizes for opt builds.
- html5ever
- CGUs: 16, mean size: 1196.1, sizes: [3908, 2992, 1706, 1652, 1572,
1136, 1045, 948, 946, 938, 579, 471, 443, 327, 286, 189]
- CGUs: 4, mean size: 4396.0, sizes: [6706, 3908, 3490, 3480]
- libc
- CGUs: 12, mean size: 35.3, sizes: [163, 93, 58, 53, 37, 8, 2 (x6)]
- CGUs: 1, mean size: 424.0, sizes: [424]
- tt-muncher
- CGUs: 5, mean size: 1819.4, sizes: [8508, 350, 198, 34, 7]
- CGUs: 1, mean size: 9075.0, sizes: [9075]
Note that CGUs of size 100,000+ aren't unusual in larger programs.

    pub fn codegen_units(&self) -> CodegenUnits {
        if let Some(n) = self.opts.cli_forced_codegen_units {
            return CodegenUnits::User(n);
        }
        if let Some(n) = self.target.default_codegen_units {
            return CodegenUnits::Default(n as usize);
        }

        // If incremental compilation is turned on, we default to a high number
        // of codegen units in order to reduce the "collateral damage" small
        // changes cause.
        if self.opts.incremental.is_some() {
            return CodegenUnits::Default(256);
        }

        // Why is 16 codegen units the default all the time?
        //
        // The main reason for enabling multiple codegen units by default is to
        // leverage the ability for the codegen backend to do codegen and
        // optimization in parallel. This allows us, especially for large crates, to
        // make good use of all available resources on the machine once we've
        // hit that stage of compilation. Large crates especially then often
        // take a long time in codegen/optimization and this helps us amortize that
        // cost.
        //
        // Note that a high number here doesn't mean that we'll be spawning a
        // large number of threads in parallel. The backend of rustc contains
        // global rate limiting through the `jobserver` crate so we'll never
        // overload the system with too much work, but rather we'll only be
        // optimizing when we're otherwise cooperating with other instances of
        // rustc.
        //
        // Rather a high number here means that we should be able to keep a lot
        // of idle cpus busy. By ensuring that no codegen unit takes *too* long
        // to build we'll be guaranteed that all cpus will finish pretty closely
        // to one another and we should make relatively optimal use of system
        // resources
        //
        // Note that the main cost of codegen units is that it prevents LLVM
        // from inlining across codegen units. Users in general don't have a lot
        // of control over how codegen units are split up so it's our job in the
        // compiler to ensure that undue performance isn't lost when using
        // codegen units (aka we can't require everyone to slap `#[inline]` on
        // everything).
        //
        // If we're compiling at `-O0` then the number doesn't really matter too
        // much because performance doesn't matter and inlining is ok to lose.
        // In debug mode we just want to try to guarantee that no cpu is stuck
        // doing work that could otherwise be farmed to others.
        //
        // In release mode, however (O1 and above) performance does indeed
        // matter! To recover the loss in performance due to inlining we'll be
        // enabling ThinLTO by default (the function for which is just below).
        // This will ensure that we recover any inlining wins we otherwise lost
        // through codegen unit partitioning.
        //
        // ---
        //
        // Ok that's a lot of words but the basic tl;dr; is that we want a high
        // number here -- but not too high. Additionally we're "safe" to have it
        // always at the same number at all optimization levels.
        //
        // As a result 16 was chosen here! Mostly because it was a power of 2
        // and most benchmarks agreed it was roughly a local optimum. Not very
        // scientific.
        CodegenUnits::Default(16)
    }
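
    // Hedged sketch (hypothetical helper; the constant is illustrative, not the value rustc
    // uses): the commit message above introduces a minimum CGU size that applies only when
    // the CGU count is a compiler-chosen default, never when the user asked for an exact
    // number. That policy looks roughly like this.
    #[allow(dead_code)]
    fn clamp_default_cgus_to_minimum_size(
        cgus: CodegenUnits,
        estimated_total_size: usize,
    ) -> CodegenUnits {
        const ILLUSTRATIVE_MIN_CGU_SIZE: usize = 1_000;
        match cgus {
            // A user-specified count is always respected.
            CodegenUnits::User(n) => CodegenUnits::User(n),
            // A default count may be lowered so that no CGU falls below the minimum size.
            CodegenUnits::Default(n) => {
                let max_cgus = (estimated_total_size / ILLUSTRATIVE_MIN_CGU_SIZE).max(1);
                CodegenUnits::Default(n.min(max_cgus))
            }
        }
    }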

    pub fn teach(&self, code: &DiagnosticId) -> bool {
        self.opts.unstable_opts.teach && self.dcx().must_teach(code)
    }

    pub fn edition(&self) -> Edition {
        self.opts.edition
    }

    pub fn link_dead_code(&self) -> bool {
        self.opts.cg.link_dead_code.unwrap_or(false)
    }

    pub fn should_prefer_remapped_for_codegen(&self) -> bool {
        // bail out, if any of the requested crate types aren't:
        // "compiled executables or libraries"
        for crate_type in &self.opts.crate_types {
            match crate_type {
                CrateType::Executable
                | CrateType::Dylib
                | CrateType::Rlib
                | CrateType::Staticlib
                | CrateType::Cdylib => continue,
                CrateType::ProcMacro => return false,
            }
        }

        let has_split_debuginfo = match self.split_debuginfo() {
            SplitDebuginfo::Off => false,
            SplitDebuginfo::Packed => true,
            SplitDebuginfo::Unpacked => true,
        };

        let remap_path_scopes = &self.opts.unstable_opts.remap_path_scope;
        let mut prefer_remapped = false;

        if remap_path_scopes.contains(RemapPathScopeComponents::UNSPLIT_DEBUGINFO) {
            prefer_remapped |= !has_split_debuginfo;
        }

        if remap_path_scopes.contains(RemapPathScopeComponents::SPLIT_DEBUGINFO) {
            prefer_remapped |= has_split_debuginfo;
        }

        prefer_remapped
    }

    pub fn should_prefer_remapped_for_split_debuginfo_paths(&self) -> bool {
        let has_split_debuginfo = match self.split_debuginfo() {
            SplitDebuginfo::Off => false,
            SplitDebuginfo::Packed | SplitDebuginfo::Unpacked => true,
        };

        self.opts
            .unstable_opts
            .remap_path_scope
            .contains(RemapPathScopeComponents::SPLIT_DEBUGINFO_PATH)
            && has_split_debuginfo
    }
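
    // Hedged sketch (hypothetical helper): the scope logic above reduced to plain booleans.
    // When split debuginfo is in use, the SPLIT_DEBUGINFO scope decides whether remapped
    // paths are preferred; without split debuginfo, the UNSPLIT_DEBUGINFO scope decides.
    #[allow(dead_code)]
    fn prefer_remapped_paths_for(
        has_split_debuginfo: bool,
        scope_has_unsplit_debuginfo: bool,
        scope_has_split_debuginfo: bool,
    ) -> bool {
        if has_split_debuginfo { scope_has_split_debuginfo } else { scope_has_unsplit_debuginfo }
    }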

}

// JUSTIFICATION: part of session construction
#[allow(rustc::bad_opt_access)]
fn default_emitter(
    sopts: &config::Options,
    registry: rustc_errors::registry::Registry,
    source_map: Lrc<SourceMap>,
    bundle: Option<Lrc<FluentBundle>>,
    fallback_bundle: LazyFallbackBundle,
) -> Box<DynEmitter> {
    let macro_backtrace = sopts.unstable_opts.macro_backtrace;
    let track_diagnostics = sopts.unstable_opts.track_diagnostics;
    let terminal_url = match sopts.unstable_opts.terminal_urls {
        TerminalUrl::Auto => {
            match (std::env::var("COLORTERM").as_deref(), std::env::var("TERM").as_deref()) {
                (Ok("truecolor"), Ok("xterm-256color"))
                    if sopts.unstable_features.is_nightly_build() =>
                {
                    TerminalUrl::Yes
                }
                _ => TerminalUrl::No,
            }
        }
        t => t,
    };
    match sopts.error_format {
        config::ErrorOutputType::HumanReadable(kind) => {
            let (short, color_config) = kind.unzip();

            if let HumanReadableErrorType::AnnotateSnippet(_) = kind {
                let emitter = AnnotateSnippetEmitterWriter::new(
                    Some(source_map),
                    bundle,
                    fallback_bundle,
                    short,
                    macro_backtrace,
                );
                Box::new(emitter.ui_testing(sopts.unstable_opts.ui_testing))
            } else {
                let emitter = HumanEmitter::stderr(color_config, fallback_bundle)
                    .fluent_bundle(bundle)
                    .sm(Some(source_map))
                    .short_message(short)
                    .teach(sopts.unstable_opts.teach)
                    .diagnostic_width(sopts.diagnostic_width)
                    .macro_backtrace(macro_backtrace)
                    .track_diagnostics(track_diagnostics)
                    .terminal_url(terminal_url)
                    .ignored_directories_in_source_blocks(
                        sopts.unstable_opts.ignore_directory_in_diagnostics_source_blocks.clone(),
                    );
                Box::new(emitter.ui_testing(sopts.unstable_opts.ui_testing))
            }
        }
        config::ErrorOutputType::Json { pretty, json_rendered } => Box::new(
            JsonEmitter::stderr(
                Some(registry),
                source_map,
                bundle,
                fallback_bundle,
                pretty,
                json_rendered,
                sopts.diagnostic_width,
                macro_backtrace,
                track_diagnostics,
                terminal_url,
            )
            .ui_testing(sopts.unstable_opts.ui_testing)
            .ignored_directories_in_source_blocks(
                sopts.unstable_opts.ignore_directory_in_diagnostics_source_blocks.clone(),
            ),
        ),
    }
}
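
// Hedged, standalone sketch (hypothetical helper, not rustc API): the `TerminalUrl::Auto`
// arm above only opts into clickable terminal links when the environment looks like a
// truecolor xterm (and, in rustc, additionally only on nightly builds). The same
// environment check, isolated so it can be exercised directly:
#[allow(dead_code)]
fn auto_detect_terminal_urls() -> bool {
    matches!(
        (std::env::var("COLORTERM").as_deref(), std::env::var("TERM").as_deref()),
        (Ok("truecolor"), Ok("xterm-256color"))
    )
}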

// JUSTIFICATION: literally session construction
#[allow(rustc::bad_opt_access)]
pub fn build_session(
    early_dcx: EarlyDiagCtxt,
    sopts: config::Options,
    io: CompilerIO,
    bundle: Option<Lrc<rustc_errors::FluentBundle>>,
    registry: rustc_errors::registry::Registry,
    fluent_resources: Vec<&'static str>,
    driver_lint_caps: FxHashMap<lint::LintId, lint::Level>,
    file_loader: Option<Box<dyn FileLoader + Send + Sync + 'static>>,
    target_override: Option<Target>,
    cfg_version: &'static str,
    ice_file: Option<PathBuf>,
    using_internal_features: Arc<AtomicBool>,
    expanded_args: Vec<String>,
) -> Session {
    // FIXME: This is not general enough to make the warning lint completely override
    // normal diagnostic warnings, since the warning lint can also be denied and changed
    // later via the source code.
    let warnings_allow = sopts
        .lint_opts
        .iter()
        .rfind(|&(key, _)| *key == "warnings")
        .is_some_and(|&(_, level)| level == lint::Allow);
    let cap_lints_allow = sopts.lint_cap.is_some_and(|cap| cap == lint::Allow);
    let can_emit_warnings = !(warnings_allow || cap_lints_allow);

    let sysroot = match &sopts.maybe_sysroot {
        Some(sysroot) => sysroot.clone(),
        None => filesearch::get_or_default_sysroot().expect("Failed finding sysroot"),
    };

    let target_cfg = config::build_target_config(&early_dcx, &sopts, target_override, &sysroot);
    let host_triple = TargetTriple::from_triple(config::host_triple());
    let (host, target_warnings) = Target::search(&host_triple, &sysroot).unwrap_or_else(|e| {
        early_dcx.early_fatal(format!("Error loading host specification: {e}"))
    });
    for warning in target_warnings.warning_messages() {
        early_dcx.early_warn(warning)
    }

    let loader = file_loader.unwrap_or_else(|| Box::new(RealFileLoader));
    let hash_kind = sopts.unstable_opts.src_hash_algorithm.unwrap_or_else(|| {
        if target_cfg.is_like_msvc {
            SourceFileHashAlgorithm::Sha256
        } else {
            SourceFileHashAlgorithm::Md5
        }
    });
    let source_map = Lrc::new(SourceMap::with_file_loader_and_hash_kind(
        loader,
        sopts.file_path_mapping(),
        hash_kind,
    ));

    let fallback_bundle = fallback_fluent_bundle(
        fluent_resources,
        sopts.unstable_opts.translate_directionality_markers,
    );
    let emitter = default_emitter(&sopts, registry, source_map.clone(), bundle, fallback_bundle);

    let mut dcx = DiagCtxt::with_emitter(emitter)
        .with_flags(sopts.unstable_opts.dcx_flags(can_emit_warnings));
    if let Some(ice_file) = ice_file {
        dcx = dcx.with_ice_file(ice_file);
    }

    // Now that the proper handler has been constructed, drop early_dcx to
    // prevent accidental use.
    drop(early_dcx);

    let self_profiler = if let SwitchWithOptPath::Enabled(ref d) = sopts.unstable_opts.self_profile
    {
        let directory =
            if let Some(ref directory) = d { directory } else { std::path::Path::new(".") };

        let profiler = SelfProfiler::new(
            directory,
            sopts.crate_name.as_deref(),
            sopts.unstable_opts.self_profile_events.as_deref(),
            &sopts.unstable_opts.self_profile_counter,
        );
        match profiler {
            Ok(profiler) => Some(Arc::new(profiler)),
            Err(e) => {
                dcx.emit_warning(errors::FailedToCreateProfiler { err: e.to_string() });
                None
            }
        }
    } else {
        None
    };

    let mut parse_sess = ParseSess::with_dcx(dcx, source_map);
    parse_sess.assume_incomplete_release = sopts.unstable_opts.assume_incomplete_release;

    let host_triple = config::host_triple();
    let target_triple = sopts.target_triple.triple();
    let host_tlib_path = Lrc::new(SearchPath::from_sysroot_and_triple(&sysroot, host_triple));
    let target_tlib_path = if host_triple == target_triple {
        // Use the same `SearchPath` if host and target triple are identical to avoid unnecessary
        // rescanning of the target lib path and an unnecessary allocation.
        host_tlib_path.clone()
    } else {
        Lrc::new(SearchPath::from_sysroot_and_triple(&sysroot, target_triple))
    };

    let optimization_fuel = Lock::new(OptimizationFuel {
        remaining: sopts.unstable_opts.fuel.as_ref().map_or(0, |&(_, i)| i),
        out_of_fuel: false,
    });
    let print_fuel = AtomicU64::new(0);

    let prof = SelfProfilerRef::new(
        self_profiler,
        sopts.unstable_opts.time_passes.then(|| sopts.unstable_opts.time_passes_format),
    );

    let ctfe_backtrace = Lock::new(match env::var("RUSTC_CTFE_BACKTRACE") {
        Ok(ref val) if val == "immediate" => CtfeBacktrace::Immediate,
        Ok(ref val) if val != "0" => CtfeBacktrace::Capture,
        _ => CtfeBacktrace::Disabled,
    });

    let asm_arch =
        if target_cfg.allow_asm { InlineAsmArch::from_str(&target_cfg.arch).ok() } else { None };

    let sess = Session {
        target: target_cfg,
        host,
        opts: sopts,
        host_tlib_path,
        target_tlib_path,
        parse_sess,
        sysroot,
        io,
        incr_comp_session: OneThread::new(RefCell::new(IncrCompSession::NotInitialized)),
        prof,
        code_stats: Default::default(),
        optimization_fuel,
        print_fuel,
        jobserver: jobserver::client(),
        lint_store: None,
        registered_lints: false,
        driver_lint_caps,
        ctfe_backtrace,
        miri_unleashed_features: Lock::new(Default::default()),
        asm_arch,
        target_features: Default::default(),
        unstable_target_features: Default::default(),
        cfg_version,
        using_internal_features,
        expanded_args,
    };

    validate_commandline_args_with_session_available(&sess);

    sess
}

/// Validate command line arguments with a `Session`.
///
/// If it is useful to have a Session available already for validating a commandline argument, you
/// can do so here.
// JUSTIFICATION: needs to access args to validate them
#[allow(rustc::bad_opt_access)]
fn validate_commandline_args_with_session_available(sess: &Session) {
    // Since we don't know if code in an rlib will be linked to statically or
    // dynamically downstream, rustc generates `__imp_` symbols that help linkers
    // on Windows deal with this lack of knowledge (#27438). Unfortunately,
    // these manually generated symbols confuse LLD when it tries to merge
    // bitcode during ThinLTO. Therefore we disallow dynamic linking on Windows
    // when compiling for LLD ThinLTO. This way we can validly just not generate
    // the `dllimport` attributes and `__imp_` symbols in that case.
    if sess.opts.cg.linker_plugin_lto.enabled()
        && sess.opts.cg.prefer_dynamic
        && sess.target.is_like_windows
    {
        sess.dcx().emit_err(errors::LinkerPluginToWindowsNotSupported);
    }

    // Make sure that any given profiling data actually exists so LLVM can't
    // decide to silently skip PGO.
    if let Some(ref path) = sess.opts.cg.profile_use {
        if !path.exists() {
            sess.dcx().emit_err(errors::ProfileUseFileDoesNotExist { path });
        }
    }

    // Do the same for sample profile data.
    if let Some(ref path) = sess.opts.unstable_opts.profile_sample_use {
        if !path.exists() {
            sess.dcx().emit_err(errors::ProfileSampleUseFileDoesNotExist { path });
        }
    }

    // Unwind tables cannot be disabled if the target requires them.
    if let Some(include_uwtables) = sess.opts.cg.force_unwind_tables {
        if sess.target.requires_uwtable && !include_uwtables {
            sess.dcx().emit_err(errors::TargetRequiresUnwindTables);
        }
    }

    // Sanitizers can only be used on platforms that we know have working sanitizer codegen.
    let supported_sanitizers = sess.target.options.supported_sanitizers;
    let unsupported_sanitizers = sess.opts.unstable_opts.sanitizer - supported_sanitizers;
    match unsupported_sanitizers.into_iter().count() {
        0 => {}
        1 => {
            sess.dcx()
                .emit_err(errors::SanitizerNotSupported { us: unsupported_sanitizers.to_string() });
        }
        _ => {
            sess.dcx().emit_err(errors::SanitizersNotSupported {
                us: unsupported_sanitizers.to_string(),
            });
        }
    }

    // Cannot mix and match sanitizers.
    let mut sanitizer_iter = sess.opts.unstable_opts.sanitizer.into_iter();
    if let (Some(first), Some(second)) = (sanitizer_iter.next(), sanitizer_iter.next()) {
        sess.dcx().emit_err(errors::CannotMixAndMatchSanitizers {
            first: first.to_string(),
            second: second.to_string(),
        });
    }
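
    // Hedged, standalone sketch (hypothetical helper, not rustc API): the "cannot mix and
    // match" rule above is simply "at most one sanitizer may be enabled". The same check,
    // phrased over an arbitrary iterator of enabled sanitizer names:
    #[allow(dead_code)]
    fn first_conflicting_pair<'a>(
        enabled: impl IntoIterator<Item = &'a str>,
    ) -> Option<(&'a str, &'a str)> {
        let mut iter = enabled.into_iter();
        match (iter.next(), iter.next()) {
            (Some(first), Some(second)) => Some((first, second)),
            _ => None,
        }
    }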

    // Cannot enable crt-static with sanitizers on Linux
    if sess.crt_static(None) && !sess.opts.unstable_opts.sanitizer.is_empty() {
        sess.dcx().emit_err(errors::CannotEnableCrtStaticLinux);
    }

    // LLVM CFI requires LTO.
    if sess.is_sanitizer_cfi_enabled()
        && !(sess.lto() == config::Lto::Fat || sess.opts.cg.linker_plugin_lto.enabled())
    {
        sess.dcx().emit_err(errors::SanitizerCfiRequiresLto);
    }

    // LLVM CFI using rustc LTO requires a single codegen unit.
    if sess.is_sanitizer_cfi_enabled()
        && sess.lto() == config::Lto::Fat
        && !(sess.codegen_units().as_usize() == 1)
    {
        sess.dcx().emit_err(errors::SanitizerCfiRequiresSingleCodegenUnit);
    }

    // LLVM CFI is incompatible with LLVM KCFI.
    if sess.is_sanitizer_cfi_enabled() && sess.is_sanitizer_kcfi_enabled() {
        sess.dcx().emit_err(errors::CannotMixAndMatchSanitizers {
            first: "cfi".to_string(),
            second: "kcfi".to_string(),
        });
    }

    // Canonical jump tables requires CFI.
    if sess.is_sanitizer_cfi_canonical_jump_tables_disabled() {
        if !sess.is_sanitizer_cfi_enabled() {
            sess.dcx().emit_err(errors::SanitizerCfiCanonicalJumpTablesRequiresCfi);
        }
    }

    // LLVM CFI pointer generalization requires CFI or KCFI.
    if sess.is_sanitizer_cfi_generalize_pointers_enabled() {
        if !(sess.is_sanitizer_cfi_enabled() || sess.is_sanitizer_kcfi_enabled()) {
            sess.dcx().emit_err(errors::SanitizerCfiGeneralizePointersRequiresCfi);
        }
    }

    // LLVM CFI integer normalization requires CFI or KCFI.
    if sess.is_sanitizer_cfi_normalize_integers_enabled() {
        if !(sess.is_sanitizer_cfi_enabled() || sess.is_sanitizer_kcfi_enabled()) {
            sess.dcx().emit_err(errors::SanitizerCfiNormalizeIntegersRequiresCfi);
        }
    }

    // LTO unit splitting requires LTO.
    if sess.is_split_lto_unit_enabled()
        && !(sess.lto() == config::Lto::Fat
            || sess.lto() == config::Lto::Thin
            || sess.opts.cg.linker_plugin_lto.enabled())
    {
        sess.dcx().emit_err(errors::SplitLtoUnitRequiresLto);
    }

    // VFE requires LTO.
    if sess.lto() != config::Lto::Fat {
        if sess.opts.unstable_opts.virtual_function_elimination {
            sess.dcx().emit_err(errors::UnstableVirtualFunctionElimination);
        }
    }

    if sess.opts.unstable_opts.stack_protector != StackProtector::None {
        if !sess.target.options.supports_stack_protector {
            sess.dcx().emit_warning(errors::StackProtectorNotSupportedForTarget {
                stack_protector: sess.opts.unstable_opts.stack_protector,
                target_triple: &sess.opts.target_triple,
            });
add rustc option for using LLVM stack smash protection

LLVM has built-in heuristics for adding stack canaries to functions. These
heuristics can be selected with LLVM function attributes. This patch adds a
rustc option `-Z stack-protector={none,basic,strong,all}` which controls the use
of these attributes. This gives rustc the same stack smash protection support as
clang offers through options `-fno-stack-protector`, `-fstack-protector`,
`-fstack-protector-strong`, and `-fstack-protector-all`. The protection this can
offer is demonstrated in test/ui/abi/stack-protector.rs. This fills a gap in the
current list of rustc exploit mitigations
(https://doc.rust-lang.org/rustc/exploit-mitigations.html), originally discussed
in #15179.

Stack smash protection adds runtime overhead and is therefore still off by
default, but now users have the option to trade performance for security as they
see fit. An example use case is adding Rust code to an existing C/C++ code base
compiled with stack smash protection. Without the ability to add stack smash
protection to the Rust code, the code base artifacts could be exploitable in
ways not possible if the code base had remained pure C/C++.

Stack smash protection support is present in LLVM for almost all the current
tier 1/tier 2 targets: see
test/assembly/stack-protector/stack-protector-target-support.rs. The one
exception is nvptx64-nvidia-cuda. This patch follows clang's example and adds a
warning that is printed if stack smash protection is used with this target (see
test/ui/stack-protector/warn-stack-protector-unsupported.rs). Support for tier 3
targets has not been checked.

Since the heuristics are applied at the LLVM level, they are expected to add
stack smash protection to a fraction of functions comparable to C/C++. Some
experiments demonstrating how Rust code is affected by the different heuristics
can be found in test/assembly/stack-protector/stack-protector-heuristics-effect.rs.
There is potential for better heuristics using Rust-specific safety information.
For example, it might be reasonable to skip stack smash protection in functions
which transitively only use safe Rust code, or which use only a subset of
functions the user declares safe (such as anything under `std.*`). Such
alternative heuristics could be added at a later point.

LLVM also offers a "safestack" sanitizer as an alternative way to guard against
stack smashing (see #26612). This could possibly also be included as a
stack-protection heuristic. An alternative is to add it as a sanitizer (#39699);
this is what clang does: safestack is exposed with the option
`-fsanitize=safe-stack`.

The option is only supported by the LLVM backend, but, as with other codegen
options, it is visible in the main codegen option help menu. The heuristic names
"basic", "strong", and "all" are hopefully sufficiently generic to be usable in
other backends as well.

Reviewed-by: Nikita Popov <nikic@php.net>

Extra commits during review:
- [address-review] make the stack-protector option unstable
- [address-review] reduce detail level of stack-protector option help text
- [address-review] correct grammar in comment
- [address-review] use compiler flag to avoid merging functions in test
- [address-review] specify min LLVM version in fortanix stack-protector test
  Only for the Fortanix test, since this target specifically requests the
  `--x86-experimental-lvi-inline-asm-hardening` flag.
- [address-review] specify required LLVM components in stack-protector tests
- move stack protector option enum closer to other similar option enums
- rustc_interface/tests: sort debug option list in tracking hash test
- add an explicit `none` stack-protector option

Revert "set LLVM requirements for all stack protector support test revisions"
This reverts commit a49b74f92a4e7d701d6f6cf63d207a8aff2e0f68.
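
As a usage sketch (not part of the patch itself): on a nightly toolchain the flag is
spelled `-Z stack-protector=strong`, and a heuristic like `strong`, by analogy with
Clang's `-fstack-protector-strong`, is expected to instrument functions that keep
arrays or address-taken locals on the stack. The function below is the kind of frame
such a heuristic would typically protect with a canary; exactly which functions get
instrumented is decided by LLVM, not by this sketch.

fn copy_into_buffer(input: &[u8]) -> [u8; 64] {
    // A fixed-size stack buffer: a typical trigger for the `strong` heuristic.
    let mut buf = [0u8; 64];
    let n = input.len().min(buf.len());
    buf[..n].copy_from_slice(&input[..n]);
    buf
}

fn main() {
    // Build with: rustc -Z stack-protector=strong demo.rs   (nightly only)
    let out = copy_into_buffer(b"hello");
    assert_eq!(&out[..5], b"hello");
}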
        }
    }

    if sess.opts.unstable_opts.branch_protection.is_some() && sess.target.arch != "aarch64" {
        sess.dcx().emit_err(errors::BranchProtectionRequiresAArch64);
    }

    if let Some(dwarf_version) = sess.opts.unstable_opts.dwarf_version {
        if dwarf_version > 5 {
            sess.dcx().emit_err(errors::UnsupportedDwarfVersion { dwarf_version });
        }
    }

    if !sess.target.options.supported_split_debuginfo.contains(&sess.split_debuginfo())
        && !sess.opts.unstable_opts.unstable_options
    {
        sess.dcx()
            .emit_err(errors::SplitDebugInfoUnstablePlatform { debuginfo: sess.split_debuginfo() });
    }

    if sess.opts.unstable_opts.instrument_xray.is_some() && !sess.target.options.supports_xray {
        sess.dcx().emit_err(errors::InstrumentationNotSupported { us: "XRay".to_string() });
    }

    if let Some(flavor) = sess.opts.cg.linker_flavor {
        if let Some(compatible_list) = sess.target.linker_flavor.check_compatibility(flavor) {
            let flavor = flavor.desc();
            sess.dcx().emit_err(errors::IncompatibleLinkerFlavor { flavor, compatible_list });
        }
    }

    if sess.opts.unstable_opts.function_return != FunctionReturn::default() {
        if sess.target.arch != "x86" && sess.target.arch != "x86_64" {
            sess.dcx().emit_err(errors::FunctionReturnRequiresX86OrX8664);
        }
    }

    // The code model check applies to `thunk` and `thunk-extern`, but not `thunk-inline`, so it is
    // kept as a `match` to force a change if new ones are added, even if we currently only support
    // `thunk-extern` like Clang.
    match sess.opts.unstable_opts.function_return {
        FunctionReturn::Keep => (),
        FunctionReturn::ThunkExtern => {
            // FIXME: In principle, the inherited base LLVM target code model could be large,
            // but this only checks whether we were passed one explicitly (like Clang does).
            if let Some(code_model) = sess.code_model()
                && code_model == CodeModel::Large
            {
                sess.dcx().emit_err(errors::FunctionReturnThunkExternRequiresNonLargeCodeModel);
            }
        }
    }
}
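
Each check above follows the same shape: an (often unstable) option was requested, the
target or configuration cannot honor it, and a structured error is emitted against the
session so compilation fails through the normal diagnostic path instead of panicking.
A minimal, compiler-independent sketch of that shape, using purely hypothetical
stand-in types (these are not rustc APIs):

// Hypothetical stand-ins for the session, target, and flag types.
struct DemoTarget { arch: &'static str }
struct DemoOpts { branch_protection: Option<()> }
struct DemoSession { target: DemoTarget, opts: DemoOpts, errors: Vec<String> }

impl DemoSession {
    // Collect structured errors instead of aborting immediately.
    fn emit_err(&mut self, msg: impl Into<String>) {
        self.errors.push(msg.into());
    }
}

// The pattern: flag requested + target unsupported => structured error.
fn validate(sess: &mut DemoSession) {
    if sess.opts.branch_protection.is_some() && sess.target.arch != "aarch64" {
        sess.emit_err("-Z branch-protection is only supported on aarch64");
    }
}

fn main() {
    let mut sess = DemoSession {
        target: DemoTarget { arch: "x86_64" },
        opts: DemoOpts { branch_protection: Some(()) },
        errors: Vec::new(),
    };
    validate(&mut sess);
    assert_eq!(sess.errors.len(), 1);
}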

/// Holds data on the current incremental compilation session, if there is one.
#[derive(Debug)]
enum IncrCompSession {
    /// This is the state the session will be in until the incr. comp. dir is
    /// needed.
    NotInitialized,
    /// This is the state during which the session directory is private and can
    /// be modified. `_lock_file` is never directly used, but its presence
    /// alone has an effect, because the file will unlock when the session is
    /// dropped.
    Active { session_directory: PathBuf, _lock_file: flock::Lock },
    /// This is the state after the session directory has been finalized. In this
    /// state, the contents of the directory must not be modified any more.
    Finalized { session_directory: PathBuf },
    /// This is an error state that is reached when some compilation error has
    /// occurred. It indicates that the contents of the session directory must
    /// not be used, since they might be invalid.
    InvalidBecauseOfErrors { session_directory: PathBuf },
}
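
Taken together, the doc comments describe a one-way lifecycle: `NotInitialized` ->
`Active` -> `Finalized`, with `InvalidBecauseOfErrors` as the terminal error state. A
self-contained sketch of that state machine follows, leaving out the `flock::Lock`
that the real `Active` variant holds to keep the directory locked for the session's
lifetime (the sketch types are illustrative, not the compiler's):

use std::path::PathBuf;

// Simplified mirror of the states above; the real enum also carries a lock file.
#[derive(Debug)]
enum DemoIncrState {
    NotInitialized,
    Active { session_directory: PathBuf },
    Finalized { session_directory: PathBuf },
    InvalidBecauseOfErrors { session_directory: PathBuf },
}

impl DemoIncrState {
    // Finalization is only legal from the Active state; every other transition
    // out of Active is a bug in the caller.
    fn finalize(self) -> DemoIncrState {
        match self {
            DemoIncrState::Active { session_directory } => {
                DemoIncrState::Finalized { session_directory }
            }
            other => panic!("cannot finalize from {:?}", other),
        }
    }
}

fn main() {
    let state = DemoIncrState::Active { session_directory: PathBuf::from("incr-dir") };
    let state = state.finalize();
    println!("{:?}", state);
}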

/// A wrapper around a [`DiagCtxt`] that is used for early error emissions.
pub struct EarlyDiagCtxt {
    dcx: DiagCtxt,
}

impl EarlyDiagCtxt {
    pub fn new(output: ErrorOutputType) -> Self {
        let emitter = mk_emitter(output);
        Self { dcx: DiagCtxt::with_emitter(emitter) }
    }

    pub fn abort_if_errors(&self) {
        self.dcx.abort_if_errors()
    }

    /// Swap out the underlying dcx once we acquire the user's preference on error emission
    /// format. Any errors prior to that will cause an abort and all stashed diagnostics of the
    /// previous dcx will be emitted.
    pub fn abort_if_error_and_set_error_format(&mut self, output: ErrorOutputType) {
        self.dcx.abort_if_errors();

        let emitter = mk_emitter(output);
        self.dcx = DiagCtxt::with_emitter(emitter);
    }

    #[allow(rustc::untranslatable_diagnostic)]
    #[allow(rustc::diagnostic_outside_of_impl)]
    pub fn early_note(&self, msg: impl Into<DiagnosticMessage>) {
        self.dcx.struct_note(msg).emit()
    }

    #[allow(rustc::untranslatable_diagnostic)]
    #[allow(rustc::diagnostic_outside_of_impl)]
    pub fn early_help(&self, msg: impl Into<DiagnosticMessage>) {
        self.dcx.struct_help(msg).emit()
    }

    #[allow(rustc::untranslatable_diagnostic)]
    #[allow(rustc::diagnostic_outside_of_impl)]
    #[must_use = "ErrorGuaranteed must be returned from `run_compiler` in order to exit with a non-zero status code"]
    pub fn early_err(&self, msg: impl Into<DiagnosticMessage>) -> ErrorGuaranteed {
        self.dcx.struct_err(msg).emit()
    }

    #[allow(rustc::untranslatable_diagnostic)]
    #[allow(rustc::diagnostic_outside_of_impl)]
    pub fn early_fatal(&self, msg: impl Into<DiagnosticMessage>) -> ! {
        self.dcx.struct_fatal(msg).emit()
    }

    #[allow(rustc::untranslatable_diagnostic)]
    #[allow(rustc::diagnostic_outside_of_impl)]
    pub fn early_struct_fatal(
        &self,
        msg: impl Into<DiagnosticMessage>,
    ) -> DiagnosticBuilder<'_, FatalAbort> {
        self.dcx.struct_fatal(msg)
    }

    #[allow(rustc::untranslatable_diagnostic)]
    #[allow(rustc::diagnostic_outside_of_impl)]
    pub fn early_warn(&self, msg: impl Into<DiagnosticMessage>) {
        self.dcx.struct_warn(msg).emit()
    }

    pub fn initialize_checked_jobserver(&self) {
        // initialize jobserver before getting `jobserver::client` and `build_session`.
        jobserver::initialize_checked(|err| {
            #[allow(rustc::untranslatable_diagnostic)]
            #[allow(rustc::diagnostic_outside_of_impl)]
            self.dcx.struct_warn(err).note("the build environment is likely misconfigured").emit()
        });
    }
}
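
A sketch of how a driver-like caller might use this wrapper before a full `Session`
exists. It is not runnable outside a compiler checkout: it assumes nightly, the
`rustc-dev` component, and that `ErrorOutputType::default()` selects the
human-readable format (an assumption; the `Default` impl is not shown in this file).

// Sketch only: rustc-internal APIs change frequently between nightlies.
#![feature(rustc_private)]
extern crate rustc_session;

use rustc_session::config::ErrorOutputType;
use rustc_session::EarlyDiagCtxt;

fn main() {
    // Before argument parsing: fall back to the assumed default output format.
    let mut early_dcx = EarlyDiagCtxt::new(ErrorOutputType::default());
    early_dcx.early_warn("no --error-format choice parsed yet");

    // After parsing: abort on any earlier errors and adopt the user's format.
    early_dcx.abort_if_error_and_set_error_format(ErrorOutputType::default());
}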

fn mk_emitter(output: ErrorOutputType) -> Box<DynEmitter> {
    // FIXME(#100717): early errors aren't translated at the moment, so this is fine, but it will
    // need to reference every crate that might emit an early error for translation to work.
    let fallback_bundle =
        fallback_fluent_bundle(vec![rustc_errors::DEFAULT_LOCALE_RESOURCE], false);
    let emitter: Box<DynEmitter> = match output {
        config::ErrorOutputType::HumanReadable(kind) => {
            let (short, color_config) = kind.unzip();
            Box::new(HumanEmitter::stderr(color_config, fallback_bundle).short_message(short))
        }
        config::ErrorOutputType::Json { pretty, json_rendered } => Box::new(JsonEmitter::basic(
            pretty,
            json_rendered,
            None,
            fallback_bundle,
            None,
            false,
            false,
            TerminalUrl::No,
        )),
    };
    emitter
}
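
`mk_emitter` hides the concrete emitter behind `Box<DynEmitter>`, so the early
diagnostics path is indifferent to whether output is human-readable or JSON. The same
select-behind-a-trait-object shape, with hypothetical stand-in types rather than the
`rustc_errors` Emitter trait:

// Hypothetical stand-ins; not the rustc_errors API.
trait DemoEmit {
    fn emit(&self, msg: &str);
}

struct HumanDemo;
struct JsonDemo;

impl DemoEmit for HumanDemo {
    fn emit(&self, msg: &str) {
        eprintln!("error: {}", msg);
    }
}

impl DemoEmit for JsonDemo {
    fn emit(&self, msg: &str) {
        // Hand-rolled JSON purely for illustration.
        eprintln!("{{\"level\":\"error\",\"message\":\"{}\"}}", msg);
    }
}

enum DemoOutput {
    Human,
    Json,
}

// Pick a concrete emitter once, then hand out only the trait object.
fn demo_mk_emitter(output: DemoOutput) -> Box<dyn DemoEmit> {
    match output {
        DemoOutput::Human => Box::new(HumanDemo),
        DemoOutput::Json => Box::new(JsonDemo),
    }
}

fn main() {
    demo_mk_emitter(DemoOutput::Json).emit("something went wrong");
}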

pub trait RemapFileNameExt {
    type Output<'a>
    where
        Self: 'a;

    fn for_scope(&self, sess: &Session, scopes: RemapPathScopeComponents) -> Self::Output<'_>;

    fn for_codegen(&self, sess: &Session) -> Self::Output<'_>;
}

impl RemapFileNameExt for rustc_span::FileName {
    type Output<'a> = rustc_span::FileNameDisplay<'a>;

    fn for_scope(&self, sess: &Session, scopes: RemapPathScopeComponents) -> Self::Output<'_> {
        if sess.opts.unstable_opts.remap_path_scope.contains(scopes) {
            self.prefer_remapped_unconditionaly()
        } else {
            self.prefer_local()
        }
    }

    fn for_codegen(&self, sess: &Session) -> Self::Output<'_> {
        if sess.should_prefer_remapped_for_codegen() {
            self.prefer_remapped_unconditionaly()
        } else {
            self.prefer_local()
        }
    }
}

impl RemapFileNameExt for rustc_span::RealFileName {
    type Output<'a> = &'a Path;

    fn for_scope(&self, sess: &Session, scopes: RemapPathScopeComponents) -> Self::Output<'_> {
        if sess.opts.unstable_opts.remap_path_scope.contains(scopes) {
            self.remapped_path_if_available()
        } else {
            self.local_path_if_available()
        }
    }

    fn for_codegen(&self, sess: &Session) -> Self::Output<'_> {
        if sess.should_prefer_remapped_for_codegen() {
            self.remapped_path_if_available()
        } else {
            self.local_path_if_available()
        }
    }
}
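
The two impls above decide, per remap scope, whether a path is rendered as the user
wrote it or as rewritten by `--remap-path-prefix`. A standalone sketch of the same
decision using only std types; the struct and its boolean scope flag are hypothetical
stand-ins, not the `rustc_span` types:

use std::path::{Path, PathBuf};

// Hypothetical stand-in: a file name that may carry a remapped form next to the local one.
struct DemoFileName {
    local: PathBuf,
    remapped: Option<PathBuf>,
}

impl DemoFileName {
    // Prefer the remapped form when the scope opts into remapping, as the
    // for_scope methods above do; otherwise fall back to the local path.
    fn for_scope(&self, remap_this_scope: bool) -> &Path {
        match (&self.remapped, remap_this_scope) {
            (Some(remapped), true) => remapped,
            _ => &self.local,
        }
    }
}

fn main() {
    let name = DemoFileName {
        local: PathBuf::from("/home/user/project/src/lib.rs"),
        remapped: Some(PathBuf::from("/remapped/src/lib.rs")),
    };
    assert_eq!(name.for_scope(true), Path::new("/remapped/src/lib.rs"));
    assert_eq!(name.for_scope(false), Path::new("/home/user/project/src/lib.rs"));
}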