rust/compiler/rustc_codegen_llvm/src/lib.rs

//! The Rust compiler.
//!
//! # Note
//!
//! This API is completely unstable and subject to change.

#![allow(internal_features)]
#![feature(rustdoc_internals)]
#![doc(rust_logo)]
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
#![feature(exact_size_is_empty)]
#![feature(extern_types)]
#![feature(hash_raw_entry)]
#![feature(iter_intersperse)]
#![feature(let_chains)]
#![feature(impl_trait_in_assoc_type)]

#[macro_use]
extern crate rustc_macros;
#[macro_use]
extern crate tracing;

use back::owned_target_machine::OwnedTargetMachine;
use back::write::{create_informational_target_machine, create_target_machine};
use errors::ParseTargetMachineConfig;
pub use llvm_util::target_features;
use rustc_ast::expand::allocator::AllocatorKind;
use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule};
use rustc_codegen_ssa::back::write::{
    CodegenContext, FatLtoInput, ModuleConfig, TargetMachineFactoryConfig, TargetMachineFactoryFn,
};
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::ModuleCodegen;
use rustc_codegen_ssa::{CodegenResults, CompiledModule};
use rustc_data_structures::fx::FxIndexMap;
use rustc_errors::{DiagCtxt, ErrorGuaranteed, FatalError};
use rustc_metadata::EncodedMetadata;
use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
use rustc_middle::ty::TyCtxt;
use rustc_middle::util::Providers;
use rustc_session::config::{OptLevel, OutputFilenames, PrintKind, PrintRequest};
use rustc_session::Session;
use rustc_span::symbol::Symbol;

use std::any::Any;
use std::ffi::CStr;
use std::io::Write;
use std::mem::ManuallyDrop;

mod back {
    pub mod archive;
    pub mod lto;
    pub mod owned_target_machine;
    mod profiling;
    pub mod write;
}

mod abi;
mod allocator;
mod asm;
mod attributes;
mod base;
mod builder;
mod callee;
mod common;
mod consts;
mod context;
mod coverageinfo;
mod debuginfo;
mod declare;
mod errors;
mod intrinsic;
// The following is a workaround that replaces `pub mod llvm;` and that fixes issue 53912.
#[path = "llvm/mod.rs"]
mod llvm_;
pub mod llvm {
    pub use super::llvm_::*;
}
mod llvm_util;
mod mono_item;
mod type_;
mod type_of;
mod va_arg;
mod value;

rustc_fluent_macro::fluent_messages! { "../messages.ftl" }
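
/// The LLVM codegen backend, as handed to the rest of the compiler. The
/// private unit field keeps construction confined to [`LlvmCodegenBackend::new`].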
#[derive(Clone)]
pub struct LlvmCodegenBackend(());
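
/// RAII guard for LLVM's time-trace profiler: starts per-thread profiling on
/// creation (when enabled) and ends the thread's trace when dropped.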
struct TimeTraceProfiler {
    enabled: bool,
}

impl TimeTraceProfiler {
    fn new(enabled: bool) -> Self {
        if enabled {
            unsafe { llvm::LLVMRustTimeTraceProfilerInitialize() }
        }
        TimeTraceProfiler { enabled }
    }
}

impl Drop for TimeTraceProfiler {
    fn drop(&mut self) {
        if self.enabled {
            unsafe { llvm::LLVMRustTimeTraceProfilerFinishThread() }
        }
    }
}

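// `ExtraBackendMethods` supplies the LLVM-specific pieces the shared
// rustc_codegen_ssa driver needs up front: allocator shim generation,
// per-codegen-unit compilation, target machine creation, and worker threads.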
impl ExtraBackendMethods for LlvmCodegenBackend {
    fn codegen_allocator<'tcx>(
        &self,
        tcx: TyCtxt<'tcx>,
        module_name: &str,
        kind: AllocatorKind,
        alloc_error_handler_kind: AllocatorKind,
    ) -> ModuleLlvm {
        let mut module_llvm = ModuleLlvm::new_metadata(tcx, module_name);
        unsafe {
            allocator::codegen(tcx, &mut module_llvm, module_name, kind, alloc_error_handler_kind);
        }
        module_llvm
    }

    fn compile_codegen_unit(
        &self,
        tcx: TyCtxt<'_>,
        cgu_name: Symbol,
    ) -> (ModuleCodegen<ModuleLlvm>, u64) {
        base::compile_codegen_unit(tcx, cgu_name)
    }

    fn target_machine_factory(
        &self,
        sess: &Session,
        optlvl: OptLevel,
        target_features: &[String],
    ) -> TargetMachineFactoryFn<Self> {
        back::write::target_machine_factory(sess, optlvl, target_features)
    }

    fn spawn_named_thread<F, T>(
        time_trace: bool,
        name: String,
        f: F,
    ) -> std::io::Result<std::thread::JoinHandle<T>>
    where
        F: FnOnce() -> T,
        F: Send + 'static,
        T: Send + 'static,
    {
        std::thread::Builder::new().name(name).spawn(move || {
            let _profiler = TimeTraceProfiler::new(time_trace);
            f()
        })
    }
}

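// `WriteBackendMethods` covers everything after initial codegen: per-module
// optimization, fat and thin LTO, and emitting machine code from LLVM modules.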
impl WriteBackendMethods for LlvmCodegenBackend {
    type Module = ModuleLlvm;
    type ModuleBuffer = back::lto::ModuleBuffer;
    type TargetMachine = OwnedTargetMachine;
    type TargetMachineError = crate::errors::LlvmError<'static>;
    type ThinData = back::lto::ThinData;
    type ThinBuffer = back::lto::ThinBuffer;

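    // Both printers below copy a C string produced on the LLVM side to stdout
    // and then release it with `libc::free`, since the buffer was allocated by
    // the C++ wrapper and must not go through Rust's allocator.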
    fn print_pass_timings(&self) {
        unsafe {
            let mut size = 0;
            let cstr = llvm::LLVMRustPrintPassTimings(&mut size as *mut usize);
            if cstr.is_null() {
                println!("failed to get pass timings");
            } else {
                let timings = std::slice::from_raw_parts(cstr as *const u8, size);
                std::io::stdout().write_all(timings).unwrap();
                libc::free(cstr as *mut _);
            }
        }
    }

    fn print_statistics(&self) {
        unsafe {
            let mut size = 0;
            let cstr = llvm::LLVMRustPrintStatistics(&mut size as *mut usize);
            if cstr.is_null() {
                println!("failed to get pass stats");
            } else {
                let stats = std::slice::from_raw_parts(cstr as *const u8, size);
                std::io::stdout().write_all(stats).unwrap();
                libc::free(cstr as *mut _);
            }
        }
    }

    fn run_link(
        cgcx: &CodegenContext<Self>,
        dcx: &DiagCtxt,
        modules: Vec<ModuleCodegen<Self::Module>>,
    ) -> Result<ModuleCodegen<Self::Module>, FatalError> {
        back::write::link(cgcx, dcx, modules)
    }

    fn run_fat_lto(
        cgcx: &CodegenContext<Self>,
        modules: Vec<FatLtoInput<Self>>,
        cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
    ) -> Result<LtoModuleCodegen<Self>, FatalError> {
        back::lto::run_fat(cgcx, modules, cached_modules)
    }

    fn run_thin_lto(
        cgcx: &CodegenContext<Self>,
        modules: Vec<(String, Self::ThinBuffer)>,
        cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
    ) -> Result<(Vec<LtoModuleCodegen<Self>>, Vec<WorkProduct>), FatalError> {
        back::lto::run_thin(cgcx, modules, cached_modules)
    }

    unsafe fn optimize(
        cgcx: &CodegenContext<Self>,
        dcx: &DiagCtxt,
        module: &ModuleCodegen<Self::Module>,
        config: &ModuleConfig,
    ) -> Result<(), FatalError> {
        back::write::optimize(cgcx, dcx, module, config)
    }

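    // No `DiagCtxt` is passed in here, so one is created locally. The trailing
    // `false` appears to request the non-thin (fat LTO) configuration, assuming
    // the last parameter of `run_pass_manager` is a `thin` flag.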
    fn optimize_fat(
        cgcx: &CodegenContext<Self>,
        module: &mut ModuleCodegen<Self::Module>,
    ) -> Result<(), FatalError> {
        let dcx = cgcx.create_dcx();
        back::lto::run_pass_manager(cgcx, &dcx, module, false)
    }

    unsafe fn optimize_thin(
        cgcx: &CodegenContext<Self>,
        thin: ThinModule<Self>,
    ) -> Result<ModuleCodegen<Self::Module>, FatalError> {
        back::lto::optimize_thin_module(thin, cgcx)
    }

    unsafe fn codegen(
        cgcx: &CodegenContext<Self>,
        dcx: &DiagCtxt,
        module: ModuleCodegen<Self::Module>,
        config: &ModuleConfig,
    ) -> Result<CompiledModule, FatalError> {
        back::write::codegen(cgcx, dcx, module, config)
    }

    fn prepare_thin(module: ModuleCodegen<Self::Module>) -> (String, Self::ThinBuffer) {
        back::lto::prepare_thin(module)
    }

    fn serialize_module(module: ModuleCodegen<Self::Module>) -> (String, Self::ModuleBuffer) {
        (module.name, back::lto::ModuleBuffer::new(module.module_llvm.llmod()))
    }
}

// LLVM contexts may only be used from one thread at a time, but the backend
// handle itself holds no LLVM state, so it is sound to send and share it.
unsafe impl Send for LlvmCodegenBackend {}
unsafe impl Sync for LlvmCodegenBackend {}

impl LlvmCodegenBackend {
    pub fn new() -> Box<dyn CodegenBackend> {
        Box::new(LlvmCodegenBackend(()))
    }
}

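// `CodegenBackend` is the interface the compiler driver itself talks to:
// initialization, `--print` requests, target features, whole-crate codegen,
// joining the parallel codegen work, and the final link step.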
impl CodegenBackend for LlvmCodegenBackend {
    fn locale_resource(&self) -> &'static str {
        crate::DEFAULT_LOCALE_RESOURCE
    }

    fn init(&self, sess: &Session) {
        llvm_util::init(sess); // Make sure LLVM is initialized.
    }

    fn provide(&self, providers: &mut Providers) {
        providers.global_backend_features =
            |tcx, ()| llvm_util::global_llvm_features(tcx.sess, true)
    }

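    // Answer the LLVM-specific `--print` requests directly; everything else is
    // forwarded to `llvm_util::print`.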
    fn print(&self, req: &PrintRequest, out: &mut dyn PrintBackendInfo, sess: &Session) {
        match req.kind {
            PrintKind::RelocationModels => {
                writeln!(out, "Available relocation models:");
                for name in &[
                    "static",
                    "pic",
                    "pie",
                    "dynamic-no-pic",
                    "ropi",
                    "rwpi",
                    "ropi-rwpi",
                    "default",
                ] {
                    writeln!(out, "    {name}");
                }
                writeln!(out);
            }
            PrintKind::CodeModels => {
                writeln!(out, "Available code models:");
                for name in &["tiny", "small", "kernel", "medium", "large"] {
                    writeln!(out, "    {name}");
                }
                writeln!(out);
            }
            PrintKind::TlsModels => {
                writeln!(out, "Available TLS models:");
                for name in
                    &["global-dynamic", "local-dynamic", "initial-exec", "local-exec", "emulated"]
                {
                    writeln!(out, "    {name}");
                }
                writeln!(out);
            }
            PrintKind::StackProtectorStrategies => {
                writeln!(
                    out,
                    r#"Available stack protector strategies:
    all
        Generate stack canaries in all functions.

    strong
        Generate stack canaries in a function if it either:
        - has a local variable of `[T; N]` type, regardless of `T` and `N`
        - takes the address of a local variable.

          (Note that a local variable being borrowed is not equivalent to its
          address being taken: e.g. some borrows may be removed by optimization,
          while by-value argument passing may be implemented with reference to a
          local stack variable in the ABI.)

    basic
        Generate stack canaries in functions with local variables of `[T; N]`
        type, where `T` is byte-sized and `N` >= 8.

    none
        Do not generate stack canaries.
"#
                );
            }
            _other => llvm_util::print(req, out, sess),
        }
    }

    fn print_passes(&self) {
        llvm_util::print_passes();
    }

    fn print_version(&self) {
        llvm_util::print_version();
    }

    fn target_features(&self, sess: &Session, allow_unstable: bool) -> Vec<Symbol> {
        target_features(sess, allow_unstable)
    }

    fn codegen_crate<'tcx>(
        &self,
        tcx: TyCtxt<'tcx>,
        metadata: EncodedMetadata,
        need_metadata_module: bool,
    ) -> Box<dyn Any> {
        Box::new(rustc_codegen_ssa::base::codegen_crate(
            LlvmCodegenBackend(()),
            tcx,
            crate::llvm_util::target_cpu(tcx.sess).to_string(),
            metadata,
            need_metadata_module,
        ))
    }

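    // Waits for all parallel codegen work spawned by `codegen_crate` to finish,
    // recovering the concrete `OngoingCodegen` from the type-erased `Box<dyn Any>`.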
    fn join_codegen(
        &self,
        ongoing_codegen: Box<dyn Any>,
        sess: &Session,
        outputs: &OutputFilenames,
    ) -> (CodegenResults, FxIndexMap<WorkProductId, WorkProduct>) {
        let (codegen_results, work_products) = ongoing_codegen
            .downcast::<rustc_codegen_ssa::back::write::OngoingCodegen<LlvmCodegenBackend>>()
            .expect("Expected LlvmCodegenBackend's OngoingCodegen, found Box<Any>")
            .join(sess);

        if sess.opts.unstable_opts.llvm_time_trace {
            sess.time("llvm_dump_timing_file", || {
                let file_name = outputs.with_extension("llvm_timings.json");
                llvm_util::time_trace_profiler_finish(&file_name);
            });
        }

        (codegen_results, work_products)
    }

    fn link(
        &self,
        sess: &Session,
        codegen_results: CodegenResults,
        outputs: &OutputFilenames,
    ) -> Result<(), ErrorGuaranteed> {
        use crate::back::archive::LlvmArchiveBuilderBuilder;
        use rustc_codegen_ssa::back::link::link_binary;

        // Run the linker on any artifacts that resulted from the LLVM run.
        // This should produce either a finished executable or library.
        link_binary(sess, &LlvmArchiveBuilderBuilder, &codegen_results, outputs)
    }
}

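/// Owns the LLVM state for one module of codegen: the context, the module
/// allocated inside it, and the target machine used to emit code for it.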
pub struct ModuleLlvm {
    llcx: &'static mut llvm::Context,
    llmod_raw: *const llvm::Module,

    // This field is `ManuallyDrop` because the `TargetMachine` must be disposed
    // before the `Context` is disposed; otherwise use-after-frees can occur.
    tm: ManuallyDrop<OwnedTargetMachine>,
}

unsafe impl Send for ModuleLlvm {}
unsafe impl Sync for ModuleLlvm {}

impl ModuleLlvm {
    fn new(tcx: TyCtxt<'_>, mod_name: &str) -> Self {
        unsafe {
            let llcx = llvm::LLVMRustContextCreate(tcx.sess.fewer_names());
            let llmod_raw = context::create_module(tcx, llcx, mod_name) as *const _;
            ModuleLlvm {
                llmod_raw,
                llcx,
                tm: ManuallyDrop::new(create_target_machine(tcx, mod_name)),
            }
        }
    }

    fn new_metadata(tcx: TyCtxt<'_>, mod_name: &str) -> Self {
        unsafe {
            let llcx = llvm::LLVMRustContextCreate(tcx.sess.fewer_names());
            let llmod_raw = context::create_module(tcx, llcx, mod_name) as *const _;
            ModuleLlvm {
                llmod_raw,
                llcx,
                tm: ManuallyDrop::new(create_informational_target_machine(tcx.sess)),
            }
        }
    }

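    // Reconstructs a `ModuleLlvm` from a serialized module (as produced for
    // LTO or incremental reuse): the bitcode is parsed into a fresh context and
    // a target machine is recreated from the factory in the codegen context.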
    fn parse(
        cgcx: &CodegenContext<LlvmCodegenBackend>,
        name: &CStr,
        buffer: &[u8],
        dcx: &DiagCtxt,
    ) -> Result<Self, FatalError> {
        unsafe {
            let llcx = llvm::LLVMRustContextCreate(cgcx.fewer_names);
            let llmod_raw = back::lto::parse_module(llcx, name, buffer, dcx)?;
            let tm_factory_config = TargetMachineFactoryConfig::new(cgcx, name.to_str().unwrap());
            let tm = match (cgcx.tm_factory)(tm_factory_config) {
                Ok(m) => m,
                Err(e) => {
                    return Err(dcx.emit_almost_fatal(ParseTargetMachineConfig(e)));
                }
            };

            Ok(ModuleLlvm { llmod_raw, llcx, tm: ManuallyDrop::new(tm) })
        }
    }

    fn llmod(&self) -> &llvm::Module {
        unsafe { &*self.llmod_raw }
    }
}

impl Drop for ModuleLlvm {
    fn drop(&mut self) {
        unsafe {
            // Dispose the target machine before the context it refers to; see
            // the comment on the `tm` field above.
            ManuallyDrop::drop(&mut self.tm);
            llvm::LLVMContextDispose(&mut *(self.llcx as *mut _));
        }
    }
}