rust/src/librustc_codegen_llvm/back/write.rs

use attributes;
use back::bytecode::{self, RLIB_BYTECODE_EXTENSION};
use back::lto::ThinBuffer;
use rustc_codegen_ssa::back::write::{CodegenContext, ModuleConfig, run_assembler};
use rustc_codegen_ssa::traits::*;
use base;
use consts;
use rustc::hir::def_id::LOCAL_CRATE;
use rustc::session::config::{self, OutputType, Passes, Lto};
use rustc::session::Session;
use rustc::ty::TyCtxt;
use time_graph::Timeline;
use llvm::{self, DiagnosticInfo, PassManager, SMDiagnostic};
use llvm_util;
use ModuleLlvm;
use rustc_codegen_ssa::{ModuleCodegen, CompiledModule};
use rustc::util::common::time_ext;
use rustc_fs_util::{path_to_c_string, link_or_copy};
use rustc_data_structures::small_c_str::SmallCStr;
use errors::{self, Handler, FatalError};
use type_::Type;
use context::{is_pie_binary, get_reloc_model};
use common;
use LlvmCodegenBackend;
use rustc_demangle;
use std::ffi::{CString, CStr};
use std::fs;
use std::io::{self, Write};
use std::path::Path;
use std::str;
use std::sync::Arc;
use std::slice;
use libc::{c_uint, c_void, c_char, size_t};
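// Tables mapping the option strings accepted by rustc to the corresponding
// LLVM relocation-model, code-model, and TLS-model enums.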
pub const RELOC_MODEL_ARGS : [(&str, llvm::RelocMode); 7] = [
("pic", llvm::RelocMode::PIC),
("static", llvm::RelocMode::Static),
("default", llvm::RelocMode::Default),
("dynamic-no-pic", llvm::RelocMode::DynamicNoPic),
("ropi", llvm::RelocMode::ROPI),
("rwpi", llvm::RelocMode::RWPI),
("ropi-rwpi", llvm::RelocMode::ROPI_RWPI),
];
pub const CODE_GEN_MODEL_ARGS: &[(&str, llvm::CodeModel)] = &[
("small", llvm::CodeModel::Small),
("kernel", llvm::CodeModel::Kernel),
("medium", llvm::CodeModel::Medium),
("large", llvm::CodeModel::Large),
];
pub const TLS_MODEL_ARGS : [(&str, llvm::ThreadLocalMode); 4] = [
("global-dynamic", llvm::ThreadLocalMode::GeneralDynamic),
("local-dynamic", llvm::ThreadLocalMode::LocalDynamic),
("initial-exec", llvm::ThreadLocalMode::InitialExec),
("local-exec", llvm::ThreadLocalMode::LocalExec),
];
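// Report a fatal error through `handler`, appending LLVM's last error
// message when one is available.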
pub fn llvm_err(handler: &errors::Handler, msg: &str) -> FatalError {
    match llvm::last_error() {
        Some(err) => handler.fatal(&format!("{}: {}", msg, err)),
        None => handler.fatal(&msg),
    }
}
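// Run the codegen pass manager `pm` over module `m` and write the result to
// `output` as the requested `file_type` (assembly or object code).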
pub fn write_output_file(
    handler: &errors::Handler,
    target: &'ll llvm::TargetMachine,
    pm: &llvm::PassManager<'ll>,
    m: &'ll llvm::Module,
    output: &Path,
    file_type: llvm::FileType) -> Result<(), FatalError> {
    unsafe {
        let output_c = path_to_c_string(output);
        let result = llvm::LLVMRustWriteOutputFile(target, pm, m, output_c.as_ptr(), file_type);
        if result.into_result().is_err() {
            let msg = format!("could not write output to {}", output.display());
            Err(llvm_err(handler, &msg))
        } else {
            Ok(())
        }
    }
}
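// Create the target machine used to compile the local crate, at the
// optimization level the backend selected for `LOCAL_CRATE`.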
pub fn create_target_machine(
    tcx: TyCtxt,
    find_features: bool,
) -> &'static mut llvm::TargetMachine {
    target_machine_factory(tcx.sess, tcx.backend_optimization_level(LOCAL_CRATE), find_features)()
        .unwrap_or_else(|err| llvm_err(tcx.sess.diagnostic(), &err).raise())
}
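// Create a target machine that is only used for informational queries such as
// target feature discovery; it is always built at `OptLevel::No`.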
pub fn create_informational_target_machine(
    sess: &Session,
    find_features: bool,
) -> &'static mut llvm::TargetMachine {
    target_machine_factory(sess, config::OptLevel::No, find_features)().unwrap_or_else(|err| {
        llvm_err(sess.diagnostic(), &err).raise()
    })
}
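// Map rustc's optimization level to LLVM's codegen opt-level / opt-size pair.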
pub fn to_llvm_opt_settings(cfg: config::OptLevel) -> (llvm::CodeGenOptLevel, llvm::CodeGenOptSize)
{
    use self::config::OptLevel::*;
    match cfg {
        No => (llvm::CodeGenOptLevel::None, llvm::CodeGenOptSizeNone),
        Less => (llvm::CodeGenOptLevel::Less, llvm::CodeGenOptSizeNone),
        Default => (llvm::CodeGenOptLevel::Default, llvm::CodeGenOptSizeNone),
        Aggressive => (llvm::CodeGenOptLevel::Aggressive, llvm::CodeGenOptSizeNone),
        Size => (llvm::CodeGenOptLevel::Default, llvm::CodeGenOptSizeDefault),
        SizeMin => (llvm::CodeGenOptLevel::Default, llvm::CodeGenOptSizeAggressive),
    }
}
// If `find_features` is true this won't access `sess.crate_types`, instead
// assuming that `is_pie_binary` is false. When we are discovering the LLVM
// target features, `sess.crate_types` is not yet initialized, so we cannot
// access it.
pub fn target_machine_factory(sess: &Session, optlvl: config::OptLevel, find_features: bool)
    -> Arc<dyn Fn() -> Result<&'static mut llvm::TargetMachine, String> + Send + Sync>
{
    let reloc_model = get_reloc_model(sess);
    let (opt_level, _) = to_llvm_opt_settings(optlvl);
    let use_softfp = sess.opts.cg.soft_float;
    let ffunction_sections = sess.target.target.options.function_sections;
    let fdata_sections = ffunction_sections;

    let code_model_arg = sess.opts.cg.code_model.as_ref().or(
        sess.target.target.options.code_model.as_ref(),
    );

    let code_model = match code_model_arg {
        Some(s) => {
            match CODE_GEN_MODEL_ARGS.iter().find(|arg| arg.0 == s) {
                Some(x) => x.1,
                _ => {
                    sess.err(&format!("{:?} is not a valid code model",
                                      code_model_arg));
                    sess.abort_if_errors();
                    bug!();
                }
            }
        }
        None => llvm::CodeModel::None,
    };

    let features = attributes::llvm_target_features(sess).collect::<Vec<_>>();
    let mut singlethread = sess.target.target.options.singlethread;

    // Once the `atomics` feature is enabled on the wasm target we are no
    // longer single-threaded, and we don't want LLVM to lower atomic
    // operations to their single-threaded equivalents.
    if singlethread &&
        sess.target.target.llvm_target.contains("wasm32") &&
        features.iter().any(|s| *s == "+atomics")
    {
        singlethread = false;
    }

    let triple = SmallCStr::new(&sess.target.target.llvm_target);
    let cpu = SmallCStr::new(llvm_util::target_cpu(sess));
    let features = features.join(",");
    let features = CString::new(features).unwrap();
    let is_pie_binary = !find_features && is_pie_binary(sess);
    let trap_unreachable = sess.target.target.options.trap_unreachable;
    let emit_stack_size_section = sess.opts.debugging_opts.emit_stack_sizes;
    let asm_comments = sess.asm_comments();

    Arc::new(move || {
        let tm = unsafe {
            llvm::LLVMRustCreateTargetMachine(
                triple.as_ptr(), cpu.as_ptr(), features.as_ptr(),
                code_model,
                reloc_model,
                opt_level,
                use_softfp,
                is_pie_binary,
                ffunction_sections,
                fdata_sections,
                trap_unreachable,
                singlethread,
                asm_comments,
                emit_stack_size_section,
            )
        };

        tm.ok_or_else(|| {
            format!("Could not create LLVM TargetMachine for triple: {}",
                    triple.to_str().unwrap())
        })
    })
}
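// If `cgcx.save_temps` is set, write the module's bitcode to a temporary file
// whose extension is derived from `name`; otherwise this is a no-op.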
pub(crate) fn save_temp_bitcode(
    cgcx: &CodegenContext<LlvmCodegenBackend>,
    module: &ModuleCodegen<ModuleLlvm>,
    name: &str
) {
    if !cgcx.save_temps {
        return
    }
    unsafe {
        let ext = format!("{}.bc", name);
        let cgu = Some(&module.name[..]);
        let path = cgcx.output_filenames.temp_path_ext(&ext, cgu);
        let cstr = path_to_c_string(&path);
        let llmod = module.module_llvm.llmod();
        llvm::LLVMWriteBitcodeToFile(llmod, cstr.as_ptr());
    }
}
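// RAII guard that installs the inline-asm and diagnostic callbacks below on an
// LLVM context and unregisters them (freeing the callback data) on drop.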
pub struct DiagnosticHandlers<'a> {
    data: *mut (&'a CodegenContext<LlvmCodegenBackend>, &'a Handler),
    llcx: &'a llvm::Context,
}
impl<'a> DiagnosticHandlers<'a> {
    pub fn new(cgcx: &'a CodegenContext<LlvmCodegenBackend>,
               handler: &'a Handler,
               llcx: &'a llvm::Context) -> Self {
        let data = Box::into_raw(Box::new((cgcx, handler)));
        unsafe {
            llvm::LLVMRustSetInlineAsmDiagnosticHandler(llcx, inline_asm_handler, data as *mut _);
            llvm::LLVMContextSetDiagnosticHandler(llcx, diagnostic_handler, data as *mut _);
        }
        DiagnosticHandlers { data, llcx }
    }
}
impl<'a> Drop for DiagnosticHandlers<'a> {
    fn drop(&mut self) {
        use std::ptr::null_mut;
        unsafe {
            llvm::LLVMRustSetInlineAsmDiagnosticHandler(self.llcx, inline_asm_handler, null_mut());
            llvm::LLVMContextSetDiagnosticHandler(self.llcx, diagnostic_handler, null_mut());
            drop(Box::from_raw(self.data));
        }
    }
}
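// Forward an inline-asm error from LLVM to the shared diagnostic emitter,
// tagged with the cookie that locates the offending inline assembly.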
unsafe extern "C" fn report_inline_asm<'a, 'b>(cgcx: &'a CodegenContext<LlvmCodegenBackend>,
                                               msg: &'b str,
                                               cookie: c_uint) {
    cgcx.diag_emitter.inline_asm_error(cookie as u32, msg.to_owned());
}
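// C callback registered via `LLVMRustSetInlineAsmDiagnosticHandler`: recovers
// the `(CodegenContext, Handler)` pair from `user` and reports the rendered
// `SMDiagnostic` message.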
unsafe extern "C" fn inline_asm_handler(diag: &SMDiagnostic,
                                        user: *const c_void,
                                        cookie: c_uint) {
    if user.is_null() {
        return
    }
    let (cgcx, _) = *(user as *const (&CodegenContext<LlvmCodegenBackend>, &Handler));

    let msg = llvm::build_string(|s| llvm::LLVMRustWriteSMDiagnosticToString(diag, s))
        .expect("non-UTF8 SMDiagnostic");

    report_inline_asm(cgcx, &msg, cookie);
}
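// C callback for LLVM context diagnostics: dispatches inline-asm errors,
// optimization remarks (filtered against `cgcx.remark`), and PGO/linker
// messages to the appropriate handlers.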
unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void) {
    if user.is_null() {
        return
    }
    let (cgcx, diag_handler) = *(user as *const (&CodegenContext<LlvmCodegenBackend>, &Handler));

    match llvm::diagnostic::Diagnostic::unpack(info) {
        llvm::diagnostic::InlineAsm(inline) => {
            report_inline_asm(cgcx,
                              &llvm::twine_to_string(inline.message),
                              inline.cookie);
        }

        llvm::diagnostic::Optimization(opt) => {
            let enabled = match cgcx.remark {
                Passes::All => true,
                Passes::Some(ref v) => v.iter().any(|s| *s == opt.pass_name),
            };

            if enabled {
                diag_handler.note_without_error(&format!("optimization {} for {} at {}:{}:{}: {}",
                                                         opt.kind.describe(),
                                                         opt.pass_name,
                                                         opt.filename,
                                                         opt.line,
                                                         opt.column,
                                                         opt.message));
            }
        }

        llvm::diagnostic::PGO(diagnostic_ref) |
        llvm::diagnostic::Linker(diagnostic_ref) => {
            let msg = llvm::build_string(|s| {
                llvm::LLVMRustWriteDiagnosticInfoToString(diagnostic_ref, s)
            }).expect("non-UTF8 diagnostic");
            diag_handler.warn(&msg);
        }

        llvm::diagnostic::UnknownDiagnostic(..) => {},
    }
}
// Unsafe due to LLVM calls.
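// Build the function and module pass managers for `module`, add any
// user-requested or plugin passes, and run the resulting optimization
// pipeline, preparing the module for ThinLTO when that is enabled.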
pub(crate) unsafe fn optimize(cgcx: &CodegenContext<LlvmCodegenBackend>,
                              diag_handler: &Handler,
                              module: &ModuleCodegen<ModuleLlvm>,
                              config: &ModuleConfig,
                              timeline: &mut Timeline)
    -> Result<(), FatalError>
{
    let llmod = module.module_llvm.llmod();
    let llcx = &*module.module_llvm.llcx;
    let tm = &*module.module_llvm.tm;
    let _handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx);

    let module_name = module.name.clone();
    let module_name = Some(&module_name[..]);

    if config.emit_no_opt_bc {
        let out = cgcx.output_filenames.temp_path_ext("no-opt.bc", module_name);
        let out = path_to_c_string(&out);
        llvm::LLVMWriteBitcodeToFile(llmod, out.as_ptr());
    }
if config.opt_level.is_some() {
// Create the two optimizing pass managers. These mirror what clang
// does, and are populated by LLVM's default PassManagerBuilder.
// Each manager has a different set of passes, but they also share
// some common passes.
let fpm = llvm::LLVMCreateFunctionPassManagerForModule(llmod);
let mpm = llvm::LLVMCreatePassManager();
{
// If we're verifying or linting, add them to the function pass
// manager.
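// Helper that looks up an LLVM pass by name and adds it to the matching
// pass manager (function-level or module-level); returns false if LLVM
// doesn't recognize the pass name.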
let addpass = |pass_name: &str| {
let pass_name = SmallCStr::new(pass_name);
let pass = match llvm::LLVMRustFindAndCreatePass(pass_name.as_ptr()) {
Some(pass) => pass,
None => return false,
};
let pass_manager = match llvm::LLVMRustPassKind(pass) {
llvm::PassKind::Function => &*fpm,
llvm::PassKind::Module => &*mpm,
llvm::PassKind::Other => {
diag_handler.err("Encountered LLVM pass kind we can't handle");
return true
},
};
llvm::LLVMRustAddPass(pass_manager, pass);
true
};
if config.verify_llvm_ir { assert!(addpass("verify")); }
// Some options cause LLVM bitcode to be emitted, which uses ThinLTOBuffers, so we need
// to make sure we run LLVM's NameAnonGlobals pass when emitting bitcode; otherwise
// we'll get errors in LLVM.
let using_thin_buffers = config.bitcode_needed();
let mut have_name_anon_globals_pass = false;
if !config.no_prepopulate_passes {
llvm::LLVMRustAddAnalysisPasses(tm, fpm, llmod);
llvm::LLVMRustAddAnalysisPasses(tm, mpm, llmod);
let opt_level = config.opt_level.map(|x| to_llvm_opt_settings(x).0)
.unwrap_or(llvm::CodeGenOptLevel::None);
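// Thin LTO (local or full) and cross-language LTO both consume ThinLTO
// buffers later on, so the pipeline is put in "prepare for thin LTO" mode
// for those cases as well.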
let prepare_for_thin_lto = cgcx.lto == Lto::Thin || cgcx.lto == Lto::ThinLocal ||
(cgcx.lto != Lto::Fat && cgcx.opts.debugging_opts.cross_lang_lto.enabled());
with_llvm_pmb(llmod, &config, opt_level, prepare_for_thin_lto, &mut |b| {
llvm::LLVMPassManagerBuilderPopulateFunctionPassManager(b, fpm);
llvm::LLVMPassManagerBuilderPopulateModulePassManager(b, mpm);
});
have_name_anon_globals_pass = have_name_anon_globals_pass || prepare_for_thin_lto;
if using_thin_buffers && !prepare_for_thin_lto {
assert!(addpass("name-anon-globals"));
have_name_anon_globals_pass = true;
}
}
for pass in &config.passes {
if !addpass(pass) {
diag_handler.warn(&format!("unknown pass `{}`, ignoring", pass));
}
if pass == "name-anon-globals" {
have_name_anon_globals_pass = true;
}
}
for pass in &cgcx.plugin_passes {
if !addpass(pass) {
diag_handler.err(&format!("a plugin asked for LLVM pass \
`{}` but LLVM does not \
recognize it", pass));
}
if pass == "name-anon-globals" {
have_name_anon_globals_pass = true;
}
}
if using_thin_buffers && !have_name_anon_globals_pass {
// As described above, this will probably cause an error in LLVM
if config.no_prepopulate_passes {
diag_handler.err("The current compilation is going to use thin LTO buffers \
without running LLVM's NameAnonGlobals pass. \
This will likely cause errors in LLVM. Consider adding \
-C passes=name-anon-globals to the compiler command line.");
} else {
bug!("We are using thin LTO buffers without running the NameAnonGlobals pass. \
This will likely cause errors in LLVM and should never happen.");
}
}
}
diag_handler.abort_if_errors();
// Finally, run the actual optimization passes
time_ext(config.time_passes,
None,
&format!("llvm function passes [{}]", module_name.unwrap()),
|| {
llvm::LLVMRustRunFunctionPassManager(fpm, llmod)
});
timeline.record("fpm");
time_ext(config.time_passes,
None,
&format!("llvm module passes [{}]", module_name.unwrap()),
|| {
llvm::LLVMRunPassManager(mpm, llmod)
});
// Deallocate managers that we're now done with
llvm::LLVMDisposePassManager(fpm);
llvm::LLVMDisposePassManager(mpm);
}
Ok(())
}
pub(crate) unsafe fn codegen(cgcx: &CodegenContext<LlvmCodegenBackend>,
diag_handler: &Handler,
module: ModuleCodegen<ModuleLlvm>,
config: &ModuleConfig,
timeline: &mut Timeline)
-> Result<CompiledModule, FatalError>
{
timeline.record("codegen");
{
let llmod = module.module_llvm.llmod();
let llcx = &*module.module_llvm.llcx;
let tm = &*module.module_llvm.tm;
let module_name = module.name.clone();
let module_name = Some(&module_name[..]);
let handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx);
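// Route LLVM diagnostics for this module through rustc's error handler;
// the handlers stay alive until the explicit `drop(handlers)` at the end
// of this block.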
if cgcx.msvc_imps_needed {
create_msvc_imps(cgcx, llcx, llmod);
}
// A codegen-specific pass manager is used to generate object
// files for an LLVM module.
//
// Each of these pass managers is a one-shot construct, so we create a
// new one for each type of output. The pass manager passed to the
// closure must not escape the closure itself, and each manager should
// only be used once.
unsafe fn with_codegen<'ll, F, R>(tm: &'ll llvm::TargetMachine,
llmod: &'ll llvm::Module,
no_builtins: bool,
f: F) -> R
where F: FnOnce(&'ll mut PassManager<'ll>) -> R,
{
let cpm = llvm::LLVMCreatePassManager();
llvm::LLVMRustAddAnalysisPasses(tm, cpm, llmod);
llvm::LLVMRustAddLibraryInfo(cpm, llmod, no_builtins);
f(cpm)
}
// If we don't have the integrated assembler, then we need to emit asm
// from LLVM and use `gcc` to create the object file.
let asm_to_obj = config.emit_obj && config.no_integrated_as;
// Change what we write and clean up based on whether the object files
// are just LLVM bitcode. In that case, write the bitcode (and possibly
// delete it afterwards if it wasn't actually requested) and don't
// generate machine code; instead the .o file is copied from the .bc.
let write_bc = config.emit_bc || config.obj_is_bitcode;
let rm_bc = !config.emit_bc && config.obj_is_bitcode;
let write_obj = config.emit_obj && !config.obj_is_bitcode && !asm_to_obj;
let copy_bc_to_obj = config.emit_obj && config.obj_is_bitcode;
let bc_out = cgcx.output_filenames.temp_path(OutputType::Bitcode, module_name);
let obj_out = cgcx.output_filenames.temp_path(OutputType::Object, module_name);
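// A single ThinLTO-style buffer backs every flavor of bitcode output:
// plain `.bc` files, compressed bytecode for rlibs, and bitcode embedded
// in the object file.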
if write_bc || config.emit_bc_compressed || config.embed_bitcode {
let thin = ThinBuffer::new(llmod);
let data = thin.data();
timeline.record("make-bc");
if write_bc {
if let Err(e) = fs::write(&bc_out, data) {
diag_handler.err(&format!("failed to write bytecode: {}", e));
}
timeline.record("write-bc");
}
if config.embed_bitcode {
embed_bitcode(cgcx, llcx, llmod, Some(data));
timeline.record("embed-bc");
}
if config.emit_bc_compressed {
let dst = bc_out.with_extension(RLIB_BYTECODE_EXTENSION);
let data = bytecode::encode(&module.name, data);
if let Err(e) = fs::write(&dst, data) {
diag_handler.err(&format!("failed to write bytecode: {}", e));
}
timeline.record("compress-bc");
}
} else if config.embed_bitcode_marker {
embed_bitcode(cgcx, llcx, llmod, None);
}
time_ext(config.time_passes, None, &format!("codegen passes [{}]", module_name.unwrap()),
|| -> Result<(), FatalError> {
if config.emit_ir {
let out = cgcx.output_filenames.temp_path(OutputType::LlvmAssembly, module_name);
let out = path_to_c_string(&out);
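// Callback handed to `LLVMRustPrintModule` for demangling Rust symbol
// names when printing the IR; it returns 0 when demangling fails or the
// provided output buffer is too small.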
extern "C" fn demangle_callback(input_ptr: *const c_char,
input_len: size_t,
output_ptr: *mut c_char,
output_len: size_t) -> size_t {
let input = unsafe {
slice::from_raw_parts(input_ptr as *const u8, input_len as usize)
};
let input = match str::from_utf8(input) {
Ok(s) => s,
Err(_) => return 0,
};
let output = unsafe {
slice::from_raw_parts_mut(output_ptr as *mut u8, output_len as usize)
};
let mut cursor = io::Cursor::new(output);
let demangled = match rustc_demangle::try_demangle(input) {
Ok(d) => d,
Err(_) => return 0,
};
if let Err(_) = write!(cursor, "{:#}", demangled) {
// Possible only if the provided buffer is not big enough
return 0;
}
cursor.position() as size_t
}
with_codegen(tm, llmod, config.no_builtins, |cpm| {
llvm::LLVMRustPrintModule(cpm, llmod, out.as_ptr(), demangle_callback);
llvm::LLVMDisposePassManager(cpm);
});
timeline.record("ir");
}
if config.emit_asm || asm_to_obj {
let path = cgcx.output_filenames.temp_path(OutputType::Assembly, module_name);
// We can't use the same module for asm and binary output, because that triggers
// various errors like invalid IR or broken binaries, so we might have to clone the
// module to produce the asm output
let llmod = if config.emit_obj {
llvm::LLVMCloneModule(llmod)
} else {
llmod
};
with_codegen(tm, llmod, config.no_builtins, |cpm| {
write_output_file(diag_handler, tm, cpm, llmod, &path,
llvm::FileType::AssemblyFile)
})?;
timeline.record("asm");
}
if write_obj {
with_codegen(tm, llmod, config.no_builtins, |cpm| {
write_output_file(diag_handler, tm, cpm, llmod, &obj_out,
llvm::FileType::ObjectFile)
})?;
timeline.record("obj");
} else if asm_to_obj {
let assembly = cgcx.output_filenames.temp_path(OutputType::Assembly, module_name);
run_assembler(cgcx, diag_handler, &assembly, &obj_out);
timeline.record("asm_to_obj");
if !config.emit_asm && !cgcx.save_temps {
drop(fs::remove_file(&assembly));
}
}
Ok(())
})?;
if copy_bc_to_obj {
debug!("copying bitcode {:?} to obj {:?}", bc_out, obj_out);
if let Err(e) = link_or_copy(&bc_out, &obj_out) {
diag_handler.err(&format!("failed to copy bitcode to object file: {}", e));
}
}
if rm_bc {
debug!("removing_bitcode {:?}", bc_out);
if let Err(e) = fs::remove_file(&bc_out) {
diag_handler.err(&format!("failed to remove bitcode: {}", e));
}
}
drop(handlers);
}
Ok(module.into_compiled_module(config.emit_obj,
config.emit_bc,
config.emit_bc_compressed,
&cgcx.output_filenames))
}
/// Embed the bitcode of an LLVM module in the LLVM module itself.
///
/// This is done primarily for iOS where it appears to be standard to compile C
/// code at least with `-fembed-bitcode` which creates two sections in the
/// executable:
///
/// * __LLVM,__bitcode
/// * __LLVM,__cmdline
///
/// It appears *both* of these sections are necessary to get the linker to
/// recognize what's going on. For us though we just always throw in an empty
/// cmdline section.
///
/// Furthermore, debug/O1 builds don't actually embed bitcode but rather just
/// embed an empty section.
///
/// Basically all of this is us attempting to follow in the footsteps of clang
/// on iOS. See #35968 for lots more info.
unsafe fn embed_bitcode(cgcx: &CodegenContext<LlvmCodegenBackend>,
llcx: &llvm::Context,
llmod: &llvm::Module,
bitcode: Option<&[u8]>) {
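// The bitcode (or an empty placeholder) becomes a private constant global
// that the linker places in the platform's bitcode section.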
let llconst = common::bytes_in_context(llcx, bitcode.unwrap_or(&[]));
let llglobal = llvm::LLVMAddGlobal(
llmod,
common::val_ty(llconst),
"rustc.embedded.module\0".as_ptr() as *const _,
);
llvm::LLVMSetInitializer(llglobal, llconst);
let is_apple = cgcx.opts.target_triple.triple().contains("-ios") ||
cgcx.opts.target_triple.triple().contains("-darwin");
let section = if is_apple {
"__LLVM,__bitcode\0"
} else {
".llvmbc\0"
};
llvm::LLVMSetSection(llglobal, section.as_ptr() as *const _);
llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage);
llvm::LLVMSetGlobalConstant(llglobal, llvm::True);
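// Mirror clang by also emitting a (deliberately empty) command-line
// section alongside the bitcode.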
let llconst = common::bytes_in_context(llcx, &[]);
let llglobal = llvm::LLVMAddGlobal(
llmod,
common::val_ty(llconst),
"rustc.embedded.cmdline\0".as_ptr() as *const _,
);
llvm::LLVMSetInitializer(llglobal, llconst);
let section = if is_apple {
"__LLVM,__cmdline\0"
} else {
".llvmcmd\0"
};
llvm::LLVMSetSection(llglobal, section.as_ptr() as *const _);
llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage);
}
pub unsafe fn with_llvm_pmb(llmod: &llvm::Module,
config: &ModuleConfig,
opt_level: llvm::CodeGenOptLevel,
prepare_for_thin_lto: bool,
f: &mut dyn FnMut(&llvm::PassManagerBuilder)) {
use std::ptr;
// Create the PassManagerBuilder for LLVM. We configure it with
// reasonable defaults and prepare it to actually populate the pass
// manager.
let builder = llvm::LLVMPassManagerBuilderCreate();
let opt_size = config.opt_size.map(|x| to_llvm_opt_settings(x).1)
.unwrap_or(llvm::CodeGenOptSizeNone);
let inline_threshold = config.inline_threshold;
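// PGO instrumentation output defaults to `default_%m.profraw` when no
// path is given; `%m` is expanded by LLVM's profiling runtime (roughly,
// a unique signature used when merging profiles).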
let pgo_gen_path = config.pgo_gen.as_ref().map(|s| {
let s = if s.is_empty() { "default_%m.profraw" } else { s };
CString::new(s.as_bytes()).unwrap()
});
let pgo_use_path = if config.pgo_use.is_empty() {
None
} else {
Some(CString::new(config.pgo_use.as_bytes()).unwrap())
};
llvm::LLVMRustConfigurePassManagerBuilder(
builder,
opt_level,
config.merge_functions,
config.vectorize_slp,
config.vectorize_loop,
prepare_for_thin_lto,
pgo_gen_path.as_ref().map_or(ptr::null(), |s| s.as_ptr()),
pgo_use_path.as_ref().map_or(ptr::null(), |s| s.as_ptr()),
);
llvm::LLVMPassManagerBuilderSetSizeLevel(builder, opt_size as u32);
if opt_size != llvm::CodeGenOptSizeNone {
llvm::LLVMPassManagerBuilderSetDisableUnrollLoops(builder, 1);
}
llvm::LLVMRustAddBuilderLibraryInfo(builder, llmod, config.no_builtins);
// Here we match what clang does (kinda). For O0 we only inline
// always-inline functions (but don't add lifetime intrinsics), at O1 we
// inline with lifetime intrinsics, and at O2+ we add an inliner with
// thresholds copied from clang.
match (opt_level, opt_size, inline_threshold) {
(.., Some(t)) => {
llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, t as u32);
}
(llvm::CodeGenOptLevel::Aggressive, ..) => {
llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 275);
}
(_, llvm::CodeGenOptSizeDefault, _) => {
llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 75);
}
(_, llvm::CodeGenOptSizeAggressive, _) => {
llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 25);
}
(llvm::CodeGenOptLevel::None, ..) => {
llvm::LLVMRustAddAlwaysInlinePass(builder, false);
}
(llvm::CodeGenOptLevel::Less, ..) => {
llvm::LLVMRustAddAlwaysInlinePass(builder, true);
}
(llvm::CodeGenOptLevel::Default, ..) => {
llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 225);
}
(llvm::CodeGenOptLevel::Other, ..) => {
bug!("CodeGenOptLevel::Other selected")
}
}
f(builder);
llvm::LLVMPassManagerBuilderDispose(builder);
}
// Create a `__imp_<symbol> = &symbol` global for every public static `symbol`.
// This is required to satisfy `dllimport` references to static data in .rlibs
// when using the MSVC linker. We do this only for data, as the linker can fix up
// code references on its own.
// See #26591, #27438
fn create_msvc_imps(
cgcx: &CodegenContext<LlvmCodegenBackend>,
llcx: &llvm::Context,
llmod: &llvm::Module
) {
if !cgcx.msvc_imps_needed {
return
}
// The x86 ABI seems to require that leading underscores are added to symbol
// names, so we need an extra underscore on 32-bit. There's also a leading
// '\x01' here which disables LLVM's symbol mangling (e.g., no extra
// underscores added in front).
let prefix = if cgcx.target_pointer_width == "32" {
"\x01__imp__"
} else {
"\x01__imp_"
};
unsafe {
let i8p_ty = Type::i8p_llcx(llcx);
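// Gather every defined (non-declaration) global with external linkage;
// each one needs a matching `__imp_` pointer.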
let globals = base::iter_globals(llmod)
.filter(|&val| {
llvm::LLVMRustGetLinkage(val) == llvm::Linkage::ExternalLinkage &&
llvm::LLVMIsDeclaration(val) == 0
})
.map(move |val| {
let name = CStr::from_ptr(llvm::LLVMGetValueName(val));
let mut imp_name = prefix.as_bytes().to_vec();
imp_name.extend(name.to_bytes());
let imp_name = CString::new(imp_name).unwrap();
(imp_name, val)
})
.collect::<Vec<_>>();
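// Materialize the `__imp_` globals in a second pass, presumably so we
// aren't adding globals to the module while iterating over them.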
for (imp_name, val) in globals {
let imp = llvm::LLVMAddGlobal(llmod,
i8p_ty,
imp_name.as_ptr() as *const _);
llvm::LLVMSetInitializer(imp, consts::ptrcast(val, i8p_ty));
llvm::LLVMRustSetLinkage(imp, llvm::Linkage::ExternalLinkage);
}
}
}