Merge from rustc

The Miri Cronjob Bot 2024-03-12 05:45:33 +00:00
commit 19378ca096
1350 changed files with 19391 additions and 10710 deletions

View File

@ -337,7 +337,7 @@ jobs:
- name: dist-x86_64-apple
env:
SCRIPT: "./x.py dist bootstrap --include-default-paths --host=x86_64-apple-darwin --target=x86_64-apple-darwin"
RUST_CONFIGURE_ARGS: "--enable-full-tools --enable-sanitizers --enable-profiler --set rust.jemalloc --set rust.lto=thin"
RUST_CONFIGURE_ARGS: "--enable-full-tools --enable-sanitizers --enable-profiler --set rust.jemalloc --set rust.lto=thin --set rust.codegen-units=1"
RUSTC_RETRY_LINKER_ON_SEGFAULT: 1
MACOSX_DEPLOYMENT_TARGET: 10.12
SELECT_XCODE: /Applications/Xcode_14.3.1.app
@ -442,7 +442,7 @@ jobs:
os: windows-2019-8core-32gb
- name: dist-x86_64-msvc
env:
RUST_CONFIGURE_ARGS: "--build=x86_64-pc-windows-msvc --host=x86_64-pc-windows-msvc --target=x86_64-pc-windows-msvc --enable-full-tools --enable-profiler"
RUST_CONFIGURE_ARGS: "--build=x86_64-pc-windows-msvc --host=x86_64-pc-windows-msvc --target=x86_64-pc-windows-msvc --enable-full-tools --enable-profiler --set rust.codegen-units=1"
SCRIPT: python x.py build --set rust.debug=true opt-dist && PGO_HOST=x86_64-pc-windows-msvc ./build/x86_64-pc-windows-msvc/stage0-tools-bin/opt-dist windows-ci -- python x.py dist bootstrap --include-default-paths
DIST_REQUIRE_ALL_TOOLS: 1
os: windows-2019-8core-32gb

View File

@ -2265,6 +2265,17 @@ checksum = "f9d642685b028806386b2b6e75685faadd3eb65a85fff7df711ce18446a422da"
name = "lld-wrapper"
version = "0.1.0"
[[package]]
name = "llvm-bitcode-linker"
version = "0.0.1"
dependencies = [
"anyhow",
"clap",
"thiserror",
"tracing",
"tracing-subscriber",
]
[[package]]
name = "lock_api"
version = "0.4.11"
@ -3298,6 +3309,9 @@ dependencies = [
[[package]]
name = "run_make_support"
version = "0.0.0"
dependencies = [
"wasmparser",
]
[[package]]
name = "rust-demangler"

View File

@ -34,6 +34,7 @@ members = [
"src/tools/expand-yaml-anchors",
"src/tools/jsondocck",
"src/tools/jsondoclint",
"src/tools/llvm-bitcode-linker",
"src/tools/html-checker",
"src/tools/bump-stage0",
"src/tools/replace-version-placeholder",

View File

@ -145,6 +145,15 @@ toolchain.
1. Download the latest [MSYS2 installer][msys2] and go through the installer.
2. Download and install [Git for Windows](https://git-scm.com/download/win).
Make sure that it's in your Windows PATH. To enable access to it from within
MSYS2, edit the relevant `mingw[32|64].ini` file in your MSYS2 installation
directory and uncomment the line `MSYS2_PATH_TYPE=inherit`.
You could install and use MSYS2's version of git instead with `pacman`; however,
this is not recommended, as it's excruciatingly slow and not frequently
tested for compatibility.
2. Start a MINGW64 or MINGW32 shell (depending on whether you want 32-bit
or 64-bit Rust) either from your start menu, or by running `mingw64.exe`
or `mingw32.exe` from your MSYS2 installation directory (e.g. `C:\msys64`).
@ -160,8 +169,7 @@ toolchain.
# Note that it is important that you do **not** use the 'python2', 'cmake',
# and 'ninja' packages from the 'msys2' subsystem.
# The build has historically been known to fail with these packages.
pacman -S git \
make \
pacman -S make \
diffutils \
tar \
mingw-w64-x86_64-python \
@ -176,11 +184,9 @@ toolchain.
python x.py setup dist && python x.py build && python x.py install
```
If you want to use the native versions of Git, Python, or CMake you can remove
them from the above pacman command and install them from another source. Make
sure that they're in your Windows PATH, and edit the relevant `mingw[32|64].ini`
file in your MSYS2 installation directory by uncommenting the line
`MSYS2_PATH_TYPE=inherit` to include them in your MSYS2 PATH.
If you want to try the native Windows versions of Python or CMake, you can remove
them from the above pacman command and install them from another source. Follow
the instructions in step 2 to get them on PATH.
Using Windows native Python can be helpful if you get errors when building LLVM.
You may also want to use Git for Windows, as it is often *much* faster. Turning

View File

@ -1,10 +1,10 @@
use rustc_errors::{
codes::*, AddToDiagnostic, Diag, DiagArgFromDisplay, EmissionGuarantee, SubdiagMessageOp,
codes::*, Diag, DiagArgFromDisplay, EmissionGuarantee, SubdiagMessageOp, Subdiagnostic,
};
use rustc_macros::{Diagnostic, Subdiagnostic};
use rustc_span::{symbol::Ident, Span, Symbol};
#[derive(Diagnostic, Clone, Copy)]
#[derive(Diagnostic)]
#[diag(ast_lowering_generic_type_with_parentheses, code = E0214)]
pub struct GenericTypeWithParentheses {
#[primary_span]
@ -14,7 +14,7 @@ pub struct GenericTypeWithParentheses {
pub sub: Option<UseAngleBrackets>,
}
#[derive(Clone, Copy, Subdiagnostic)]
#[derive(Subdiagnostic)]
#[multipart_suggestion(ast_lowering_use_angle_brackets, applicability = "maybe-incorrect")]
pub struct UseAngleBrackets {
#[suggestion_part(code = "<")]
@ -40,8 +40,8 @@ pub struct InvalidAbi {
pub struct InvalidAbiReason(pub &'static str);
impl AddToDiagnostic for InvalidAbiReason {
fn add_to_diagnostic_with<G: EmissionGuarantee, F: SubdiagMessageOp<G>>(
impl Subdiagnostic for InvalidAbiReason {
fn add_to_diag_with<G: EmissionGuarantee, F: SubdiagMessageOp<G>>(
self,
diag: &mut Diag<'_, G>,
_: F,
@ -63,7 +63,7 @@ pub struct InvalidAbiSuggestion {
pub suggestion: String,
}
#[derive(Diagnostic, Clone, Copy)]
#[derive(Diagnostic)]
#[diag(ast_lowering_assoc_ty_parentheses)]
pub struct AssocTyParentheses {
#[primary_span]
@ -72,7 +72,7 @@ pub struct AssocTyParentheses {
pub sub: AssocTyParenthesesSub,
}
#[derive(Clone, Copy, Subdiagnostic)]
#[derive(Subdiagnostic)]
pub enum AssocTyParenthesesSub {
#[multipart_suggestion(ast_lowering_remove_parentheses)]
Empty {
@ -106,7 +106,7 @@ pub struct MisplacedAssocTyBinding {
pub suggestion: Option<Span>,
}
#[derive(Diagnostic, Clone, Copy)]
#[derive(Diagnostic)]
#[diag(ast_lowering_underscore_expr_lhs_assign)]
pub struct UnderscoreExprLhsAssign {
#[primary_span]
@ -114,7 +114,7 @@ pub struct UnderscoreExprLhsAssign {
pub span: Span,
}
#[derive(Diagnostic, Clone, Copy)]
#[derive(Diagnostic)]
#[diag(ast_lowering_base_expression_double_dot, code = E0797)]
pub struct BaseExpressionDoubleDot {
#[primary_span]
@ -122,7 +122,7 @@ pub struct BaseExpressionDoubleDot {
pub span: Span,
}
#[derive(Diagnostic, Clone, Copy)]
#[derive(Diagnostic)]
#[diag(ast_lowering_await_only_in_async_fn_and_blocks, code = E0728)]
pub struct AwaitOnlyInAsyncFnAndBlocks {
#[primary_span]
@ -132,21 +132,21 @@ pub struct AwaitOnlyInAsyncFnAndBlocks {
pub item_span: Option<Span>,
}
#[derive(Diagnostic, Clone, Copy)]
#[derive(Diagnostic)]
#[diag(ast_lowering_coroutine_too_many_parameters, code = E0628)]
pub struct CoroutineTooManyParameters {
#[primary_span]
pub fn_decl_span: Span,
}
#[derive(Diagnostic, Clone, Copy)]
#[derive(Diagnostic)]
#[diag(ast_lowering_closure_cannot_be_static, code = E0697)]
pub struct ClosureCannotBeStatic {
#[primary_span]
pub fn_decl_span: Span,
}
#[derive(Diagnostic, Clone, Copy)]
#[derive(Diagnostic)]
#[diag(ast_lowering_functional_record_update_destructuring_assignment)]
pub struct FunctionalRecordUpdateDestructuringAssignment {
#[primary_span]
@ -154,28 +154,28 @@ pub struct FunctionalRecordUpdateDestructuringAssignment {
pub span: Span,
}
#[derive(Diagnostic, Clone, Copy)]
#[derive(Diagnostic)]
#[diag(ast_lowering_async_coroutines_not_supported, code = E0727)]
pub struct AsyncCoroutinesNotSupported {
#[primary_span]
pub span: Span,
}
#[derive(Diagnostic, Clone, Copy)]
#[derive(Diagnostic)]
#[diag(ast_lowering_inline_asm_unsupported_target, code = E0472)]
pub struct InlineAsmUnsupportedTarget {
#[primary_span]
pub span: Span,
}
#[derive(Diagnostic, Clone, Copy)]
#[derive(Diagnostic)]
#[diag(ast_lowering_att_syntax_only_x86)]
pub struct AttSyntaxOnlyX86 {
#[primary_span]
pub span: Span,
}
#[derive(Diagnostic, Clone, Copy)]
#[derive(Diagnostic)]
#[diag(ast_lowering_abi_specified_multiple_times)]
pub struct AbiSpecifiedMultipleTimes {
#[primary_span]
@ -187,7 +187,7 @@ pub struct AbiSpecifiedMultipleTimes {
pub equivalent: Option<()>,
}
#[derive(Diagnostic, Clone, Copy)]
#[derive(Diagnostic)]
#[diag(ast_lowering_clobber_abi_not_supported)]
pub struct ClobberAbiNotSupported {
#[primary_span]
@ -203,7 +203,7 @@ pub struct InvalidAbiClobberAbi {
pub supported_abis: String,
}
#[derive(Diagnostic, Clone, Copy)]
#[derive(Diagnostic)]
#[diag(ast_lowering_invalid_register)]
pub struct InvalidRegister<'a> {
#[primary_span]
@ -212,7 +212,7 @@ pub struct InvalidRegister<'a> {
pub error: &'a str,
}
#[derive(Diagnostic, Clone, Copy)]
#[derive(Diagnostic)]
#[diag(ast_lowering_invalid_register_class)]
pub struct InvalidRegisterClass<'a> {
#[primary_span]
@ -241,7 +241,7 @@ pub enum InvalidAsmTemplateModifierRegClassSub {
DoesNotSupportModifier { class_name: Symbol },
}
#[derive(Diagnostic, Clone, Copy)]
#[derive(Diagnostic)]
#[diag(ast_lowering_invalid_asm_template_modifier_const)]
pub struct InvalidAsmTemplateModifierConst {
#[primary_span]
@ -251,7 +251,7 @@ pub struct InvalidAsmTemplateModifierConst {
pub op_span: Span,
}
#[derive(Diagnostic, Clone, Copy)]
#[derive(Diagnostic)]
#[diag(ast_lowering_invalid_asm_template_modifier_sym)]
pub struct InvalidAsmTemplateModifierSym {
#[primary_span]
@ -261,7 +261,7 @@ pub struct InvalidAsmTemplateModifierSym {
pub op_span: Span,
}
#[derive(Diagnostic, Clone, Copy)]
#[derive(Diagnostic)]
#[diag(ast_lowering_invalid_asm_template_modifier_label)]
pub struct InvalidAsmTemplateModifierLabel {
#[primary_span]
@ -271,7 +271,7 @@ pub struct InvalidAsmTemplateModifierLabel {
pub op_span: Span,
}
#[derive(Diagnostic, Clone, Copy)]
#[derive(Diagnostic)]
#[diag(ast_lowering_register_class_only_clobber)]
pub struct RegisterClassOnlyClobber {
#[primary_span]
@ -279,7 +279,7 @@ pub struct RegisterClassOnlyClobber {
pub reg_class_name: Symbol,
}
#[derive(Diagnostic, Clone, Copy)]
#[derive(Diagnostic)]
#[diag(ast_lowering_register_conflict)]
pub struct RegisterConflict<'a> {
#[primary_span]
@ -293,7 +293,7 @@ pub struct RegisterConflict<'a> {
pub in_out: Option<Span>,
}
#[derive(Diagnostic, Clone, Copy)]
#[derive(Diagnostic)]
#[help]
#[diag(ast_lowering_sub_tuple_binding)]
pub struct SubTupleBinding<'a> {
@ -311,7 +311,7 @@ pub struct SubTupleBinding<'a> {
pub ctx: &'a str,
}
#[derive(Diagnostic, Clone, Copy)]
#[derive(Diagnostic)]
#[diag(ast_lowering_extra_double_dot)]
pub struct ExtraDoubleDot<'a> {
#[primary_span]
@ -322,7 +322,7 @@ pub struct ExtraDoubleDot<'a> {
pub ctx: &'a str,
}
#[derive(Diagnostic, Clone, Copy)]
#[derive(Diagnostic)]
#[note]
#[diag(ast_lowering_misplaced_double_dot)]
pub struct MisplacedDoubleDot {
@ -330,7 +330,7 @@ pub struct MisplacedDoubleDot {
pub span: Span,
}
#[derive(Diagnostic, Clone, Copy)]
#[derive(Diagnostic)]
#[diag(ast_lowering_misplaced_relax_trait_bound)]
pub struct MisplacedRelaxTraitBound {
#[primary_span]
@ -363,14 +363,14 @@ pub struct NeverPatternWithGuard {
pub span: Span,
}
#[derive(Diagnostic, Clone, Copy)]
#[derive(Diagnostic)]
#[diag(ast_lowering_arbitrary_expression_in_pattern)]
pub struct ArbitraryExpressionInPattern {
#[primary_span]
pub span: Span,
}
#[derive(Diagnostic, Clone, Copy)]
#[derive(Diagnostic)]
#[diag(ast_lowering_inclusive_range_with_no_end)]
pub struct InclusiveRangeWithNoEnd {
#[primary_span]

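These hunks rename the manual-impl trait for subdiagnostics: `AddToDiagnostic` becomes `Subdiagnostic`, `add_to_diagnostic_with` becomes `add_to_diag_with`, and the derived types drop their `Clone, Copy` derives. As a rough orientation only, a hand-written impl under the new names could look like the sketch below; `ExampleNote` and its message text are hypothetical, and compiling it requires a nightly toolchain with `rustc_private` enabled.

```rust
use rustc_errors::{Diag, EmissionGuarantee, SubdiagMessageOp, Subdiagnostic};
use rustc_span::Span;

// Hypothetical subdiagnostic: one extra note attached to an existing diagnostic.
struct ExampleNote {
    span: Span,
}

impl Subdiagnostic for ExampleNote {
    // Only the trait and method names changed in this merge; the body shape is the
    // same as it was under `AddToDiagnostic::add_to_diagnostic_with`.
    fn add_to_diag_with<G: EmissionGuarantee, F: SubdiagMessageOp<G>>(
        self,
        diag: &mut Diag<'_, G>,
        _f: F,
    ) {
        diag.span_note(self.span, "illustrative note text");
    }
}
```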
View File

@ -2,7 +2,7 @@
use rustc_ast::ParamKindOrd;
use rustc_errors::{
codes::*, AddToDiagnostic, Applicability, Diag, EmissionGuarantee, SubdiagMessageOp,
codes::*, Applicability, Diag, EmissionGuarantee, SubdiagMessageOp, Subdiagnostic,
};
use rustc_macros::{Diagnostic, Subdiagnostic};
use rustc_span::{symbol::Ident, Span, Symbol};
@ -373,8 +373,8 @@ pub struct ArgsBeforeConstraint {
pub struct EmptyLabelManySpans(pub Vec<Span>);
// The derive for `Vec<Span>` does multiple calls to `span_label`, adding commas between each
impl AddToDiagnostic for EmptyLabelManySpans {
fn add_to_diagnostic_with<G: EmissionGuarantee, F: SubdiagMessageOp<G>>(
impl Subdiagnostic for EmptyLabelManySpans {
fn add_to_diag_with<G: EmissionGuarantee, F: SubdiagMessageOp<G>>(
self,
diag: &mut Diag<'_, G>,
_: F,
@ -742,8 +742,8 @@ pub struct StableFeature {
pub since: Symbol,
}
impl AddToDiagnostic for StableFeature {
fn add_to_diagnostic_with<G: EmissionGuarantee, F: SubdiagMessageOp<G>>(
impl Subdiagnostic for StableFeature {
fn add_to_diag_with<G: EmissionGuarantee, F: SubdiagMessageOp<G>>(
self,
diag: &mut Diag<'_, G>,
_: F,

View File

@ -1,9 +1,7 @@
use std::num::IntErrorKind;
use rustc_ast as ast;
use rustc_errors::{
codes::*, Applicability, Diag, DiagCtxt, EmissionGuarantee, IntoDiagnostic, Level,
};
use rustc_errors::{codes::*, Applicability, Diag, DiagCtxt, Diagnostic, EmissionGuarantee, Level};
use rustc_macros::Diagnostic;
use rustc_span::{Span, Symbol};
@ -50,8 +48,8 @@ pub(crate) struct UnknownMetaItem<'a> {
}
// Manual implementation to be able to format `expected` items correctly.
impl<'a, G: EmissionGuarantee> IntoDiagnostic<'a, G> for UnknownMetaItem<'_> {
fn into_diagnostic(self, dcx: &'a DiagCtxt, level: Level) -> Diag<'a, G> {
impl<'a, G: EmissionGuarantee> Diagnostic<'a, G> for UnknownMetaItem<'_> {
fn into_diag(self, dcx: &'a DiagCtxt, level: Level) -> Diag<'a, G> {
let expected = self.expected.iter().map(|name| format!("`{name}`")).collect::<Vec<_>>();
Diag::new(dcx, level, fluent::attr_unknown_meta_item)
.with_span(self.span)
@ -203,8 +201,8 @@ pub(crate) struct UnsupportedLiteral {
pub start_point_span: Span,
}
impl<'a, G: EmissionGuarantee> IntoDiagnostic<'a, G> for UnsupportedLiteral {
fn into_diagnostic(self, dcx: &'a DiagCtxt, level: Level) -> Diag<'a, G> {
impl<'a, G: EmissionGuarantee> Diagnostic<'a, G> for UnsupportedLiteral {
fn into_diag(self, dcx: &'a DiagCtxt, level: Level) -> Diag<'a, G> {
let mut diag = Diag::new(
dcx,
level,

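The same renaming applies to whole diagnostics: `IntoDiagnostic` becomes `Diagnostic` and `into_diagnostic` becomes `into_diag`. A minimal hand-written impl under the new names might look like this sketch; `ExampleError` and its message string are invented for illustration (the real impls above use Fluent message constants), and it assumes a nightly toolchain with `rustc_private`.

```rust
use rustc_errors::{Diag, DiagCtxt, Diagnostic, EmissionGuarantee, Level};
use rustc_span::Span;

// Hypothetical error type built by hand rather than via `#[derive(Diagnostic)]`.
struct ExampleError {
    span: Span,
    name: String,
}

impl<'a, G: EmissionGuarantee> Diagnostic<'a, G> for ExampleError {
    fn into_diag(self, dcx: &'a DiagCtxt, level: Level) -> Diag<'a, G> {
        // Same construction pattern as `UnknownMetaItem::into_diag` above, with an
        // untranslated string standing in for a Fluent message.
        Diag::new(dcx, level, format!("unknown meta item `{}`", self.name))
            .with_span(self.span)
    }
}
```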
View File

@ -106,7 +106,7 @@ pub fn get_body_with_borrowck_facts(
options: ConsumerOptions,
) -> BodyWithBorrowckFacts<'_> {
let (input_body, promoted) = tcx.mir_promoted(def);
let infcx = tcx.infer_ctxt().with_opaque_type_inference(DefiningAnchor::Bind(def)).build();
let infcx = tcx.infer_ctxt().with_opaque_type_inference(DefiningAnchor::bind(tcx, def)).build();
let input_body: &Body<'_> = &input_body.borrow();
let promoted: &IndexSlice<_, _> = &promoted.borrow();
*super::do_mir_borrowck(&infcx, input_body, promoted, Some(options)).1.unwrap()

View File

@ -191,9 +191,9 @@ impl Display for RegionName {
}
}
impl rustc_errors::IntoDiagnosticArg for RegionName {
fn into_diagnostic_arg(self) -> rustc_errors::DiagArgValue {
self.to_string().into_diagnostic_arg()
impl rustc_errors::IntoDiagArg for RegionName {
fn into_diag_arg(self) -> rustc_errors::DiagArgValue {
self.to_string().into_diag_arg()
}
}
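`IntoDiagnosticArg` is shortened to `IntoDiagArg` in the same way (`into_diagnostic_arg` becomes `into_diag_arg`). The point of such an impl is that the value can be attached to a diagnostic as a named message argument; the helper below is a hypothetical sketch of that call shape, and the `"value"` argument name is not taken from the diff.

```rust
use rustc_errors::{Diag, EmissionGuarantee, IntoDiagArg};

// Any type with an `IntoDiagArg` impl, such as `RegionName` above, can be handed to
// `Diag::arg` and rendered wherever the Fluent message references the argument.
fn attach_arg<G: EmissionGuarantee>(diag: &mut Diag<'_, G>, value: impl IntoDiagArg) {
    diag.arg("value", value);
}
```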

View File

@ -126,10 +126,7 @@ fn mir_borrowck(tcx: TyCtxt<'_>, def: LocalDefId) -> &BorrowCheckResult<'_> {
return tcx.arena.alloc(result);
}
let hir_owner = tcx.local_def_id_to_hir_id(def).owner;
let infcx =
tcx.infer_ctxt().with_opaque_type_inference(DefiningAnchor::Bind(hir_owner.def_id)).build();
let infcx = tcx.infer_ctxt().with_opaque_type_inference(DefiningAnchor::bind(tcx, def)).build();
let promoted: &IndexSlice<_, _> = &promoted.borrow();
let opt_closure_req = do_mir_borrowck(&infcx, input_body, promoted, None).0;
debug!("mir_borrowck done");

View File

@ -311,13 +311,13 @@ fn check_opaque_type_well_formed<'tcx>(
parent_def_id = tcx.local_parent(parent_def_id);
}
// FIXME(-Znext-solver): We probably should use `DefiningAnchor::Error`
// FIXME(-Znext-solver): We probably should use `DefiningAnchor::Bind(&[])`
// and prepopulate this `InferCtxt` with known opaque values, rather than
// using the `Bind` anchor here. For now it's fine.
let infcx = tcx
.infer_ctxt()
.with_next_trait_solver(next_trait_solver)
.with_opaque_type_inference(DefiningAnchor::Bind(parent_def_id))
.with_opaque_type_inference(DefiningAnchor::bind(tcx, parent_def_id))
.build();
let ocx = ObligationCtxt::new(&infcx);
let identity_args = GenericArgs::identity_for_item(tcx, def_id);
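Several call sites in this merge switch from building the `DefiningAnchor::Bind` variant directly to calling a `DefiningAnchor::bind(tcx, ...)` constructor, and the updated FIXME suggests that `Bind` now carries a list of opaque types rather than a single item. The sketch below is an assumption about what that constructor does, written as a free function for illustration; the import paths and the `opaque_types_defined_by` query are assumptions, not taken from this diff.

```rust
use rustc_hir::def_id::LocalDefId;
use rustc_middle::traits::DefiningAnchor;
use rustc_middle::ty::TyCtxt;

// Assumed behaviour: the anchor is populated from a query rather than by the caller,
// which would explain why every updated call site now also passes `tcx`.
fn bind_sketch<'tcx>(tcx: TyCtxt<'tcx>, item: LocalDefId) -> DefiningAnchor<'tcx> {
    DefiningAnchor::Bind(tcx.opaque_types_defined_by(item))
}
```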

View File

@ -1,6 +1,6 @@
use rustc_errors::{
codes::*, AddToDiagnostic, Diag, DiagCtxt, EmissionGuarantee, IntoDiagnostic, Level, MultiSpan,
SingleLabelManySpans, SubdiagMessageOp,
codes::*, Diag, DiagCtxt, Diagnostic, EmissionGuarantee, Level, MultiSpan,
SingleLabelManySpans, SubdiagMessageOp, Subdiagnostic,
};
use rustc_macros::{Diagnostic, Subdiagnostic};
use rustc_span::{symbol::Ident, Span, Symbol};
@ -425,9 +425,9 @@ pub(crate) struct EnvNotDefinedWithUserMessage {
}
// Hand-written implementation to support custom user messages.
impl<'a, G: EmissionGuarantee> IntoDiagnostic<'a, G> for EnvNotDefinedWithUserMessage {
impl<'a, G: EmissionGuarantee> Diagnostic<'a, G> for EnvNotDefinedWithUserMessage {
#[track_caller]
fn into_diagnostic(self, dcx: &'a DiagCtxt, level: Level) -> Diag<'a, G> {
fn into_diag(self, dcx: &'a DiagCtxt, level: Level) -> Diag<'a, G> {
#[expect(
rustc::untranslatable_diagnostic,
reason = "cannot translate user-provided messages"
@ -589,8 +589,8 @@ pub(crate) struct FormatUnusedArg {
// Allow the singular form to be a subdiagnostic of the multiple-unused
// form of diagnostic.
impl AddToDiagnostic for FormatUnusedArg {
fn add_to_diagnostic_with<G: EmissionGuarantee, F: SubdiagMessageOp<G>>(
impl Subdiagnostic for FormatUnusedArg {
fn add_to_diag_with<G: EmissionGuarantee, F: SubdiagMessageOp<G>>(
self,
diag: &mut Diag<'_, G>,
f: F,
@ -785,8 +785,8 @@ pub(crate) struct AsmClobberNoReg {
pub(crate) clobbers: Vec<Span>,
}
impl<'a, G: EmissionGuarantee> IntoDiagnostic<'a, G> for AsmClobberNoReg {
fn into_diagnostic(self, dcx: &'a DiagCtxt, level: Level) -> Diag<'a, G> {
impl<'a, G: EmissionGuarantee> Diagnostic<'a, G> for AsmClobberNoReg {
fn into_diag(self, dcx: &'a DiagCtxt, level: Level) -> Diag<'a, G> {
// eager translation as `span_labels` takes `AsRef<str>`
let lbl1 = dcx.eagerly_translate_to_string(
crate::fluent_generated::builtin_macros_asm_clobber_abi,

View File

@ -8,6 +8,7 @@
rustc_attrs,
transparent_unions,
auto_traits,
freeze_impls,
thread_local
)]
#![no_core]

View File

@ -115,14 +115,12 @@ pub(crate) fn maybe_create_entry_wrapper(
termination_trait,
)
.unwrap();
let report = Instance::resolve(
let report = Instance::expect_resolve(
tcx,
ParamEnv::reveal_all(),
report.def_id,
tcx.mk_args(&[GenericArg::from(main_ret_ty)]),
)
.unwrap()
.unwrap()
.polymorphize(tcx);
let report_name = tcx.symbol_name(report).name;
@ -142,14 +140,12 @@ pub(crate) fn maybe_create_entry_wrapper(
}
} else if is_main_fn {
let start_def_id = tcx.require_lang_item(LangItem::Start, None);
let start_instance = Instance::resolve(
let start_instance = Instance::expect_resolve(
tcx,
ParamEnv::reveal_all(),
start_def_id,
tcx.mk_args(&[main_ret_ty.into()]),
)
.unwrap()
.unwrap()
.polymorphize(tcx);
let start_func_id = import_function(tcx, m, start_instance);
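The entry-point shim now calls `Instance::expect_resolve` instead of `Instance::resolve(...).unwrap().unwrap()`. In effect, the two unwraps move into the helper; the function below is a hedged sketch of that relationship, not the real `rustc_middle` source, which presumably produces a nicer ICE message on failure.

```rust
use rustc_hir::def_id::DefId;
use rustc_middle::ty::{GenericArgsRef, Instance, ParamEnv, TyCtxt};

// What `expect_resolve` amounts to at these call sites: resolve, then insist that
// resolution neither errored nor stayed generic.
fn expect_resolve_sketch<'tcx>(
    tcx: TyCtxt<'tcx>,
    param_env: ParamEnv<'tcx>,
    def_id: DefId,
    args: GenericArgsRef<'tcx>,
) -> Instance<'tcx> {
    Instance::resolve(tcx, param_env, def_id, args)
        .expect("instance resolution hit a compile error") // outer `Result`
        .expect("instance is still generic")                // inner `Option`
}
```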

View File

@ -19,8 +19,8 @@ jobs:
fail-fast: false
matrix:
libgccjit_version:
- { gcc: "libgccjit.so", artifacts_branch: "master" }
- { gcc: "libgccjit_without_int128.so", artifacts_branch: "master-without-128bit-integers" }
- { gcc: "gcc-13.deb" }
- { gcc: "gcc-13-without-int128.deb" }
commands: [
"--mini-tests",
"--std-tests",
@ -32,60 +32,39 @@ jobs:
"--extended-regex-tests",
"--test-successful-rustc --nb-parts 2 --current-part 0",
"--test-successful-rustc --nb-parts 2 --current-part 1",
"--projects",
]
steps:
- uses: actions/checkout@v3
# `rustup show` installs from rust-toolchain.toml
- name: Setup rust toolchain
run: rustup show
- name: Setup rust cache
uses: Swatinem/rust-cache@v2
- name: Install packages
# `llvm-14-tools` is needed to install the `FileCheck` binary which is used for asm tests.
run: sudo apt-get install ninja-build ripgrep llvm-14-tools
- name: Install rustfmt
run: rustup component add rustfmt
- name: Download artifact
uses: dawidd6/action-download-artifact@v2
with:
workflow: main.yml
name: gcc-13
path: gcc-13
repo: antoyo/gcc
branch: ${{ matrix.libgccjit_version.artifacts_branch }}
event: push
search_artifacts: true # Because, instead, the action only check the last job ran and that won't work since we want multiple artifacts.
run: curl -LO https://github.com/antoyo/gcc/releases/latest/download/${{ matrix.libgccjit_version.gcc }}
- name: Setup path to libgccjit
run: |
sudo dpkg --force-overwrite -i gcc-13/gcc-13.deb
echo /usr/lib/ > gcc_path
sudo dpkg --force-overwrite -i ${{ matrix.libgccjit_version.gcc }}
echo 'gcc-path = "/usr/lib/"' > config.toml
- name: Set env
run: |
echo "LIBRARY_PATH=$(cat gcc_path)" >> $GITHUB_ENV
echo "LD_LIBRARY_PATH=$(cat gcc_path)" >> $GITHUB_ENV
echo "workspace="$GITHUB_WORKSPACE >> $GITHUB_ENV
- name: Cache cargo installed crates
uses: actions/cache@v3
with:
path: ~/.cargo/bin
key: cargo-installed-crates2-ubuntu-latest
- name: Cache cargo registry
uses: actions/cache@v3
with:
path: ~/.cargo/registry
key: ${{ runner.os }}-cargo-registry2-${{ hashFiles('**/Cargo.lock') }}
- name: Cache cargo index
uses: actions/cache@v3
with:
path: ~/.cargo/git
key: ${{ runner.os }}-cargo-index-${{ hashFiles('**/Cargo.lock') }}
- name: Cache cargo target dir
uses: actions/cache@v3
with:
path: target
key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('rust-toolchain') }}
echo "LIBRARY_PATH=/usr/lib" >> $GITHUB_ENV
echo "LD_LIBRARY_PATH=/usr/lib" >> $GITHUB_ENV
#- name: Cache rust repository
## We only clone the rust repository for rustc tests
@ -99,11 +78,9 @@ jobs:
- name: Build
run: |
./y.sh prepare --only-libcore
# TODO: remove --features master when it is back to the default.
./y.sh build --features master
# TODO: remove --features master when it is back to the default.
cargo test --features master
./clean_all.sh
./y.sh build
cargo test
./y.sh clean all
- name: Prepare dependencies
run: |
@ -111,23 +88,27 @@ jobs:
git config --global user.name "User"
./y.sh prepare
# Compile is a separate step, as the actions-rs/cargo action supports error annotations
- name: Compile
uses: actions-rs/cargo@v1.0.3
with:
command: build
args: --release
- name: Add more failing tests because the sysroot is not compiled with LTO
run: cat failing-non-lto-tests.txt >> failing-ui-tests.txt
run: cat tests/failing-non-lto-tests.txt >> tests/failing-ui-tests.txt
- name: Run tests
run: |
# TODO: remove --features master when it is back to the default.
./test.sh --features master --release --clean --build-sysroot ${{ matrix.commands }}
./y.sh test --release --clean --build-sysroot ${{ matrix.commands }}
- name: Check formatting
run: cargo fmt -- --check
duplicates:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- run: python tools/check_intrinsics_duplicates.py
build_system:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Test build system
run: |
cd build_system
cargo test

View File

@ -21,14 +21,11 @@ jobs:
libgccjit_version:
- gcc: "libgccjit.so"
artifacts_branch: "master"
# TODO: switch back to --no-default-features in the case of libgccjit 12 when the default is to enable
# master again.
extra: "--features master"
- gcc: "libgccjit_without_int128.so"
artifacts_branch: "master-without-128bit-integers"
extra: "--features master"
- gcc: "libgccjit12.so"
artifacts_branch: "gcc12"
extra: "--no-default-features"
# FIXME(antoyo): we need to set GCC_EXEC_PREFIX so that the linker can find the linker plugin.
# Not sure why it's not found otherwise.
env_extra: "TEST_FLAGS='-Cpanic=abort -Zpanic-abort-tests' GCC_EXEC_PREFIX=/usr/lib/gcc/"
@ -36,6 +33,13 @@ jobs:
steps:
- uses: actions/checkout@v3
# `rustup show` installs from rust-toolchain.toml
- name: Setup rust toolchain
run: rustup show
- name: Setup rust cache
uses: Swatinem/rust-cache@v2
- name: Install packages
run: sudo apt-get install ninja-build ripgrep
@ -45,56 +49,27 @@ jobs:
- name: Setup path to libgccjit
if: matrix.libgccjit_version.gcc == 'libgccjit12.so'
run: echo /usr/lib/gcc/x86_64-linux-gnu/12 > gcc_path
run: |
echo 'gcc-path = "/usr/lib/gcc/x86_64-linux-gnu/12"' > config.toml
echo "LIBRARY_PATH=/usr/lib/gcc/x86_64-linux-gnu/12" >> $GITHUB_ENV
echo "LD_LIBRARY_PATH=/usr/lib/gcc/x86_64-linux-gnu/12" >> $GITHUB_ENV
- name: Download artifact
if: matrix.libgccjit_version.gcc != 'libgccjit12.so'
uses: dawidd6/action-download-artifact@v2
with:
workflow: main.yml
name: gcc-13
path: gcc-13
repo: antoyo/gcc
branch: ${{ matrix.libgccjit_version.artifacts_branch }}
event: push
search_artifacts: true # Because, instead, the action only check the last job ran and that won't work since we want multiple artifacts.
run: curl -LO https://github.com/antoyo/gcc/releases/latest/download/gcc-13.deb
- name: Setup path to libgccjit
if: matrix.libgccjit_version.gcc != 'libgccjit12.so'
run: |
sudo dpkg --force-overwrite -i gcc-13/gcc-13.deb
echo /usr/lib/ > gcc_path
sudo dpkg --force-overwrite -i gcc-13.deb
echo 'gcc-path = "/usr/lib"' > config.toml
echo "LIBRARY_PATH=/usr/lib" >> $GITHUB_ENV
echo "LD_LIBRARY_PATH=/usr/lib" >> $GITHUB_ENV
- name: Set env
run: |
echo "LIBRARY_PATH=$(cat gcc_path)" >> $GITHUB_ENV
echo "LD_LIBRARY_PATH=$(cat gcc_path)" >> $GITHUB_ENV
echo "workspace="$GITHUB_WORKSPACE >> $GITHUB_ENV
- name: Cache cargo installed crates
uses: actions/cache@v3
with:
path: ~/.cargo/bin
key: cargo-installed-crates2-ubuntu-latest
- name: Cache cargo registry
uses: actions/cache@v3
with:
path: ~/.cargo/registry
key: ${{ runner.os }}-cargo-registry2-${{ hashFiles('**/Cargo.lock') }}
- name: Cache cargo index
uses: actions/cache@v3
with:
path: ~/.cargo/git
key: ${{ runner.os }}-cargo-index-${{ hashFiles('**/Cargo.lock') }}
- name: Cache cargo target dir
uses: actions/cache@v3
with:
path: target
key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('rust-toolchain') }}
#- name: Cache rust repository
#uses: actions/cache@v3
#id: cache-rust-repository
@ -115,18 +90,11 @@ jobs:
if: matrix.libgccjit_version.gcc != 'libgccjit12.so'
run: ./y.sh prepare
# Compile is a separate step, as the actions-rs/cargo action supports error annotations
- name: Compile
uses: actions-rs/cargo@v1.0.3
with:
command: build
args: --release
- name: Add more failing tests because the sysroot is not compiled with LTO
run: cat failing-non-lto-tests.txt >> failing-ui-tests.txt
run: cat tests/failing-non-lto-tests.txt >> tests/failing-ui-tests.txt
- name: Run tests
id: tests
run: |
${{ matrix.libgccjit_version.env_extra }} ./test.sh --release --clean --build-sysroot --test-failing-rustc ${{ matrix.libgccjit_version.extra }} | tee output_log
${{ matrix.libgccjit_version.env_extra }} ./y.sh test --release --clean --build-sysroot --test-failing-rustc ${{ matrix.libgccjit_version.extra }} | tee output_log
rg --text "test result" output_log >> $GITHUB_STEP_SUMMARY

View File

@ -28,9 +28,6 @@ jobs:
# FIXME: re-enable asm tests when GCC can emit in the right syntax.
# "--asm-tests",
"--test-libcore",
"--extended-rand-tests",
"--extended-regex-example-tests",
"--extended-regex-tests",
"--test-successful-rustc --nb-parts 2 --current-part 0",
"--test-successful-rustc --nb-parts 2 --current-part 1",
]
@ -38,42 +35,25 @@ jobs:
steps:
- uses: actions/checkout@v3
# `rustup show` installs from rust-toolchain.toml
- name: Setup rust toolchain
run: rustup show
- name: Setup rust cache
uses: Swatinem/rust-cache@v2
- name: Install packages
# `llvm-14-tools` is needed to install the `FileCheck` binary which is used for asm tests.
run: sudo apt-get install ninja-build ripgrep llvm-14-tools libgccjit-12-dev
- name: Setup path to libgccjit
run: echo /usr/lib/gcc/x86_64-linux-gnu/12 > gcc_path
run: echo 'gcc-path = "/usr/lib/gcc/x86_64-linux-gnu/12"' > config.toml
- name: Set env
run: |
echo "LIBRARY_PATH=$(cat gcc_path)" >> $GITHUB_ENV
echo "LD_LIBRARY_PATH=$(cat gcc_path)" >> $GITHUB_ENV
echo "workspace="$GITHUB_WORKSPACE >> $GITHUB_ENV
- name: Cache cargo installed crates
uses: actions/cache@v3
with:
path: ~/.cargo/bin
key: cargo-installed-crates2-ubuntu-latest
- name: Cache cargo registry
uses: actions/cache@v3
with:
path: ~/.cargo/registry
key: ${{ runner.os }}-cargo-registry2-${{ hashFiles('**/Cargo.lock') }}
- name: Cache cargo index
uses: actions/cache@v3
with:
path: ~/.cargo/git
key: ${{ runner.os }}-cargo-index-${{ hashFiles('**/Cargo.lock') }}
- name: Cache cargo target dir
uses: actions/cache@v3
with:
path: target
key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('rust-toolchain') }}
echo "LIBRARY_PATH=/usr/lib/gcc/x86_64-linux-gnu/12" >> $GITHUB_ENV
echo "LD_LIBRARY_PATH=/usr/lib/gcc/x86_64-linux-gnu/12" >> $GITHUB_ENV
#- name: Cache rust repository
## We only clone the rust repository for rustc tests
@ -89,7 +69,7 @@ jobs:
./y.sh prepare --only-libcore --libgccjit12-patches
./y.sh build --no-default-features --sysroot-panic-abort
cargo test --no-default-features
./clean_all.sh
./y.sh clean all
- name: Prepare dependencies
run: |
@ -97,19 +77,12 @@ jobs:
git config --global user.name "User"
./y.sh prepare --libgccjit12-patches
# Compile is a separate step, as the actions-rs/cargo action supports error annotations
- name: Compile
uses: actions-rs/cargo@v1.0.3
with:
command: build
args: --release
- name: Add more failing tests for GCC 12
run: cat failing-ui-tests12.txt >> failing-ui-tests.txt
run: cat tests/failing-ui-tests12.txt >> tests/failing-ui-tests.txt
- name: Add more failing tests because the sysroot is not compiled with LTO
run: cat failing-non-lto-tests.txt >> failing-ui-tests.txt
run: cat tests/failing-non-lto-tests.txt >> tests/failing-ui-tests.txt
- name: Run tests
run: |
./test.sh --release --clean --build-sysroot ${{ matrix.commands }} --no-default-features
./y.sh test --release --clean --build-sysroot ${{ matrix.commands }} --no-default-features

View File

@ -36,21 +36,22 @@ jobs:
]
steps:
- uses: actions/checkout@v3
# `rustup show` installs from rust-toolchain.toml
- name: Setup rust toolchain
run: rustup show
- name: Setup rust cache
uses: Swatinem/rust-cache@v2
- name: Install packages
run: |
sudo apt-get update
sudo apt-get install qemu qemu-user-static
- uses: actions/checkout@v3
- name: Download GCC artifact
uses: dawidd6/action-download-artifact@v2
with:
workflow: m68k.yml
name: gcc-m68k-13
repo: cross-cg-gcc-tools/cross-gcc
branch: master
event: push
- name: Download artifact
run: curl -LO https://github.com/cross-cg-gcc-tools/cross-gcc/releases/latest/download/gcc-m68k-13.deb
- name: Download VM artifact
uses: dawidd6/action-download-artifact@v2
@ -64,37 +65,13 @@ jobs:
- name: Setup path to libgccjit
run: |
sudo dpkg -i gcc-m68k-13.deb
echo /usr/lib/ > gcc_path
echo 'gcc-path = "/usr/lib/"' > config.toml
- name: Set env
run: |
echo "LIBRARY_PATH=$(cat gcc_path)" >> $GITHUB_ENV
echo "LD_LIBRARY_PATH=$(cat gcc_path)" >> $GITHUB_ENV
echo "workspace="$GITHUB_WORKSPACE >> $GITHUB_ENV
- name: Cache cargo installed crates
uses: actions/cache@v3
with:
path: ~/.cargo/bin
key: cargo-installed-crates2-ubuntu-latest
#- name: Cache cargo registry
#uses: actions/cache@v3
#with:
#path: ~/.cargo/registry
#key: ${{ runner.os }}-cargo-registry2-${{ hashFiles('**/Cargo.lock') }}
#- name: Cache cargo index
#uses: actions/cache@v3
#with:
#path: ~/.cargo/git
#key: ${{ runner.os }}-cargo-index-${{ hashFiles('**/Cargo.lock') }}
- name: Cache cargo target dir
uses: actions/cache@v3
with:
path: target
key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('rust-toolchain') }}
echo "LIBRARY_PATH=/usr/lib" >> $GITHUB_ENV
echo "LD_LIBRARY_PATH=/usr/lib" >> $GITHUB_ENV
#- name: Cache rust repository
## We only clone the rust repository for rustc tests
@ -114,11 +91,9 @@ jobs:
- name: Build
run: |
./y.sh prepare --only-libcore --cross
# TODO: remove --features master when it is back to the default.
./y.sh build --target-triple m68k-unknown-linux-gnu --features master
# TODO: remove --features master when it is back to the default.
CG_GCC_TEST_TARGET=m68k-unknown-linux-gnu cargo test --features master
./clean_all.sh
./y.sh build --target-triple m68k-unknown-linux-gnu
CG_GCC_TEST_TARGET=m68k-unknown-linux-gnu cargo test
./y.sh clean all
- name: Prepare dependencies
run: |
@ -126,17 +101,9 @@ jobs:
git config --global user.name "User"
./y.sh prepare --cross
# Compile is a separate step, as the actions-rs/cargo action supports error annotations
- name: Compile
uses: actions-rs/cargo@v1.0.3
with:
command: build
args: --release
- name: Add more failing tests because the sysroot is not compiled with LTO
run: cat failing-non-lto-tests.txt >> failing-ui-tests.txt
run: cat tests/failing-non-lto-tests.txt >> tests/failing-ui-tests.txt
- name: Run tests
run: |
# TODO: remove --features master when it is back to the default.
./test.sh --release --features master --clean --build-sysroot ${{ matrix.commands }}
./y.sh test --release --clean --build-sysroot ${{ matrix.commands }}

View File

@ -26,63 +26,36 @@ jobs:
steps:
- uses: actions/checkout@v3
# `rustup show` installs from rust-toolchain.toml
- name: Setup rust toolchain
run: rustup show
- name: Setup rust cache
uses: Swatinem/rust-cache@v2
- name: Install packages
run: sudo apt-get install ninja-build ripgrep
- name: Download artifact
uses: dawidd6/action-download-artifact@v2
with:
workflow: main.yml
name: gcc-13
path: gcc-13
repo: antoyo/gcc
branch: "master"
event: push
search_artifacts: true # Because, instead, the action only check the last job ran and that won't work since we want multiple artifacts.
run: curl -LO https://github.com/antoyo/gcc/releases/latest/download/gcc-13.deb
- name: Setup path to libgccjit
run: |
sudo dpkg --force-overwrite -i gcc-13/gcc-13.deb
echo /usr/lib/ > gcc_path
sudo dpkg --force-overwrite -i gcc-13.deb
echo 'gcc-path = "/usr/lib/"' > config.toml
- name: Set env
run: |
echo "LIBRARY_PATH=$(cat gcc_path)" >> $GITHUB_ENV
echo "LD_LIBRARY_PATH=$(cat gcc_path)" >> $GITHUB_ENV
echo "workspace="$GITHUB_WORKSPACE >> $GITHUB_ENV
- name: Cache cargo installed crates
uses: actions/cache@v3
with:
path: ~/.cargo/bin
key: cargo-installed-crates2-ubuntu-latest
- name: Cache cargo registry
uses: actions/cache@v3
with:
path: ~/.cargo/registry
key: ${{ runner.os }}-cargo-registry2-${{ hashFiles('**/Cargo.lock') }}
- name: Cache cargo index
uses: actions/cache@v3
with:
path: ~/.cargo/git
key: ${{ runner.os }}-cargo-index-${{ hashFiles('**/Cargo.lock') }}
- name: Cache cargo target dir
uses: actions/cache@v3
with:
path: target
key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('rust-toolchain') }}
echo "LIBRARY_PATH=/usr/lib" >> $GITHUB_ENV
echo "LD_LIBRARY_PATH=/usr/lib" >> $GITHUB_ENV
- name: Build
run: |
./y.sh prepare --only-libcore
# TODO: remove --features master when it is back to the default.
EMBED_LTO_BITCODE=1 ./y.sh build --release --release-sysroot --features master
# TODO: remove --features master when it is back to the default.
cargo test --features master
./clean_all.sh
EMBED_LTO_BITCODE=1 ./y.sh build --release --release-sysroot
cargo test
./y.sh clean all
- name: Prepare dependencies
run: |
@ -92,17 +65,9 @@ jobs:
# FIXME(antoyo): we cannot enable LTO for stdarch tests currently because of some failing LTO tests using proc-macros.
echo -n 'lto = "fat"' >> build_sysroot/Cargo.toml
# Compile is a separate step, as the actions-rs/cargo action supports error annotations
- name: Compile
uses: actions-rs/cargo@v1.0.3
with:
command: build
args: --release
- name: Add more failing tests because of undefined symbol errors (FIXME)
run: cat failing-lto-tests.txt >> failing-ui-tests.txt
run: cat tests/failing-lto-tests.txt >> tests/failing-ui-tests.txt
- name: Run tests
run: |
# TODO: remove --features master when it is back to the default.
EMBED_LTO_BITCODE=1 ./test.sh --release --clean --release-sysroot --build-sysroot ${{ matrix.commands }} --features master
EMBED_LTO_BITCODE=1 ./y.sh test --release --clean --release-sysroot --build-sysroot ${{ matrix.commands }}

View File

@ -26,6 +26,13 @@ jobs:
steps:
- uses: actions/checkout@v3
# `rustup show` installs from rust-toolchain.toml
- name: Setup rust toolchain
run: rustup show
- name: Setup rust cache
uses: Swatinem/rust-cache@v2
- name: Install packages
run: sudo apt-get install ninja-build ripgrep
@ -34,73 +41,39 @@ jobs:
run: |
mkdir intel-sde
cd intel-sde
dir=sde-external-9.14.0-2022-10-25-lin
dir=sde-external-9.33.0-2024-01-07-lin
file=$dir.tar.xz
wget https://downloadmirror.intel.com/751535/$file
wget https://downloadmirror.intel.com/813591/$file
tar xvf $file
sudo mkdir /usr/share/intel-sde
sudo cp -r $dir/* /usr/share/intel-sde
sudo ln -s /usr/share/intel-sde/sde /usr/bin/sde
sudo ln -s /usr/share/intel-sde/sde64 /usr/bin/sde64
- name: Download artifact
uses: dawidd6/action-download-artifact@v2
with:
workflow: main.yml
name: gcc-13
path: gcc-13
repo: antoyo/gcc
branch: "master"
event: push
search_artifacts: true # Because, instead, the action only check the last job ran and that won't work since we want multiple artifacts.
- name: Setup path to libgccjit
run: |
sudo dpkg --force-overwrite -i gcc-13/gcc-13.deb
echo /usr/lib/ > gcc_path
- name: Set env
run: |
echo "LIBRARY_PATH=$(cat gcc_path)" >> $GITHUB_ENV
echo "LD_LIBRARY_PATH=$(cat gcc_path)" >> $GITHUB_ENV
echo "workspace="$GITHUB_WORKSPACE >> $GITHUB_ENV
- name: Cache cargo installed crates
uses: actions/cache@v3
with:
path: ~/.cargo/bin
key: cargo-installed-crates2-ubuntu-latest
- name: Cache cargo registry
uses: actions/cache@v3
with:
path: ~/.cargo/registry
key: ${{ runner.os }}-cargo-registry2-${{ hashFiles('**/Cargo.lock') }}
- name: Cache cargo index
uses: actions/cache@v3
with:
path: ~/.cargo/git
key: ${{ runner.os }}-cargo-index-${{ hashFiles('**/Cargo.lock') }}
- name: Cache cargo target dir
uses: actions/cache@v3
with:
path: target
key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('rust-toolchain') }}
echo 'download-gccjit = true' > config.toml
- name: Build
run: |
./y.sh prepare --only-libcore
# TODO: remove `--features master` when it is back to the default.
./y.sh build --release --release-sysroot --features master
# TODO: remove --features master when it is back to the default.
cargo test --features master
./y.sh build --release --release-sysroot
- name: Set env (part 2)
run: |
# Set the `LD_LIBRARY_PATH` and `LIBRARY_PATH` env variables...
echo "LD_LIBRARY_PATH="$(./y.sh info | grep -v Using) >> $GITHUB_ENV
echo "LIBRARY_PATH="$(./y.sh info | grep -v Using) >> $GITHUB_ENV
- name: Build (part 2)
run: |
cargo test
- name: Clean
if: ${{ !matrix.cargo_runner }}
run: |
./clean_all.sh
./y.sh clean all
- name: Prepare dependencies
run: |
@ -108,29 +81,20 @@ jobs:
git config --global user.name "User"
./y.sh prepare
# Compile is a separate step, as the actions-rs/cargo action supports error annotations
- name: Compile
uses: actions-rs/cargo@v1.0.3
with:
command: build
# TODO: remove `--features master` when it is back to the default.
args: --release --features master
- name: Run tests
if: ${{ !matrix.cargo_runner }}
run: |
# TODO: remove `--features master` when it is back to the default.
./test.sh --release --clean --release-sysroot --build-sysroot --mini-tests --std-tests --test-libcore --features master
./y.sh test --release --clean --release-sysroot --build-sysroot --mini-tests --std-tests --test-libcore
- name: Run stdarch tests
if: ${{ !matrix.cargo_runner }}
run: |
cd build_sysroot/sysroot_src/library/stdarch/
CHANNEL=release TARGET=x86_64-unknown-linux-gnu CG_RUSTFLAGS="-Ainternal_features" ../../../../cargo.sh test
CHANNEL=release TARGET=x86_64-unknown-linux-gnu CG_RUSTFLAGS="-Ainternal_features" ../../../../y.sh cargo test
- name: Run stdarch tests
if: ${{ matrix.cargo_runner }}
run: |
cd build_sysroot/sysroot_src/library/stdarch/
# FIXME: these tests fail when the sysroot is compiled with LTO because of a missing symbol in proc-macro.
STDARCH_TEST_EVERYTHING=1 CHANNEL=release CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_RUNNER="${{ matrix.cargo_runner }}" TARGET=x86_64-unknown-linux-gnu CG_RUSTFLAGS="-Ainternal_features" ../../../../cargo.sh test -- --skip rtm --skip tbm --skip sse4a
STDARCH_TEST_EVERYTHING=1 CHANNEL=release CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_RUNNER="${{ matrix.cargo_runner }}" TARGET=x86_64-unknown-linux-gnu CG_RUSTFLAGS="-Ainternal_features" ../../../../y.sh cargo test -- --skip rtm --skip tbm --skip sse4a

View File

@ -10,15 +10,11 @@ perf.data.old
/build_sysroot/sysroot_src
/build_sysroot/Cargo.lock
/build_sysroot/test_target/Cargo.lock
/rust
/simple-raytracer
/regex
/rand
gimple*
*asm
res
test-backend
gcc_path
projects
benchmarks
tools/llvm-project
tools/llvmint
@ -26,3 +22,5 @@ tools/llvmint-2
# The `llvm` folder is generated by the `tools/generate_intrinsics.py` script to update intrinsics.
llvm
build_system/target
config.toml
build

View File

@ -8,3 +8,4 @@
!*gimple*
!*asm*
!.github
!config.toml

View File

@ -1 +1 @@
disable_all_formatting = true
use_small_heuristics = "Max"

View File

@ -23,6 +23,12 @@ version = "2.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635"
[[package]]
name = "boml"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85fdb93f04c73bff54305fa437ffea5449c41edcaadfe882f35836206b166ac5"
[[package]]
name = "cc"
version = "1.0.79"
@ -64,9 +70,9 @@ checksum = "6999dc1837253364c2ebb0704ba97994bd874e8f195d665c50b7548f6ea92764"
[[package]]
name = "fm"
version = "0.1.4"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "68fda3cff2cce84c19e5dfa5179a4b35d2c0f18b893f108002b8a6a54984acca"
checksum = "21bcf4db620a804cf7e9d84fbcb5d4ac83a8c43396203b2507d62ea31814dfd4"
dependencies = [
"regex",
]
@ -74,7 +80,7 @@ dependencies = [
[[package]]
name = "gccjit"
version = "1.0.0"
source = "git+https://github.com/antoyo/gccjit.rs#6e290f25b1d1edab5ae9ace486fd2dc8c08d6421"
source = "git+https://github.com/antoyo/gccjit.rs#9f8f67edc006d543b17529a001803ffece48349e"
dependencies = [
"gccjit_sys",
]
@ -82,7 +88,7 @@ dependencies = [
[[package]]
name = "gccjit_sys"
version = "0.0.1"
source = "git+https://github.com/antoyo/gccjit.rs#6e290f25b1d1edab5ae9ace486fd2dc8c08d6421"
source = "git+https://github.com/antoyo/gccjit.rs#9f8f67edc006d543b17529a001803ffece48349e"
dependencies = [
"libc",
]
@ -104,9 +110,9 @@ checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286"
[[package]]
name = "lang_tester"
version = "0.3.13"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "96bd995a092cac79868250589869b5a5d656b02a02bd74c8ebdc566dc7203090"
checksum = "9af8149dbb3ed7d8e529fcb141fe033b1c26ed54cbffc6762d3a86483c485d23"
dependencies = [
"fm",
"getopts",
@ -185,6 +191,7 @@ checksum = "436b050e76ed2903236f032a59761c1eb99e1b0aead2c257922771dab1fc8c78"
name = "rustc_codegen_gcc"
version = "0.1.0"
dependencies = [
"boml",
"gccjit",
"lang_tester",
"object",

View File

@ -19,6 +19,7 @@ harness = false
[features]
master = ["gccjit/master"]
default = ["master"]
[dependencies]
gccjit = { git = "https://github.com/antoyo/gccjit.rs" }
@ -35,8 +36,9 @@ smallvec = { version = "1.6.1", features = ["union", "may_dangle"] }
tempfile = "3.7.1"
[dev-dependencies]
lang_tester = "0.3.9"
lang_tester = "0.8.0"
tempfile = "3.1.0"
boml = "0.3.1"
[profile.dev]
# By compiling dependencies with optimizations, performing tests gets much faster.
@ -55,3 +57,6 @@ debug = false
[profile.release.build-override]
opt-level = 0
debug = false
[package.metadata.rust-analyzer]
rustc_private = true

View File

@ -17,6 +17,18 @@ A secondary goal is to check if using the gcc backend will provide any run-time
**This requires a patched libgccjit in order to work.
You need to use my [fork of gcc](https://github.com/antoyo/gcc) which already includes these patches.**
```bash
$ cp config.example.toml config.toml
```
If you don't need to test GCC patches you wrote in our GCC fork, then the default configuration should
be all you need. You can update `rustc_codegen_gcc` without worrying about GCC.
### Building with your own GCC version
If you wrote a patch for GCC and want to test it with this backend, you will need
to do a few more things.
To build it (most of these instructions come from [here](https://gcc.gnu.org/onlinedocs/jit/internals/index.html), so don't hesitate to take a look there if you encounter an issue):
```bash
@ -49,23 +61,32 @@ $ make check-jit
$ make check-jit RUNTESTFLAGS="-v -v -v jit.exp=jit.dg/test-asm.cc"
```
**Put the path to your custom build of libgccjit in the file `gcc_path`.**
**Put the path to your custom build of libgccjit in the file `config.toml`.**
You now need to set the `gcc-path` value in `config.toml` with the result of this command:
```bash
$ dirname $(readlink -f `find . -name libgccjit.so`) > gcc_path
$ dirname $(readlink -f `find . -name libgccjit.so`)
```
and to comment out the `download-gccjit` setting:
```toml
gcc-path = "[MY PATH]"
# download-gccjit = true
```
Then you can run commands like this:
```bash
$ ./y.sh prepare # download and patch sysroot src and install hyperfine for benchmarking
$ LIBRARY_PATH=$(cat gcc_path) LD_LIBRARY_PATH=$(cat gcc_path) ./y.sh build --release
$ ./y.sh build --release
```
To run the tests:
```bash
$ ./test.sh --release
$ ./y.sh test --release
```
## Usage
@ -79,10 +100,10 @@ export CG_GCCJIT_DIR=[the full path to rustc_codegen_gcc]
### Cargo
```bash
$ CHANNEL="release" $CG_GCCJIT_DIR/cargo.sh run
$ CHANNEL="release" $CG_GCCJIT_DIR/y.sh cargo run
```
If you compiled cg_gccjit in debug mode (aka you didn't pass `--release` to `./test.sh`) you should use `CHANNEL="debug"` instead or omit `CHANNEL="release"` completely.
If you compiled cg_gccjit in debug mode (aka you didn't pass `--release` to `./y.sh test`) you should use `CHANNEL="debug"` instead or omit `CHANNEL="release"` completely.
### LTO
@ -100,7 +121,7 @@ error: failed to copy bitcode to object file: No such file or directory (os erro
> You should prefer using the Cargo method.
```bash
$ LIBRARY_PATH=$(cat gcc_path) LD_LIBRARY_PATH=$(cat gcc_path) rustc +$(cat $CG_GCCJIT_DIR/rust-toolchain | grep 'channel' | cut -d '=' -f 2 | sed 's/"//g' | sed 's/ //g') -Cpanic=abort -Zcodegen-backend=$CG_GCCJIT_DIR/target/release/librustc_codegen_gcc.so --sysroot $CG_GCCJIT_DIR/build_sysroot/sysroot my_crate.rs
$ LIBRARY_PATH="[gcc-path value]" LD_LIBRARY_PATH="[gcc-path value]" rustc +$(cat $CG_GCCJIT_DIR/rust-toolchain | grep 'channel' | cut -d '=' -f 2 | sed 's/"//g' | sed 's/ //g') -Cpanic=abort -Zcodegen-backend=$CG_GCCJIT_DIR/target/release/librustc_codegen_gcc.so --sysroot $CG_GCCJIT_DIR/build_sysroot/sysroot my_crate.rs
```
## Env vars
@ -118,221 +139,19 @@ $ LIBRARY_PATH=$(cat gcc_path) LD_LIBRARY_PATH=$(cat gcc_path) rustc +$(cat $CG_
<dd>Dump a C-like representation to /tmp/gccjit_dumps and enable debug info in order to debug this C-like representation.</dd>
</dl>
## Extra documentation
More specific documentation is available in the [`doc`](./doc) folder:
* [Common errors](./doc/errors.md)
* [Debugging GCC LTO](./doc/debugging-gcc-lto.md)
* [Debugging libgccjit](./doc/debugging-libgccjit.md)
* [Git subtree sync](./doc/subtree.md)
* [List of useful commands](./doc/tips.md)
* [Send a patch to GCC](./doc/sending-gcc-patch.md)
## Licensing
While this crate is licensed under a dual Apache/MIT license, it links to `libgccjit` which is under the GPLv3+ and thus, the resulting toolchain (rustc + GCC codegen) will need to be released under the GPL license.
However, programs compiled with `rustc_codegen_gcc` do not need to be released under a GPL license.
## Debugging
Sometimes, libgccjit will crash and output an error like this:
```
during RTL pass: expand
libgccjit.so: error: in expmed_mode_index, at expmed.h:249
0x7f0da2e61a35 expmed_mode_index
../../../gcc/gcc/expmed.h:249
0x7f0da2e61aa4 expmed_op_cost_ptr
../../../gcc/gcc/expmed.h:271
0x7f0da2e620dc sdiv_cost_ptr
../../../gcc/gcc/expmed.h:540
0x7f0da2e62129 sdiv_cost
../../../gcc/gcc/expmed.h:558
0x7f0da2e73c12 expand_divmod(int, tree_code, machine_mode, rtx_def*, rtx_def*, rtx_def*, int)
../../../gcc/gcc/expmed.c:4335
0x7f0da2ea1423 expand_expr_real_2(separate_ops*, rtx_def*, machine_mode, expand_modifier)
../../../gcc/gcc/expr.c:9240
0x7f0da2cd1a1e expand_gimple_stmt_1
../../../gcc/gcc/cfgexpand.c:3796
0x7f0da2cd1c30 expand_gimple_stmt
../../../gcc/gcc/cfgexpand.c:3857
0x7f0da2cd90a9 expand_gimple_basic_block
../../../gcc/gcc/cfgexpand.c:5898
0x7f0da2cdade8 execute
../../../gcc/gcc/cfgexpand.c:6582
```
To see the code which causes this error, call the following function:
```c
gcc_jit_context_dump_to_file(ctxt, "/tmp/output.c", 1 /* update_locations */)
```
This will create a C-like file and add the locations into the IR pointing to this C file.
Then, rerun the program and it will output the location in the second line:
```
libgccjit.so: /tmp/something.c:61322:0: error: in expmed_mode_index, at expmed.h:249
```
Or add a breakpoint to `add_error` in gdb and print the line number using:
```
p loc->m_line
p loc->m_filename->m_buffer
```
To print a debug representation of a tree:
```c
debug_tree(expr);
```
(defined in print-tree.h)
To print a debug representation of a gimple struct:
```c
debug_gimple_stmt(gimple_struct)
```
To get the `rustc` command to run in `gdb`, add the `--verbose` flag to `cargo build`.
To have the correct file paths in `gdb` instead of `/usr/src/debug/gcc/libstdc++-v3/libsupc++/eh_personality.cc`,
it may help to call the following at the beginning of gdb:
```
set substitute-path /usr/src/debug/gcc /path/to/gcc-repo/gcc
```
TODO(antoyo): but that's not what I remember I was doing.
### `failed to build archive` error
When you get this error:
```
error: failed to build archive: failed to open object file: No such file or directory (os error 2)
```
That can be caused by compiling with `lto = "fat"` without having compiled the sysroot with LTO.
(Not sure if that's the reason, since I cannot reproduce it anymore. Maybe it happened when forgetting to set `FAT_LTO`.)
### ld: cannot find crtbegin.o
When compiling an executable with libgccjit, if you set the `*LIBRARY_PATH` variables to the install directory, you will get the following errors:
```
ld: cannot find crtbegin.o: No such file or directory
ld: cannot find -lgcc: No such file or directory
ld: cannot find -lgcc: No such file or directory
libgccjit.so: error: error invoking gcc driver
```
To fix this, set the variables to `gcc-build/build/gcc`.
### How to debug GCC LTO
Run the command with `-v -save-temps`, then extract the `lto1` line from the output and run it under the debugger.
### How to send arguments to the GCC linker
```
CG_RUSTFLAGS="-Clink-args=-save-temps -v" ../cargo.sh build
```
### How to see the personality functions in the asm dump
```
CG_RUSTFLAGS="-Clink-arg=-save-temps -v -Clink-arg=-dA" ../cargo.sh build
```
### How to see the LLVM IR for a sysroot crate
```
cargo build -v --target x86_64-unknown-linux-gnu -Zbuild-std
# Take the command from the output and add --emit=llvm-ir
```
### To prevent the linker from unmangling symbols
Run with:
```
COLLECT_NO_DEMANGLE=1
```
### How to use a custom-built rustc
* Build the stage2 compiler (`rustup toolchain link debug-current build/x86_64-unknown-linux-gnu/stage2`).
* Clean and rebuild the codegen with `debug-current` in the file `rust-toolchain`.
### How to install a forked git-subtree
Using git-subtree with `rustc` requires a patched git to make it work.
The PR that is needed is [here](https://github.com/gitgitgadget/git/pull/493).
Use the following instructions to install it:
```bash
git clone git@github.com:tqc/git.git
cd git
git checkout tqc/subtree
make
make install
cd contrib/subtree
make
cp git-subtree ~/bin
```
Then, do a sync with this command:
```bash
PATH="$HOME/bin:$PATH" ~/bin/git-subtree push -P compiler/rustc_codegen_gcc/ ../rustc_codegen_gcc/ sync_branch_name
cd ../rustc_codegen_gcc
git checkout master
git pull
git checkout sync_branch_name
git merge master
```
To send the changes to the rust repo:
```bash
cd ../rust
git pull origin master
git checkout -b subtree-update_cg_gcc_YYYY-MM-DD
PATH="$HOME/bin:$PATH" ~/bin/git-subtree pull --prefix=compiler/rustc_codegen_gcc/ https://github.com/rust-lang/rustc_codegen_gcc.git master
git push
```
TODO: write a script that does the above.
https://rust-lang.zulipchat.com/#narrow/stream/301329-t-devtools/topic/subtree.20madness/near/258877725
### How to use [mem-trace](https://github.com/antoyo/mem-trace)
`rustc` needs to be built without `jemalloc` so that `mem-trace` can overload `malloc`: since `jemalloc` is linked statically, an `LD_PRELOAD`-ed library won't have a chance to intercept the calls to `malloc`.
### How to generate GIMPLE
If you need to check what gccjit is generating (GIMPLE), then take a look at how to
generate it in [gimple.md](./doc/gimple.md).
### How to build a cross-compiling libgccjit
#### Building libgccjit
* Follow the instructions on [this repo](https://github.com/cross-cg-gcc-tools/cross-gcc).
#### Configuring rustc_codegen_gcc
* Run `./y.sh prepare --cross` so that the sysroot is patched for the cross-compiling case.
* Set the path to the cross-compiling libgccjit in `gcc_path`.
* Make sure you have the linker for your target (for instance `m68k-unknown-linux-gnu-gcc`) in your `$PATH`. Currently, the linker name is hardcoded as being `$TARGET-gcc`. Specify the target when building the sysroot: `./y.sh build --target-triple m68k-unknown-linux-gnu`.
* Build your project by specifying the target: `OVERWRITE_TARGET_TRIPLE=m68k-unknown-linux-gnu ../cargo.sh build --target m68k-unknown-linux-gnu`.
If the target is not yet supported by the Rust compiler, create a [target specification file](https://docs.rust-embedded.org/embedonomicon/custom-target.html) (note that the `arch` specified in this file must be supported by the rust compiler).
Then, you can use it the following way:
* Add the target specification file using `--target` as an **absolute** path to build the sysroot: `./y.sh build --target-triple m68k-unknown-linux-gnu --target $(pwd)/m68k-unknown-linux-gnu.json`
* Build your project by specifying the target specification file: `OVERWRITE_TARGET_TRIPLE=m68k-unknown-linux-gnu ../cargo.sh build --target path/to/m68k-unknown-linux-gnu.json`.
If you get the following error:
```
/usr/bin/ld: unrecognised emulation mode: m68kelf
```
Make sure you set `gcc_path` to the install directory.

View File

@ -0,0 +1,6 @@
// TODO: remove this file and deps/libLLVM-18-rust-1.78.0-nightly.so when
// https://github.com/rust-lang/rust/pull/121967 is merged.
fn main() {
println!("cargo:rerun-if-changed=deps/libLLVM-18-rust-1.78.0-nightly.so");
println!("cargo:rustc-link-search=deps");
}

View File

@ -1,34 +0,0 @@
#!/usr/bin/env bash
# Requires the CHANNEL env var to be set to `debug` or `release.`
set -e
cd $(dirname "$0")
pushd ../
source ./config.sh
popd
# Cleanup for previous run
# v Clean target dir except for build scripts and incremental cache
rm -r target/*/{debug,release}/{build,deps,examples,libsysroot*,native} 2>/dev/null || true
rm Cargo.lock test_target/Cargo.lock 2>/dev/null || true
rm -r sysroot/ 2>/dev/null || true
# Build libs
export RUSTFLAGS="$RUSTFLAGS -Z force-unstable-if-unmarked"
if [[ "$1" == "--release" ]]; then
sysroot_channel='release'
RUSTFLAGS="$RUSTFLAGS -Zmir-opt-level=3" cargo build --target $TARGET_TRIPLE --release
else
sysroot_channel='debug'
cargo build --target $TARGET_TRIPLE
fi
# Copy files to sysroot
mkdir -p sysroot/lib/rustlib/$TARGET_TRIPLE/lib/
cp -r target/$TARGET_TRIPLE/$sysroot_channel/deps/* sysroot/lib/rustlib/$TARGET_TRIPLE/lib/
# Copy the source files to the sysroot (Rust for Linux needs this).
source_dir=sysroot/lib/rustlib/src/rust
mkdir -p $source_dir
cp -r sysroot_src/library/ $source_dir

View File

@ -2,6 +2,15 @@
# It is not intended for manual editing.
version = 3
[[package]]
name = "boml"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85fdb93f04c73bff54305fa437ffea5449c41edcaadfe882f35836206b166ac5"
[[package]]
name = "y"
version = "0.1.0"
dependencies = [
"boml",
]

View File

@ -3,6 +3,9 @@ name = "y"
version = "0.1.0"
edition = "2021"
[dependencies]
boml = "0.3.1"
[[bin]]
name = "y"
path = "src/main.rs"

View File

@ -1,7 +1,5 @@
use crate::config::{set_config, ConfigInfo};
use crate::utils::{
get_gcc_path, run_command, run_command_with_output_and_env, walk_dir,
};
use crate::config::{Channel, ConfigInfo};
use crate::utils::{run_command, run_command_with_output_and_env, walk_dir};
use std::collections::HashMap;
use std::ffi::OsStr;
use std::fs;
@ -9,33 +7,18 @@ use std::path::Path;
#[derive(Default)]
struct BuildArg {
codegen_release_channel: bool,
sysroot_release_channel: bool,
sysroot_panic_abort: bool,
flags: Vec<String>,
gcc_path: String,
config_info: ConfigInfo,
}
impl BuildArg {
fn new() -> Result<Option<Self>, String> {
let gcc_path = get_gcc_path()?;
let mut build_arg = Self {
gcc_path,
..Default::default()
};
let mut build_arg = Self::default();
// We skip binary name and the `build` command.
let mut args = std::env::args().skip(2);
while let Some(arg) = args.next() {
match arg.as_str() {
"--release" => build_arg.codegen_release_channel = true,
"--release-sysroot" => build_arg.sysroot_release_channel = true,
"--no-default-features" => {
build_arg.flags.push("--no-default-features".to_string());
}
"--sysroot-panic-abort" => {
build_arg.sysroot_panic_abort = true;
},
"--features" => {
if let Some(arg) = args.next() {
build_arg.flags.push("--features".to_string());
@ -50,25 +33,11 @@ impl BuildArg {
Self::usage();
return Ok(None);
}
"--target-triple" => {
if args.next().is_some() {
// Handled in config.rs.
} else {
return Err(
"Expected a value after `--target-triple`, found nothing".to_string()
);
arg => {
if !build_arg.config_info.parse_argument(arg, &mut args)? {
return Err(format!("Unknown argument `{}`", arg));
}
}
"--target" => {
if args.next().is_some() {
// Handled in config.rs.
} else {
return Err(
"Expected a value after `--target`, found nothing".to_string()
);
}
}
arg => return Err(format!("Unknown argument `{}`", arg)),
}
}
Ok(Some(build_arg))
@ -79,29 +48,19 @@ impl BuildArg {
r#"
`build` command help:
--release : Build codegen in release mode
--release-sysroot : Build sysroot in release mode
--sysroot-panic-abort : Build the sysroot without unwinding support.
--no-default-features : Add `--no-default-features` flag
--features [arg] : Add a new feature [arg]
--target-triple [arg] : Set the target triple to [arg]
--help : Show this help
"#
)
--features [arg] : Add a new feature [arg]"#
);
ConfigInfo::show_usage();
println!(" --help : Show this help");
}
}
fn build_sysroot(
env: &mut HashMap<String, String>,
args: &BuildArg,
config: &ConfigInfo,
) -> Result<(), String> {
std::env::set_current_dir("build_sysroot")
.map_err(|error| format!("Failed to go to `build_sysroot` directory: {:?}", error))?;
pub fn build_sysroot(env: &HashMap<String, String>, config: &ConfigInfo) -> Result<(), String> {
let start_dir = Path::new("build_sysroot");
// Cleanup for previous run
// Clean target dir except for build scripts and incremental cache
let _ = walk_dir(
"target",
start_dir.join("target"),
|dir: &Path| {
for top in &["debug", "release"] {
let _ = fs::remove_dir_all(dir.join(top).join("build"));
@ -138,92 +97,117 @@ fn build_sysroot(
|_| Ok(()),
);
let _ = fs::remove_file("Cargo.lock");
let _ = fs::remove_file("test_target/Cargo.lock");
let _ = fs::remove_dir_all("sysroot");
let _ = fs::remove_file(start_dir.join("Cargo.lock"));
let _ = fs::remove_file(start_dir.join("test_target/Cargo.lock"));
let _ = fs::remove_dir_all(start_dir.join("sysroot"));
// Builds libs
let mut rustflags = env
.get("RUSTFLAGS")
.cloned()
.unwrap_or_default();
if args.sysroot_panic_abort {
let mut rustflags = env.get("RUSTFLAGS").cloned().unwrap_or_default();
if config.sysroot_panic_abort {
rustflags.push_str(" -Cpanic=abort -Zpanic-abort-tests");
}
env.insert(
"RUSTFLAGS".to_string(),
format!("{} -Zmir-opt-level=3", rustflags),
);
let channel = if args.sysroot_release_channel {
run_command_with_output_and_env(
&[
&"cargo",
&"build",
&"--target",
&config.target,
&"--release",
],
None,
Some(&env),
)?;
rustflags.push_str(" -Z force-unstable-if-unmarked");
if config.no_default_features {
rustflags.push_str(" -Csymbol-mangling-version=v0");
}
let mut env = env.clone();
let mut args: Vec<&dyn AsRef<OsStr>> = vec![&"cargo", &"build", &"--target", &config.target];
if config.no_default_features {
rustflags.push_str(" -Csymbol-mangling-version=v0");
args.push(&"--no-default-features");
}
let channel = if config.sysroot_release_channel {
rustflags.push_str(" -Zmir-opt-level=3");
args.push(&"--release");
"release"
} else {
run_command_with_output_and_env(
&[
&"cargo",
&"build",
&"--target",
&config.target,
],
None,
Some(env),
)?;
"debug"
};
env.insert("RUSTFLAGS".to_string(), rustflags);
run_command_with_output_and_env(&args, Some(start_dir), Some(&env))?;
// Copy files to sysroot
let sysroot_path = format!("sysroot/lib/rustlib/{}/lib/", config.target_triple);
fs::create_dir_all(&sysroot_path)
.map_err(|error| format!("Failed to create directory `{}`: {:?}", sysroot_path, error))?;
let sysroot_path = start_dir.join(format!("sysroot/lib/rustlib/{}/lib/", config.target_triple));
fs::create_dir_all(&sysroot_path).map_err(|error| {
format!(
"Failed to create directory `{}`: {:?}",
sysroot_path.display(),
error
)
})?;
let copier = |dir_to_copy: &Path| {
// FIXME: should not use shell command!
run_command(&[&"cp", &"-r", &dir_to_copy, &sysroot_path], None).map(|_| ())
};
walk_dir(
&format!("target/{}/{}/deps", config.target_triple, channel),
start_dir.join(&format!("target/{}/{}/deps", config.target_triple, channel)),
copier,
copier,
)?;
// Copy the source files to the sysroot (Rust for Linux needs this).
let sysroot_src_path = "sysroot/lib/rustlib/src/rust";
fs::create_dir_all(&sysroot_src_path)
.map_err(|error| format!("Failed to create directory `{}`: {:?}", sysroot_src_path, error))?;
run_command(&[&"cp", &"-r", &"sysroot_src/library/", &sysroot_src_path], None)?;
let sysroot_src_path = start_dir.join("sysroot/lib/rustlib/src/rust");
fs::create_dir_all(&sysroot_src_path).map_err(|error| {
format!(
"Failed to create directory `{}`: {:?}",
sysroot_src_path.display(),
error
)
})?;
run_command(
&[
&"cp",
&"-r",
&start_dir.join("sysroot_src/library/"),
&sysroot_src_path,
],
None,
)?;
Ok(())
}
fn build_codegen(args: &BuildArg) -> Result<(), String> {
fn build_codegen(args: &mut BuildArg) -> Result<(), String> {
let mut env = HashMap::new();
env.insert("LD_LIBRARY_PATH".to_string(), args.gcc_path.clone());
env.insert("LIBRARY_PATH".to_string(), args.gcc_path.clone());
env.insert(
"LD_LIBRARY_PATH".to_string(),
args.config_info.gcc_path.clone(),
);
env.insert(
"LIBRARY_PATH".to_string(),
args.config_info.gcc_path.clone(),
);
if args.config_info.no_default_features {
env.insert(
"RUSTFLAGS".to_string(),
"-Csymbol-mangling-version=v0".to_string(),
);
}
let mut command: Vec<&dyn AsRef<OsStr>> = vec![&"cargo", &"rustc"];
if args.codegen_release_channel {
if args.config_info.channel == Channel::Release {
command.push(&"--release");
env.insert("CHANNEL".to_string(), "release".to_string());
env.insert("CARGO_INCREMENTAL".to_string(), "1".to_string());
} else {
env.insert("CHANNEL".to_string(), "debug".to_string());
}
if args.config_info.no_default_features {
command.push(&"--no-default-features");
}
let flags = args.flags.iter().map(|s| s.as_str()).collect::<Vec<_>>();
for flag in &flags {
command.push(flag);
}
run_command_with_output_and_env(&command, None, Some(&env))?;
let config = set_config(&mut env, &[], Some(&args.gcc_path))?;
args.config_info.setup(&mut env, false)?;
// We voluntarily ignore the error.
let _ = fs::remove_dir_all("target/out");
@ -236,19 +220,16 @@ fn build_codegen(args: &BuildArg) -> Result<(), String> {
})?;
println!("[BUILD] sysroot");
build_sysroot(
&mut env,
args,
&config,
)?;
build_sysroot(&env, &args.config_info)?;
Ok(())
}
pub fn run() -> Result<(), String> {
let args = match BuildArg::new()? {
let mut args = match BuildArg::new()? {
Some(args) => args,
None => return Ok(()),
};
build_codegen(&args)?;
args.config_info.setup_gcc_path()?;
build_codegen(&mut args)?;
Ok(())
}

View File

@ -0,0 +1,114 @@
use crate::config::ConfigInfo;
use crate::utils::{
get_toolchain, run_command_with_output_and_env_no_err, rustc_toolchain_version_info,
rustc_version_info,
};
use std::collections::HashMap;
use std::ffi::OsStr;
use std::path::PathBuf;
fn args() -> Result<Option<Vec<String>>, String> {
// We skip the binary and the "cargo" option.
if let Some("--help") = std::env::args().skip(2).next().as_deref() {
usage();
return Ok(None);
}
let args = std::env::args().skip(2).collect::<Vec<_>>();
if args.is_empty() {
return Err(
"Expected at least one argument for `cargo` subcommand, found none".to_string(),
);
}
Ok(Some(args))
}
fn usage() {
println!(
r#"
`cargo` command help:
[args] : Arguments to be passed to the cargo command
--help : Show this help
"#
)
}
pub fn run() -> Result<(), String> {
let args = match args()? {
Some(a) => a,
None => return Ok(()),
};
// We first need to go to the original location to ensure that the config setup will go as
// expected.
let current_dir = std::env::current_dir()
.and_then(|path| path.canonicalize())
.map_err(|error| format!("Failed to get current directory path: {:?}", error))?;
let current_exe = std::env::current_exe()
.and_then(|path| path.canonicalize())
.map_err(|error| format!("Failed to get current exe path: {:?}", error))?;
let mut parent_dir = current_exe
.components()
.map(|comp| comp.as_os_str())
.collect::<Vec<_>>();
// We run this script from "build_system/target/release/y", so we need to remove these elements.
for to_remove in &["y", "release", "target", "build_system"] {
if parent_dir
.last()
.map(|part| part == to_remove)
.unwrap_or(false)
{
parent_dir.pop();
} else {
return Err(format!(
"Build script not executed from `build_system/target/release/y` (in path {})",
current_exe.display(),
));
}
}
let parent_dir = PathBuf::from(parent_dir.join(&OsStr::new("/")));
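// Illustrative note (not part of the original patch): at this point a binary path like
// `/path/to/rustc_codegen_gcc/build_system/target/release/y` has been reduced to the
// project root `/path/to/rustc_codegen_gcc`, which is where the local config files live.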
std::env::set_current_dir(&parent_dir).map_err(|error| {
format!(
"Failed to go to `{}` folder: {:?}",
parent_dir.display(),
error
)
})?;
let mut env: HashMap<String, String> = std::env::vars().collect();
ConfigInfo::default().setup(&mut env, false)?;
let toolchain = get_toolchain()?;
let toolchain_version = rustc_toolchain_version_info(&toolchain)?;
let default_version = rustc_version_info(None)?;
if toolchain_version != default_version {
println!(
"rustc_codegen_gcc is built for {} but the default rustc version is {}.",
toolchain_version.short, default_version.short,
);
println!("Using {}.", toolchain_version.short);
}
// We go back to the original folder since we now have set up everything we needed.
std::env::set_current_dir(&current_dir).map_err(|error| {
format!(
"Failed to go back to `{}` folder: {:?}",
current_dir.display(),
error
)
})?;
let rustflags = env.get("RUSTFLAGS").cloned().unwrap_or_default();
env.insert("RUSTDOCFLAGS".to_string(), rustflags);
let toolchain = format!("+{}", toolchain);
let mut command: Vec<&dyn AsRef<OsStr>> = vec![&"cargo", &toolchain];
for arg in &args {
command.push(arg);
}
if run_command_with_output_and_env_no_err(&command, None, Some(&env)).is_err() {
std::process::exit(1);
}
Ok(())
}

View File

@ -0,0 +1,82 @@
use crate::utils::{remove_file, run_command};
use std::fs::remove_dir_all;
use std::path::Path;
#[derive(Default)]
enum CleanArg {
/// `clean all`
All,
/// `clean ui-tests`
UiTests,
/// `clean --help`
#[default]
Help,
}
impl CleanArg {
fn new() -> Result<Self, String> {
// We skip the binary and the "clean" option.
for arg in std::env::args().skip(2) {
return match arg.as_str() {
"all" => Ok(Self::All),
"ui-tests" => Ok(Self::UiTests),
"--help" => Ok(Self::Help),
a => Err(format!("Unknown argument `{}`", a)),
};
}
Ok(Self::default())
}
}
fn usage() {
println!(
r#"
`clean` command help:
all : Clean all data
ui-tests : Clean ui tests
--help : Show this help
"#
)
}
fn clean_all() -> Result<(), String> {
let dirs_to_remove = [
"target",
"build_sysroot/sysroot",
"build_sysroot/sysroot_src",
"build_sysroot/target",
];
for dir in dirs_to_remove {
let _ = remove_dir_all(dir);
}
let dirs_to_remove = ["regex", "rand", "simple-raytracer"];
for dir in dirs_to_remove {
let _ = remove_dir_all(Path::new(crate::BUILD_DIR).join(dir));
}
let files_to_remove = ["build_sysroot/Cargo.lock", "perf.data", "perf.data.old"];
for file in files_to_remove {
let _ = remove_file(file);
}
println!("Successfully ran `clean all`");
Ok(())
}
fn clean_ui_tests() -> Result<(), String> {
let path = Path::new(crate::BUILD_DIR).join("rust/build/x86_64-unknown-linux-gnu/test/ui/");
run_command(&[&"find", &path, &"-name", &"stamp", &"-delete"], None)?;
Ok(())
}
pub fn run() -> Result<(), String> {
match CleanArg::new()? {
CleanArg::All => clean_all()?,
CleanArg::UiTests => clean_ui_tests()?,
CleanArg::Help => usage(),
}
Ok(())
}

View File

@ -0,0 +1,79 @@
use crate::config::ConfigInfo;
use crate::utils::{git_clone, run_command_with_output};
use std::path::{Path, PathBuf};
fn show_usage() {
println!(
r#"
`clone-gcc` command help:
--out-path : Location where the GCC repository will be cloned (default: `./gcc`)"#
);
ConfigInfo::show_usage();
println!(" --help : Show this help");
}
#[derive(Default)]
struct Args {
out_path: PathBuf,
config_info: ConfigInfo,
}
impl Args {
fn new() -> Result<Option<Self>, String> {
let mut command_args = Self::default();
let mut out_path = None;
// We skip binary name and the `clone-gcc` command.
let mut args = std::env::args().skip(2);
while let Some(arg) = args.next() {
match arg.as_str() {
"--out-path" => match args.next() {
Some(path) if !path.is_empty() => out_path = Some(path),
_ => {
return Err("Expected an argument after `--out-path`, found nothing".into())
}
},
"--help" => {
show_usage();
return Ok(None);
}
arg => {
if !command_args.config_info.parse_argument(arg, &mut args)? {
return Err(format!("Unknown option {}", arg));
}
}
}
}
command_args.out_path = match out_path {
Some(p) => p.into(),
None => PathBuf::from("./gcc"),
};
return Ok(Some(command_args));
}
}
pub fn run() -> Result<(), String> {
let Some(args) = Args::new()? else {
return Ok(());
};
let result = git_clone("https://github.com/antoyo/gcc", Some(&args.out_path), false)?;
if result.ran_clone {
let gcc_commit = args.config_info.get_gcc_commit()?;
println!("Checking out GCC commit `{}`...", gcc_commit);
run_command_with_output(
&[&"git", &"checkout", &gcc_commit],
Some(Path::new(&result.repo_dir)),
)?;
} else {
println!(
"There is already a GCC folder in `{}`, leaving things as is...",
args.out_path.display()
);
}
Ok(())
}

View File

@ -1,149 +1,551 @@
use crate::utils::{get_gcc_path, get_os_name, get_rustc_host_triple};
use crate::utils::{
create_symlink, get_os_name, run_command_with_output, rustc_version_info, split_args,
};
use std::collections::HashMap;
use std::env as std_env;
use std::ffi::OsStr;
use std::fs;
use std::path::{Path, PathBuf};
use boml::{types::TomlValue, Toml};
#[derive(Default, PartialEq, Eq, Clone, Copy, Debug)]
pub enum Channel {
#[default]
Debug,
Release,
}
impl Channel {
pub fn as_str(self) -> &'static str {
match self {
Self::Debug => "debug",
Self::Release => "release",
}
}
}
fn failed_config_parsing(config_file: &Path, err: &str) -> Result<ConfigFile, String> {
Err(format!(
"Failed to parse `{}`: {}",
config_file.display(),
err
))
}
#[derive(Default)]
pub struct ConfigFile {
gcc_path: Option<String>,
download_gccjit: Option<bool>,
}
impl ConfigFile {
pub fn new(config_file: &Path) -> Result<Self, String> {
let content = fs::read_to_string(config_file).map_err(|_| {
format!(
"Failed to read `{}`. Take a look at `Readme.md` to see how to set up the project",
config_file.display(),
)
})?;
let toml = Toml::parse(&content).map_err(|err| {
format!(
"Error occurred around `{}`: {:?}",
&content[err.start..=err.end],
err.kind
)
})?;
let mut config = Self::default();
for (key, value) in toml.iter() {
match (key, value) {
("gcc-path", TomlValue::String(value)) => {
config.gcc_path = Some(value.as_str().to_string())
}
("gcc-path", _) => {
return failed_config_parsing(config_file, "Expected a string for `gcc-path`")
}
("download-gccjit", TomlValue::Boolean(value)) => {
config.download_gccjit = Some(*value)
}
("download-gccjit", _) => {
return failed_config_parsing(
config_file,
"Expected a boolean for `download-gccjit`",
)
}
_ => return failed_config_parsing(config_file, &format!("Unknown key `{}`", key)),
}
}
match (config.gcc_path.as_mut(), config.download_gccjit) {
(None, None | Some(false)) => {
return failed_config_parsing(
config_file,
"At least one of `gcc-path` or `download-gccjit` value must be set",
)
}
(Some(_), Some(true)) => {
println!(
"WARNING: both `gcc-path` and `download-gccjit` arguments are used, \
ignoring `gcc-path`"
);
}
(Some(gcc_path), _) => {
let path = Path::new(gcc_path);
*gcc_path = path
.canonicalize()
.map_err(|err| {
format!("Failed to get absolute path of `{}`: {:?}", gcc_path, err)
})?
.display()
.to_string();
}
_ => {}
}
Ok(config)
}
}
#[derive(Default, Debug)]
pub struct ConfigInfo {
pub target: String,
pub target_triple: String,
pub host_triple: String,
pub rustc_command: Vec<String>,
pub run_in_vm: bool,
pub cargo_target_dir: String,
pub dylib_ext: String,
pub sysroot_release_channel: bool,
pub channel: Channel,
pub sysroot_panic_abort: bool,
pub cg_backend_path: String,
pub sysroot_path: String,
pub gcc_path: String,
config_file: Option<String>,
// This is used in particular in rust compiler bootstrap because it doesn't run at the root
// of the `cg_gcc` folder, making it complicated for us to get access to local files we need
// like `libgccjit.version` or `config.toml`.
cg_gcc_path: Option<PathBuf>,
// Needed for the `info` command which doesn't want to actually download the lib if needed,
// just to set the `gcc_path` field to display it.
pub no_download: bool,
pub no_default_features: bool,
pub backend: Option<String>,
}
// Returns the beginning for the command line of rustc.
pub fn set_config(
env: &mut HashMap<String, String>,
test_flags: &[String],
gcc_path: Option<&str>,
) -> Result<ConfigInfo, String> {
env.insert("CARGO_INCREMENTAL".to_string(), "0".to_string());
let gcc_path = match gcc_path {
Some(path) => path.to_string(),
None => get_gcc_path()?,
};
env.insert("GCC_PATH".to_string(), gcc_path.clone());
let os_name = get_os_name()?;
let dylib_ext = match os_name.as_str() {
"Linux" => "so",
"Darwin" => "dylib",
os => return Err(format!("unsupported OS `{}`", os)),
};
let host_triple = get_rustc_host_triple()?;
let mut linker = None;
let mut target_triple = host_triple.clone();
let mut target = target_triple.clone();
// We skip binary name and the command.
let mut args = std::env::args().skip(2);
let mut set_target_triple = false;
let mut set_target = false;
while let Some(arg) = args.next() {
match arg.as_str() {
"--target-triple" => {
if let Some(arg) = args.next() {
target_triple = arg;
set_target_triple = true;
} else {
return Err(
"Expected a value after `--target-triple`, found nothing".to_string()
);
}
},
impl ConfigInfo {
/// Returns `true` if the argument was taken into account.
pub fn parse_argument(
&mut self,
arg: &str,
args: &mut impl Iterator<Item = String>,
) -> Result<bool, String> {
match arg {
"--target" => {
if let Some(arg) = args.next() {
target = arg;
set_target = true;
self.target = arg;
} else {
return Err("Expected a value after `--target`, found nothing".to_string());
}
}
"--target-triple" => match args.next() {
Some(arg) if !arg.is_empty() => self.target_triple = arg.to_string(),
_ => {
return Err(
"Expected a value after `--target`, found nothing".to_string()
);
"Expected a value after `--target-triple`, found nothing".to_string()
)
}
},
_ => (),
"--out-dir" => match args.next() {
Some(arg) if !arg.is_empty() => {
self.cargo_target_dir = arg.to_string();
}
_ => return Err("Expected a value after `--out-dir`, found nothing".to_string()),
},
"--config-file" => match args.next() {
Some(arg) if !arg.is_empty() => {
self.config_file = Some(arg.to_string());
}
_ => {
return Err("Expected a value after `--config-file`, found nothing".to_string())
}
},
"--release-sysroot" => self.sysroot_release_channel = true,
"--release" => self.channel = Channel::Release,
"--sysroot-panic-abort" => self.sysroot_panic_abort = true,
"--cg_gcc-path" => match args.next() {
Some(arg) if !arg.is_empty() => {
self.cg_gcc_path = Some(arg.into());
}
_ => {
return Err("Expected a value after `--cg_gcc-path`, found nothing".to_string())
}
},
"--use-backend" => match args.next() {
Some(backend) if !backend.is_empty() => self.backend = Some(backend),
_ => {
return Err(
"Expected an argument after `--use-backend`, found nothing".into()
)
}
},
"--no-default-features" => self.no_default_features = true,
_ => return Ok(false),
}
Ok(true)
}
pub fn rustc_command_vec(&self) -> Vec<&dyn AsRef<OsStr>> {
let mut command: Vec<&dyn AsRef<OsStr>> = Vec::with_capacity(self.rustc_command.len());
for arg in self.rustc_command.iter() {
command.push(arg);
}
command
}
pub fn get_gcc_commit(&self) -> Result<String, String> {
let commit_hash_file = self.compute_path("libgccjit.version");
let content = fs::read_to_string(&commit_hash_file).map_err(|_| {
format!(
"Failed to read `{}`. Take a look at `Readme.md` to see how to set up the project",
commit_hash_file.display(),
)
})?;
let commit = content.trim();
// This is a very simple check to ensure this is not a path. For the rest, it'll just fail
// when trying to download the file so we should be fine.
if commit.contains('/') || commit.contains('\\') {
return Err(format!(
"{}: invalid commit hash `{}`",
commit_hash_file.display(),
commit,
));
}
Ok(commit.to_string())
}
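// Note (illustrative, not part of the original patch): `libgccjit.version` is expected to
// contain a single commit hash of the GCC fork on its own line; the check above only
// rejects values that look like filesystem paths.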
fn download_gccjit_if_needed(&mut self) -> Result<(), String> {
let output_dir = Path::new(crate::BUILD_DIR).join("libgccjit");
let commit = self.get_gcc_commit()?;
let output_dir = output_dir.join(&commit);
if !output_dir.is_dir() {
std::fs::create_dir_all(&output_dir).map_err(|err| {
format!(
"failed to create folder `{}`: {:?}",
output_dir.display(),
err,
)
})?;
}
let output_dir = output_dir.canonicalize().map_err(|err| {
format!(
"Failed to get absolute path of `{}`: {:?}",
output_dir.display(),
err
)
})?;
let libgccjit_so_name = "libgccjit.so";
let libgccjit_so = output_dir.join(libgccjit_so_name);
if !libgccjit_so.is_file() && !self.no_download {
// Download time!
let tempfile_name = format!("{}.download", libgccjit_so_name);
let tempfile = output_dir.join(&tempfile_name);
let is_in_ci = std::env::var("GITHUB_ACTIONS").is_ok();
let url = format!(
"https://github.com/antoyo/gcc/releases/download/master-{}/libgccjit.so",
commit,
);
println!("Downloading `{}`...", url);
download_gccjit(url, &output_dir, tempfile_name, !is_in_ci)?;
let libgccjit_so = output_dir.join(libgccjit_so_name);
// If we reach this point, it means the file was correctly downloaded, so let's
// rename it!
std::fs::rename(&tempfile, &libgccjit_so).map_err(|err| {
format!(
"Failed to rename `{}` into `{}`: {:?}",
tempfile.display(),
libgccjit_so.display(),
err,
)
})?;
println!("Downloaded libgccjit.so version {} successfully!", commit);
// We need to create a link named `libgccjit.so.0` because that's what the linker is
// looking for.
create_symlink(
&libgccjit_so,
output_dir.join(&format!("{}.0", libgccjit_so_name)),
)?;
}
self.gcc_path = output_dir.display().to_string();
println!("Using `{}` as path for libgccjit", self.gcc_path);
Ok(())
}
pub fn compute_path<P: AsRef<Path>>(&self, other: P) -> PathBuf {
match self.cg_gcc_path {
Some(ref path) => path.join(other),
None => PathBuf::new().join(other),
}
}
if set_target_triple && !set_target {
target = target_triple.clone();
pub fn setup_gcc_path(&mut self) -> Result<(), String> {
let config_file = match self.config_file.as_deref() {
Some(config_file) => config_file.into(),
None => self.compute_path("config.toml"),
};
let ConfigFile {
gcc_path,
download_gccjit,
} = ConfigFile::new(&config_file)?;
if let Some(true) = download_gccjit {
self.download_gccjit_if_needed()?;
return Ok(());
}
self.gcc_path = match gcc_path {
Some(path) => path,
None => {
return Err(format!(
"missing `gcc-path` value from `{}`",
config_file.display(),
))
}
};
Ok(())
}
if host_triple != target_triple {
linker = Some(format!("-Clinker={}-gcc", target_triple));
}
let current_dir =
std_env::current_dir().map_err(|error| format!("`current_dir` failed: {:?}", error))?;
let channel = if let Some(channel) = env.get("CHANNEL") {
channel.as_str()
} else {
"debug"
};
let cg_backend_path = current_dir
.join("target")
.join(channel)
.join(&format!("librustc_codegen_gcc.{}", dylib_ext));
let sysroot_path = current_dir.join("build_sysroot/sysroot");
let mut rustflags = Vec::new();
if let Some(cg_rustflags) = env.get("CG_RUSTFLAGS") {
rustflags.push(cg_rustflags.clone());
}
if let Some(linker) = linker {
rustflags.push(linker.to_string());
}
rustflags.extend_from_slice(&[
"-Csymbol-mangling-version=v0".to_string(),
"-Cdebuginfo=2".to_string(),
format!("-Zcodegen-backend={}", cg_backend_path.display()),
"--sysroot".to_string(),
sysroot_path.display().to_string(),
]);
pub fn setup(
&mut self,
env: &mut HashMap<String, String>,
use_system_gcc: bool,
) -> Result<(), String> {
env.insert("CARGO_INCREMENTAL".to_string(), "0".to_string());
// Since we don't support ThinLTO, disable LTO completely when not trying to do LTO.
// TODO(antoyo): remove when we can handle ThinLTO.
if !env.contains_key(&"FAT_LTO".to_string()) {
rustflags.push("-Clto=off".to_string());
}
rustflags.extend_from_slice(test_flags);
// FIXME(antoyo): remove once the atomic shim is gone
if os_name == "Darwin" {
rustflags.extend_from_slice(&[
"-Clink-arg=-undefined".to_string(),
"-Clink-arg=dynamic_lookup".to_string(),
if self.gcc_path.is_empty() && !use_system_gcc {
self.setup_gcc_path()?;
}
env.insert("GCC_PATH".to_string(), self.gcc_path.clone());
if self.cargo_target_dir.is_empty() {
match env.get("CARGO_TARGET_DIR").filter(|dir| !dir.is_empty()) {
Some(cargo_target_dir) => self.cargo_target_dir = cargo_target_dir.clone(),
None => self.cargo_target_dir = "target/out".to_string(),
}
}
let os_name = get_os_name()?;
self.dylib_ext = match os_name.as_str() {
"Linux" => "so",
"Darwin" => "dylib",
os => return Err(format!("unsupported OS `{}`", os)),
}
.to_string();
let rustc = match env.get("RUSTC") {
Some(r) if !r.is_empty() => r.to_string(),
_ => "rustc".to_string(),
};
self.host_triple = match rustc_version_info(Some(&rustc))?.host {
Some(host) => host,
None => return Err("no host found".to_string()),
};
if self.target_triple.is_empty() {
if let Some(overwrite) = env.get("OVERWRITE_TARGET_TRIPLE") {
self.target_triple = overwrite.clone();
}
}
if self.target_triple.is_empty() {
self.target_triple = self.host_triple.clone();
}
if self.target.is_empty() && !self.target_triple.is_empty() {
self.target = self.target_triple.clone();
}
let mut linker = None;
if self.host_triple != self.target_triple {
if self.target_triple.is_empty() {
return Err("Unknown non-native platform".to_string());
}
linker = Some(format!("-Clinker={}-gcc", self.target_triple));
self.run_in_vm = true;
}
let current_dir =
std_env::current_dir().map_err(|error| format!("`current_dir` failed: {:?}", error))?;
let channel = if self.channel == Channel::Release {
"release"
} else if let Some(channel) = env.get("CHANNEL") {
channel.as_str()
} else {
"debug"
};
let mut rustflags = Vec::new();
self.cg_backend_path = current_dir
.join("target")
.join(channel)
.join(&format!("librustc_codegen_gcc.{}", self.dylib_ext))
.display()
.to_string();
self.sysroot_path = current_dir
.join("build_sysroot/sysroot")
.display()
.to_string();
if let Some(backend) = &self.backend {
rustflags.push(format!("-Zcodegen-backend={}", backend));
} else {
rustflags.extend_from_slice(&[
"--sysroot".to_string(), self.sysroot_path.clone(),
format!("-Zcodegen-backend={}", self.cg_backend_path),
]);
}
// This environment variable is useful in case we want to change options of rustc commands.
if let Some(cg_rustflags) = env.get("CG_RUSTFLAGS") {
rustflags.extend_from_slice(&split_args(&cg_rustflags)?);
}
if let Some(test_flags) = env.get("TEST_FLAGS") {
rustflags.extend_from_slice(&split_args(&test_flags)?);
}
if let Some(linker) = linker {
rustflags.push(linker.to_string());
}
if self.no_default_features {
rustflags.push("-Csymbol-mangling-version=v0".to_string());
}
rustflags.push("-Cdebuginfo=2".to_string());
// Since we don't support ThinLTO, disable LTO completely when not trying to do LTO.
// TODO(antoyo): remove when we can handle ThinLTO.
if !env.contains_key(&"FAT_LTO".to_string()) {
rustflags.push("-Clto=off".to_string());
}
// FIXME(antoyo): remove once the atomic shim is gone
if os_name == "Darwin" {
rustflags.extend_from_slice(&[
"-Clink-arg=-undefined".to_string(),
"-Clink-arg=dynamic_lookup".to_string(),
]);
}
env.insert("RUSTFLAGS".to_string(), rustflags.join(" "));
// display metadata load errors
env.insert("RUSTC_LOG".to_string(), "warn".to_string());
let sysroot = current_dir.join(&format!(
"build_sysroot/sysroot/lib/rustlib/{}/lib",
self.target_triple,
));
let ld_library_path = format!(
"{target}:{sysroot}:{gcc_path}",
target = self.cargo_target_dir,
sysroot = sysroot.display(),
gcc_path = self.gcc_path,
);
env.insert("LIBRARY_PATH".to_string(), ld_library_path.clone());
env.insert("LD_LIBRARY_PATH".to_string(), ld_library_path.clone());
env.insert("DYLD_LIBRARY_PATH".to_string(), ld_library_path);
// NOTE: To avoid the -fno-inline errors, use /opt/gcc/bin/gcc instead of cc.
// To do so, add a symlink for cc to /opt/gcc/bin/gcc in our PATH.
// Another option would be to add the following Rust flag: -Clinker=/opt/gcc/bin/gcc
let path = std::env::var("PATH").unwrap_or_default();
env.insert(
"PATH".to_string(),
format!(
"/opt/gcc/bin:/opt/m68k-unknown-linux-gnu/bin{}{}",
if path.is_empty() { "" } else { ":" },
path
),
);
self.rustc_command = vec![rustc];
self.rustc_command.extend_from_slice(&rustflags);
self.rustc_command.extend_from_slice(&[
"-L".to_string(),
format!("crate={}", self.cargo_target_dir),
"--out-dir".to_string(),
self.cargo_target_dir.clone(),
]);
if !env.contains_key("RUSTC_LOG") {
env.insert("RUSTC_LOG".to_string(), "warn".to_string());
}
Ok(())
}
env.insert("RUSTFLAGS".to_string(), rustflags.join(" "));
// display metadata load errors
env.insert("RUSTC_LOG".to_string(), "warn".to_string());
let sysroot = current_dir.join(&format!(
"build_sysroot/sysroot/lib/rustlib/{}/lib",
target_triple
));
let ld_library_path = format!(
"{target}:{sysroot}:{gcc_path}",
target = current_dir.join("target/out").display(),
sysroot = sysroot.display(),
);
env.insert("LD_LIBRARY_PATH".to_string(), ld_library_path.clone());
env.insert("DYLD_LIBRARY_PATH".to_string(), ld_library_path);
// NOTE: To avoid the -fno-inline errors, use /opt/gcc/bin/gcc instead of cc.
// To do so, add a symlink for cc to /opt/gcc/bin/gcc in our PATH.
// Another option would be to add the following Rust flag: -Clinker=/opt/gcc/bin/gcc
let path = std::env::var("PATH").unwrap_or_default();
env.insert("PATH".to_string(), format!("/opt/gcc/bin:{}", path));
let mut rustc_command = vec!["rustc".to_string()];
rustc_command.extend_from_slice(&rustflags);
rustc_command.extend_from_slice(&[
"-L".to_string(),
"crate=target/out".to_string(),
"--out-dir".to_string(),
"target/out".to_string(),
]);
Ok(ConfigInfo {
target,
target_triple,
rustc_command,
})
pub fn show_usage() {
println!(
"\
--target-triple [arg] : Set the target triple to [arg]
--target [arg] : Set the target to [arg]
--out-dir : Location where the files will be generated
--release : Build in release mode
--release-sysroot : Build sysroot in release mode
--sysroot-panic-abort : Build the sysroot without unwinding support
--config-file : Location of the config file to be used
--cg_gcc-path : Location of the rustc_codegen_gcc root folder (used
when run from another directory)
--no-default-features : Add `--no-default-features` flag to cargo commands
--use-backend : Useful only for rustc testsuite"
);
}
}
fn download_gccjit(
url: String,
output_dir: &Path,
tempfile_name: String,
with_progress_bar: bool,
) -> Result<(), String> {
// Try curl. If that fails and we are on windows, fallback to PowerShell.
let mut ret = run_command_with_output(
&[
&"curl",
&"--speed-time",
&"30",
&"--speed-limit",
&"10", // timeout if speed is < 10 bytes/sec for > 30 seconds
&"--connect-timeout",
&"30", // timeout if cannot connect within 30 seconds
&"-o",
&tempfile_name,
&"--retry",
&"3",
&"-SRfL",
if with_progress_bar {
&"--progress-bar"
} else {
&"-s"
},
&url.as_str(),
],
Some(&output_dir),
);
if ret.is_err() && cfg!(windows) {
eprintln!("Fallback to PowerShell");
ret = run_command_with_output(
&[
&"PowerShell.exe",
&"/nologo",
&"-Command",
&"[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12;",
&format!(
"(New-Object System.Net.WebClient).DownloadFile('{}', '{}')",
url, tempfile_name,
)
.as_str(),
],
Some(&output_dir),
);
}
ret
}

View File

@ -0,0 +1,19 @@
use crate::config::ConfigInfo;
pub fn run() -> Result<(), String> {
let mut config = ConfigInfo::default();
// We skip binary name and the `info` command.
let mut args = std::env::args().skip(2);
while let Some(arg) = args.next() {
if arg == "--help" {
println!("Display the path where the libgccjit will be located");
return Ok(());
}
config.parse_argument(&arg, &mut args)?;
}
config.no_download = true;
config.setup_gcc_path()?;
println!("{}", config.gcc_path);
Ok(())
}
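// Illustrative usage (assuming the usual `y.sh` wrapper): `./y.sh info` prints the path
// where libgccjit is (or would be) located, without downloading it.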

View File

@ -2,12 +2,18 @@ use std::env;
use std::process;
mod build;
mod cargo;
mod clean;
mod clone_gcc;
mod config;
mod info;
mod prepare;
mod rustc_info;
mod test;
mod utils;
const BUILD_DIR: &str = "build";
macro_rules! arg_error {
($($err:tt)*) => {{
eprintln!($($err)*);
@ -22,17 +28,25 @@ fn usage() {
"\
Available commands for build_system:
prepare : Run prepare command
build : Run build command
test : Run test command
--help : Show this message"
cargo : Run cargo command
clean : Run clean command
prepare : Run prepare command
build : Run build command
test : Run test command
info : Run info command
clone-gcc : Run clone-gcc command
--help : Show this message"
);
}
pub enum Command {
Cargo,
Clean,
CloneGcc,
Prepare,
Build,
Test,
Info,
}
fn main() {
@ -41,9 +55,13 @@ fn main() {
}
let command = match env::args().nth(1).as_deref() {
Some("cargo") => Command::Cargo,
Some("clean") => Command::Clean,
Some("prepare") => Command::Prepare,
Some("build") => Command::Build,
Some("test") => Command::Test,
Some("info") => Command::Info,
Some("clone-gcc") => Command::CloneGcc,
Some("--help") => {
usage();
process::exit(0);
@ -57,11 +75,15 @@ fn main() {
};
if let Err(e) = match command {
Command::Cargo => cargo::run(),
Command::Clean => clean::run(),
Command::Prepare => prepare::run(),
Command::Build => build::run(),
Command::Test => test::run(),
Command::Info => info::run(),
Command::CloneGcc => clone_gcc::run(),
} {
eprintln!("Command failed to run: {e:?}");
eprintln!("Command failed to run: {e}");
process::exit(1);
}
}

View File

@ -1,10 +1,16 @@
use crate::rustc_info::get_rustc_path;
use crate::utils::{cargo_install, git_clone, run_command, run_command_with_output, walk_dir};
use crate::utils::{
cargo_install, git_clone_root_dir, remove_file, run_command, run_command_with_output, walk_dir,
};
use std::fs;
use std::path::Path;
fn prepare_libcore(sysroot_path: &Path, libgccjit12_patches: bool, cross_compile: bool) -> Result<(), String> {
fn prepare_libcore(
sysroot_path: &Path,
libgccjit12_patches: bool,
cross_compile: bool,
) -> Result<(), String> {
let rustc_path = match get_rustc_path() {
Some(path) => path,
None => return Err("`rustc` path not found".to_string()),
@ -88,10 +94,14 @@ fn prepare_libcore(sysroot_path: &Path, libgccjit12_patches: bool, cross_compile
},
)?;
if cross_compile {
walk_dir("cross_patches", |_| Ok(()), |file_path: &Path| {
patches.push(file_path.to_path_buf());
Ok(())
})?;
walk_dir(
"patches/cross_patches",
|_| Ok(()),
|file_path: &Path| {
patches.push(file_path.to_path_buf());
Ok(())
},
)?;
}
if libgccjit12_patches {
walk_dir(
@ -121,6 +131,30 @@ fn prepare_libcore(sysroot_path: &Path, libgccjit12_patches: bool, cross_compile
)?;
}
println!("Successfully prepared libcore for building");
Ok(())
}
// TODO: remove when we can ignore warnings in rustdoc tests.
fn prepare_rand() -> Result<(), String> {
// Apply patch for the rand crate.
let file_path = "patches/crates/0001-Remove-deny-warnings.patch";
let rand_dir = Path::new("build/rand");
println!("[GIT] apply `{}`", file_path);
let path = Path::new("../..").join(file_path);
run_command_with_output(&[&"git", &"apply", &path], Some(rand_dir))?;
run_command_with_output(&[&"git", &"add", &"-A"], Some(rand_dir))?;
run_command_with_output(
&[
&"git",
&"commit",
&"--no-gpg-sign",
&"-m",
&format!("Patch {}", path.display()),
],
Some(rand_dir),
)?;
Ok(())
}
@ -129,8 +163,7 @@ fn build_raytracer(repo_dir: &Path) -> Result<(), String> {
run_command(&[&"cargo", &"build"], Some(repo_dir))?;
let mv_target = repo_dir.join("raytracer_cg_llvm");
if mv_target.is_file() {
std::fs::remove_file(&mv_target)
.map_err(|e| format!("Failed to remove file `{}`: {e:?}", mv_target.display()))?;
remove_file(&mv_target)?;
}
run_command(
&[&"mv", &"target/debug/main", &"raytracer_cg_llvm"],
@ -143,28 +176,13 @@ fn clone_and_setup<F>(repo_url: &str, checkout_commit: &str, extra: Option<F>) -
where
F: Fn(&Path) -> Result<(), String>,
{
let clone_result = git_clone(repo_url, None)?;
let clone_result = git_clone_root_dir(repo_url, &Path::new(crate::BUILD_DIR), false)?;
if !clone_result.ran_clone {
println!("`{}` has already been cloned", clone_result.repo_name);
}
let repo_path = Path::new(&clone_result.repo_name);
let repo_path = Path::new(crate::BUILD_DIR).join(&clone_result.repo_name);
run_command(&[&"git", &"checkout", &"--", &"."], Some(&repo_path))?;
run_command(&[&"git", &"checkout", &checkout_commit], Some(&repo_path))?;
let filter = format!("-{}-", clone_result.repo_name);
walk_dir(
"crate_patches",
|_| Ok(()),
|file_path| {
let patch = file_path.as_os_str().to_str().unwrap();
if patch.contains(&filter) && patch.ends_with(".patch") {
run_command_with_output(
&[&"git", &"am", &file_path.canonicalize().unwrap()],
Some(&repo_path),
)?;
}
Ok(())
},
)?;
if let Some(extra) = extra {
extra(&repo_path)?;
}
@ -210,8 +228,7 @@ impl PrepareArg {
--only-libcore : Only setup libcore and don't clone other repositories
--cross : Apply the patches needed to do cross-compilation
--libgccjit12-patches : Apply patches needed for libgccjit12
--help : Show this help
"#
--help : Show this help"#
)
}
}
@ -230,7 +247,7 @@ pub fn run() -> Result<(), String> {
let to_clone = &[
(
"https://github.com/rust-random/rand.git",
"0f933f9c7176e53b2a3c7952ded484e1783f0bf1",
"1f4507a8e1cf8050e4ceef95eeda8f64645b6719",
None,
),
(
@ -248,6 +265,8 @@ pub fn run() -> Result<(), String> {
for (repo_url, checkout_commit, cb) in to_clone {
clone_and_setup(repo_url, checkout_commit, *cb)?;
}
prepare_rand()?;
}
println!("Successfully ran `prepare`");

File diff suppressed because it is too large

View File

@ -2,7 +2,7 @@ use std::collections::HashMap;
use std::ffi::OsStr;
use std::fmt::Debug;
use std::fs;
use std::path::Path;
use std::path::{Path, PathBuf};
use std::process::{Command, ExitStatus, Output};
fn get_command_inner(
@ -29,22 +29,40 @@ fn check_exit_status(
input: &[&dyn AsRef<OsStr>],
cwd: Option<&Path>,
exit_status: ExitStatus,
output: Option<&Output>,
show_err: bool,
) -> Result<(), String> {
if exit_status.success() {
Ok(())
} else {
Err(format!(
"Command `{}`{} exited with status {:?}",
input
.iter()
.map(|s| s.as_ref().to_str().unwrap())
.collect::<Vec<_>>()
.join(" "),
cwd.map(|cwd| format!(" (running in folder `{}`)", cwd.display()))
.unwrap_or_default(),
exit_status.code(),
))
return Ok(());
}
let mut error = format!(
"Command `{}`{} exited with status {:?}",
input
.iter()
.map(|s| s.as_ref().to_str().unwrap())
.collect::<Vec<_>>()
.join(" "),
cwd.map(|cwd| format!(" (running in folder `{}`)", cwd.display()))
.unwrap_or_default(),
exit_status.code()
);
let input = input.iter().map(|i| i.as_ref()).collect::<Vec<&OsStr>>();
if show_err {
eprintln!("Command `{:?}` failed", input);
}
if let Some(output) = output {
let stdout = String::from_utf8_lossy(&output.stdout);
if !stdout.is_empty() {
error.push_str("\n==== STDOUT ====\n");
error.push_str(&*stdout);
}
let stderr = String::from_utf8_lossy(&output.stderr);
if !stderr.is_empty() {
error.push_str("\n==== STDERR ====\n");
error.push_str(&*stderr);
}
}
Err(error)
}
fn command_error<D: Debug>(input: &[&dyn AsRef<OsStr>], cwd: &Option<&Path>, error: D) -> String {
@ -73,7 +91,7 @@ pub fn run_command_with_env(
let output = get_command_inner(input, cwd, env)
.output()
.map_err(|e| command_error(input, &cwd, e))?;
check_exit_status(input, cwd, output.status)?;
check_exit_status(input, cwd, output.status, Some(&output), true)?;
Ok(output)
}
@ -86,7 +104,7 @@ pub fn run_command_with_output(
.map_err(|e| command_error(input, &cwd, e))?
.wait()
.map_err(|e| command_error(input, &cwd, e))?;
check_exit_status(input, cwd, exit_status)?;
check_exit_status(input, cwd, exit_status, None, true)?;
Ok(())
}
@ -100,7 +118,21 @@ pub fn run_command_with_output_and_env(
.map_err(|e| command_error(input, &cwd, e))?
.wait()
.map_err(|e| command_error(input, &cwd, e))?;
check_exit_status(input, cwd, exit_status)?;
check_exit_status(input, cwd, exit_status, None, true)?;
Ok(())
}
pub fn run_command_with_output_and_env_no_err(
input: &[&dyn AsRef<OsStr>],
cwd: Option<&Path>,
env: Option<&HashMap<String, String>>,
) -> Result<(), String> {
let exit_status = get_command_inner(input, cwd, env)
.spawn()
.map_err(|e| command_error(input, &cwd, e))?
.wait()
.map_err(|e| command_error(input, &cwd, e))?;
check_exit_status(input, cwd, exit_status, None, false)?;
Ok(())
}
@ -143,80 +175,157 @@ pub fn get_os_name() -> Result<String, String> {
}
}
pub fn get_rustc_host_triple() -> Result<String, String> {
let output = run_command(&[&"rustc", &"-vV"], None)?;
let content = std::str::from_utf8(&output.stdout).unwrap_or("");
for line in content.split('\n').map(|line| line.trim()) {
if !line.starts_with("host:") {
continue;
}
return Ok(line.split(':').nth(1).unwrap().trim().to_string());
}
Err("Cannot find host triple".to_string())
#[derive(Default, PartialEq)]
pub struct RustcVersionInfo {
pub short: String,
pub version: String,
pub host: Option<String>,
pub commit_hash: Option<String>,
pub commit_date: Option<String>,
}
pub fn get_gcc_path() -> Result<String, String> {
let content = match fs::read_to_string("gcc_path") {
Ok(content) => content,
Err(_) => {
return Err(
"Please put the path to your custom build of libgccjit in the file \
`gcc_path`, see Readme.md for details"
.into(),
)
pub fn rustc_toolchain_version_info(toolchain: &str) -> Result<RustcVersionInfo, String> {
rustc_version_info_inner(None, Some(toolchain))
}
pub fn rustc_version_info(rustc: Option<&str>) -> Result<RustcVersionInfo, String> {
rustc_version_info_inner(rustc, None)
}
fn rustc_version_info_inner(
rustc: Option<&str>,
toolchain: Option<&str>,
) -> Result<RustcVersionInfo, String> {
let output = if let Some(toolchain) = toolchain {
run_command(&[&rustc.unwrap_or("rustc"), &toolchain, &"-vV"], None)
} else {
run_command(&[&rustc.unwrap_or("rustc"), &"-vV"], None)
}?;
let content = std::str::from_utf8(&output.stdout).unwrap_or("");
let mut info = RustcVersionInfo::default();
let mut lines = content.split('\n');
info.short = match lines.next() {
Some(s) => s.to_string(),
None => return Err("failed to retrieve rustc version".to_string()),
};
for line in lines.map(|line| line.trim()) {
match line.split_once(':') {
Some(("host", data)) => info.host = Some(data.trim().to_string()),
Some(("release", data)) => info.version = data.trim().to_string(),
Some(("commit-hash", data)) => info.commit_hash = Some(data.trim().to_string()),
Some(("commit-date", data)) => info.commit_date = Some(data.trim().to_string()),
_ => {}
}
}
if info.version.is_empty() {
Err("failed to retrieve rustc version".to_string())
} else {
Ok(info)
}
}
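// For reference (illustrative, actual values vary): `rustc -vV` prints something like
//     rustc 1.78.0-nightly (abcdef123 2024-03-10)
//     binary: rustc
//     commit-hash: abcdef123...
//     commit-date: 2024-03-10
//     host: x86_64-unknown-linux-gnu
//     release: 1.78.0-nightly
// The first line becomes `short`; the `host`, `release`, `commit-hash` and `commit-date`
// lines are picked up by the `split_once(':')` loop above.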
pub fn get_toolchain() -> Result<String, String> {
let content = match fs::read_to_string("rust-toolchain") {
Ok(content) => content,
Err(_) => return Err("No `rust-toolchain` file found".to_string()),
};
match content
.split('\n')
.map(|line| line.trim())
.filter(|line| !line.is_empty())
.filter_map(|line| {
if !line.starts_with("channel") {
return None;
}
line.split('"').skip(1).next()
})
.next()
{
Some(gcc_path) => {
let path = Path::new(gcc_path);
if !path.exists() {
Err(format!(
"Path `{}` contained in the `gcc_path` file doesn't exist",
gcc_path,
))
} else {
Ok(gcc_path.into())
}
}
None => Err("No path found in `gcc_path` file".into()),
Some(toolchain) => Ok(toolchain.to_string()),
None => Err("Couldn't find `channel` in `rust-toolchain` file".to_string()),
}
}
pub struct CloneResult {
pub ran_clone: bool,
pub repo_name: String,
pub repo_dir: String,
}
pub fn git_clone(to_clone: &str, dest: Option<&Path>) -> Result<CloneResult, String> {
let repo_name = to_clone.split('/').last().unwrap();
let repo_name = match repo_name.strip_suffix(".git") {
Some(n) => n.to_string(),
None => repo_name.to_string(),
};
let dest = dest
.map(|dest| dest.join(&repo_name))
.unwrap_or_else(|| Path::new(&repo_name).into());
fn git_clone_inner(
to_clone: &str,
dest: &Path,
shallow_clone: bool,
repo_name: String,
) -> Result<CloneResult, String> {
if dest.is_dir() {
return Ok(CloneResult {
ran_clone: false,
repo_name,
repo_dir: dest.display().to_string(),
});
}
run_command_with_output(&[&"git", &"clone", &to_clone, &dest], None)?;
let mut command: Vec<&dyn AsRef<OsStr>> = vec![&"git", &"clone", &to_clone, &dest];
if shallow_clone {
command.push(&"--depth");
command.push(&"1");
}
run_command_with_output(&command, None)?;
Ok(CloneResult {
ran_clone: true,
repo_name,
repo_dir: dest.display().to_string(),
})
}
fn get_repo_name(url: &str) -> String {
let repo_name = url.split('/').last().unwrap();
match repo_name.strip_suffix(".git") {
Some(n) => n.to_string(),
None => repo_name.to_string(),
}
}
pub fn git_clone(
to_clone: &str,
dest: Option<&Path>,
shallow_clone: bool,
) -> Result<CloneResult, String> {
let repo_name = get_repo_name(to_clone);
let tmp: PathBuf;
let dest = match dest {
Some(dest) => dest,
None => {
tmp = repo_name.clone().into();
&tmp
}
};
git_clone_inner(to_clone, dest, shallow_clone, repo_name)
}
/// This function differs from `git_clone` in how it handles *where* the repository will be cloned.
/// In `git_clone`, it is cloned in the provided path. In this function, the path you provide is
/// the parent folder. So if you pass "a" as folder and try to clone "b.git", it will be cloned into
/// `a/b`.
pub fn git_clone_root_dir(
to_clone: &str,
dest_parent_dir: &Path,
shallow_clone: bool,
) -> Result<CloneResult, String> {
let repo_name = get_repo_name(to_clone);
git_clone_inner(
to_clone,
&dest_parent_dir.join(&repo_name),
shallow_clone,
repo_name,
)
}
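// Illustrative sketch (not part of the original patch) of the difference between the two
// helpers, assuming the current directory is the project root:
//
//     git_clone("https://github.com/antoyo/gcc", Some(Path::new("gcc")), false)?;
//     // -> clones into `./gcc`
//     git_clone_root_dir("https://github.com/antoyo/gcc", Path::new("build"), false)?;
//     // -> clones into `./build/gcc`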
pub fn walk_dir<P, D, F>(dir: P, mut dir_cb: D, mut file_cb: F) -> Result<(), String>
where
P: AsRef<Path>,
@ -238,3 +347,105 @@ where
}
Ok(())
}
pub fn split_args(args: &str) -> Result<Vec<String>, String> {
let mut out = Vec::new();
let mut start = 0;
let args = args.trim();
let mut iter = args.char_indices().peekable();
while let Some((pos, c)) = iter.next() {
if c == ' ' {
out.push(args[start..pos].to_string());
let mut found_start = false;
while let Some((pos, c)) = iter.peek() {
if *c != ' ' {
start = *pos;
found_start = true;
break;
} else {
iter.next();
}
}
if !found_start {
return Ok(out);
}
} else if c == '"' || c == '\'' {
let end = c;
let mut found_end = false;
while let Some((_, c)) = iter.next() {
if c == end {
found_end = true;
break;
} else if c == '\\' {
// We skip the escaped character.
iter.next();
}
}
if !found_end {
return Err(format!(
"Didn't find `{}` at the end of `{}`",
end,
&args[start..]
));
}
} else if c == '\\' {
// We skip the escaped character.
iter.next();
}
}
let s = args[start..].trim();
if !s.is_empty() {
out.push(s.to_string());
}
Ok(out)
}
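// Illustrative note (not part of the original patch): quoting only affects how arguments
// are split; the quotes themselves are kept verbatim. For example,
// `split_args(r#"a "b c" d"#)` returns `["a", "\"b c\"", "d"]` (see the tests below).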
pub fn remove_file<P: AsRef<Path> + ?Sized>(file_path: &P) -> Result<(), String> {
std::fs::remove_file(file_path).map_err(|error| {
format!(
"Failed to remove `{}`: {:?}",
file_path.as_ref().display(),
error
)
})
}
pub fn create_symlink<P: AsRef<Path>, Q: AsRef<Path>>(original: P, link: Q) -> Result<(), String> {
#[cfg(windows)]
let symlink = std::os::windows::fs::symlink_file;
#[cfg(not(windows))]
let symlink = std::os::unix::fs::symlink;
symlink(&original, &link).map_err(|err| {
format!(
"failed to create a symlink `{}` to `{}`: {:?}",
original.as_ref().display(),
link.as_ref().display(),
err,
)
})
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_split_args() {
// Missing `"` at the end.
assert!(split_args("\"tada").is_err());
// Missing `'` at the end.
assert!(split_args("\'tada").is_err());
assert_eq!(
split_args("a \"b\" c"),
Ok(vec!["a".to_string(), "\"b\"".to_string(), "c".to_string()])
);
// Trailing whitespace characters.
assert_eq!(
split_args(" a \"b\" c "),
Ok(vec!["a".to_string(), "\"b\"".to_string(), "c".to_string()])
);
}
}

View File

@ -1,23 +0,0 @@
#!/usr/bin/env bash
if [ -z $CHANNEL ]; then
export CHANNEL='debug'
fi
pushd $(dirname "$0") >/dev/null
source config.sh
# read nightly compiler from rust-toolchain file
TOOLCHAIN=$(cat rust-toolchain | grep channel | sed 's/channel = "\(.*\)"/\1/')
popd >/dev/null
if [[ $(${RUSTC} -V) != $(${RUSTC} +${TOOLCHAIN} -V) ]]; then
echo "rustc_codegen_gcc is build for $(rustc +${TOOLCHAIN} -V) but the default rustc version is $(rustc -V)."
echo "Using $(rustc +${TOOLCHAIN} -V)."
fi
cmd=$1
shift
RUSTDOCFLAGS="$RUSTFLAGS" cargo +${TOOLCHAIN} $cmd $@

View File

@ -1,6 +0,0 @@
#!/usr/bin/env bash
set -e
set -v
rm -rf target/ build_sysroot/{sysroot/,sysroot_src/,target/,Cargo.lock} perf.data{,.old}
rm -rf regex/ simple-raytracer/

View File

@ -0,0 +1,2 @@
gcc-path = "gcc-build/gcc"
# download-gccjit = true

View File

@ -1,85 +0,0 @@
set -e
export CARGO_INCREMENTAL=0
if [ -f ./gcc_path ]; then
export GCC_PATH=$(cat gcc_path)
elif (( $use_system_gcc == 1 )); then
echo 'Using system GCC'
else
echo 'Please put the path to your custom build of libgccjit in the file `gcc_path`, see Readme.md for details'
exit 1
fi
if [[ -z "$RUSTC" ]]; then
export RUSTC="rustc"
fi
unamestr=`uname`
if [[ "$unamestr" == 'Linux' ]]; then
dylib_ext='so'
elif [[ "$unamestr" == 'Darwin' ]]; then
dylib_ext='dylib'
else
echo "Unsupported os"
exit 1
fi
HOST_TRIPLE=$($RUSTC -vV | grep host | cut -d: -f2 | tr -d " ")
# TODO: remove $OVERWRITE_TARGET_TRIPLE when config.sh is removed.
TARGET_TRIPLE="${OVERWRITE_TARGET_TRIPLE:-$HOST_TRIPLE}"
linker=''
RUN_WRAPPER=''
if [[ "$HOST_TRIPLE" != "$TARGET_TRIPLE" ]]; then
RUN_WRAPPER=run_in_vm
if [[ "$TARGET_TRIPLE" == "m68k-unknown-linux-gnu" ]]; then
linker='-Clinker=m68k-unknown-linux-gnu-gcc'
elif [[ "$TARGET_TRIPLE" == "aarch64-unknown-linux-gnu" ]]; then
# We are cross-compiling for aarch64. Use the correct linker and run tests in qemu.
linker='-Clinker=aarch64-linux-gnu-gcc'
else
echo "Unknown non-native platform"
fi
fi
# Since we don't support ThinLTO, disable LTO completely when not trying to do LTO.
# TODO(antoyo): remove when we can handle ThinLTO.
disable_lto_flags=''
if [[ ! -v FAT_LTO ]]; then
disable_lto_flags='-Clto=off'
fi
if [[ -z "$BUILTIN_BACKEND" ]]; then
export RUSTFLAGS="$CG_RUSTFLAGS $linker -Csymbol-mangling-version=v0 -Cdebuginfo=2 $disable_lto_flags -Zcodegen-backend=$(pwd)/target/${CHANNEL:-debug}/librustc_codegen_gcc.$dylib_ext --sysroot $(pwd)/build_sysroot/sysroot $TEST_FLAGS"
else
export RUSTFLAGS="$CG_RUSTFLAGS $linker -Csymbol-mangling-version=v0 -Cdebuginfo=2 $disable_lto_flags -Zcodegen-backend=gcc $TEST_FLAGS -Cpanic=abort"
if [[ ! -z "$RUSTC_SYSROOT" ]]; then
export RUSTFLAGS="$RUSTFLAGS --sysroot $RUSTC_SYSROOT"
fi
fi
# FIXME(antoyo): remove once the atomic shim is gone
if [[ unamestr == 'Darwin' ]]; then
export RUSTFLAGS="$RUSTFLAGS -Clink-arg=-undefined -Clink-arg=dynamic_lookup"
fi
if [[ -z "$cargo_target_dir" ]]; then
RUST_CMD="$RUSTC $RUSTFLAGS -L crate=target/out --out-dir target/out"
cargo_target_dir="target/out"
else
RUST_CMD="$RUSTC $RUSTFLAGS -L crate=$cargo_target_dir --out-dir $cargo_target_dir"
fi
export RUSTC_LOG=warn # display metadata load errors
export LD_LIBRARY_PATH="$(pwd)/target/out:$(pwd)/build_sysroot/sysroot/lib/rustlib/$TARGET_TRIPLE/lib"
if [[ ! -z "$:$GCC_PATH" ]]; then
export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$GCC_PATH"
fi
export DYLD_LIBRARY_PATH=$LD_LIBRARY_PATH
# NOTE: To avoid the -fno-inline errors, use /opt/gcc/bin/gcc instead of cc.
# To do so, add a symlink for cc to /opt/gcc/bin/gcc in our PATH.
# Another option would be to add the following Rust flag: -Clinker=/opt/gcc/bin/gcc
export PATH="/opt/gcc/bin:/opt/m68k-unknown-linux-gnu/bin:$PATH"

View File

@ -1,32 +0,0 @@
From a8fb97120d71252538b6b026695df40d02696bdb Mon Sep 17 00:00:00 2001
From: bjorn3 <bjorn3@users.noreply.github.com>
Date: Sat, 15 Aug 2020 20:04:38 +0200
Subject: [PATCH] [rand] Disable failing test
---
src/distributions/uniform.rs | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/src/distributions/uniform.rs b/src/distributions/uniform.rs
index 480b859..c80bb6f 100644
--- a/src/distributions/uniform.rs
+++ b/src/distributions/uniform.rs
@@ -1085,7 +1085,7 @@ mod tests {
_ => panic!("`UniformDurationMode` was not serialized/deserialized correctly")
}
}
-
+
#[test]
#[cfg(feature = "serde1")]
fn test_uniform_serialization() {
@@ -1314,6 +1314,7 @@ mod tests {
not(target_arch = "wasm32"),
not(target_arch = "asmjs")
))]
+ #[ignore] // FIXME
fn test_float_assertions() {
use super::SampleUniform;
use std::panic::catch_unwind;
--
2.20.1

View File

@ -0,0 +1 @@
INPUT(libLLVM.so.18.1-rust-1.78.0-nightly)

View File

@ -0,0 +1,3 @@
# How to debug GCC LTO
Run the command with `-v -save-temps`, then extract the `lto1` line from the output and run that under the debugger.

View File

@ -0,0 +1,74 @@
# Debugging libgccjit
Sometimes, libgccjit will crash and output an error like this:
```
during RTL pass: expand
libgccjit.so: error: in expmed_mode_index, at expmed.h:249
0x7f0da2e61a35 expmed_mode_index
../../../gcc/gcc/expmed.h:249
0x7f0da2e61aa4 expmed_op_cost_ptr
../../../gcc/gcc/expmed.h:271
0x7f0da2e620dc sdiv_cost_ptr
../../../gcc/gcc/expmed.h:540
0x7f0da2e62129 sdiv_cost
../../../gcc/gcc/expmed.h:558
0x7f0da2e73c12 expand_divmod(int, tree_code, machine_mode, rtx_def*, rtx_def*, rtx_def*, int)
../../../gcc/gcc/expmed.c:4335
0x7f0da2ea1423 expand_expr_real_2(separate_ops*, rtx_def*, machine_mode, expand_modifier)
../../../gcc/gcc/expr.c:9240
0x7f0da2cd1a1e expand_gimple_stmt_1
../../../gcc/gcc/cfgexpand.c:3796
0x7f0da2cd1c30 expand_gimple_stmt
../../../gcc/gcc/cfgexpand.c:3857
0x7f0da2cd90a9 expand_gimple_basic_block
../../../gcc/gcc/cfgexpand.c:5898
0x7f0da2cdade8 execute
../../../gcc/gcc/cfgexpand.c:6582
```
To see the code which causes this error, call the following function:
```c
gcc_jit_context_dump_to_file(ctxt, "/tmp/output.c", 1 /* update_locations */)
```
This will create a C-like file and add the locations into the IR pointing to this C file.
Then, rerun the program and it will output the location in the second line:
```
libgccjit.so: /tmp/something.c:61322:0: error: in expmed_mode_index, at expmed.h:249
```
Or add a breakpoint to `add_error` in gdb and print the line number using:
```
p loc->m_line
p loc->m_filename->m_buffer
```
To print a debug representation of a tree:
```c
debug_tree(expr);
```
(defined in print-tree.h)
To print a debug representation of a gimple struct:
```c
debug_gimple_stmt(gimple_struct)
```
To get the `rustc` command to run in `gdb`, add the `--verbose` flag to `cargo build`.
To have the correct file paths in `gdb` instead of `/usr/src/debug/gcc/libstdc++-v3/libsupc++/eh_personality.cc`, try calling the following at the beginning of the gdb session:
```
set substitute-path /usr/src/debug/gcc /path/to/gcc-repo/gcc
```
TODO(antoyo): but that's not what I remember I was doing.

View File

@ -0,0 +1,27 @@
# Common errors
This file lists errors that were encountered and how to fix them.
### `failed to build archive` error
When you get this error:
```
error: failed to build archive: failed to open object file: No such file or directory (os error 2)
```
This can be caused by compiling with `lto = "fat"` without having compiled the sysroot with LTO.
(Not sure if that's the reason since I cannot reproduce it anymore. Maybe it happened when forgetting to set `FAT_LTO`.)
### ld: cannot find crtbegin.o
When compiling an executable with libgccjit, if you set the `*LIBRARY_PATH` variables to the install directory, you will get the following errors:
```
ld: cannot find crtbegin.o: No such file or directory
ld: cannot find -lgcc: No such file or directory
ld: cannot find -lgcc: No such file or directory
libgccjit.so: error: error invoking gcc driver
```
To fix this, set the variables to `gcc-build/build/gcc`.

View File

@ -0,0 +1,52 @@
# git subtree sync
`rustc_codegen_gcc` is a subtree of the Rust compiler. As such, it needs to be
synced from time to time to ensure that changes that happened on their side are also
included on our side.
### How to install a forked git-subtree
Using git-subtree with `rustc` requires a patched git to make it work.
The PR that is needed is [here](https://github.com/gitgitgadget/git/pull/493).
Use the following instructions to install it:
```bash
git clone git@github.com:tqc/git.git
cd git
git checkout tqc/subtree
make
make install
cd contrib/subtree
make
cp git-subtree ~/bin
```
### Syncing with the Rust compiler
Do a sync with this command:
```bash
PATH="$HOME/bin:$PATH" ~/bin/git-subtree push -P compiler/rustc_codegen_gcc/ ../rustc_codegen_gcc/ sync_branch_name
cd ../rustc_codegen_gcc
git checkout master
git pull
git checkout sync_branch_name
git merge master
```
To send the changes to the rust repo:
```bash
cd ../rust
git pull origin master
git checkout -b subtree-update_cg_gcc_YYYY-MM-DD
PATH="$HOME/bin:$PATH" ~/bin/git-subtree pull --prefix=compiler/rustc_codegen_gcc/ https://github.com/rust-lang/rustc_codegen_gcc.git master
git push
# Immediately merge the merge commit into cg_gcc to prevent merge conflicts when syncing from rust-lang/rust later.
PATH="$HOME/bin:$PATH" ~/bin/git-subtree push -P compiler/rustc_codegen_gcc/ ../rustc_codegen_gcc/ sync_branch_name
```
TODO: write a script that does the above.
https://rust-lang.zulipchat.com/#narrow/stream/301329-t-devtools/topic/subtree.20madness/near/258877725

View File

@ -0,0 +1,72 @@
# Tips
The following shows how to do various small things we encountered and thought could
be useful.
### How to send arguments to the GCC linker
```
CG_RUSTFLAGS="-Clink-args=-save-temps -v" ../y.sh cargo build
```
### How to see the personality functions in the asm dump
```
CG_RUSTFLAGS="-Clink-arg=-save-temps -v -Clink-arg=-dA" ../y.sh cargo build
```
### How to see the LLVM IR for a sysroot crate
```
cargo build -v --target x86_64-unknown-linux-gnu -Zbuild-std
# Take the command from the output and add --emit=llvm-ir
```
### To prevent the linker from demangling symbols
Run with:
```
COLLECT_NO_DEMANGLE=1
```
### How to use a custom-built rustc
* Build the stage2 compiler and link it as a toolchain (`rustup toolchain link debug-current build/x86_64-unknown-linux-gnu/stage2`).
* Clean and rebuild the codegen with `debug-current` set as the channel in the `rust-toolchain` file (see the sketch below).
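A minimal sketch of those two steps, assuming the stage2 build is in the usual `build/x86_64-unknown-linux-gnu/stage2` location:
```bash
# link the locally built stage2 compiler as a rustup toolchain
rustup toolchain link debug-current build/x86_64-unknown-linux-gnu/stage2
rustup run debug-current rustc --version   # sanity check
# set the channel in rust-toolchain to "debug-current", then clean and rebuild the codegen
```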
### How to use [mem-trace](https://github.com/antoyo/mem-trace)
`rustc` needs to be built without `jemalloc` so that `mem-trace` can overload `malloc`: since `jemalloc` is linked statically, an `LD_PRELOAD`-ed library won't have a chance to intercept the calls to `malloc`.
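For example, a hypothetical invocation (the shared-object name and path are placeholders and depend on how mem-trace is built):
```bash
# preload the mem-trace library so its malloc wrapper intercepts allocations
# made by a rustc built without jemalloc
LD_PRELOAD=/path/to/mem-trace/libmemtrace.so rustc main.rs
```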
### How to generate GIMPLE
If you need to check what gccjit is generating (GIMPLE), then take a look at how to
generate it in [gimple.md](./doc/gimple.md).
### How to build a cross-compiling libgccjit
#### Building libgccjit
* Follow the instructions on [this repo](https://github.com/cross-cg-gcc-tools/cross-gcc).
#### Configuring rustc_codegen_gcc
* Run `./y.sh prepare --cross` so that the sysroot is patched for the cross-compiling case.
* Set the path to the cross-compiling libgccjit in `gcc-path` (in `config.toml`).
* Make sure you have the linker for your target (for instance `m68k-unknown-linux-gnu-gcc`) in your `$PATH`. Currently, the linker name is hardcoded as being `$TARGET-gcc`. Specify the target when building the sysroot: `./y.sh build --target-triple m68k-unknown-linux-gnu`.
* Build your project by specifying the target: `OVERWRITE_TARGET_TRIPLE=m68k-unknown-linux-gnu ../y.sh cargo build --target m68k-unknown-linux-gnu`.
If the target is not yet supported by the Rust compiler, create a [target specification file](https://docs.rust-embedded.org/embedonomicon/custom-target.html) (note that the `arch` specified in this file must be supported by the rust compiler).
Then, you can use it the following way:
* Add the target specification file using `--target` as an **absolute** path to build the sysroot: `./y.sh build --target-triple m68k-unknown-linux-gnu --target $(pwd)/m68k-unknown-linux-gnu.json`
* Build your project by specifying the target specification file: `OVERWRITE_TARGET_TRIPLE=m68k-unknown-linux-gnu ../y.sh cargo build --target path/to/m68k-unknown-linux-gnu.json`.
If you get the following error:
```
/usr/bin/ld: unrecognised emulation mode: m68kelf
```
Make sure you set `gcc-path` (in `config.toml`) to the install directory.

View File

@ -1,6 +1,6 @@
#![feature(
no_core, lang_items, intrinsics, unboxed_closures, type_ascription, extern_types,
decl_macro, rustc_attrs, transparent_unions, auto_traits,
decl_macro, rustc_attrs, transparent_unions, auto_traits, freeze_impls,
thread_local
)]
#![no_core]

View File

@ -0,0 +1 @@
b6f163f52

View File

@ -20,8 +20,6 @@ codegen_gcc_dynamic_linking_with_lto =
cannot prefer dynamic linking when performing LTO
.note = only 'staticlib', 'bin', and 'cdylib' outputs are supported with LTO
codegen_gcc_load_bitcode = failed to load bitcode of module "{$name}"
codegen_gcc_lto_disallowed = lto can only be run for executables, cdylibs and static library outputs
codegen_gcc_lto_dylib = lto cannot be used for `dylib` crate type without `-Zdylib-lto`

View File

@ -39,6 +39,4 @@ index 42a26ae..5ac1042 100644
+#![cfg(test)]
#![feature(alloc_layout_extra)]
#![feature(array_chunks)]
#![feature(array_methods)]
--
2.21.0 (Apple Git-122)
#![feature(array_windows)]

View File

@ -0,0 +1,24 @@
From f4a31d2c57cdbd578b778ab70eb2a0cfb248652c Mon Sep 17 00:00:00 2001
From: Antoni Boucher <bouanto@zoho.com>
Date: Tue, 5 Mar 2024 12:39:44 -0500
Subject: [PATCH] Remove #[deny(warnings)]
---
src/lib.rs | 1 -
1 file changed, 1 deletion(-)
diff --git a/src/lib.rs b/src/lib.rs
index 8ade2881d5..e26c595e38 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -47,7 +47,6 @@
)]
#![deny(missing_docs)]
#![deny(missing_debug_implementations)]
-#![doc(test(attr(allow(unused_variables), deny(warnings))))]
#![no_std]
#![cfg_attr(feature = "simd_support", feature(stdsimd, portable_simd))]
#![cfg_attr(doc_cfg, feature(doc_cfg))]
--
2.44.0

View File

@ -21,19 +21,3 @@ index 5b21355..cb0c49b 100644
[dependencies]
alloc = { path = "../alloc", public = true }
diff --git a/library/test/Cargo.toml b/library/test/Cargo.toml
index 91a1abd..a58c160 100644
--- a/library/test/Cargo.toml
+++ b/library/test/Cargo.toml
@@ -4,7 +4,7 @@ version = "0.0.0"
edition = "2021"
[lib]
-crate-type = ["dylib", "rlib"]
+crate-type = ["rlib"]
[dependencies]
getopts = { version = "0.2.21", features = ['rustc-dep-of-std'] }
--
2.42.0

View File

@ -1,3 +1,3 @@
[toolchain]
channel = "nightly-2023-11-17"
channel = "nightly-2024-03-05"
components = ["rust-src", "rustc-dev", "llvm-tools-preview"]

View File

@ -1,29 +0,0 @@
#!/usr/bin/env bash
set -e
case $1 in
"prepare")
TOOLCHAIN=$(date +%Y-%m-%d)
echo "=> Installing new nightly"
rustup toolchain install --profile minimal nightly-${TOOLCHAIN} # Sanity check to see if the nightly exists
echo nightly-${TOOLCHAIN} > rust-toolchain
echo "=> Uninstalling all old nightlies"
for nightly in $(rustup toolchain list | grep nightly | grep -v $TOOLCHAIN | grep -v nightly-x86_64); do
rustup toolchain uninstall $nightly
done
./clean_all.sh
./y.sh prepare
;;
"commit")
git add rust-toolchain
git commit -m "Rustup to $(rustc -V)"
;;
*)
echo "Unknown command '$1'"
echo "Usage: ./rustup.sh prepare|commit"
;;
esac

View File

@ -18,17 +18,16 @@ impl<'a, 'gcc, 'tcx> AbiBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
fn get_param(&mut self, index: usize) -> Self::Value {
let func = self.current_func();
let param = func.get_param(index as i32);
let on_stack =
if let Some(on_stack_param_indices) = self.on_stack_function_params.borrow().get(&func) {
on_stack_param_indices.contains(&index)
}
else {
false
};
let on_stack = if let Some(on_stack_param_indices) =
self.on_stack_function_params.borrow().get(&func)
{
on_stack_param_indices.contains(&index)
} else {
false
};
if on_stack {
param.to_lvalue().get_address(None)
}
else {
} else {
param.to_rvalue()
}
}
@ -37,13 +36,14 @@ impl<'a, 'gcc, 'tcx> AbiBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
impl GccType for CastTarget {
fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, '_>) -> Type<'gcc> {
let rest_gcc_unit = self.rest.unit.gcc_type(cx);
let (rest_count, rem_bytes) =
if self.rest.unit.size.bytes() == 0 {
(0, 0)
}
else {
(self.rest.total.bytes() / self.rest.unit.size.bytes(), self.rest.total.bytes() % self.rest.unit.size.bytes())
};
let (rest_count, rem_bytes) = if self.rest.unit.size.bytes() == 0 {
(0, 0)
} else {
(
self.rest.total.bytes() / self.rest.unit.size.bytes(),
self.rest.total.bytes() % self.rest.unit.size.bytes(),
)
};
if self.prefix.iter().all(|x| x.is_none()) {
// Simplify to a single unit when there is no prefix and size <= unit size
@ -61,9 +61,7 @@ impl GccType for CastTarget {
let mut args: Vec<_> = self
.prefix
.iter()
.flat_map(|option_reg| {
option_reg.map(|reg| reg.gcc_type(cx))
})
.flat_map(|option_reg| option_reg.map(|reg| reg.gcc_type(cx)))
.chain((0..rest_count).map(|_| rest_gcc_unit))
.collect();
@ -86,12 +84,10 @@ impl GccType for Reg {
fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, '_>) -> Type<'gcc> {
match self.kind {
RegKind::Integer => cx.type_ix(self.size.bits()),
RegKind::Float => {
match self.size.bits() {
32 => cx.type_f32(),
64 => cx.type_f64(),
_ => bug!("unsupported float: {:?}", self),
}
RegKind::Float => match self.size.bits() {
32 => cx.type_f32(),
64 => cx.type_f64(),
_ => bug!("unsupported float: {:?}", self),
},
RegKind::Vector => unimplemented!(), //cx.type_vector(cx.type_i8(), self.size.bytes()),
}
@ -119,19 +115,18 @@ impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
// This capacity calculation is approximate.
let mut argument_tys = Vec::with_capacity(
self.args.len() + if let PassMode::Indirect { .. } = self.ret.mode { 1 } else { 0 }
self.args.len() + if let PassMode::Indirect { .. } = self.ret.mode { 1 } else { 0 },
);
let return_type =
match self.ret.mode {
PassMode::Ignore => cx.type_void(),
PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_gcc_type(cx),
PassMode::Cast { ref cast, .. } => cast.gcc_type(cx),
PassMode::Indirect { .. } => {
argument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx)));
cx.type_void()
}
};
let return_type = match self.ret.mode {
PassMode::Ignore => cx.type_void(),
PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_gcc_type(cx),
PassMode::Cast { ref cast, .. } => cast.gcc_type(cx),
PassMode::Indirect { .. } => {
argument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx)));
cx.type_void()
}
};
#[cfg(feature = "master")]
let mut non_null_args = Vec::new();
@ -149,17 +144,23 @@ impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
ty
};
#[cfg(not(feature = "master"))]
let apply_attrs = |ty: Type<'gcc>, _attrs: &ArgAttributes, _arg_index: usize| {
ty
};
let apply_attrs = |ty: Type<'gcc>, _attrs: &ArgAttributes, _arg_index: usize| ty;
for arg in self.args.iter() {
let arg_ty = match arg.mode {
PassMode::Ignore => continue,
PassMode::Pair(a, b) => {
let arg_pos = argument_tys.len();
argument_tys.push(apply_attrs(arg.layout.scalar_pair_element_gcc_type(cx, 0), &a, arg_pos));
argument_tys.push(apply_attrs(arg.layout.scalar_pair_element_gcc_type(cx, 1), &b, arg_pos + 1));
argument_tys.push(apply_attrs(
arg.layout.scalar_pair_element_gcc_type(cx, 0),
&a,
arg_pos,
));
argument_tys.push(apply_attrs(
arg.layout.scalar_pair_element_gcc_type(cx, 1),
&b,
arg_pos + 1,
));
continue;
}
PassMode::Cast { ref cast, pad_i32 } => {
@ -174,14 +175,17 @@ impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
// This is a "byval" argument, so we don't apply the `restrict` attribute on it.
on_stack_param_indices.insert(argument_tys.len());
arg.memory_ty(cx)
},
PassMode::Direct(attrs) => apply_attrs(arg.layout.immediate_gcc_type(cx), &attrs, argument_tys.len()),
}
PassMode::Direct(attrs) => {
apply_attrs(arg.layout.immediate_gcc_type(cx), &attrs, argument_tys.len())
}
PassMode::Indirect { attrs, meta_attrs: None, on_stack: false } => {
apply_attrs(cx.type_ptr_to(arg.memory_ty(cx)), &attrs, argument_tys.len())
}
PassMode::Indirect { attrs, meta_attrs: Some(meta_attrs), on_stack } => {
assert!(!on_stack);
let ty = apply_attrs(cx.type_ptr_to(arg.memory_ty(cx)), &attrs, argument_tys.len());
let ty =
apply_attrs(cx.type_ptr_to(arg.memory_ty(cx)), &attrs, argument_tys.len());
apply_attrs(ty, &meta_attrs, argument_tys.len())
}
};
@ -207,15 +211,14 @@ impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
fn ptr_to_gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
// FIXME(antoyo): Should we do something with `FnAbiGcc::fn_attributes`?
let FnAbiGcc {
return_type,
arguments_type,
is_c_variadic,
let FnAbiGcc { return_type, arguments_type, is_c_variadic, on_stack_param_indices, .. } =
self.gcc_type(cx);
let pointer_type =
cx.context.new_function_pointer_type(None, return_type, &arguments_type, is_c_variadic);
cx.on_stack_params.borrow_mut().insert(
pointer_type.dyncast_function_ptr_type().expect("function ptr type"),
on_stack_param_indices,
..
} = self.gcc_type(cx);
let pointer_type = cx.context.new_function_pointer_type(None, return_type, &arguments_type, is_c_variadic);
cx.on_stack_params.borrow_mut().insert(pointer_type.dyncast_function_ptr_type().expect("function ptr type"), on_stack_param_indices);
);
pointer_type
}
}

View File

@ -1,4 +1,4 @@
#[cfg(feature="master")]
#[cfg(feature = "master")]
use gccjit::FnAttribute;
use gccjit::{Context, FunctionType, GlobalKind, ToRValue, Type};
use rustc_ast::expand::allocator::{
@ -11,15 +11,20 @@ use rustc_session::config::OomStrategy;
use crate::GccContext;
pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut GccContext, _module_name: &str, kind: AllocatorKind, alloc_error_handler_kind: AllocatorKind) {
pub(crate) unsafe fn codegen(
tcx: TyCtxt<'_>,
mods: &mut GccContext,
_module_name: &str,
kind: AllocatorKind,
alloc_error_handler_kind: AllocatorKind,
) {
let context = &mods.context;
let usize =
match tcx.sess.target.pointer_width {
16 => context.new_type::<u16>(),
32 => context.new_type::<u32>(),
64 => context.new_type::<u64>(),
tws => bug!("Unsupported target word size for int: {}", tws),
};
let usize = match tcx.sess.target.pointer_width {
16 => context.new_type::<u16>(),
32 => context.new_type::<u32>(),
64 => context.new_type::<u64>(),
tws => bug!("Unsupported target word size for int: {}", tws),
};
let i8 = context.new_type::<i8>();
let i8p = i8.make_pointer();
@ -58,7 +63,7 @@ pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut GccContext, _module_nam
tcx,
context,
"__rust_alloc_error_handler",
&alloc_error_handler_name(alloc_error_handler_kind),
alloc_error_handler_name(alloc_error_handler_kind),
&[usize, usize],
None,
);
@ -85,24 +90,42 @@ fn create_wrapper_function(
) {
let void = context.new_type::<()>();
let args: Vec<_> = types.iter().enumerate()
.map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index)))
let args: Vec<_> = types
.iter()
.enumerate()
.map(|(index, typ)| context.new_parameter(None, *typ, format!("param{}", index)))
.collect();
let func = context.new_function(None, FunctionType::Exported, output.unwrap_or(void), &args, from_name, false);
let func = context.new_function(
None,
FunctionType::Exported,
output.unwrap_or(void),
&args,
from_name,
false,
);
if tcx.sess.default_hidden_visibility() {
#[cfg(feature="master")]
#[cfg(feature = "master")]
func.add_attribute(FnAttribute::Visibility(gccjit::Visibility::Hidden));
}
if tcx.sess.must_emit_unwind_tables() {
// TODO(antoyo): emit unwind tables.
}
let args: Vec<_> = types.iter().enumerate()
.map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index)))
let args: Vec<_> = types
.iter()
.enumerate()
.map(|(index, typ)| context.new_parameter(None, *typ, format!("param{}", index)))
.collect();
let callee = context.new_function(None, FunctionType::Extern, output.unwrap_or(void), &args, to_name, false);
#[cfg(feature="master")]
let callee = context.new_function(
None,
FunctionType::Extern,
output.unwrap_or(void),
&args,
to_name,
false,
);
#[cfg(feature = "master")]
callee.add_attribute(FnAttribute::Visibility(gccjit::Visibility::Hidden));
let block = func.new_block("entry");
@ -116,8 +139,7 @@ fn create_wrapper_function(
//llvm::LLVMSetTailCall(ret, True);
if output.is_some() {
block.end_with_return(None, ret);
}
else {
} else {
block.end_with_void_return(None);
}

View File

@ -2,7 +2,10 @@ use gccjit::{LValue, RValue, ToRValue, Type};
use rustc_ast::ast::{InlineAsmOptions, InlineAsmTemplatePiece};
use rustc_codegen_ssa::mir::operand::OperandValue;
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::{AsmBuilderMethods, AsmMethods, BaseTypeMethods, BuilderMethods, GlobalAsmOperandRef, InlineAsmOperandRef};
use rustc_codegen_ssa::traits::{
AsmBuilderMethods, AsmMethods, BaseTypeMethods, BuilderMethods, GlobalAsmOperandRef,
InlineAsmOperandRef,
};
use rustc_middle::{bug, ty::Instance};
use rustc_span::Span;
@ -11,11 +14,10 @@ use rustc_target::asm::*;
use std::borrow::Cow;
use crate::builder::Builder;
use crate::callee::get_fn;
use crate::context::CodegenCx;
use crate::errors::UnwindingInlineAsm;
use crate::type_of::LayoutGccExt;
use crate::callee::get_fn;
// Rust asm! and GCC Extended Asm semantics differ substantially.
//
@ -68,7 +70,6 @@ use crate::callee::get_fn;
const ATT_SYNTAX_INS: &str = ".att_syntax noprefix\n\t";
const INTEL_SYNTAX_INS: &str = "\n\t.intel_syntax noprefix";
struct AsmOutOperand<'a, 'tcx, 'gcc> {
rust_idx: usize,
constraint: &'a str,
@ -76,13 +77,13 @@ struct AsmOutOperand<'a, 'tcx, 'gcc> {
readwrite: bool,
tmp_var: LValue<'gcc>,
out_place: Option<PlaceRef<'tcx, RValue<'gcc>>>
out_place: Option<PlaceRef<'tcx, RValue<'gcc>>>,
}
struct AsmInOperand<'a, 'tcx> {
rust_idx: usize,
constraint: Cow<'a, str>,
val: RValue<'tcx>
val: RValue<'tcx>,
}
impl AsmOutOperand<'_, '_, '_> {
@ -95,23 +96,29 @@ impl AsmOutOperand<'_, '_, '_> {
res.push('&');
}
res.push_str(&self.constraint);
res.push_str(self.constraint);
res
}
}
enum ConstraintOrRegister {
Constraint(&'static str),
Register(&'static str)
Register(&'static str),
}
impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
fn codegen_inline_asm(&mut self, template: &[InlineAsmTemplatePiece], rust_operands: &[InlineAsmOperandRef<'tcx, Self>], options: InlineAsmOptions, span: &[Span], instance: Instance<'_>, dest: Option<Self::BasicBlock>, _catch_funclet: Option<(Self::BasicBlock, Option<&Self::Funclet>)>) {
fn codegen_inline_asm(
&mut self,
template: &[InlineAsmTemplatePiece],
rust_operands: &[InlineAsmOperandRef<'tcx, Self>],
options: InlineAsmOptions,
span: &[Span],
instance: Instance<'_>,
dest: Option<Self::BasicBlock>,
_catch_funclet: Option<(Self::BasicBlock, Option<&Self::Funclet>)>,
) {
if options.contains(InlineAsmOptions::MAY_UNWIND) {
self.sess().dcx()
.create_err(UnwindingInlineAsm { span: span[0] })
.emit();
self.sess().dcx().create_err(UnwindingInlineAsm { span: span[0] }).emit();
return;
}
@ -161,32 +168,40 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
use ConstraintOrRegister::*;
let (constraint, ty) = match (reg_to_gcc(reg), place) {
(Constraint(constraint), Some(place)) => (constraint, place.layout.gcc_type(self.cx)),
(Constraint(constraint), Some(place)) => {
(constraint, place.layout.gcc_type(self.cx))
}
// When `reg` is a class and not an explicit register but the out place is not specified,
// we need to create an unused output variable to assign the output to. This var
// needs to be of a type that's "compatible" with the register class, but specific type
// doesn't matter.
(Constraint(constraint), None) => (constraint, dummy_output_type(self.cx, reg.reg_class())),
(Constraint(constraint), None) => {
(constraint, dummy_output_type(self.cx, reg.reg_class()))
}
(Register(_), Some(_)) => {
// left for the next pass
continue
},
continue;
}
(Register(reg_name), None) => {
// `clobber_abi` can add lots of clobbers that are not supported by the target,
// such as AVX-512 registers, so we just ignore unsupported registers
let is_target_supported = reg.reg_class().supported_types(asm_arch).iter()
.any(|&(_, feature)| {
if let Some(feature) = feature {
self.tcx.asm_target_features(instance.def_id()).contains(&feature)
} else {
true // Register class is unconditionally supported
}
});
let is_target_supported =
reg.reg_class().supported_types(asm_arch).iter().any(
|&(_, feature)| {
if let Some(feature) = feature {
self.tcx
.asm_target_features(instance.def_id())
.contains(&feature)
} else {
true // Register class is unconditionally supported
}
},
);
if is_target_supported && !clobbers.contains(&reg_name) {
clobbers.push(reg_name);
}
continue
continue;
}
};
@ -197,7 +212,7 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
late,
readwrite: false,
tmp_var,
out_place: place
out_place: place,
});
}
@ -206,23 +221,22 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
inputs.push(AsmInOperand {
constraint: Cow::Borrowed(constraint),
rust_idx,
val: value.immediate()
val: value.immediate(),
});
}
else {
} else {
// left for the next pass
continue
continue;
}
}
InlineAsmOperandRef::InOut { reg, late, in_value, out_place } => {
let constraint = if let ConstraintOrRegister::Constraint(constraint) = reg_to_gcc(reg) {
constraint
}
else {
// left for the next pass
continue
};
let constraint =
if let ConstraintOrRegister::Constraint(constraint) = reg_to_gcc(reg) {
constraint
} else {
// left for the next pass
continue;
};
// Rustc frontend guarantees that input and output types are "compatible",
// so we can just use input var's type for the output variable.
@ -253,7 +267,7 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
inputs.push(AsmInOperand {
constraint,
rust_idx,
val: in_value.immediate()
val: in_value.immediate(),
});
}
}
@ -271,7 +285,8 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
InlineAsmOperandRef::SymStatic { def_id } => {
// TODO(@Amanieu): Additional mangling is needed on
// some targets to add a leading underscore (Mach-O).
constants_len += self.tcx.symbol_name(Instance::mono(self.tcx, def_id)).name.len();
constants_len +=
self.tcx.symbol_name(Instance::mono(self.tcx, def_id)).name.len();
}
InlineAsmOperandRef::Label { label } => {
@ -288,10 +303,9 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
if let ConstraintOrRegister::Register(reg_name) = reg_to_gcc(reg) {
let out_place = if let Some(place) = place {
place
}
else {
} else {
// processed in the previous pass
continue
continue;
};
let ty = out_place.layout.gcc_type(self.cx);
@ -299,12 +313,12 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
tmp_var.set_register_name(reg_name);
outputs.push(AsmOutOperand {
constraint: "r".into(),
constraint: "r",
rust_idx,
late,
readwrite: false,
tmp_var,
out_place: Some(out_place)
out_place: Some(out_place),
});
}
@ -322,7 +336,7 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
inputs.push(AsmInOperand {
constraint: "r".into(),
rust_idx,
val: reg_var.to_rvalue()
val: reg_var.to_rvalue(),
});
}
@ -338,7 +352,7 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
tmp_var.set_register_name(reg_name);
outputs.push(AsmOutOperand {
constraint: "r".into(),
constraint: "r",
rust_idx,
late,
readwrite: false,
@ -350,7 +364,7 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
inputs.push(AsmInOperand {
constraint,
rust_idx,
val: in_value.immediate()
val: in_value.immediate(),
});
}
@ -385,7 +399,8 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
// 3. Build the template string
let mut template_str = String::with_capacity(estimate_template_length(template, constants_len, att_dialect));
let mut template_str =
String::with_capacity(estimate_template_length(template, constants_len, att_dialect));
if att_dialect {
template_str.push_str(ATT_SYNTAX_INS);
}
@ -395,16 +410,15 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
InlineAsmTemplatePiece::String(ref string) => {
for char in string.chars() {
// TODO(antoyo): might also need to escape | if rustc doesn't do it.
let escaped_char =
match char {
'%' => "%%",
'{' => "%{",
'}' => "%}",
_ => {
template_str.push(char);
continue;
},
};
let escaped_char = match char {
'%' => "%%",
'{' => "%{",
'}' => "%}",
_ => {
template_str.push(char);
continue;
}
};
template_str.push_str(escaped_char);
}
}
@ -420,9 +434,10 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
};
match rust_operands[operand_idx] {
InlineAsmOperandRef::Out { reg, .. } => {
InlineAsmOperandRef::Out { reg, .. } => {
let modifier = modifier_to_gcc(asm_arch, reg.reg_class(), modifier);
let gcc_index = outputs.iter()
let gcc_index = outputs
.iter()
.position(|op| operand_idx == op.rust_idx)
.expect("wrong rust index");
push_to_template(modifier, gcc_index);
@ -430,7 +445,8 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
InlineAsmOperandRef::In { reg, .. } => {
let modifier = modifier_to_gcc(asm_arch, reg.reg_class(), modifier);
let in_gcc_index = inputs.iter()
let in_gcc_index = inputs
.iter()
.position(|op| operand_idx == op.rust_idx)
.expect("wrong rust index");
let gcc_index = in_gcc_index + outputs.len();
@ -441,7 +457,8 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
let modifier = modifier_to_gcc(asm_arch, reg.reg_class(), modifier);
// The input register is tied to the output, so we can just use the index of the output register
let gcc_index = outputs.iter()
let gcc_index = outputs
.iter()
.position(|op| operand_idx == op.rust_idx)
.expect("wrong rust index");
push_to_template(modifier, gcc_index);
@ -521,7 +538,9 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
}
if dest.is_none() && options.contains(InlineAsmOptions::NORETURN) {
let builtin_unreachable = self.context.get_builtin_function("__builtin_unreachable");
let builtin_unreachable: RValue<'gcc> = unsafe { std::mem::transmute(builtin_unreachable) };
let builtin_unreachable: RValue<'gcc> = unsafe {
std::mem::transmute(builtin_unreachable)
};
self.call(self.type_void(), None, None, builtin_unreachable, &[], None);
}
@ -542,19 +561,23 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
}
}
fn estimate_template_length(template: &[InlineAsmTemplatePiece], constants_len: usize, att_dialect: bool) -> usize {
let len: usize = template.iter().map(|piece| {
match *piece {
InlineAsmTemplatePiece::String(ref string) => {
string.len()
fn estimate_template_length(
template: &[InlineAsmTemplatePiece],
constants_len: usize,
att_dialect: bool,
) -> usize {
let len: usize = template
.iter()
.map(|piece| {
match *piece {
InlineAsmTemplatePiece::String(ref string) => string.len(),
InlineAsmTemplatePiece::Placeholder { .. } => {
// '%' + 1 char modifier + 1 char index
3
}
}
InlineAsmTemplatePiece::Placeholder { .. } => {
// '%' + 1 char modifier + 1 char index
3
}
}
})
.sum();
})
.sum();
// increase it by 5% to account for possible '%' signs that'll be duplicated
// I pulled the number out of blue, but should be fair enough
@ -587,7 +610,7 @@ fn reg_to_gcc(reg: InlineAsmRegOrRegClass) -> ConstraintOrRegister {
_ => unimplemented!(),
}
},
}
// They can be retrieved from https://gcc.gnu.org/onlinedocs/gcc/Machine-Constraints.html
InlineAsmRegOrRegClass::RegClass(reg) => match reg {
InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => "r",
@ -635,7 +658,7 @@ fn reg_to_gcc(reg: InlineAsmRegOrRegClass) -> ConstraintOrRegister {
InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr)
| InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => {
unreachable!("clobber-only")
},
}
InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => "r",
InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => "f",
InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => {
@ -662,7 +685,7 @@ fn reg_to_gcc(reg: InlineAsmRegOrRegClass) -> ConstraintOrRegister {
InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg_addr) => "a",
InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => "f",
InlineAsmRegClass::Err => unreachable!(),
}
},
};
ConstraintOrRegister::Constraint(constraint)
@ -678,7 +701,7 @@ fn dummy_output_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, reg: InlineAsmRegCl
| InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
unimplemented!()
}
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg)=> cx.type_i32(),
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => cx.type_i32(),
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => cx.type_f32(),
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
@ -711,7 +734,7 @@ fn dummy_output_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, reg: InlineAsmRegCl
InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr)
| InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => {
unreachable!("clobber-only")
},
}
InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => cx.type_i32(),
InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => cx.type_f32(),
InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => cx.type_f32(),
@ -729,9 +752,9 @@ fn dummy_output_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, reg: InlineAsmRegCl
InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => cx.type_i32(),
InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
bug!("LLVM backend does not support SPIR-V")
},
}
InlineAsmRegClass::S390x(
S390xInlineAsmRegClass::reg | S390xInlineAsmRegClass::reg_addr
S390xInlineAsmRegClass::reg | S390xInlineAsmRegClass::reg_addr,
) => cx.type_i32(),
InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => cx.type_f64(),
InlineAsmRegClass::Err => unreachable!(),
@ -739,7 +762,13 @@ fn dummy_output_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, reg: InlineAsmRegCl
}
impl<'gcc, 'tcx> AsmMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
fn codegen_global_asm(&self, template: &[InlineAsmTemplatePiece], operands: &[GlobalAsmOperandRef<'tcx>], options: InlineAsmOptions, _line_spans: &[Span]) {
fn codegen_global_asm(
&self,
template: &[InlineAsmTemplatePiece],
operands: &[GlobalAsmOperandRef<'tcx>],
options: InlineAsmOptions,
_line_spans: &[Span],
) {
let asm_arch = self.tcx.sess.asm_arch.unwrap();
// Default to Intel syntax on x86
@ -757,15 +786,17 @@ impl<'gcc, 'tcx> AsmMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
let mut index = 0;
while index < string.len() {
// NOTE: gcc does not allow inline comment, so remove them.
let comment_index = string[index..].find("//")
let comment_index = string[index..]
.find("//")
.map(|comment_index| comment_index + index)
.unwrap_or(string.len());
template_str.push_str(&string[index..comment_index]);
index = string[comment_index..].find('\n')
index = string[comment_index..]
.find('\n')
.map(|index| index + comment_index)
.unwrap_or(string.len());
}
},
}
InlineAsmTemplatePiece::Placeholder { operand_idx, modifier: _, span: _ } => {
match operands[operand_idx] {
GlobalAsmOperandRef::Const { ref string } => {
@ -807,14 +838,22 @@ impl<'gcc, 'tcx> AsmMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
}
}
fn modifier_to_gcc(arch: InlineAsmArch, reg: InlineAsmRegClass, modifier: Option<char>) -> Option<char> {
fn modifier_to_gcc(
arch: InlineAsmArch,
reg: InlineAsmRegClass,
modifier: Option<char>,
) -> Option<char> {
// The modifiers can be retrieved from
// https://gcc.gnu.org/onlinedocs/gcc/Modifiers.html#Modifiers
match reg {
InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => modifier,
InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
| InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
if modifier == Some('v') { None } else { modifier }
if modifier == Some('v') {
None
} else {
modifier
}
}
InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => {
unreachable!("clobber-only")
@ -846,7 +885,13 @@ fn modifier_to_gcc(arch: InlineAsmArch, reg: InlineAsmRegClass, modifier: Option
}
InlineAsmRegClass::X86(X86InlineAsmRegClass::reg)
| InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => match modifier {
None => if arch == InlineAsmArch::X86_64 { Some('q') } else { Some('k') },
None => {
if arch == InlineAsmArch::X86_64 {
Some('q')
} else {
Some('k')
}
}
Some('l') => Some('b'),
Some('h') => Some('h'),
Some('x') => Some('w'),

View File

@ -1,21 +1,24 @@
#[cfg(feature="master")]
#[cfg(feature = "master")]
use gccjit::FnAttribute;
use gccjit::Function;
use rustc_attr::InstructionSetAttr;
#[cfg(feature="master")]
#[cfg(feature = "master")]
use rustc_attr::InlineAttr;
use rustc_middle::ty;
#[cfg(feature="master")]
use rustc_attr::InstructionSetAttr;
#[cfg(feature = "master")]
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_middle::ty;
use rustc_span::symbol::sym;
use crate::{context::CodegenCx, errors::TiedTargetFeatures};
use crate::gcc_util::{check_tied_features, to_gcc_features};
use crate::{context::CodegenCx, errors::TiedTargetFeatures};
/// Get GCC attribute for the provided inline heuristic.
#[cfg(feature="master")]
#[cfg(feature = "master")]
#[inline]
fn inline_attr<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, inline: InlineAttr) -> Option<FnAttribute<'gcc>> {
fn inline_attr<'gcc, 'tcx>(
cx: &CodegenCx<'gcc, 'tcx>,
inline: InlineAttr,
) -> Option<FnAttribute<'gcc>> {
match inline {
InlineAttr::Hint => Some(FnAttribute::Inline),
InlineAttr::Always => Some(FnAttribute::AlwaysInline),
@ -34,24 +37,22 @@ fn inline_attr<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, inline: InlineAttr) -> Op
/// attributes.
pub fn from_fn_attrs<'gcc, 'tcx>(
cx: &CodegenCx<'gcc, 'tcx>,
#[cfg_attr(not(feature="master"), allow(unused_variables))]
func: Function<'gcc>,
#[cfg_attr(not(feature = "master"), allow(unused_variables))] func: Function<'gcc>,
instance: ty::Instance<'tcx>,
) {
let codegen_fn_attrs = cx.tcx.codegen_fn_attrs(instance.def_id());
#[cfg(feature="master")]
#[cfg(feature = "master")]
{
let inline =
if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NAKED) {
InlineAttr::Never
}
else if codegen_fn_attrs.inline == InlineAttr::None && instance.def.requires_inline(cx.tcx) {
InlineAttr::Hint
}
else {
codegen_fn_attrs.inline
};
let inline = if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NAKED) {
InlineAttr::Never
} else if codegen_fn_attrs.inline == InlineAttr::None
&& instance.def.requires_inline(cx.tcx)
{
InlineAttr::Hint
} else {
codegen_fn_attrs.inline
};
if let Some(attr) = inline_attr(cx, inline) {
if let FnAttribute::AlwaysInline = attr {
func.add_attribute(FnAttribute::Inline);
@ -70,18 +71,21 @@ pub fn from_fn_attrs<'gcc, 'tcx>(
}
}
let function_features =
codegen_fn_attrs.target_features.iter().map(|features| features.as_str()).collect::<Vec<&str>>();
let function_features = codegen_fn_attrs
.target_features
.iter()
.map(|features| features.as_str())
.collect::<Vec<&str>>();
if let Some(features) = check_tied_features(cx.tcx.sess, &function_features.iter().map(|features| (*features, true)).collect()) {
let span = cx.tcx
if let Some(features) = check_tied_features(
cx.tcx.sess,
&function_features.iter().map(|features| (*features, true)).collect(),
) {
let span = cx
.tcx
.get_attr(instance.def_id(), sym::target_feature)
.map_or_else(|| cx.tcx.def_span(instance.def_id()), |a| a.span);
cx.tcx.dcx().create_err(TiedTargetFeatures {
features: features.join(", "),
span,
})
.emit();
cx.tcx.dcx().create_err(TiedTargetFeatures { features: features.join(", "), span }).emit();
return;
}
@ -105,24 +109,25 @@ pub fn from_fn_attrs<'gcc, 'tcx>(
// compiling Rust for Linux:
// SSE register return with SSE disabled
// TODO(antoyo): support soft-float and retpoline-external-thunk.
if feature.contains("soft-float") || feature.contains("retpoline-external-thunk") || *feature == "-sse" {
if feature.contains("soft-float")
|| feature.contains("retpoline-external-thunk")
|| *feature == "-sse"
{
return None;
}
if feature.starts_with('-') {
Some(format!("no{}", feature))
}
else if feature.starts_with('+') {
} else if feature.starts_with('+') {
Some(feature[1..].to_string())
}
else {
} else {
Some(feature.to_string())
}
})
.collect::<Vec<_>>()
.join(",");
if !target_features.is_empty() {
#[cfg(feature="master")]
#[cfg(feature = "master")]
func.add_attribute(FnAttribute::Target(&target_features));
}
}

View File

@ -1,7 +1,6 @@
/// GCC requires to use the same toolchain for the whole compilation when doing LTO.
/// So, we need the same version/commit of the linker (gcc) and lto front-end binaries (lto1,
/// lto-wrapper, liblto_plugin.so).
// FIXME(antoyo): the executables compiled with LTO are bigger than those compiled without LTO.
// Since it is the opposite for cg_llvm, check if this is normal.
//
@ -17,7 +16,6 @@
// /usr/bin/ld: warning: type of symbol `_RNvNvNvNtCs5JWOrf9uCus_5rayon11thread_pool19WORKER_THREAD_STATE7___getit5___KEY' changed from 1 to 6 in /tmp/ccKeUSiR.ltrans0.ltrans.o
// /usr/bin/ld: warning: type of symbol `_RNvNvNvNvNtNtNtCsAj5i4SGTR7_3std4sync4mpmc5waker17current_thread_id5DUMMY7___getit5___KEY' changed from 1 to 6 in /tmp/ccKeUSiR.ltrans0.ltrans.o
// /usr/bin/ld: warning: incremental linking of LTO and non-LTO objects; using -flinker-output=nolto-rel which will bypass whole program optimization
use std::ffi::CString;
use std::fs::{self, File};
use std::path::{Path, PathBuf};
@ -30,18 +28,16 @@ use rustc_codegen_ssa::back::write::{CodegenContext, FatLtoInput};
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::{looks_like_rust_object_file, ModuleCodegen, ModuleKind};
use rustc_data_structures::memmap::Mmap;
use rustc_errors::{FatalError, DiagCtxt};
use rustc_errors::{DiagCtxt, FatalError};
use rustc_hir::def_id::LOCAL_CRATE;
use rustc_middle::dep_graph::WorkProduct;
use rustc_middle::middle::exported_symbols::{SymbolExportInfo, SymbolExportLevel};
use rustc_session::config::{CrateType, Lto};
use tempfile::{TempDir, tempdir};
use tempfile::{tempdir, TempDir};
use crate::back::write::save_temp_bitcode;
use crate::errors::{
DynamicLinkingWithLTO, LtoBitcodeFromRlib, LtoDisallowed, LtoDylib,
};
use crate::{GccCodegenBackend, GccContext, to_gcc_opt_level};
use crate::errors::{DynamicLinkingWithLTO, LtoBitcodeFromRlib, LtoDisallowed, LtoDylib};
use crate::{to_gcc_opt_level, GccCodegenBackend, GccContext};
/// We keep track of the computed LTO cache keys from the previous
/// session to determine which CGUs we can reuse.
@ -61,7 +57,10 @@ struct LtoData {
tmp_path: TempDir,
}
fn prepare_lto(cgcx: &CodegenContext<GccCodegenBackend>, dcx: &DiagCtxt) -> Result<LtoData, FatalError> {
fn prepare_lto(
cgcx: &CodegenContext<GccCodegenBackend>,
dcx: &DiagCtxt,
) -> Result<LtoData, FatalError> {
let export_threshold = match cgcx.lto {
// We're just doing LTO for our one crate
Lto::ThinLocal => SymbolExportLevel::Rust,
@ -72,14 +71,13 @@ fn prepare_lto(cgcx: &CodegenContext<GccCodegenBackend>, dcx: &DiagCtxt) -> Resu
Lto::No => panic!("didn't request LTO but we're doing LTO"),
};
let tmp_path =
match tempdir() {
Ok(tmp_path) => tmp_path,
Err(error) => {
eprintln!("Cannot create temporary directory: {}", error);
return Err(FatalError);
},
};
let tmp_path = match tempdir() {
Ok(tmp_path) => tmp_path,
Err(error) => {
eprintln!("Cannot create temporary directory: {}", error);
return Err(FatalError);
}
};
let symbol_filter = &|&(ref name, info): &(String, SymbolExportInfo)| {
if info.level.is_below_threshold(export_threshold) || info.used {
@ -108,11 +106,10 @@ fn prepare_lto(cgcx: &CodegenContext<GccCodegenBackend>, dcx: &DiagCtxt) -> Resu
if !crate_type_allows_lto(*crate_type) {
dcx.emit_err(LtoDisallowed);
return Err(FatalError);
} else if *crate_type == CrateType::Dylib {
if !cgcx.opts.unstable_opts.dylib_lto {
dcx.emit_err(LtoDylib);
return Err(FatalError);
}
}
if *crate_type == CrateType::Dylib && !cgcx.opts.unstable_opts.dylib_lto {
dcx.emit_err(LtoDylib);
return Err(FatalError);
}
}
@ -125,8 +122,7 @@ fn prepare_lto(cgcx: &CodegenContext<GccCodegenBackend>, dcx: &DiagCtxt) -> Resu
let exported_symbols =
cgcx.exported_symbols.as_ref().expect("needs exported symbols for LTO");
{
let _timer =
cgcx.prof.generic_activity("GCC_lto_generate_symbols_below_threshold");
let _timer = cgcx.prof.generic_activity("GCC_lto_generate_symbols_below_threshold");
symbols_below_threshold
.extend(exported_symbols[&cnum].iter().filter_map(symbol_filter));
}
@ -170,10 +166,9 @@ fn prepare_lto(cgcx: &CodegenContext<GccCodegenBackend>, dcx: &DiagCtxt) -> Resu
}
fn save_as_file(obj: &[u8], path: &Path) -> Result<(), LtoBitcodeFromRlib> {
fs::write(path, obj)
.map_err(|error| LtoBitcodeFromRlib {
gcc_err: format!("write object file to temp dir: {}", error)
})
fs::write(path, obj).map_err(|error| LtoBitcodeFromRlib {
gcc_err: format!("write object file to temp dir: {}", error),
})
}
/// Performs fat LTO by merging all modules into a single one and returning it
@ -186,13 +181,25 @@ pub(crate) fn run_fat(
let dcx = cgcx.create_dcx();
let lto_data = prepare_lto(cgcx, &dcx)?;
/*let symbols_below_threshold =
lto_data.symbols_below_threshold.iter().map(|c| c.as_ptr()).collect::<Vec<_>>();*/
fat_lto(cgcx, &dcx, modules, cached_modules, lto_data.upstream_modules, lto_data.tmp_path,
lto_data.symbols_below_threshold.iter().map(|c| c.as_ptr()).collect::<Vec<_>>();*/
fat_lto(
cgcx,
&dcx,
modules,
cached_modules,
lto_data.upstream_modules,
lto_data.tmp_path,
//&symbols_below_threshold,
)
}
fn fat_lto(cgcx: &CodegenContext<GccCodegenBackend>, _dcx: &DiagCtxt, modules: Vec<FatLtoInput<GccCodegenBackend>>, cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>, mut serialized_modules: Vec<(SerializedModule<ModuleBuffer>, CString)>, tmp_path: TempDir,
fn fat_lto(
cgcx: &CodegenContext<GccCodegenBackend>,
_dcx: &DiagCtxt,
modules: Vec<FatLtoInput<GccCodegenBackend>>,
cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
mut serialized_modules: Vec<(SerializedModule<ModuleBuffer>, CString)>,
tmp_path: TempDir,
//symbols_below_threshold: &[*const libc::c_char],
) -> Result<LtoModuleCodegen<GccCodegenBackend>, FatalError> {
let _timer = cgcx.prof.generic_activity("GCC_fat_lto_build_monolithic_module");
@ -298,10 +305,15 @@ fn fat_lto(cgcx: &CodegenContext<GccCodegenBackend>, _dcx: &DiagCtxt, modules: V
match bc_decoded {
SerializedModule::Local(ref module_buffer) => {
module.module_llvm.should_combine_object_files = true;
module.module_llvm.context.add_driver_option(module_buffer.0.to_str().expect("path"));
},
module
.module_llvm
.context
.add_driver_option(module_buffer.0.to_str().expect("path"));
}
SerializedModule::FromRlib(_) => unimplemented!("from rlib"),
SerializedModule::FromUncompressedFile(_) => unimplemented!("from uncompressed file"),
SerializedModule::FromUncompressedFile(_) => {
unimplemented!("from uncompressed file")
}
}
serialized_bitcode.push(bc_decoded);
}
@ -309,13 +321,13 @@ fn fat_lto(cgcx: &CodegenContext<GccCodegenBackend>, _dcx: &DiagCtxt, modules: V
// Internalize everything below threshold to help strip out more modules and such.
/*unsafe {
let ptr = symbols_below_threshold.as_ptr();
llvm::LLVMRustRunRestrictionPass(
llmod,
ptr as *const *const libc::c_char,
symbols_below_threshold.len() as libc::size_t,
);*/
save_temp_bitcode(cgcx, &module, "lto.after-restriction");
let ptr = symbols_below_threshold.as_ptr();
llvm::LLVMRustRunRestrictionPass(
llmod,
ptr as *const *const libc::c_char,
symbols_below_threshold.len() as libc::size_t,
);*/
save_temp_bitcode(cgcx, &module, "lto.after-restriction");
//}
}

View File

@ -1,19 +1,24 @@
use std::{env, fs};
use gccjit::OutputKind;
use rustc_codegen_ssa::{CompiledModule, ModuleCodegen};
use rustc_codegen_ssa::back::link::ensure_removed;
use rustc_codegen_ssa::back::write::{BitcodeSection, CodegenContext, EmitObj, ModuleConfig};
use rustc_codegen_ssa::{CompiledModule, ModuleCodegen};
use rustc_errors::DiagCtxt;
use rustc_fs_util::link_or_copy;
use rustc_session::config::OutputType;
use rustc_span::fatal_error::FatalError;
use rustc_target::spec::SplitDebuginfo;
use crate::{GccCodegenBackend, GccContext};
use crate::errors::CopyBitcode;
use crate::{GccCodegenBackend, GccContext};
pub(crate) unsafe fn codegen(cgcx: &CodegenContext<GccCodegenBackend>, dcx: &DiagCtxt, module: ModuleCodegen<GccContext>, config: &ModuleConfig) -> Result<CompiledModule, FatalError> {
pub(crate) unsafe fn codegen(
cgcx: &CodegenContext<GccCodegenBackend>,
dcx: &DiagCtxt,
module: ModuleCodegen<GccContext>,
config: &ModuleConfig,
) -> Result<CompiledModule, FatalError> {
let _timer = cgcx.prof.generic_activity_with_arg("GCC_module_codegen", &*module.name);
{
let context = &module.module_llvm.context;
@ -51,7 +56,8 @@ pub(crate) unsafe fn codegen(cgcx: &CodegenContext<GccCodegenBackend>, dcx: &Dia
.generic_activity_with_arg("GCC_module_codegen_emit_bitcode", &*module.name);
context.add_command_line_option("-flto=auto");
context.add_command_line_option("-flto-partition=one");
context.compile_to_file(OutputKind::ObjectFile, bc_out.to_str().expect("path to str"));
context
.compile_to_file(OutputKind::ObjectFile, bc_out.to_str().expect("path to str"));
}
if config.emit_obj == EmitObj::ObjectCode(BitcodeSection::Full) {
@ -65,18 +71,19 @@ pub(crate) unsafe fn codegen(cgcx: &CodegenContext<GccCodegenBackend>, dcx: &Dia
context.add_command_line_option("-flto-partition=one");
context.add_command_line_option("-ffat-lto-objects");
// TODO(antoyo): Send -plugin/usr/lib/gcc/x86_64-pc-linux-gnu/11.1.0/liblto_plugin.so to linker (this should be done when specifying the appropriate rustc cli argument).
context.compile_to_file(OutputKind::ObjectFile, bc_out.to_str().expect("path to str"));
context
.compile_to_file(OutputKind::ObjectFile, bc_out.to_str().expect("path to str"));
}
}
if config.emit_ir {
unimplemented!();
let out = cgcx.output_filenames.temp_path(OutputType::LlvmAssembly, module_name);
std::fs::write(out, "").expect("write file");
}
if config.emit_asm {
let _timer = cgcx
.prof
.generic_activity_with_arg("GCC_module_codegen_emit_asm", &*module.name);
let _timer =
cgcx.prof.generic_activity_with_arg("GCC_module_codegen_emit_asm", &*module.name);
let path = cgcx.output_filenames.temp_path(OutputType::Assembly, module_name);
context.compile_to_file(OutputKind::Assembler, path.to_str().expect("path to str"));
}
@ -89,7 +96,9 @@ pub(crate) unsafe fn codegen(cgcx: &CodegenContext<GccCodegenBackend>, dcx: &Dia
if env::var("CG_GCCJIT_DUMP_MODULE_NAMES").as_deref() == Ok("1") {
println!("Module {}", module.name);
}
if env::var("CG_GCCJIT_DUMP_ALL_MODULES").as_deref() == Ok("1") || env::var("CG_GCCJIT_DUMP_MODULE").as_deref() == Ok(&module.name) {
if env::var("CG_GCCJIT_DUMP_ALL_MODULES").as_deref() == Ok("1")
|| env::var("CG_GCCJIT_DUMP_MODULE").as_deref() == Ok(&module.name)
{
println!("Dumping reproducer {}", module.name);
let _ = fs::create_dir("/tmp/reproducers");
// FIXME(antoyo): segfault in dump_reproducer_to_file() might be caused by
@ -117,10 +126,15 @@ pub(crate) unsafe fn codegen(cgcx: &CodegenContext<GccCodegenBackend>, dcx: &Dia
context.add_driver_option("-fuse-linker-plugin");
// NOTE: this doesn't actually generate an executable. With the above flags, it combines the .o files together in another .o.
context.compile_to_file(OutputKind::Executable, obj_out.to_str().expect("path to str"));
}
else {
context.compile_to_file(OutputKind::ObjectFile, obj_out.to_str().expect("path to str"));
context.compile_to_file(
OutputKind::Executable,
obj_out.to_str().expect("path to str"),
);
} else {
context.compile_to_file(
OutputKind::ObjectFile,
obj_out.to_str().expect("path to str"),
);
}
}
@ -148,11 +162,19 @@ pub(crate) unsafe fn codegen(cgcx: &CodegenContext<GccCodegenBackend>, dcx: &Dia
))
}
pub(crate) fn link(_cgcx: &CodegenContext<GccCodegenBackend>, _dcx: &DiagCtxt, mut _modules: Vec<ModuleCodegen<GccContext>>) -> Result<ModuleCodegen<GccContext>, FatalError> {
pub(crate) fn link(
_cgcx: &CodegenContext<GccCodegenBackend>,
_dcx: &DiagCtxt,
mut _modules: Vec<ModuleCodegen<GccContext>>,
) -> Result<ModuleCodegen<GccContext>, FatalError> {
unimplemented!();
}
pub(crate) fn save_temp_bitcode(cgcx: &CodegenContext<GccCodegenBackend>, _module: &ModuleCodegen<GccContext>, _name: &str) {
pub(crate) fn save_temp_bitcode(
cgcx: &CodegenContext<GccCodegenBackend>,
_module: &ModuleCodegen<GccContext>,
_name: &str,
) {
if !cgcx.save_temps {
return;
}

View File

@ -2,29 +2,26 @@ use std::collections::HashSet;
use std::env;
use std::time::Instant;
use gccjit::{
FunctionType,
GlobalKind,
};
use rustc_middle::dep_graph;
use rustc_middle::ty::TyCtxt;
#[cfg(feature="master")]
use rustc_middle::mir::mono::Visibility;
use rustc_middle::mir::mono::Linkage;
use rustc_codegen_ssa::{ModuleCodegen, ModuleKind};
use gccjit::{FunctionType, GlobalKind};
use rustc_codegen_ssa::base::maybe_create_entry_wrapper;
use rustc_codegen_ssa::mono_item::MonoItemExt;
use rustc_codegen_ssa::traits::DebugInfoMethods;
use rustc_codegen_ssa::{ModuleCodegen, ModuleKind};
use rustc_middle::dep_graph;
use rustc_middle::mir::mono::Linkage;
#[cfg(feature = "master")]
use rustc_middle::mir::mono::Visibility;
use rustc_middle::ty::TyCtxt;
use rustc_session::config::DebugInfo;
use rustc_span::Symbol;
use rustc_target::spec::PanicStrategy;
use crate::{LockedTargetInfo, gcc_util, new_context};
use crate::GccContext;
use crate::builder::Builder;
use crate::context::CodegenCx;
use crate::GccContext;
use crate::{gcc_util, new_context, LockedTargetInfo};
#[cfg(feature="master")]
#[cfg(feature = "master")]
pub fn visibility_to_gcc(linkage: Visibility) -> gccjit::Visibility {
match linkage {
Visibility::Default => gccjit::Visibility::Default,
@ -66,7 +63,11 @@ pub fn linkage_to_gcc(linkage: Linkage) -> FunctionType {
}
}
pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol, target_info: LockedTargetInfo) -> (ModuleCodegen<GccContext>, u64) {
pub fn compile_codegen_unit(
tcx: TyCtxt<'_>,
cgu_name: Symbol,
target_info: LockedTargetInfo,
) -> (ModuleCodegen<GccContext>, u64) {
let prof_timer = tcx.prof.generic_activity("codegen_module");
let start_time = Instant::now();
@ -85,7 +86,10 @@ pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol, target_info: Lock
// the time we needed for codegenning it.
let cost = time_to_codegen.as_secs() * 1_000_000_000 + time_to_codegen.subsec_nanos() as u64;
fn module_codegen(tcx: TyCtxt<'_>, (cgu_name, target_info): (Symbol, LockedTargetInfo)) -> ModuleCodegen<GccContext> {
fn module_codegen(
tcx: TyCtxt<'_>,
(cgu_name, target_info): (Symbol, LockedTargetInfo),
) -> ModuleCodegen<GccContext> {
let cgu = tcx.codegen_unit(cgu_name);
// Instantiate monomorphizations without filling out definitions yet...
let context = new_context(tcx);
@ -95,7 +99,12 @@ pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol, target_info: Lock
context.add_driver_option("-fexceptions");
}
let disabled_features: HashSet<_> = tcx.sess.opts.cg.target_feature.split(',')
let disabled_features: HashSet<_> = tcx
.sess
.opts
.cg
.target_feature
.split(',')
.filter(|feature| feature.starts_with('-'))
.map(|string| &string[1..])
.collect();
@ -129,7 +138,13 @@ pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol, target_info: Lock
context.add_command_line_option(&format!("-march={}", target_cpu));
}
if tcx.sess.opts.unstable_opts.function_sections.unwrap_or(tcx.sess.target.function_sections) {
if tcx
.sess
.opts
.unstable_opts
.function_sections
.unwrap_or(tcx.sess.target.function_sections)
{
context.add_command_line_option("-ffunction-sections");
context.add_command_line_option("-fdata-sections");
}
@ -152,19 +167,17 @@ pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol, target_info: Lock
if env::var("CG_GCCJIT_DUMP_GIMPLE").as_deref() == Ok("1") {
context.set_dump_initial_gimple(true);
}
context.set_debug_info(true);
if env::var("CG_GCCJIT_DUMP_EVERYTHING").as_deref() == Ok("1") {
context.set_dump_everything(true);
}
if env::var("CG_GCCJIT_KEEP_INTERMEDIATES").as_deref() == Ok("1") {
context.set_keep_intermediates(true);
}
if env::var("CG_GCCJIT_VERBOSE").as_deref() == Ok("1") {
context.add_driver_option("-v");
}
// NOTE: The codegen generates unrechable blocks.
// NOTE: The codegen generates unreachable blocks.
context.set_allow_unreachable_blocks(true);
{
@ -192,11 +205,7 @@ pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol, target_info: Lock
ModuleCodegen {
name: cgu_name.to_string(),
module_llvm: GccContext {
context,
should_combine_object_files: false,
temp_dir: None,
},
module_llvm: GccContext { context, should_combine_object_files: false, temp_dir: None },
kind: ModuleKind::Regular,
}
}

File diff suppressed because it is too large

View File

@ -1,8 +1,8 @@
#[cfg(feature="master")]
#[cfg(feature = "master")]
use gccjit::{FnAttribute, Visibility};
use gccjit::{FunctionType, Function};
use rustc_middle::ty::{self, Instance, TypeVisitableExt};
use gccjit::{Function, FunctionType};
use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt};
use rustc_middle::ty::{self, Instance, TypeVisitableExt};
use crate::attributes;
use crate::context::CodegenCx;
@ -28,145 +28,144 @@ pub fn get_fn<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, instance: Instance<'tcx>)
let fn_abi = cx.fn_abi_of_instance(instance, ty::List::empty());
let func =
if let Some(_func) = cx.get_declared_value(&sym) {
// FIXME(antoyo): we never reach this because get_declared_value only returns global variables
// and here we try to get a function.
unreachable!();
/*
// Create a fn pointer with the new signature.
let ptrty = fn_abi.ptr_to_gcc_type(cx);
let func = if let Some(_func) = cx.get_declared_value(&sym) {
// FIXME(antoyo): we never reach this because get_declared_value only returns global variables
// and here we try to get a function.
unreachable!();
/*
// Create a fn pointer with the new signature.
let ptrty = fn_abi.ptr_to_gcc_type(cx);
// This is subtle and surprising, but sometimes we have to bitcast
// the resulting fn pointer. The reason has to do with external
// functions. If you have two crates that both bind the same C
// library, they may not use precisely the same types: for
// example, they will probably each declare their own structs,
// which are distinct types from LLVM's point of view (nominal
// types).
//
// Now, if those two crates are linked into an application, and
// they contain inlined code, you can wind up with a situation
// where both of those functions wind up being loaded into this
// application simultaneously. In that case, the same function
// (from LLVM's point of view) requires two types. But of course
// LLVM won't allow one function to have two types.
//
// What we currently do, therefore, is declare the function with
// one of the two types (whichever happens to come first) and then
// bitcast as needed when the function is referenced to make sure
// it has the type we expect.
//
// This can occur on either a crate-local or crate-external
// reference. It also occurs when testing libcore and in some
// other weird situations. Annoying.
if cx.val_ty(func) != ptrty {
// TODO(antoyo): cast the pointer.
func
}
else {
func
}*/
// This is subtle and surprising, but sometimes we have to bitcast
// the resulting fn pointer. The reason has to do with external
// functions. If you have two crates that both bind the same C
// library, they may not use precisely the same types: for
// example, they will probably each declare their own structs,
// which are distinct types from LLVM's point of view (nominal
// types).
//
// Now, if those two crates are linked into an application, and
// they contain inlined code, you can wind up with a situation
// where both of those functions wind up being loaded into this
// application simultaneously. In that case, the same function
// (from LLVM's point of view) requires two types. But of course
// LLVM won't allow one function to have two types.
//
// What we currently do, therefore, is declare the function with
// one of the two types (whichever happens to come first) and then
// bitcast as needed when the function is referenced to make sure
// it has the type we expect.
//
// This can occur on either a crate-local or crate-external
// reference. It also occurs when testing libcore and in some
// other weird situations. Annoying.
if cx.val_ty(func) != ptrty {
// TODO(antoyo): cast the pointer.
func
}
else {
cx.linkage.set(FunctionType::Extern);
let func = cx.declare_fn(&sym, &fn_abi);
func
}*/
} else {
cx.linkage.set(FunctionType::Extern);
let func = cx.declare_fn(&sym, &fn_abi);
attributes::from_fn_attrs(cx, func, instance);
attributes::from_fn_attrs(cx, func, instance);
let instance_def_id = instance.def_id();
let instance_def_id = instance.def_id();
// TODO(antoyo): set linkage and attributes.
// TODO(antoyo): set linkage and attributes.
// Apply an appropriate linkage/visibility value to our item that we
// just declared.
//
// This is sort of subtle. Inside our codegen unit we started off
// compilation by predefining all our own `MonoItem` instances. That
// is, everything we're codegenning ourselves is already defined. That
// means that anything we're actually codegenning in this codegen unit
// will have hit the above branch in `get_declared_value`. As a result,
// we're guaranteed here that we're declaring a symbol that won't get
// defined, or in other words we're referencing a value from another
// codegen unit or even another crate.
//
// So because this is a foreign value we blanket apply an external
// linkage directive because it's coming from a different object file.
// The visibility here is where it gets tricky. This symbol could be
// referencing some foreign crate or foreign library (an `extern`
// block) in which case we want to leave the default visibility. We may
// also, though, have multiple codegen units. It could be a
// monomorphization, in which case its expected visibility depends on
// whether we are sharing generics or not. The important thing here is
// that the visibility we apply to the declaration is the same one that
// has been applied to the definition (wherever that definition may be).
let is_generic = instance.args.non_erasable_generics(tcx, instance.def_id()).next().is_some();
// Apply an appropriate linkage/visibility value to our item that we
// just declared.
//
// This is sort of subtle. Inside our codegen unit we started off
// compilation by predefining all our own `MonoItem` instances. That
// is, everything we're codegenning ourselves is already defined. That
// means that anything we're actually codegenning in this codegen unit
// will have hit the above branch in `get_declared_value`. As a result,
// we're guaranteed here that we're declaring a symbol that won't get
// defined, or in other words we're referencing a value from another
// codegen unit or even another crate.
//
// So because this is a foreign value we blanket apply an external
// linkage directive because it's coming from a different object file.
// The visibility here is where it gets tricky. This symbol could be
// referencing some foreign crate or foreign library (an `extern`
// block) in which case we want to leave the default visibility. We may
// also, though, have multiple codegen units. It could be a
// monomorphization, in which case its expected visibility depends on
// whether we are sharing generics or not. The important thing here is
// that the visibility we apply to the declaration is the same one that
// has been applied to the definition (wherever that definition may be).
let is_generic =
instance.args.non_erasable_generics(tcx, instance.def_id()).next().is_some();
if is_generic {
// This is a monomorphization. Its expected visibility depends
// on whether we are in share-generics mode.
if is_generic {
// This is a monomorphization. Its expected visibility depends
// on whether we are in share-generics mode.
if cx.tcx.sess.opts.share_generics() {
// We are in share_generics mode.
if cx.tcx.sess.opts.share_generics() {
// We are in share_generics mode.
if let Some(instance_def_id) = instance_def_id.as_local() {
// This is a definition from the current crate. If the
// definition is unreachable for downstream crates or
// the current crate does not re-export generics, the
// definition of the instance will have been declared
// as `hidden`.
if cx.tcx.is_unreachable_local_definition(instance_def_id)
|| !cx.tcx.local_crate_exports_generics()
{
#[cfg(feature="master")]
func.add_attribute(FnAttribute::Visibility(Visibility::Hidden));
}
} else {
// This is a monomorphization of a generic function
// defined in an upstream crate.
if instance.upstream_monomorphization(tcx).is_some() {
// This is instantiated in another crate. It cannot
// be `hidden`.
} else {
// This is a local instantiation of an upstream definition.
// If the current crate does not re-export it
// (because it is a C library or an executable), it
// will have been declared `hidden`.
if !cx.tcx.local_crate_exports_generics() {
#[cfg(feature="master")]
func.add_attribute(FnAttribute::Visibility(Visibility::Hidden));
}
}
}
} else {
// When not sharing generics, all instances are in the same
// crate and have hidden visibility
#[cfg(feature="master")]
func.add_attribute(FnAttribute::Visibility(Visibility::Hidden));
}
} else {
// This is a non-generic function
if cx.tcx.is_codegened_item(instance_def_id) {
// This is a function that is instantiated in the local crate
if instance_def_id.is_local() {
// This is a function that is defined in the local crate.
// If it is not reachable, it is hidden.
if !cx.tcx.is_reachable_non_generic(instance_def_id) {
#[cfg(feature="master")]
func.add_attribute(FnAttribute::Visibility(Visibility::Hidden));
}
} else {
// This is a function from an upstream crate that has
// been instantiated here. These are always hidden.
#[cfg(feature="master")]
if let Some(instance_def_id) = instance_def_id.as_local() {
// This is a definition from the current crate. If the
// definition is unreachable for downstream crates or
// the current crate does not re-export generics, the
// definition of the instance will have been declared
// as `hidden`.
if cx.tcx.is_unreachable_local_definition(instance_def_id)
|| !cx.tcx.local_crate_exports_generics()
{
#[cfg(feature = "master")]
func.add_attribute(FnAttribute::Visibility(Visibility::Hidden));
}
} else {
// This is a monomorphization of a generic function
// defined in an upstream crate.
if instance.upstream_monomorphization(tcx).is_some() {
// This is instantiated in another crate. It cannot
// be `hidden`.
} else {
// This is a local instantiation of an upstream definition.
// If the current crate does not re-export it
// (because it is a C library or an executable), it
// will have been declared `hidden`.
if !cx.tcx.local_crate_exports_generics() {
#[cfg(feature = "master")]
func.add_attribute(FnAttribute::Visibility(Visibility::Hidden));
}
}
}
} else {
// When not sharing generics, all instances are in the same
// crate and have hidden visibility
#[cfg(feature = "master")]
func.add_attribute(FnAttribute::Visibility(Visibility::Hidden));
}
} else {
// This is a non-generic function
if cx.tcx.is_codegened_item(instance_def_id) {
// This is a function that is instantiated in the local crate
if instance_def_id.is_local() {
// This is a function that is defined in the local crate.
// If it is not reachable, it is hidden.
if !cx.tcx.is_reachable_non_generic(instance_def_id) {
#[cfg(feature = "master")]
func.add_attribute(FnAttribute::Visibility(Visibility::Hidden));
}
} else {
// This is a function from an upstream crate that has
// been instantiated here. These are always hidden.
#[cfg(feature = "master")]
func.add_attribute(FnAttribute::Visibility(Visibility::Hidden));
}
}
}
func
};
func
};
cx.function_instances.borrow_mut().insert(instance, func);
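
The comment block above walks through how the backend decides whether a newly declared symbol should be marked `hidden`. As a rough, self-contained sketch of that decision tree (the `SymbolInfo` struct and its boolean fields are stand-ins for the `tcx`/`instance` queries used above, not the real rustc API):

```rust
/// Hypothetical stand-ins for the queries consulted above, for illustration only.
struct SymbolInfo {
    is_generic: bool,
    share_generics: bool,
    is_local_definition: bool,
    unreachable_local_definition: bool,
    local_crate_exports_generics: bool,
    has_upstream_monomorphization: bool,
    codegened_in_this_crate: bool,
    reachable_non_generic: bool,
}

/// Mirror the branches above: returns true when the declaration should be `hidden`.
fn should_be_hidden(s: &SymbolInfo) -> bool {
    if s.is_generic {
        if s.share_generics {
            if s.is_local_definition {
                // Local definition: hidden when unreachable downstream or when
                // the current crate does not re-export generics.
                s.unreachable_local_definition || !s.local_crate_exports_generics
            } else if s.has_upstream_monomorphization {
                // Instantiated in another crate: it cannot be hidden.
                false
            } else {
                // Local instantiation of an upstream definition.
                !s.local_crate_exports_generics
            }
        } else {
            // Without shared generics, every instance lives in this crate.
            true
        }
    } else if s.codegened_in_this_crate {
        if s.is_local_definition {
            // Defined locally: hidden only when not reachable from other crates.
            !s.reachable_non_generic
        } else {
            // Upstream function instantiated here: always hidden.
            true
        }
    } else {
        false
    }
}

fn main() {
    let info = SymbolInfo {
        is_generic: true,
        share_generics: false,
        is_local_definition: true,
        unreachable_local_definition: false,
        local_crate_exports_generics: false,
        has_upstream_monomorphization: false,
        codegened_in_this_crate: false,
        reachable_non_generic: false,
    };
    // A generic instance without shared generics gets hidden visibility.
    assert!(should_be_hidden(&info));
}
```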

View File

@ -1,14 +1,9 @@
use gccjit::LValue;
use gccjit::{RValue, Type, ToRValue};
use rustc_codegen_ssa::traits::{
BaseTypeMethods,
ConstMethods,
MiscMethods,
StaticMethods,
};
use rustc_middle::mir::Mutability;
use rustc_middle::ty::layout::{LayoutOf};
use gccjit::{RValue, ToRValue, Type};
use rustc_codegen_ssa::traits::{BaseTypeMethods, ConstMethods, MiscMethods, StaticMethods};
use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, Scalar};
use rustc_middle::mir::Mutability;
use rustc_middle::ty::layout::LayoutOf;
use rustc_target::abi::{self, HasDataLayout, Pointer};
use crate::consts::const_alloc_to_gcc;
@ -40,9 +35,7 @@ pub fn bytes_in_context<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, bytes: &[u8]) ->
let byte_type = context.new_type::<u8>();
let typ = context.new_array_type(None, byte_type, bytes.len() as u64);
let elements: Vec<_> =
bytes.iter()
.map(|&byte| context.new_rvalue_from_int(byte_type, byte as i32))
.collect();
bytes.iter().map(|&byte| context.new_rvalue_from_int(byte_type, byte as i32)).collect();
context.new_array_constructor(None, typ, &elements)
}
@ -54,23 +47,20 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
fn const_null(&self, typ: Type<'gcc>) -> RValue<'gcc> {
if type_is_pointer(typ) {
self.context.new_null(typ)
}
else {
} else {
self.const_int(typ, 0)
}
}
fn const_undef(&self, typ: Type<'gcc>) -> RValue<'gcc> {
let local = self.current_func.borrow().expect("func")
.new_local(None, typ, "undefined");
let local = self.current_func.borrow().expect("func").new_local(None, typ, "undefined");
if typ.is_struct().is_some() {
// NOTE: hack to workaround a limitation of the rustc API: see comment on
// CodegenCx.structs_as_pointer
let pointer = local.get_address(None);
self.structs_as_pointer.borrow_mut().insert(pointer);
pointer
}
else {
} else {
local.to_rvalue()
}
}
@ -143,16 +133,15 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
.or_insert_with(|| (s.to_owned(), self.global_string(s)))
.1;
let len = s.len();
let cs = self.const_ptrcast(str_global.get_address(None),
let cs = self.const_ptrcast(
str_global.get_address(None),
self.type_ptr_to(self.layout_of(self.tcx.types.str_).gcc_type(self)),
);
(cs, self.const_usize(len as u64))
}
fn const_struct(&self, values: &[RValue<'gcc>], packed: bool) -> RValue<'gcc> {
let fields: Vec<_> = values.iter()
.map(|value| value.get_type())
.collect();
let fields: Vec<_> = values.iter().map(|value| value.get_type()).collect();
// TODO(antoyo): cache the type? It's anonymous, so probably not.
let typ = self.type_struct(&fields, packed);
let struct_type = typ.is_struct().expect("struct type");
@ -178,9 +167,10 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
// FIXME(antoyo): there's some issues with using the u128 code that follows, so hard-code
// the paths for floating-point values.
if ty == self.float_type {
return self.context.new_rvalue_from_double(ty, f32::from_bits(data as u32) as f64);
}
else if ty == self.double_type {
return self
.context
.new_rvalue_from_double(ty, f32::from_bits(data as u32) as f64);
} else if ty == self.double_type {
return self.context.new_rvalue_from_double(ty, f64::from_bits(data as u64));
}
@ -192,8 +182,7 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
// FIXME(antoyo): fix bitcast to work in constant contexts.
// TODO(antoyo): perhaps only use bitcast for pointers?
self.context.new_cast(None, value, ty)
}
else {
} else {
// TODO(bjorn3): assert size is correct
self.const_bitcast(value, ty)
}
@ -201,42 +190,41 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
Scalar::Ptr(ptr, _size) => {
let (prov, offset) = ptr.into_parts(); // we know the `offset` is relative
let alloc_id = prov.alloc_id();
let base_addr =
match self.tcx.global_alloc(alloc_id) {
GlobalAlloc::Memory(alloc) => {
let init = const_alloc_to_gcc(self, alloc);
let alloc = alloc.inner();
let value =
match alloc.mutability {
Mutability::Mut => self.static_addr_of_mut(init, alloc.align, None),
_ => self.static_addr_of(init, alloc.align, None),
};
if !self.sess().fewer_names() {
// TODO(antoyo): set value name.
}
value
},
GlobalAlloc::Function(fn_instance) => {
self.get_fn_addr(fn_instance)
},
GlobalAlloc::VTable(ty, trait_ref) => {
let alloc = self.tcx.global_alloc(self.tcx.vtable_allocation((ty, trait_ref))).unwrap_memory();
let init = const_alloc_to_gcc(self, alloc);
self.static_addr_of(init, alloc.inner().align, None)
let base_addr = match self.tcx.global_alloc(alloc_id) {
GlobalAlloc::Memory(alloc) => {
let init = const_alloc_to_gcc(self, alloc);
let alloc = alloc.inner();
let value = match alloc.mutability {
Mutability::Mut => self.static_addr_of_mut(init, alloc.align, None),
_ => self.static_addr_of(init, alloc.align, None),
};
if !self.sess().fewer_names() {
// TODO(antoyo): set value name.
}
GlobalAlloc::Static(def_id) => {
assert!(self.tcx.is_static(def_id));
self.get_static(def_id).get_address(None)
},
};
value
}
GlobalAlloc::Function(fn_instance) => self.get_fn_addr(fn_instance),
GlobalAlloc::VTable(ty, trait_ref) => {
let alloc = self
.tcx
.global_alloc(self.tcx.vtable_allocation((ty, trait_ref)))
.unwrap_memory();
let init = const_alloc_to_gcc(self, alloc);
self.static_addr_of(init, alloc.inner().align, None)
}
GlobalAlloc::Static(def_id) => {
assert!(self.tcx.is_static(def_id));
self.get_static(def_id).get_address(None)
}
};
let ptr_type = base_addr.get_type();
let base_addr = self.const_bitcast(base_addr, self.usize_type);
let offset = self.context.new_rvalue_from_long(self.usize_type, offset.bytes() as i64);
let offset =
self.context.new_rvalue_from_long(self.usize_type, offset.bytes() as i64);
let ptr = self.const_bitcast(base_addr + offset, ptr_type);
if !matches!(layout.primitive(), Pointer(_)) {
self.const_bitcast(ptr.dereference(None).to_rvalue(), ty)
}
else {
} else {
self.const_bitcast(ptr, ty)
}
}
@ -261,7 +249,9 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
}
fn const_ptr_byte_offset(&self, base_addr: Self::Value, offset: abi::Size) -> Self::Value {
self.context.new_array_access(None, base_addr, self.const_usize(offset.bytes())).get_address(None)
self.context
.new_array_access(None, base_addr, self.const_usize(offset.bytes()))
.get_address(None)
}
}
@ -284,35 +274,25 @@ impl<'gcc, 'tcx> SignType<'gcc, 'tcx> for Type<'gcc> {
fn to_signed(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
if self.is_u8(cx) {
cx.i8_type
}
else if self.is_u16(cx) {
} else if self.is_u16(cx) {
cx.i16_type
}
else if self.is_u32(cx) {
} else if self.is_u32(cx) {
cx.i32_type
}
else if self.is_u64(cx) {
} else if self.is_u64(cx) {
cx.i64_type
}
else if self.is_u128(cx) {
} else if self.is_u128(cx) {
cx.i128_type
}
else if self.is_uchar(cx) {
} else if self.is_uchar(cx) {
cx.char_type
}
else if self.is_ushort(cx) {
} else if self.is_ushort(cx) {
cx.short_type
}
else if self.is_uint(cx) {
} else if self.is_uint(cx) {
cx.int_type
}
else if self.is_ulong(cx) {
} else if self.is_ulong(cx) {
cx.long_type
}
else if self.is_ulonglong(cx) {
} else if self.is_ulonglong(cx) {
cx.longlong_type
}
else {
} else {
self.clone()
}
}
@ -320,41 +300,31 @@ impl<'gcc, 'tcx> SignType<'gcc, 'tcx> for Type<'gcc> {
fn to_unsigned(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
if self.is_i8(cx) {
cx.u8_type
}
else if self.is_i16(cx) {
} else if self.is_i16(cx) {
cx.u16_type
}
else if self.is_i32(cx) {
} else if self.is_i32(cx) {
cx.u32_type
}
else if self.is_i64(cx) {
} else if self.is_i64(cx) {
cx.u64_type
}
else if self.is_i128(cx) {
} else if self.is_i128(cx) {
cx.u128_type
}
else if self.is_char(cx) {
} else if self.is_char(cx) {
cx.uchar_type
}
else if self.is_short(cx) {
} else if self.is_short(cx) {
cx.ushort_type
}
else if self.is_int(cx) {
} else if self.is_int(cx) {
cx.uint_type
}
else if self.is_long(cx) {
} else if self.is_long(cx) {
cx.ulong_type
}
else if self.is_longlong(cx) {
} else if self.is_longlong(cx) {
cx.ulonglong_type
}
else {
} else {
self.clone()
}
}
}
pub trait TypeReflection<'gcc, 'tcx> {
pub trait TypeReflection<'gcc, 'tcx> {
fn is_uchar(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
fn is_ushort(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
fn is_uint(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
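
The `Scalar::Ptr` arm above resolves the allocation behind a pointer constant to a base address, adds the byte offset while the value is viewed as a pointer-sized integer, and only then casts back to the requested pointer type. A minimal arithmetic sketch of that relocation step, with plain `u64` addresses standing in for GCC rvalues and the vtable case omitted:

```rust
/// Simplified stand-ins for the allocation kinds matched above (vtables omitted).
enum GlobalAlloc {
    Memory { address: u64 },
    Function { address: u64 },
    Static { address: u64 },
}

/// Resolve a base address for the allocation, then apply the byte offset,
/// mirroring the pointer-constant handling above.
fn const_pointer(alloc: &GlobalAlloc, offset_bytes: u64) -> u64 {
    let base = match alloc {
        GlobalAlloc::Memory { address }
        | GlobalAlloc::Function { address }
        | GlobalAlloc::Static { address } => *address,
    };
    base.wrapping_add(offset_bytes)
}

fn main() {
    let alloc = GlobalAlloc::Static { address: 0x4000 };
    assert_eq!(const_pointer(&alloc, 16), 0x4010);
}
```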

View File

@ -2,12 +2,14 @@
use gccjit::{FnAttribute, VarAttribute, Visibility};
use gccjit::{Function, GlobalKind, LValue, RValue, ToRValue};
use rustc_codegen_ssa::traits::{BaseTypeMethods, ConstMethods, DerivedTypeMethods, StaticMethods};
use rustc_middle::span_bug;
use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
use rustc_middle::mir::interpret::{
self, read_target_uint, ConstAllocation, ErrorHandled, Scalar as InterpScalar,
};
use rustc_middle::mir::mono::MonoItem;
use rustc_middle::ty::{self, Instance, Ty};
use rustc_middle::span_bug;
use rustc_middle::ty::layout::LayoutOf;
use rustc_middle::mir::interpret::{self, ConstAllocation, ErrorHandled, Scalar as InterpScalar, read_target_uint};
use rustc_middle::ty::{self, Instance, Ty};
use rustc_span::def_id::DefId;
use rustc_target::abi::{self, Align, HasDataLayout, Primitive, Size, WrappingRange};
@ -16,7 +18,11 @@ use crate::context::CodegenCx;
use crate::errors::InvalidMinimumAlignment;
use crate::type_of::LayoutGccExt;
fn set_global_alignment<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, gv: LValue<'gcc>, mut align: Align) {
fn set_global_alignment<'gcc, 'tcx>(
cx: &CodegenCx<'gcc, 'tcx>,
gv: LValue<'gcc>,
mut align: Align,
) {
// The target may require greater alignment for globals than the type does.
// Note: GCC and Clang also allow `__attribute__((aligned))` on variables,
// which can force it to be smaller. Rust doesn't support this yet.
@ -48,7 +54,9 @@ impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> {
}
let global_value = self.static_addr_of_mut(cv, align, kind);
#[cfg(feature = "master")]
self.global_lvalues.borrow().get(&global_value)
self.global_lvalues
.borrow()
.get(&global_value)
.expect("`static_addr_of_mut` did not add the global to `self.global_lvalues`")
.global_set_readonly();
self.const_globals.borrow_mut().insert(cv, global_value);
@ -58,25 +66,20 @@ impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> {
fn codegen_static(&self, def_id: DefId, is_mutable: bool) {
let attrs = self.tcx.codegen_fn_attrs(def_id);
let value =
match codegen_static_initializer(&self, def_id) {
Ok((value, _)) => value,
// Error has already been reported
Err(_) => return,
};
let value = match codegen_static_initializer(&self, def_id) {
Ok((value, _)) => value,
// Error has already been reported
Err(_) => return,
};
let global = self.get_static(def_id);
// boolean SSA values are i1, but they have to be stored in i8 slots,
// otherwise some LLVM optimization passes don't work as expected
let val_llty = self.val_ty(value);
let value =
if val_llty == self.type_i1() {
unimplemented!();
}
else {
value
};
if val_llty == self.type_i1() {
unimplemented!();
};
let instance = Instance::mono(self.tcx, def_id);
let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all());
@ -89,11 +92,9 @@ impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> {
// As an optimization, all shared statics which do not have interior
// mutability are placed into read-only memory.
if !is_mutable {
if self.type_is_freeze(ty) {
#[cfg(feature = "master")]
global.global_set_readonly();
}
if !is_mutable && self.type_is_freeze(ty) {
#[cfg(feature = "master")]
global.global_set_readonly();
}
if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) {
@ -149,7 +150,9 @@ impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> {
// TODO(antoyo): set link section.
}
if attrs.flags.contains(CodegenFnAttrFlags::USED) || attrs.flags.contains(CodegenFnAttrFlags::USED_LINKER) {
if attrs.flags.contains(CodegenFnAttrFlags::USED)
|| attrs.flags.contains(CodegenFnAttrFlags::USED_LINKER)
{
self.add_used_global(global.to_rvalue());
}
}
@ -166,29 +169,33 @@ impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> {
}
impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
#[cfg_attr(not(feature="master"), allow(unused_variables))]
#[cfg_attr(not(feature = "master"), allow(unused_variables))]
pub fn add_used_function(&self, function: Function<'gcc>) {
#[cfg(feature = "master")]
function.add_attribute(FnAttribute::Used);
}
pub fn static_addr_of_mut(&self, cv: RValue<'gcc>, align: Align, kind: Option<&str>) -> RValue<'gcc> {
let global =
match kind {
Some(kind) if !self.tcx.sess.fewer_names() => {
let name = self.generate_local_symbol_name(kind);
// TODO(antoyo): check if it's okay that no link_section is set.
pub fn static_addr_of_mut(
&self,
cv: RValue<'gcc>,
align: Align,
kind: Option<&str>,
) -> RValue<'gcc> {
let global = match kind {
Some(kind) if !self.tcx.sess.fewer_names() => {
let name = self.generate_local_symbol_name(kind);
// TODO(antoyo): check if it's okay that no link_section is set.
let typ = self.val_ty(cv).get_aligned(align.bytes());
let global = self.declare_private_global(&name[..], typ);
global
}
_ => {
let typ = self.val_ty(cv).get_aligned(align.bytes());
let global = self.declare_unnamed_global(typ);
global
},
};
let typ = self.val_ty(cv).get_aligned(align.bytes());
let global = self.declare_private_global(&name[..], typ);
global
}
_ => {
let typ = self.val_ty(cv).get_aligned(align.bytes());
let global = self.declare_unnamed_global(typ);
global
}
};
global.global_set_initializer_rvalue(cv);
// TODO(antoyo): set unnamed address.
let rvalue = global.get_address(None);
@ -215,8 +222,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all());
let sym = self.tcx.symbol_name(instance).name;
let global =
if def_id.is_local() && !self.tcx.is_foreign_item(def_id) {
let global = if def_id.is_local() && !self.tcx.is_foreign_item(def_id) {
let llty = self.layout_of(ty).gcc_type(self);
if let Some(global) = self.get_declared_value(sym) {
if self.val_ty(global) != self.type_ptr_to(llty) {
@ -235,7 +241,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
if !self.tcx.is_reachable_non_generic(def_id) {
#[cfg(feature = "master")]
global.add_attribute(VarAttribute::Visibility(Visibility::Hidden));
global.add_string_attribute(VarAttribute::Visibility(Visibility::Hidden));
}
global
@ -278,7 +284,10 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
}
}
pub fn const_alloc_to_gcc<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, alloc: ConstAllocation<'tcx>) -> RValue<'gcc> {
pub fn const_alloc_to_gcc<'gcc, 'tcx>(
cx: &CodegenCx<'gcc, 'tcx>,
alloc: ConstAllocation<'tcx>,
) -> RValue<'gcc> {
let alloc = alloc.inner();
let mut llvals = Vec::with_capacity(alloc.provenance().ptrs().len() + 1);
let dl = cx.data_layout();
@ -300,14 +309,14 @@ pub fn const_alloc_to_gcc<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, alloc: ConstAl
let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(next_offset..offset);
llvals.push(cx.const_bytes(bytes));
}
let ptr_offset =
read_target_uint( dl.endian,
// This `inspect` is okay since it is within the bounds of the allocation, it doesn't
// affect interpreter execution (we inspect the result after interpreter execution),
// and we properly interpret the provenance as a relocation pointer offset.
alloc.inspect_with_uninit_and_ptr_outside_interpreter(offset..(offset + pointer_size)),
)
.expect("const_alloc_to_llvm: could not read relocation pointer")
let ptr_offset = read_target_uint(
dl.endian,
// This `inspect` is okay since it is within the bounds of the allocation, it doesn't
// affect interpreter execution (we inspect the result after interpreter execution),
// and we properly interpret the provenance as a relocation pointer offset.
alloc.inspect_with_uninit_and_ptr_outside_interpreter(offset..(offset + pointer_size)),
)
.expect("const_alloc_to_llvm: could not read relocation pointer")
as u64;
let address_space = cx.tcx.global_alloc(alloc_id).address_space(cx);
@ -317,7 +326,10 @@ pub fn const_alloc_to_gcc<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, alloc: ConstAl
interpret::Pointer::new(prov, Size::from_bytes(ptr_offset)),
&cx.tcx,
),
abi::Scalar::Initialized { value: Primitive::Pointer(address_space), valid_range: WrappingRange::full(dl.pointer_size) },
abi::Scalar::Initialized {
value: Primitive::Pointer(address_space),
valid_range: WrappingRange::full(dl.pointer_size),
},
cx.type_i8p_ext(address_space),
));
next_offset = offset + pointer_size;
@ -337,17 +349,29 @@ pub fn const_alloc_to_gcc<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, alloc: ConstAl
cx.const_struct(&llvals, true)
}
pub fn codegen_static_initializer<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, def_id: DefId) -> Result<(RValue<'gcc>, ConstAllocation<'tcx>), ErrorHandled> {
pub fn codegen_static_initializer<'gcc, 'tcx>(
cx: &CodegenCx<'gcc, 'tcx>,
def_id: DefId,
) -> Result<(RValue<'gcc>, ConstAllocation<'tcx>), ErrorHandled> {
let alloc = cx.tcx.eval_static_initializer(def_id)?;
Ok((const_alloc_to_gcc(cx, alloc), alloc))
}
fn check_and_apply_linkage<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, attrs: &CodegenFnAttrs, ty: Ty<'tcx>, sym: &str) -> LValue<'gcc> {
fn check_and_apply_linkage<'gcc, 'tcx>(
cx: &CodegenCx<'gcc, 'tcx>,
attrs: &CodegenFnAttrs,
ty: Ty<'tcx>,
sym: &str,
) -> LValue<'gcc> {
let is_tls = attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL);
let gcc_type = cx.layout_of(ty).gcc_type(cx);
if let Some(linkage) = attrs.import_linkage {
// Declare a symbol `foo` with the desired linkage.
let global1 = cx.declare_global_with_linkage(&sym, cx.type_i8(), base::global_linkage_to_gcc(linkage));
let global1 = cx.declare_global_with_linkage(
&sym,
cx.type_i8(),
base::global_linkage_to_gcc(linkage),
);
// Declare an internal global `extern_with_linkage_foo` which
// is initialized with the address of `foo`. If `foo` is
@ -363,8 +387,7 @@ fn check_and_apply_linkage<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, attrs: &Codeg
global2.global_set_initializer_rvalue(value);
// TODO(antoyo): use global_set_initializer() when it will work.
global2
}
else {
} else {
// Generate an external declaration.
// FIXME(nagisa): investigate whether it can be changed into define_global
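
`const_alloc_to_gcc` above builds the constant for an allocation by alternating runs of raw bytes with pointer-sized relocation entries taken from the provenance map, then packing everything into one struct. A rough standalone sketch of the interleaving only, where byte slices and plain offsets stand in for GCC values, `Size`, and provenance:

```rust
/// One piece of the resulting constant: either literal bytes or a
/// pointer-sized relocation starting at a given offset.
enum Piece<'a> {
    Bytes(&'a [u8]),
    Pointer { at: usize },
}

/// Split `bytes` into byte runs and pointer entries, assuming `ptr_offsets`
/// is sorted and each pointer occupies `ptr_size` bytes.
fn split_alloc<'a>(bytes: &'a [u8], ptr_offsets: &[usize], ptr_size: usize) -> Vec<Piece<'a>> {
    let mut pieces = Vec::new();
    let mut next = 0;
    for &offset in ptr_offsets {
        if offset > next {
            // Raw bytes between the previous pointer and this one.
            pieces.push(Piece::Bytes(&bytes[next..offset]));
        }
        pieces.push(Piece::Pointer { at: offset });
        next = offset + ptr_size;
    }
    if next < bytes.len() {
        // Trailing bytes after the last pointer.
        pieces.push(Piece::Bytes(&bytes[next..]));
    }
    pieces
}

fn main() {
    // A 24-byte allocation with one 8-byte pointer stored at offset 8.
    let data = [0u8; 24];
    let pieces = split_alloc(&data, &[8], 8);
    assert_eq!(pieces.len(), 3); // bytes, pointer, bytes
}
```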

View File

@ -1,22 +1,25 @@
use std::cell::{Cell, RefCell};
use gccjit::{Block, CType, Context, Function, FunctionPtrType, FunctionType, LValue, RValue, Type};
use rustc_codegen_ssa::base::wants_msvc_seh;
use rustc_codegen_ssa::traits::{
BackendTypes,
BaseTypeMethods,
MiscMethods,
use gccjit::{
Block, CType, Context, Function, FunctionPtrType, FunctionType, LValue, Location, RValue, Type,
};
use rustc_codegen_ssa::base::wants_msvc_seh;
use rustc_codegen_ssa::errors as ssa_errors;
use rustc_codegen_ssa::traits::{BackendTypes, BaseTypeMethods, MiscMethods};
use rustc_data_structures::base_n;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_middle::span_bug;
use rustc_middle::mir::mono::CodegenUnit;
use rustc_middle::span_bug;
use rustc_middle::ty::layout::{
FnAbiError, FnAbiOf, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, HasTyCtxt, LayoutError,
LayoutOfHelpers, TyAndLayout,
};
use rustc_middle::ty::{self, Instance, ParamEnv, PolyExistentialTraitRef, Ty, TyCtxt};
use rustc_middle::ty::layout::{FnAbiError, FnAbiOf, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, HasTyCtxt, LayoutError, TyAndLayout, LayoutOfHelpers};
use rustc_session::Session;
use rustc_span::{Span, source_map::respan};
use rustc_target::abi::{call::FnAbi, HasDataLayout, PointeeInfo, Size, TargetDataLayout, VariantIdx};
use rustc_span::{source_map::respan, Span};
use rustc_target::abi::{
call::FnAbi, HasDataLayout, PointeeInfo, Size, TargetDataLayout, VariantIdx,
};
use rustc_target::spec::{HasTargetSpec, Target, TlsModel};
use crate::callee::get_fn;
@ -81,7 +84,8 @@ pub struct CodegenCx<'gcc, 'tcx> {
/// Cache function instances of monomorphic and polymorphic items
pub function_instances: RefCell<FxHashMap<Instance<'tcx>, Function<'gcc>>>,
/// Cache generated vtables
pub vtables: RefCell<FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), RValue<'gcc>>>,
pub vtables:
RefCell<FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), RValue<'gcc>>>,
// TODO(antoyo): improve the SSA API to not require those.
/// Mapping from function pointer type to indexes of on stack parameters.
@ -121,24 +125,28 @@ pub struct CodegenCx<'gcc, 'tcx> {
}
impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
pub fn new(context: &'gcc Context<'gcc>, codegen_unit: &'tcx CodegenUnit<'tcx>, tcx: TyCtxt<'tcx>, supports_128bit_integers: bool) -> Self {
pub fn new(
context: &'gcc Context<'gcc>,
codegen_unit: &'tcx CodegenUnit<'tcx>,
tcx: TyCtxt<'tcx>,
supports_128bit_integers: bool,
) -> Self {
let check_overflow = tcx.sess.overflow_checks();
let create_type = |ctype, rust_type| {
let layout = tcx.layout_of(ParamEnv::reveal_all().and(rust_type)).unwrap();
let align = layout.align.abi.bytes();
#[cfg(feature="master")]
#[cfg(feature = "master")]
{
context.new_c_type(ctype).get_aligned(align)
}
#[cfg(not(feature="master"))]
#[cfg(not(feature = "master"))]
{
// Since libgccjit 12 doesn't contain the fix to compare aligned integer types,
// only align u128 and i128.
if layout.ty.int_size_and_signed(tcx).0.bytes() == 16 {
context.new_c_type(ctype).get_aligned(align)
}
else {
} else {
context.new_c_type(ctype)
}
}
@ -153,24 +161,22 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
let u32_type = create_type(CType::UInt32t, tcx.types.u32);
let u64_type = create_type(CType::UInt64t, tcx.types.u64);
let (i128_type, u128_type) =
if supports_128bit_integers {
let i128_type = create_type(CType::Int128t, tcx.types.i128);
let u128_type = create_type(CType::UInt128t, tcx.types.u128);
(i128_type, u128_type)
}
else {
/*let layout = tcx.layout_of(ParamEnv::reveal_all().and(tcx.types.i128)).unwrap();
let i128_align = layout.align.abi.bytes();
let layout = tcx.layout_of(ParamEnv::reveal_all().and(tcx.types.u128)).unwrap();
let u128_align = layout.align.abi.bytes();*/
let (i128_type, u128_type) = if supports_128bit_integers {
let i128_type = create_type(CType::Int128t, tcx.types.i128);
let u128_type = create_type(CType::UInt128t, tcx.types.u128);
(i128_type, u128_type)
} else {
/*let layout = tcx.layout_of(ParamEnv::reveal_all().and(tcx.types.i128)).unwrap();
let i128_align = layout.align.abi.bytes();
let layout = tcx.layout_of(ParamEnv::reveal_all().and(tcx.types.u128)).unwrap();
let u128_align = layout.align.abi.bytes();*/
// TODO(antoyo): re-enable the alignment when libgccjit fixed the issue in
// gcc_jit_context_new_array_constructor (it should not use reinterpret_cast).
let i128_type = context.new_array_type(None, i64_type, 2)/*.get_aligned(i128_align)*/;
let u128_type = context.new_array_type(None, u64_type, 2)/*.get_aligned(u128_align)*/;
(i128_type, u128_type)
};
// TODO(antoyo): re-enable the alignment when libgccjit fixed the issue in
// gcc_jit_context_new_array_constructor (it should not use reinterpret_cast).
let i128_type = context.new_array_type(None, i64_type, 2)/*.get_aligned(i128_align)*/;
let u128_type = context.new_array_type(None, u64_type, 2)/*.get_aligned(u128_align)*/;
(i128_type, u128_type)
};
let tls_model = to_gcc_tls_mode(tcx.sess.tls_model());
@ -196,16 +202,65 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
let mut functions = FxHashMap::default();
let builtins = [
"__builtin_unreachable", "abort", "__builtin_expect", /*"__builtin_expect_with_probability",*/
"__builtin_constant_p", "__builtin_add_overflow", "__builtin_mul_overflow", "__builtin_saddll_overflow",
/*"__builtin_sadd_overflow",*/ "__builtin_smulll_overflow", /*"__builtin_smul_overflow",*/
"__builtin_ssubll_overflow", /*"__builtin_ssub_overflow",*/ "__builtin_sub_overflow", "__builtin_uaddll_overflow",
"__builtin_uadd_overflow", "__builtin_umulll_overflow", "__builtin_umul_overflow", "__builtin_usubll_overflow",
"__builtin_usub_overflow", "sqrtf", "sqrt", "__builtin_powif", "__builtin_powi", "sinf", "sin", "cosf", "cos",
"powf", "pow", "expf", "exp", "exp2f", "exp2", "logf", "log", "log10f", "log10", "log2f", "log2", "fmaf",
"fma", "fabsf", "fabs", "fminf", "fmin", "fmaxf", "fmax", "copysignf", "copysign", "floorf", "floor", "ceilf",
"ceil", "truncf", "trunc", "rintf", "rint", "nearbyintf", "nearbyint", "roundf", "round",
"__builtin_unreachable",
"abort",
"__builtin_expect", /*"__builtin_expect_with_probability",*/
"__builtin_constant_p",
"__builtin_add_overflow",
"__builtin_mul_overflow",
"__builtin_saddll_overflow",
/*"__builtin_sadd_overflow",*/
"__builtin_smulll_overflow", /*"__builtin_smul_overflow",*/
"__builtin_ssubll_overflow",
/*"__builtin_ssub_overflow",*/ "__builtin_sub_overflow",
"__builtin_uaddll_overflow",
"__builtin_uadd_overflow",
"__builtin_umulll_overflow",
"__builtin_umul_overflow",
"__builtin_usubll_overflow",
"__builtin_usub_overflow",
"sqrtf",
"sqrt",
"__builtin_powif",
"__builtin_powi",
"sinf",
"sin",
"cosf",
"cos",
"powf",
"pow",
"expf",
"exp",
"exp2f",
"exp2",
"logf",
"log",
"log10f",
"log10",
"log2f",
"log2",
"fmaf",
"fma",
"fabsf",
"fabs",
"fminf",
"fmin",
"fmaxf",
"fmax",
"copysignf",
"copysign",
"floorf",
"floor",
"ceilf",
"ceil",
"truncf",
"trunc",
"rintf",
"rint",
"nearbyintf",
"nearbyint",
"roundf",
"round",
];
for builtin in builtins.iter() {
@ -282,8 +337,12 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
pub fn rvalue_as_function(&self, value: RValue<'gcc>) -> Function<'gcc> {
let function: Function<'gcc> = unsafe { std::mem::transmute(value) };
debug_assert!(self.functions.borrow().values().any(|value| *value == function),
"{:?} ({:?}) is not a function", value, value.get_type());
debug_assert!(
self.functions.borrow().values().any(|value| *value == function),
"{:?} ({:?}) is not a function",
value,
value.get_type()
);
function
}
@ -305,13 +364,13 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
}
}
self.supports_128bit_integers &&
(self.u128_type.is_compatible_with(typ) || self.i128_type.is_compatible_with(typ))
self.supports_128bit_integers
&& (self.u128_type.is_compatible_with(typ) || self.i128_type.is_compatible_with(typ))
}
pub fn is_non_native_int_type(&self, typ: Type<'gcc>) -> bool {
!self.supports_128bit_integers &&
(self.u128_type.is_compatible_with(typ) || self.i128_type.is_compatible_with(typ))
!self.supports_128bit_integers
&& (self.u128_type.is_compatible_with(typ) || self.i128_type.is_compatible_with(typ))
}
pub fn is_native_int_type_or_bool(&self, typ: Type<'gcc>) -> bool {
@ -319,18 +378,23 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
}
pub fn is_int_type_or_bool(&self, typ: Type<'gcc>) -> bool {
self.is_native_int_type(typ) || self.is_non_native_int_type(typ) || typ.is_compatible_with(self.bool_type)
self.is_native_int_type(typ)
|| self.is_non_native_int_type(typ)
|| typ.is_compatible_with(self.bool_type)
}
pub fn sess(&self) -> &'tcx Session {
&self.tcx.sess
}
pub fn bitcast_if_needed(&self, value: RValue<'gcc>, expected_type: Type<'gcc>) -> RValue<'gcc> {
pub fn bitcast_if_needed(
&self,
value: RValue<'gcc>,
expected_type: Type<'gcc>,
) -> RValue<'gcc> {
if value.get_type() != expected_type {
self.context.new_bitcast(None, value, expected_type)
}
else {
} else {
value
}
}
@ -345,12 +409,14 @@ impl<'gcc, 'tcx> BackendTypes for CodegenCx<'gcc, 'tcx> {
type Funclet = (); // TODO(antoyo)
type DIScope = (); // TODO(antoyo)
type DILocation = (); // TODO(antoyo)
type DILocation = Location<'gcc>;
type DIVariable = (); // TODO(antoyo)
}
impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
fn vtables(&self) -> &RefCell<FxHashMap<(Ty<'tcx>, Option<PolyExistentialTraitRef<'tcx>>), RValue<'gcc>>> {
fn vtables(
&self,
) -> &RefCell<FxHashMap<(Ty<'tcx>, Option<PolyExistentialTraitRef<'tcx>>), RValue<'gcc>>> {
&self.vtables
}
@ -364,13 +430,11 @@ impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
fn get_fn_addr(&self, instance: Instance<'tcx>) -> RValue<'gcc> {
let func_name = self.tcx.symbol_name(instance).name;
let func =
if self.intrinsics.borrow().contains_key(func_name) {
self.intrinsics.borrow()[func_name].clone()
}
else {
get_fn(self, instance)
};
let func = if self.intrinsics.borrow().contains_key(func_name) {
self.intrinsics.borrow()[func_name].clone()
} else {
get_fn(self, instance)
};
let ptr = func.get_address(None);
// TODO(antoyo): don't do this twice: i.e. in declare_fn and here.
@ -407,37 +471,32 @@ impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
return llpersonality;
}
let tcx = self.tcx;
let func =
match tcx.lang_items().eh_personality() {
Some(def_id) if !wants_msvc_seh(self.sess()) => {
let instance =
ty::Instance::resolve(
tcx,
ty::ParamEnv::reveal_all(),
def_id,
ty::List::empty(),
)
.unwrap().unwrap();
let func = match tcx.lang_items().eh_personality() {
Some(def_id) if !wants_msvc_seh(self.sess()) => {
let instance = ty::Instance::expect_resolve(
tcx,
ty::ParamEnv::reveal_all(),
def_id,
ty::List::empty(),
);
let symbol_name = tcx.symbol_name(instance).name;
let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty());
self.linkage.set(FunctionType::Extern);
let func = self.declare_fn(symbol_name, &fn_abi);
let func: RValue<'gcc> = unsafe { std::mem::transmute(func) };
func
},
_ => {
let name =
if wants_msvc_seh(self.sess()) {
"__CxxFrameHandler3"
}
else {
"rust_eh_personality"
};
let func = self.declare_func(name, self.type_i32(), &[], true);
unsafe { std::mem::transmute(func) }
}
};
let symbol_name = tcx.symbol_name(instance).name;
let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty());
self.linkage.set(FunctionType::Extern);
let func = self.declare_fn(symbol_name, &fn_abi);
let func: RValue<'gcc> = unsafe { std::mem::transmute(func) };
func
}
_ => {
let name = if wants_msvc_seh(self.sess()) {
"__CxxFrameHandler3"
} else {
"rust_eh_personality"
};
let func = self.declare_func(name, self.type_i32(), &[], true);
unsafe { std::mem::transmute(func) }
}
};
// TODO(antoyo): apply target cpu attributes.
self.eh_personality.set(Some(func));
func
@ -467,8 +526,7 @@ impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
let entry_name = self.sess().target.entry_name.as_ref();
if self.get_declared_value(entry_name).is_none() {
Some(self.declare_entry_fn(entry_name, fn_type, ()))
}
else {
} else {
// If the symbol already exists, it is an error: for example, the user wrote
// #[no_mangle] extern "C" fn main(..) {..}
// instead of #[start]
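
The `eh_personality` match earlier in this file either resolves the `eh_personality` lang item or falls back to a well-known symbol name, depending on whether MSVC-style SEH is wanted. A small sketch of just that name selection; the booleans are stand-ins for the lang-item lookup and `wants_msvc_seh`, and the real lang-item path uses the resolved instance's mangled symbol name:

```rust
/// Mirror the personality selection above: prefer the lang item unless
/// MSVC-style SEH is in use, otherwise fall back to the default names.
fn personality_symbol(has_lang_item: bool, wants_msvc_seh: bool) -> &'static str {
    if has_lang_item && !wants_msvc_seh {
        // In the real code this is the mangled symbol of the resolved instance.
        "rust_eh_personality"
    } else if wants_msvc_seh {
        "__CxxFrameHandler3"
    } else {
        "rust_eh_personality"
    }
}

fn main() {
    assert_eq!(personality_symbol(false, true), "__CxxFrameHandler3");
    assert_eq!(personality_symbol(true, false), "rust_eh_personality");
}
```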

View File

@ -1,9 +1,14 @@
use gccjit::RValue;
use rustc_codegen_ssa::mir::debuginfo::{FunctionDebugContext, VariableKind};
use crate::rustc_index::Idx;
use gccjit::{Location, RValue};
use rustc_codegen_ssa::mir::debuginfo::{DebugScope, FunctionDebugContext, VariableKind};
use rustc_codegen_ssa::traits::{DebugInfoBuilderMethods, DebugInfoMethods};
use rustc_middle::mir;
use rustc_data_structures::sync::Lrc;
use rustc_index::bit_set::BitSet;
use rustc_index::IndexVec;
use rustc_middle::mir::{self, Body, SourceScope};
use rustc_middle::ty::{Instance, PolyExistentialTraitRef, Ty};
use rustc_span::{SourceFile, Span, Symbol};
use rustc_session::config::DebugInfo;
use rustc_span::{BytePos, Pos, SourceFile, SourceFileAndLine, Span, Symbol};
use rustc_target::abi::call::FnAbi;
use rustc_target::abi::Size;
use std::ops::Range;
@ -11,31 +16,183 @@ use std::ops::Range;
use crate::builder::Builder;
use crate::context::CodegenCx;
pub(super) const UNKNOWN_LINE_NUMBER: u32 = 0;
pub(super) const UNKNOWN_COLUMN_NUMBER: u32 = 0;
impl<'a, 'gcc, 'tcx> DebugInfoBuilderMethods for Builder<'a, 'gcc, 'tcx> {
// FIXME(eddyb) find a common convention for all of the debuginfo-related
// names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.).
fn dbg_var_addr(
&mut self,
_dbg_var: Self::DIVariable,
_scope_metadata: Self::DIScope,
_dbg_loc: Self::DILocation,
_variable_alloca: Self::Value,
_direct_offset: Size,
_indirect_offsets: &[Size],
_fragment: Option<Range<Size>>,
) {
unimplemented!();
// FIXME(tempdragon): Not sure if this is correct, probably wrong but still keep it here.
#[cfg(feature = "master")]
_variable_alloca.set_location(_dbg_loc);
}
fn insert_reference_to_gdb_debug_scripts_section_global(&mut self) {
// TODO(antoyo): insert reference to gdb debug scripts section global.
}
fn set_var_name(&mut self, _value: RValue<'gcc>, _name: &str) {
unimplemented!();
/// FIXME(tempdragon): Currently, this function is not yet implemented. It seems that the
/// debug name and the mangled name should both be included in the LValues.
/// Besides, a function to get the rvalue type (`m_is_lvalue`) should also be included.
fn set_var_name(&mut self, _value: RValue<'gcc>, _name: &str) {}
fn set_dbg_loc(&mut self, dbg_loc: Self::DILocation) {
self.location = Some(dbg_loc);
}
}
/// Generate the `debug_context` for a MIR `Body`.
/// # Source of Origin
/// Copied from `create_scope_map.rs` of rustc_codegen_llvm
fn compute_mir_scopes<'gcc, 'tcx>(
cx: &CodegenCx<'gcc, 'tcx>,
instance: Instance<'tcx>,
mir: &Body<'tcx>,
debug_context: &mut FunctionDebugContext<'tcx, (), Location<'gcc>>,
) {
// Find all scopes with variables defined in them.
let variables = if cx.sess().opts.debuginfo == DebugInfo::Full {
let mut vars = BitSet::new_empty(mir.source_scopes.len());
// FIXME(eddyb) take into account that arguments always have debuginfo,
// irrespective of their name (assuming full debuginfo is enabled).
// NOTE(eddyb) actually, on second thought, those are always in the
// function scope, which always exists.
for var_debug_info in &mir.var_debug_info {
vars.insert(var_debug_info.source_info.scope);
}
Some(vars)
} else {
// Nothing to emit, of course.
None
};
let mut instantiated = BitSet::new_empty(mir.source_scopes.len());
// Instantiate all scopes.
for idx in 0..mir.source_scopes.len() {
let scope = SourceScope::new(idx);
make_mir_scope(cx, instance, mir, &variables, debug_context, &mut instantiated, scope);
}
assert!(instantiated.count() == mir.source_scopes.len());
}
/// Update the `debug_context`, adding a new scope to it
/// if it has not already been added, as tracked in `instantiated`.
///
/// # Source of Origin
/// Copied from `create_scope_map.rs` of rustc_codegen_llvm
/// FIXME(tempdragon/?): Add Scope Support Here.
fn make_mir_scope<'gcc, 'tcx>(
cx: &CodegenCx<'gcc, 'tcx>,
instance: Instance<'tcx>,
mir: &Body<'tcx>,
variables: &Option<BitSet<SourceScope>>,
debug_context: &mut FunctionDebugContext<'tcx, (), Location<'gcc>>,
instantiated: &mut BitSet<SourceScope>,
scope: SourceScope,
) {
if instantiated.contains(scope) {
return;
}
fn set_dbg_loc(&mut self, _dbg_loc: Self::DILocation) {
unimplemented!();
let scope_data = &mir.source_scopes[scope];
let parent_scope = if let Some(parent) = scope_data.parent_scope {
make_mir_scope(cx, instance, mir, variables, debug_context, instantiated, parent);
debug_context.scopes[parent]
} else {
// The root is the function itself.
let file = cx.sess().source_map().lookup_source_file(mir.span.lo());
debug_context.scopes[scope] = DebugScope {
file_start_pos: file.start_pos,
file_end_pos: file.end_position(),
..debug_context.scopes[scope]
};
instantiated.insert(scope);
return;
};
if let Some(vars) = variables {
if !vars.contains(scope) && scope_data.inlined.is_none() {
// Do not create a DIScope if there are no variables defined in this
// MIR `SourceScope`, and it's not `inlined`, to avoid debuginfo bloat.
debug_context.scopes[scope] = parent_scope;
instantiated.insert(scope);
return;
}
}
let loc = cx.lookup_debug_loc(scope_data.span.lo());
// FIXME(tempdragon): Add the scope related code here if the scope is supported.
let dbg_scope = ();
let inlined_at = scope_data.inlined.map(|(_, callsite_span)| {
// FIXME(eddyb) this doesn't account for the macro-related
// `Span` fixups that `rustc_codegen_ssa::mir::debuginfo` does.
let callsite_scope = parent_scope.adjust_dbg_scope_for_span(cx, callsite_span);
cx.dbg_loc(callsite_scope, parent_scope.inlined_at, callsite_span)
});
let p_inlined_at = parent_scope.inlined_at;
// TODO(tempdragon): dbg_scope: Add support for scope extension here.
inlined_at.or(p_inlined_at);
debug_context.scopes[scope] = DebugScope {
dbg_scope,
inlined_at,
file_start_pos: loc.file.start_pos,
file_end_pos: loc.file.end_position(),
};
instantiated.insert(scope);
}
/// A source code location used to generate debug information.
// FIXME(eddyb) rename this to better indicate it's a duplicate of
// `rustc_span::Loc` rather than `DILocation`, perhaps by making
// `lookup_char_pos` return the right information instead.
pub struct DebugLoc {
/// Information about the original source file.
pub file: Lrc<SourceFile>,
/// The (1-based) line number.
pub line: u32,
/// The (1-based) column number.
pub col: u32,
}
impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
/// Looks up debug source information about a `BytePos`.
// FIXME(eddyb) rename this to better indicate it's a duplicate of
// `lookup_char_pos` rather than `dbg_loc`, perhaps by making
// `lookup_char_pos` return the right information instead.
// Source of Origin: cg_llvm
pub fn lookup_debug_loc(&self, pos: BytePos) -> DebugLoc {
let (file, line, col) = match self.sess().source_map().lookup_line(pos) {
Ok(SourceFileAndLine { sf: file, line }) => {
let line_pos = file.lines()[line];
// Use 1-based indexing.
let line = (line + 1) as u32;
let col = (file.relative_position(pos) - line_pos).to_u32() + 1;
(file, line, col)
}
Err(file) => (file, UNKNOWN_LINE_NUMBER, UNKNOWN_COLUMN_NUMBER),
};
// For MSVC, omit the column number.
// Otherwise, emit it. This mimics clang behaviour.
// See discussion in https://github.com/rust-lang/rust/issues/42921
if self.sess().target.is_like_msvc {
DebugLoc { file, line, col: UNKNOWN_COLUMN_NUMBER }
} else {
DebugLoc { file, line, col }
}
}
}
@ -51,13 +208,31 @@ impl<'gcc, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
fn create_function_debug_context(
&self,
_instance: Instance<'tcx>,
_fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
_llfn: RValue<'gcc>,
_mir: &mir::Body<'tcx>,
instance: Instance<'tcx>,
fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
llfn: RValue<'gcc>,
mir: &mir::Body<'tcx>,
) -> Option<FunctionDebugContext<'tcx, Self::DIScope, Self::DILocation>> {
// TODO(antoyo)
None
if self.sess().opts.debuginfo == DebugInfo::None {
return None;
}
// Initialize fn debug context (including scopes).
let empty_scope = DebugScope {
dbg_scope: self.dbg_scope_fn(instance, fn_abi, Some(llfn)),
inlined_at: None,
file_start_pos: BytePos(0),
file_end_pos: BytePos(0),
};
let mut fn_debug_context = FunctionDebugContext {
scopes: IndexVec::from_elem(empty_scope, &mir.source_scopes.as_slice()),
inlined_function_scopes: Default::default(),
};
// Fill in all the scopes, with the information from the MIR body.
compute_mir_scopes(self, instance, mir, &mut fn_debug_context);
Some(fn_debug_context)
}
fn extend_scope_to_file(
@ -65,11 +240,11 @@ impl<'gcc, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
_scope_metadata: Self::DIScope,
_file: &SourceFile,
) -> Self::DIScope {
unimplemented!();
// TODO(antoyo): implement.
}
fn debuginfo_finalize(&self) {
// TODO(antoyo)
self.context.set_debug_info(true)
}
fn create_dbg_var(
@ -80,7 +255,6 @@ impl<'gcc, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
_variable_kind: VariableKind,
_span: Span,
) -> Self::DIVariable {
unimplemented!();
}
fn dbg_scope_fn(
@ -89,15 +263,40 @@ impl<'gcc, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
_fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
_maybe_definition_llfn: Option<RValue<'gcc>>,
) -> Self::DIScope {
unimplemented!();
// TODO(antoyo): implement.
}
fn dbg_loc(
&self,
_scope: Self::DIScope,
_inlined_at: Option<Self::DILocation>,
_span: Span,
span: Span,
) -> Self::DILocation {
unimplemented!();
let pos = span.lo();
let DebugLoc { file, line, col } = self.lookup_debug_loc(pos);
let loc = match &file.name {
rustc_span::FileName::Real(name) => match name {
rustc_span::RealFileName::LocalPath(name) => {
if let Some(name) = name.to_str() {
self.context.new_location(name, line as i32, col as i32)
} else {
Location::null()
}
}
rustc_span::RealFileName::Remapped { local_path, virtual_name: _ } => {
if let Some(name) = local_path.as_ref() {
if let Some(name) = name.to_str() {
self.context.new_location(name, line as i32, col as i32)
} else {
Location::null()
}
} else {
Location::null()
}
}
},
_ => Location::null(),
};
loc
}
}
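
`lookup_debug_loc` above turns a byte position into 1-based line and column numbers, and MSVC-like targets then drop the column to mimic clang. A self-contained sketch of that conversion over a plain string, where the computed line-start offsets stand in for `SourceFile::lines()` and `relative_position`:

```rust
const UNKNOWN_COLUMN_NUMBER: u32 = 0;

/// Compute a 1-based (line, column) pair for byte position `pos` in `src`,
/// dropping the column for MSVC-like targets as the code above does.
fn debug_loc(src: &str, pos: usize, is_like_msvc: bool) -> (u32, u32) {
    // Byte offsets at which each line starts (stand-in for SourceFile::lines()).
    let mut line_starts = vec![0usize];
    for (i, b) in src.bytes().enumerate() {
        if b == b'\n' {
            line_starts.push(i + 1);
        }
    }
    // Index of the last line start at or before `pos`.
    let line_idx = match line_starts.binary_search(&pos) {
        Ok(i) => i,
        Err(i) => i - 1,
    };
    let line = (line_idx + 1) as u32; // 1-based line
    let col = (pos - line_starts[line_idx]) as u32 + 1; // 1-based column
    if is_like_msvc { (line, UNKNOWN_COLUMN_NUMBER) } else { (line, col) }
}

fn main() {
    let src = "fn main() {\n    let x = 1;\n}\n";
    assert_eq!(debug_loc(src, 16, false), (2, 5)); // points at `let`
    assert_eq!(debug_loc(src, 16, true), (2, 0)); // column omitted for MSVC
}
```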

View File

@ -1,6 +1,6 @@
use gccjit::{Function, FunctionType, GlobalKind, LValue, RValue, Type};
#[cfg(feature="master")]
#[cfg(feature = "master")]
use gccjit::{FnAttribute, ToRValue};
use gccjit::{Function, FunctionType, GlobalKind, LValue, RValue, Type};
use rustc_codegen_ssa::traits::BaseTypeMethods;
use rustc_middle::ty::Ty;
use rustc_span::Symbol;
@ -11,7 +11,13 @@ use crate::context::CodegenCx;
use crate::intrinsic::llvm;
impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
pub fn get_or_insert_global(&self, name: &str, ty: Type<'gcc>, is_tls: bool, link_section: Option<Symbol>) -> LValue<'gcc> {
pub fn get_or_insert_global(
&self,
name: &str,
ty: Type<'gcc>,
is_tls: bool,
link_section: Option<Symbol>,
) -> LValue<'gcc> {
if self.globals.borrow().contains_key(name) {
let typ = self.globals.borrow()[name].get_type();
let global = self.context.new_global(None, GlobalKind::Imported, typ, name);
@ -22,8 +28,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
global.set_link_section(link_section.as_str());
}
global
}
else {
} else {
self.declare_global(name, ty, GlobalKind::Exported, is_tls, link_section)
}
}
@ -33,19 +38,37 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
self.context.new_global(None, GlobalKind::Internal, ty, &name)
}
pub fn declare_global_with_linkage(&self, name: &str, ty: Type<'gcc>, linkage: GlobalKind) -> LValue<'gcc> {
pub fn declare_global_with_linkage(
&self,
name: &str,
ty: Type<'gcc>,
linkage: GlobalKind,
) -> LValue<'gcc> {
let global = self.context.new_global(None, linkage, ty, name);
let global_address = global.get_address(None);
self.globals.borrow_mut().insert(name.to_string(), global_address);
global
}
pub fn declare_func(&self, name: &str, return_type: Type<'gcc>, params: &[Type<'gcc>], variadic: bool) -> Function<'gcc> {
pub fn declare_func(
&self,
name: &str,
return_type: Type<'gcc>,
params: &[Type<'gcc>],
variadic: bool,
) -> Function<'gcc> {
self.linkage.set(FunctionType::Extern);
declare_raw_fn(self, name, () /*llvm::CCallConv*/, return_type, params, variadic)
}
pub fn declare_global(&self, name: &str, ty: Type<'gcc>, global_kind: GlobalKind, is_tls: bool, link_section: Option<Symbol>) -> LValue<'gcc> {
pub fn declare_global(
&self,
name: &str,
ty: Type<'gcc>,
global_kind: GlobalKind,
is_tls: bool,
link_section: Option<Symbol>,
) -> LValue<'gcc> {
let global = self.context.new_global(None, global_kind, ty, name);
if is_tls {
global.set_tls_model(self.tls_model);
@ -65,13 +88,25 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
global
}
pub fn declare_entry_fn(&self, name: &str, _fn_type: Type<'gcc>, callconv: () /*llvm::CCallConv*/) -> RValue<'gcc> {
pub fn declare_entry_fn(
&self,
name: &str,
_fn_type: Type<'gcc>,
callconv: (), /*llvm::CCallConv*/
) -> RValue<'gcc> {
// TODO(antoyo): use the fn_type parameter.
let const_string = self.context.new_type::<u8>().make_pointer().make_pointer();
let return_type = self.type_i32();
let variadic = false;
self.linkage.set(FunctionType::Exported);
let func = declare_raw_fn(self, name, callconv, return_type, &[self.type_i32(), const_string], variadic);
let func = declare_raw_fn(
self,
name,
callconv,
return_type,
&[self.type_i32(), const_string],
variadic,
);
// NOTE: the current_func also needs to be set here, because get_fn() is not called
// for the main function.
*self.current_func.borrow_mut() = Some(func);
@ -85,19 +120,32 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
arguments_type,
is_c_variadic,
on_stack_param_indices,
#[cfg(feature="master")]
#[cfg(feature = "master")]
fn_attributes,
} = fn_abi.gcc_type(self);
let func = declare_raw_fn(self, name, () /*fn_abi.llvm_cconv()*/, return_type, &arguments_type, is_c_variadic);
let func = declare_raw_fn(
self,
name,
(), /*fn_abi.llvm_cconv()*/
return_type,
&arguments_type,
is_c_variadic,
);
self.on_stack_function_params.borrow_mut().insert(func, on_stack_param_indices);
#[cfg(feature="master")]
#[cfg(feature = "master")]
for fn_attr in fn_attributes {
func.add_attribute(fn_attr);
}
func
}
pub fn define_global(&self, name: &str, ty: Type<'gcc>, is_tls: bool, link_section: Option<Symbol>) -> LValue<'gcc> {
pub fn define_global(
&self,
name: &str,
ty: Type<'gcc>,
is_tls: bool,
link_section: Option<Symbol>,
) -> LValue<'gcc> {
self.get_or_insert_global(name, ty, is_tls, link_section)
}
@ -111,62 +159,84 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
///
/// If there's a value with the same name already declared, the function will
/// update the declaration and return the existing Value instead.
fn declare_raw_fn<'gcc>(cx: &CodegenCx<'gcc, '_>, name: &str, _callconv: () /*llvm::CallConv*/, return_type: Type<'gcc>, param_types: &[Type<'gcc>], variadic: bool) -> Function<'gcc> {
fn declare_raw_fn<'gcc>(
cx: &CodegenCx<'gcc, '_>,
name: &str,
_callconv: (), /*llvm::CallConv*/
return_type: Type<'gcc>,
param_types: &[Type<'gcc>],
variadic: bool,
) -> Function<'gcc> {
if name.starts_with("llvm.") {
let intrinsic = llvm::intrinsic(name, cx);
cx.intrinsics.borrow_mut().insert(name.to_string(), intrinsic);
return intrinsic;
}
let func =
if cx.functions.borrow().contains_key(name) {
cx.functions.borrow()[name]
}
else {
let params: Vec<_> = param_types.into_iter().enumerate()
.map(|(index, param)| cx.context.new_parameter(None, *param, &format!("param{}", index))) // TODO(antoyo): set name.
let func = if cx.functions.borrow().contains_key(name) {
cx.functions.borrow()[name]
} else {
let params: Vec<_> = param_types
.into_iter()
.enumerate()
.map(|(index, param)| {
cx.context.new_parameter(None, *param, &format!("param{}", index))
}) // TODO(antoyo): set name.
.collect();
#[cfg(not(feature = "master"))]
let name = mangle_name(name);
let func =
cx.context.new_function(None, cx.linkage.get(), return_type, &params, &name, variadic);
cx.functions.borrow_mut().insert(name.to_string(), func);
#[cfg(feature = "master")]
if name == "rust_eh_personality" {
// NOTE: GCC will sometimes change the personality function set on a function from
// rust_eh_personality to __gcc_personality_v0 as an optimization.
// As such, we need to create a weak alias from __gcc_personality_v0 to
// rust_eh_personality in order to avoid a linker error.
// This needs to be weak in order to still allow using the standard
// __gcc_personality_v0 when linking to it.
// Since aliases don't work (maybe because of a bug in LTO partitioning?), we
// create a wrapper function that calls rust_eh_personality.
let params: Vec<_> = param_types
.into_iter()
.enumerate()
.map(|(index, param)| {
cx.context.new_parameter(None, *param, &format!("param{}", index))
}) // TODO(antoyo): set name.
.collect();
let func = cx.context.new_function(None, cx.linkage.get(), return_type, &params, mangle_name(name), variadic);
cx.functions.borrow_mut().insert(name.to_string(), func);
let gcc_func = cx.context.new_function(
None,
FunctionType::Exported,
return_type,
&params,
"__gcc_personality_v0",
variadic,
);
#[cfg(feature="master")]
if name == "rust_eh_personality" {
// NOTE: GCC will sometimes change the personality function set on a function from
// rust_eh_personality to __gcc_personality_v0 as an optimization.
// As such, we need to create a weak alias from __gcc_personality_v0 to
// rust_eh_personality in order to avoid a linker error.
// This needs to be weak in order to still allow using the standard
// __gcc_personality_v0 when linking to it.
// Since aliases don't work (maybe because of a bug in LTO partitioning?), we
// create a wrapper function that calls rust_eh_personality.
// We need a normal extern function for the crates that access rust_eh_personality
// without defining it, otherwise we'll get a compiler error.
//
// For the crate defining it, that needs to be a weak alias instead.
gcc_func.add_attribute(FnAttribute::Weak);
let params: Vec<_> = param_types.into_iter().enumerate()
.map(|(index, param)| cx.context.new_parameter(None, *param, &format!("param{}", index))) // TODO(antoyo): set name.
.collect();
let gcc_func = cx.context.new_function(None, FunctionType::Exported, return_type, &params, "__gcc_personality_v0", variadic);
// We need a normal extern function for the crates that access rust_eh_personality
// without defining it, otherwise we'll get a compiler error.
//
// For the crate defining it, that needs to be a weak alias instead.
gcc_func.add_attribute(FnAttribute::Weak);
let block = gcc_func.new_block("start");
let mut args = vec![];
for param in &params {
args.push(param.to_rvalue());
}
let call = cx.context.new_call(None, func, &args);
if return_type == cx.type_void() {
block.add_eval(None, call);
block.end_with_void_return(None);
}
else {
block.end_with_return(None, call);
}
let block = gcc_func.new_block("start");
let mut args = vec![];
for param in &params {
args.push(param.to_rvalue());
}
let call = cx.context.new_call(None, func, &args);
if return_type == cx.type_void() {
block.add_eval(None, call);
block.end_with_void_return(None);
} else {
block.end_with_return(None, call);
}
}
func
};
func
};
// TODO(antoyo): set function calling convention.
// TODO(antoyo): set unnamed address.
@ -179,15 +249,24 @@ fn declare_raw_fn<'gcc>(cx: &CodegenCx<'gcc, '_>, name: &str, _callconv: () /*ll
}
// FIXME(antoyo): this is a hack because libgccjit currently only supports alpha, num and _.
// Unsupported characters: `$` and `.`.
pub fn mangle_name(name: &str) -> String {
name.replace(|char: char| {
if !char.is_alphanumeric() && char != '_' {
debug_assert!("$.*".contains(char), "Unsupported char in function name {}: {}", name, char);
true
}
else {
false
}
}, "_")
// Unsupported characters: `$`, `.` and `*`.
// FIXME(antoyo): `*` might not be expected: https://github.com/rust-lang/rust/issues/116979#issuecomment-1840926865
#[cfg(not(feature = "master"))]
fn mangle_name(name: &str) -> String {
name.replace(
|char: char| {
if !char.is_alphanumeric() && char != '_' {
debug_assert!(
"$.*".contains(char),
"Unsupported char in function name {}: {}",
name,
char
);
true
} else {
false
}
},
"_",
)
}
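
The fallback `mangle_name` above only exists for libgccjit versions that reject `$`, `.` and `*` in symbol names; it rewrites every character that is not alphanumeric or `_` into `_`. A tiny illustrative reimplementation with a usage example (the real function additionally debug-asserts that only those three characters ever occur):

```rust
/// Replace every character that is not alphanumeric or `_` with `_`,
/// the same substitution performed by the fallback mangler above.
fn mangle_name(name: &str) -> String {
    name.replace(|c: char| !c.is_alphanumeric() && c != '_', "_")
}

fn main() {
    assert_eq!(mangle_name("core..fmt..Debug"), "core__fmt__Debug");
    assert_eq!(mangle_name("drop$in$place"), "drop_in_place");
}
```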

View File

@ -1,9 +1,6 @@
use rustc_errors::{
DiagCtxt, DiagArgValue, Diag, EmissionGuarantee, IntoDiagnostic, IntoDiagnosticArg, Level,
};
use rustc_errors::{Diag, DiagCtxt, Diagnostic, EmissionGuarantee, Level};
use rustc_macros::{Diagnostic, Subdiagnostic};
use rustc_span::Span;
use std::borrow::Cow;
use crate::fluent_generated as fluent;
@ -31,18 +28,6 @@ pub(crate) enum PossibleFeature<'a> {
None,
}
struct ExitCode(Option<i32>);
impl IntoDiagnosticArg for ExitCode {
fn into_diagnostic_arg(self) -> DiagArgValue {
let ExitCode(exit_code) = self;
match exit_code {
Some(t) => t.into_diagnostic_arg(),
None => DiagArgValue::Str(Cow::Borrowed("<signal>")),
}
}
}
#[derive(Diagnostic)]
#[diag(codegen_gcc_lto_not_supported)]
pub(crate) struct LTONotSupported;
@ -80,12 +65,6 @@ pub(crate) struct CopyBitcode {
#[note]
pub(crate) struct DynamicLinkingWithLTO;
#[derive(Diagnostic)]
#[diag(codegen_gcc_load_bitcode)]
pub(crate) struct LoadBitcode {
name: String,
}
#[derive(Diagnostic)]
#[diag(codegen_gcc_lto_disallowed)]
pub(crate) struct LtoDisallowed;
@ -110,8 +89,8 @@ pub(crate) struct TargetFeatureDisableOrEnable<'a> {
#[help(codegen_gcc_missing_features)]
pub(crate) struct MissingFeatures;
impl<G: EmissionGuarantee> IntoDiagnostic<'_, G> for TargetFeatureDisableOrEnable<'_> {
fn into_diagnostic(self, dcx: &'_ DiagCtxt, level: Level) -> Diag<'_, G> {
impl<G: EmissionGuarantee> Diagnostic<'_, G> for TargetFeatureDisableOrEnable<'_> {
fn into_diag(self, dcx: &'_ DiagCtxt, level: Level) -> Diag<'_, G> {
let mut diag = Diag::new(dcx, level, fluent::codegen_gcc_target_feature_disable_or_enable);
if let Some(span) = self.span {
diag.span(span);

View File

@ -1,4 +1,4 @@
#[cfg(feature="master")]
#[cfg(feature = "master")]
use gccjit::Context;
use smallvec::{smallvec, SmallVec};
@ -7,7 +7,10 @@ use rustc_middle::bug;
use rustc_session::Session;
use rustc_target::target_features::RUSTC_SPECIFIC_FEATURES;
use crate::errors::{PossibleFeature, TargetFeatureDisableOrEnable, UnknownCTargetFeature, UnknownCTargetFeaturePrefix};
use crate::errors::{
PossibleFeature, TargetFeatureDisableOrEnable, UnknownCTargetFeature,
UnknownCTargetFeaturePrefix,
};
/// The list of GCC features computed from CLI flags (`-Ctarget-cpu`, `-Ctarget-feature`,
/// `--target` and similar).
@ -44,7 +47,10 @@ pub(crate) fn global_gcc_features(sess: &Session, diagnostics: bool) -> Vec<Stri
// -Ctarget-features
let supported_features = sess.target.supported_target_features();
let mut featsmap = FxHashMap::default();
let feats = sess.opts.cg.target_feature
let feats = sess
.opts
.cg
.target_feature
.split(',')
.filter_map(|s| {
let enable_disable = match s.chars().next() {
@ -69,16 +75,14 @@ pub(crate) fn global_gcc_features(sess: &Session, diagnostics: bool) -> Vec<Stri
None
}
});
let unknown_feature =
if let Some(rust_feature) = rust_feature {
UnknownCTargetFeature {
feature,
rust_feature: PossibleFeature::Some { rust_feature },
}
let unknown_feature = if let Some(rust_feature) = rust_feature {
UnknownCTargetFeature {
feature,
rust_feature: PossibleFeature::Some { rust_feature },
}
else {
UnknownCTargetFeature { feature, rust_feature: PossibleFeature::None }
};
} else {
UnknownCTargetFeature { feature, rust_feature: PossibleFeature::None }
};
sess.dcx().emit_warn(unknown_feature);
}
@ -95,18 +99,18 @@ pub(crate) fn global_gcc_features(sess: &Session, diagnostics: bool) -> Vec<Stri
// passing requests down to GCC. This means that all in-language
// features also work on the command line instead of having two
// different names when the GCC name and the Rust name differ.
Some(to_gcc_features(sess, feature)
.iter()
.flat_map(|feat| to_gcc_features(sess, feat).into_iter())
.map(|feature| {
if enable_disable == '-' {
format!("-{}", feature)
}
else {
feature.to_string()
}
})
.collect::<Vec<_>>(),
Some(
to_gcc_features(sess, feature)
.iter()
.flat_map(|feat| to_gcc_features(sess, feat).into_iter())
.map(|feature| {
if enable_disable == '-' {
format!("-{}", feature)
} else {
feature.to_string()
}
})
.collect::<Vec<_>>(),
)
})
.flatten();
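// Editor's aside (not in the diff): the `-Ctarget-feature` handling above is essentially
// a split-and-classify pass over the comma-separated flag value. A minimal standalone
// sketch with a hypothetical `parse_target_features` helper:
fn parse_target_features(s: &str) -> Vec<(bool, &str)> {
    s.split(',')
        .filter_map(|feature| match feature.chars().next() {
            Some('+') => Some((true, &feature[1..])),
            Some('-') => Some((false, &feature[1..])),
            // rustc warns on malformed entries rather than silently dropping them
            _ => None,
        })
        .collect()
}
// parse_target_features("+avx2,-sse4.2") == [(true, "avx2"), (false, "sse4.2")]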
@ -184,7 +188,10 @@ pub fn to_gcc_features<'a>(sess: &Session, s: &'a str) -> SmallVec<[&'a str; 2]>
// Given a map from target_features to whether they are enabled or disabled,
// ensure only valid combinations are allowed.
pub fn check_tied_features(sess: &Session, features: &FxHashMap<&str, bool>) -> Option<&'static [&'static str]> {
pub fn check_tied_features(
sess: &Session,
features: &FxHashMap<&str, bool>,
) -> Option<&'static [&'static str]> {
for tied in sess.target.tied_target_features() {
// Tied features must be set to the same value, or not set at all
let mut tied_iter = tied.iter();
@ -199,7 +206,7 @@ pub fn check_tied_features(sess: &Session, features: &FxHashMap<&str, bool>) ->
fn arch_to_gcc(name: &str) -> &str {
match name {
"M68020" => "68020",
_ => name,
_ => name,
}
}
@ -208,15 +215,13 @@ fn handle_native(name: &str) -> &str {
return arch_to_gcc(name);
}
#[cfg(feature="master")]
#[cfg(feature = "master")]
{
// Get the native arch.
let context = Context::default();
context.get_target_info().arch().unwrap()
.to_str()
.unwrap()
context.get_target_info().arch().unwrap().to_str().unwrap()
}
#[cfg(not(feature="master"))]
#[cfg(not(feature = "master"))]
unimplemented!();
}

File diff suppressed because it is too large

View File

@ -151,8 +151,10 @@ match name {
"llvm.amdgcn.msad.u8" => "__builtin_amdgcn_msad_u8",
"llvm.amdgcn.perm" => "__builtin_amdgcn_perm",
"llvm.amdgcn.permlane16" => "__builtin_amdgcn_permlane16",
"llvm.amdgcn.permlane16.var" => "__builtin_amdgcn_permlane16_var",
"llvm.amdgcn.permlane64" => "__builtin_amdgcn_permlane64",
"llvm.amdgcn.permlanex16" => "__builtin_amdgcn_permlanex16",
"llvm.amdgcn.permlanex16.var" => "__builtin_amdgcn_permlanex16_var",
"llvm.amdgcn.qsad.pk.u16.u8" => "__builtin_amdgcn_qsad_pk_u16_u8",
"llvm.amdgcn.queue.ptr" => "__builtin_amdgcn_queue_ptr",
"llvm.amdgcn.rcp.legacy" => "__builtin_amdgcn_rcp_legacy",
@ -160,11 +162,20 @@ match name {
"llvm.amdgcn.readlane" => "__builtin_amdgcn_readlane",
"llvm.amdgcn.rsq.legacy" => "__builtin_amdgcn_rsq_legacy",
"llvm.amdgcn.s.barrier" => "__builtin_amdgcn_s_barrier",
"llvm.amdgcn.s.barrier.init" => "__builtin_amdgcn_s_barrier_init",
"llvm.amdgcn.s.barrier.join" => "__builtin_amdgcn_s_barrier_join",
"llvm.amdgcn.s.barrier.leave" => "__builtin_amdgcn_s_barrier_leave",
"llvm.amdgcn.s.barrier.signal" => "__builtin_amdgcn_s_barrier_signal",
"llvm.amdgcn.s.barrier.signal.isfirst" => "__builtin_amdgcn_s_barrier_signal_isfirst",
"llvm.amdgcn.s.barrier.signal.isfirst.var" => "__builtin_amdgcn_s_barrier_signal_isfirst_var",
"llvm.amdgcn.s.barrier.signal.var" => "__builtin_amdgcn_s_barrier_signal_var",
"llvm.amdgcn.s.barrier.wait" => "__builtin_amdgcn_s_barrier_wait",
"llvm.amdgcn.s.dcache.inv" => "__builtin_amdgcn_s_dcache_inv",
"llvm.amdgcn.s.dcache.inv.vol" => "__builtin_amdgcn_s_dcache_inv_vol",
"llvm.amdgcn.s.dcache.wb" => "__builtin_amdgcn_s_dcache_wb",
"llvm.amdgcn.s.dcache.wb.vol" => "__builtin_amdgcn_s_dcache_wb_vol",
"llvm.amdgcn.s.decperflevel" => "__builtin_amdgcn_s_decperflevel",
"llvm.amdgcn.s.get.barrier.state" => "__builtin_amdgcn_s_get_barrier_state",
"llvm.amdgcn.s.get.waveid.in.workgroup" => "__builtin_amdgcn_s_get_waveid_in_workgroup",
"llvm.amdgcn.s.getpc" => "__builtin_amdgcn_s_getpc",
"llvm.amdgcn.s.getreg" => "__builtin_amdgcn_s_getreg",
@ -176,8 +187,10 @@ match name {
"llvm.amdgcn.s.setprio" => "__builtin_amdgcn_s_setprio",
"llvm.amdgcn.s.setreg" => "__builtin_amdgcn_s_setreg",
"llvm.amdgcn.s.sleep" => "__builtin_amdgcn_s_sleep",
"llvm.amdgcn.s.sleep.var" => "__builtin_amdgcn_s_sleep_var",
"llvm.amdgcn.s.wait.event.export.ready" => "__builtin_amdgcn_s_wait_event_export_ready",
"llvm.amdgcn.s.waitcnt" => "__builtin_amdgcn_s_waitcnt",
"llvm.amdgcn.s.wakeup.barrier" => "__builtin_amdgcn_s_wakeup_barrier",
"llvm.amdgcn.sad.hi.u8" => "__builtin_amdgcn_sad_hi_u8",
"llvm.amdgcn.sad.u16" => "__builtin_amdgcn_sad_u16",
"llvm.amdgcn.sad.u8" => "__builtin_amdgcn_sad_u8",
@ -314,6 +327,8 @@ match name {
// bpf
"llvm.bpf.btf.type.id" => "__builtin_bpf_btf_type_id",
"llvm.bpf.compare" => "__builtin_bpf_compare",
"llvm.bpf.getelementptr.and.load" => "__builtin_bpf_getelementptr_and_load",
"llvm.bpf.getelementptr.and.store" => "__builtin_bpf_getelementptr_and_store",
"llvm.bpf.load.byte" => "__builtin_bpf_load_byte",
"llvm.bpf.load.half" => "__builtin_bpf_load_half",
"llvm.bpf.load.word" => "__builtin_bpf_load_word",
@ -5776,14 +5791,6 @@ match name {
"llvm.s390.verimf" => "__builtin_s390_verimf",
"llvm.s390.verimg" => "__builtin_s390_verimg",
"llvm.s390.verimh" => "__builtin_s390_verimh",
"llvm.s390.verllb" => "__builtin_s390_verllb",
"llvm.s390.verllf" => "__builtin_s390_verllf",
"llvm.s390.verllg" => "__builtin_s390_verllg",
"llvm.s390.verllh" => "__builtin_s390_verllh",
"llvm.s390.verllvb" => "__builtin_s390_verllvb",
"llvm.s390.verllvf" => "__builtin_s390_verllvf",
"llvm.s390.verllvg" => "__builtin_s390_verllvg",
"llvm.s390.verllvh" => "__builtin_s390_verllvh",
"llvm.s390.vfaeb" => "__builtin_s390_vfaeb",
"llvm.s390.vfaef" => "__builtin_s390_vfaef",
"llvm.s390.vfaeh" => "__builtin_s390_vfaeh",
@ -5815,7 +5822,7 @@ match name {
"llvm.s390.vistrh" => "__builtin_s390_vistrh",
"llvm.s390.vlbb" => "__builtin_s390_vlbb",
"llvm.s390.vll" => "__builtin_s390_vll",
"llvm.s390.vlrl" => "__builtin_s390_vlrl",
"llvm.s390.vlrl" => "__builtin_s390_vlrlr",
"llvm.s390.vmaeb" => "__builtin_s390_vmaeb",
"llvm.s390.vmaef" => "__builtin_s390_vmaef",
"llvm.s390.vmaeh" => "__builtin_s390_vmaeh",
@ -5885,7 +5892,7 @@ match name {
"llvm.s390.vstrczb" => "__builtin_s390_vstrczb",
"llvm.s390.vstrczf" => "__builtin_s390_vstrczf",
"llvm.s390.vstrczh" => "__builtin_s390_vstrczh",
"llvm.s390.vstrl" => "__builtin_s390_vstrl",
"llvm.s390.vstrl" => "__builtin_s390_vstrlr",
"llvm.s390.vsumb" => "__builtin_s390_vsumb",
"llvm.s390.vsumgf" => "__builtin_s390_vsumgf",
"llvm.s390.vsumgh" => "__builtin_s390_vsumgh",

View File

@ -3,94 +3,185 @@ use std::borrow::Cow;
use gccjit::{Function, FunctionPtrType, RValue, ToRValue, UnaryOp};
use rustc_codegen_ssa::traits::BuilderMethods;
use crate::{context::CodegenCx, builder::Builder};
use crate::{builder::Builder, context::CodegenCx};
pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>(builder: &Builder<'a, 'gcc, 'tcx>, gcc_func: FunctionPtrType<'gcc>, mut args: Cow<'b, [RValue<'gcc>]>, func_name: &str, original_function_name: Option<&String>) -> Cow<'b, [RValue<'gcc>]> {
pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>(
builder: &Builder<'a, 'gcc, 'tcx>,
gcc_func: FunctionPtrType<'gcc>,
mut args: Cow<'b, [RValue<'gcc>]>,
func_name: &str,
original_function_name: Option<&String>,
) -> Cow<'b, [RValue<'gcc>]> {
// Some LLVM intrinsics do not map 1-to-1 to GCC intrinsics, so we add the missing
// arguments here.
if gcc_func.get_param_count() != args.len() {
match &*func_name {
// NOTE: the following intrinsics have a different number of parameters in LLVM and GCC.
"__builtin_ia32_prold512_mask" | "__builtin_ia32_pmuldq512_mask" | "__builtin_ia32_pmuludq512_mask"
| "__builtin_ia32_pmaxsd512_mask" | "__builtin_ia32_pmaxsq512_mask" | "__builtin_ia32_pmaxsq256_mask"
| "__builtin_ia32_pmaxsq128_mask" | "__builtin_ia32_pmaxud512_mask" | "__builtin_ia32_pmaxuq512_mask"
| "__builtin_ia32_pminsd512_mask" | "__builtin_ia32_pminsq512_mask" | "__builtin_ia32_pminsq256_mask"
| "__builtin_ia32_pminsq128_mask" | "__builtin_ia32_pminud512_mask" | "__builtin_ia32_pminuq512_mask"
| "__builtin_ia32_prolq512_mask" | "__builtin_ia32_prorq512_mask" | "__builtin_ia32_pslldi512_mask"
| "__builtin_ia32_psrldi512_mask" | "__builtin_ia32_psllqi512_mask" | "__builtin_ia32_psrlqi512_mask"
| "__builtin_ia32_pslld512_mask" | "__builtin_ia32_psrld512_mask" | "__builtin_ia32_psllq512_mask"
| "__builtin_ia32_psrlq512_mask" | "__builtin_ia32_psrad512_mask" | "__builtin_ia32_psraq512_mask"
| "__builtin_ia32_psradi512_mask" | "__builtin_ia32_psraqi512_mask" | "__builtin_ia32_psrav16si_mask"
| "__builtin_ia32_psrav8di_mask" | "__builtin_ia32_prolvd512_mask" | "__builtin_ia32_prorvd512_mask"
| "__builtin_ia32_prolvq512_mask" | "__builtin_ia32_prorvq512_mask" | "__builtin_ia32_psllv16si_mask"
| "__builtin_ia32_psrlv16si_mask" | "__builtin_ia32_psllv8di_mask" | "__builtin_ia32_psrlv8di_mask"
| "__builtin_ia32_permvarsi512_mask" | "__builtin_ia32_vpermilvarps512_mask"
| "__builtin_ia32_vpermilvarpd512_mask" | "__builtin_ia32_permvardi512_mask"
| "__builtin_ia32_permvarsf512_mask" | "__builtin_ia32_permvarqi512_mask"
| "__builtin_ia32_permvarqi256_mask" | "__builtin_ia32_permvarqi128_mask"
| "__builtin_ia32_vpmultishiftqb512_mask" | "__builtin_ia32_vpmultishiftqb256_mask"
| "__builtin_ia32_vpmultishiftqb128_mask"
=> {
"__builtin_ia32_prold512_mask"
| "__builtin_ia32_pmuldq512_mask"
| "__builtin_ia32_pmuludq512_mask"
| "__builtin_ia32_pmaxsd512_mask"
| "__builtin_ia32_pmaxsq512_mask"
| "__builtin_ia32_pmaxsq256_mask"
| "__builtin_ia32_pmaxsq128_mask"
| "__builtin_ia32_pmaxud512_mask"
| "__builtin_ia32_pmaxuq512_mask"
| "__builtin_ia32_pminsd512_mask"
| "__builtin_ia32_pminsq512_mask"
| "__builtin_ia32_pminsq256_mask"
| "__builtin_ia32_pminsq128_mask"
| "__builtin_ia32_pminud512_mask"
| "__builtin_ia32_pminuq512_mask"
| "__builtin_ia32_prolq512_mask"
| "__builtin_ia32_prorq512_mask"
| "__builtin_ia32_pslldi512_mask"
| "__builtin_ia32_psrldi512_mask"
| "__builtin_ia32_psllqi512_mask"
| "__builtin_ia32_psrlqi512_mask"
| "__builtin_ia32_pslld512_mask"
| "__builtin_ia32_psrld512_mask"
| "__builtin_ia32_psllq512_mask"
| "__builtin_ia32_psrlq512_mask"
| "__builtin_ia32_psrad512_mask"
| "__builtin_ia32_psraq512_mask"
| "__builtin_ia32_psradi512_mask"
| "__builtin_ia32_psraqi512_mask"
| "__builtin_ia32_psrav16si_mask"
| "__builtin_ia32_psrav8di_mask"
| "__builtin_ia32_prolvd512_mask"
| "__builtin_ia32_prorvd512_mask"
| "__builtin_ia32_prolvq512_mask"
| "__builtin_ia32_prorvq512_mask"
| "__builtin_ia32_psllv16si_mask"
| "__builtin_ia32_psrlv16si_mask"
| "__builtin_ia32_psllv8di_mask"
| "__builtin_ia32_psrlv8di_mask"
| "__builtin_ia32_permvarsi512_mask"
| "__builtin_ia32_vpermilvarps512_mask"
| "__builtin_ia32_vpermilvarpd512_mask"
| "__builtin_ia32_permvardi512_mask"
| "__builtin_ia32_permvarsf512_mask"
| "__builtin_ia32_permvarqi512_mask"
| "__builtin_ia32_permvarqi256_mask"
| "__builtin_ia32_permvarqi128_mask"
| "__builtin_ia32_vpmultishiftqb512_mask"
| "__builtin_ia32_vpmultishiftqb256_mask"
| "__builtin_ia32_vpmultishiftqb128_mask" => {
let mut new_args = args.to_vec();
let arg3_type = gcc_func.get_param_type(2);
let first_arg = builder.current_func().new_local(None, arg3_type, "undefined_for_intrinsic").to_rvalue();
let first_arg = builder
.current_func()
.new_local(None, arg3_type, "undefined_for_intrinsic")
.to_rvalue();
new_args.push(first_arg);
let arg4_type = gcc_func.get_param_type(3);
let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1);
new_args.push(minus_one);
args = new_args.into();
},
"__builtin_ia32_pmaxuq256_mask" | "__builtin_ia32_pmaxuq128_mask" | "__builtin_ia32_pminuq256_mask"
| "__builtin_ia32_pminuq128_mask" | "__builtin_ia32_prold256_mask" | "__builtin_ia32_prold128_mask"
| "__builtin_ia32_prord512_mask" | "__builtin_ia32_prord256_mask" | "__builtin_ia32_prord128_mask"
| "__builtin_ia32_prolq256_mask" | "__builtin_ia32_prolq128_mask" | "__builtin_ia32_prorq256_mask"
| "__builtin_ia32_prorq128_mask" | "__builtin_ia32_psraq256_mask" | "__builtin_ia32_psraq128_mask"
| "__builtin_ia32_psraqi256_mask" | "__builtin_ia32_psraqi128_mask" | "__builtin_ia32_psravq256_mask"
| "__builtin_ia32_psravq128_mask" | "__builtin_ia32_prolvd256_mask" | "__builtin_ia32_prolvd128_mask"
| "__builtin_ia32_prorvd256_mask" | "__builtin_ia32_prorvd128_mask" | "__builtin_ia32_prolvq256_mask"
| "__builtin_ia32_prolvq128_mask" | "__builtin_ia32_prorvq256_mask" | "__builtin_ia32_prorvq128_mask"
| "__builtin_ia32_permvardi256_mask" | "__builtin_ia32_permvardf512_mask" | "__builtin_ia32_permvardf256_mask"
| "__builtin_ia32_pmulhuw512_mask" | "__builtin_ia32_pmulhw512_mask" | "__builtin_ia32_pmulhrsw512_mask"
| "__builtin_ia32_pmaxuw512_mask" | "__builtin_ia32_pmaxub512_mask" | "__builtin_ia32_pmaxsw512_mask"
| "__builtin_ia32_pmaxsb512_mask" | "__builtin_ia32_pminuw512_mask" | "__builtin_ia32_pminub512_mask"
| "__builtin_ia32_pminsw512_mask" | "__builtin_ia32_pminsb512_mask"
| "__builtin_ia32_pmaddwd512_mask" | "__builtin_ia32_pmaddubsw512_mask" | "__builtin_ia32_packssdw512_mask"
| "__builtin_ia32_packsswb512_mask" | "__builtin_ia32_packusdw512_mask" | "__builtin_ia32_packuswb512_mask"
| "__builtin_ia32_pavgw512_mask" | "__builtin_ia32_pavgb512_mask" | "__builtin_ia32_psllw512_mask"
| "__builtin_ia32_psllwi512_mask" | "__builtin_ia32_psllv32hi_mask" | "__builtin_ia32_psrlw512_mask"
| "__builtin_ia32_psrlwi512_mask" | "__builtin_ia32_psllv16hi_mask" | "__builtin_ia32_psllv8hi_mask"
| "__builtin_ia32_psrlv32hi_mask" | "__builtin_ia32_psraw512_mask" | "__builtin_ia32_psrawi512_mask"
| "__builtin_ia32_psrlv16hi_mask" | "__builtin_ia32_psrlv8hi_mask" | "__builtin_ia32_psrav32hi_mask"
| "__builtin_ia32_permvarhi512_mask" | "__builtin_ia32_pshufb512_mask" | "__builtin_ia32_psrav16hi_mask"
| "__builtin_ia32_psrav8hi_mask" | "__builtin_ia32_permvarhi256_mask" | "__builtin_ia32_permvarhi128_mask"
=> {
}
"__builtin_ia32_pmaxuq256_mask"
| "__builtin_ia32_pmaxuq128_mask"
| "__builtin_ia32_pminuq256_mask"
| "__builtin_ia32_pminuq128_mask"
| "__builtin_ia32_prold256_mask"
| "__builtin_ia32_prold128_mask"
| "__builtin_ia32_prord512_mask"
| "__builtin_ia32_prord256_mask"
| "__builtin_ia32_prord128_mask"
| "__builtin_ia32_prolq256_mask"
| "__builtin_ia32_prolq128_mask"
| "__builtin_ia32_prorq256_mask"
| "__builtin_ia32_prorq128_mask"
| "__builtin_ia32_psraq256_mask"
| "__builtin_ia32_psraq128_mask"
| "__builtin_ia32_psraqi256_mask"
| "__builtin_ia32_psraqi128_mask"
| "__builtin_ia32_psravq256_mask"
| "__builtin_ia32_psravq128_mask"
| "__builtin_ia32_prolvd256_mask"
| "__builtin_ia32_prolvd128_mask"
| "__builtin_ia32_prorvd256_mask"
| "__builtin_ia32_prorvd128_mask"
| "__builtin_ia32_prolvq256_mask"
| "__builtin_ia32_prolvq128_mask"
| "__builtin_ia32_prorvq256_mask"
| "__builtin_ia32_prorvq128_mask"
| "__builtin_ia32_permvardi256_mask"
| "__builtin_ia32_permvardf512_mask"
| "__builtin_ia32_permvardf256_mask"
| "__builtin_ia32_pmulhuw512_mask"
| "__builtin_ia32_pmulhw512_mask"
| "__builtin_ia32_pmulhrsw512_mask"
| "__builtin_ia32_pmaxuw512_mask"
| "__builtin_ia32_pmaxub512_mask"
| "__builtin_ia32_pmaxsw512_mask"
| "__builtin_ia32_pmaxsb512_mask"
| "__builtin_ia32_pminuw512_mask"
| "__builtin_ia32_pminub512_mask"
| "__builtin_ia32_pminsw512_mask"
| "__builtin_ia32_pminsb512_mask"
| "__builtin_ia32_pmaddwd512_mask"
| "__builtin_ia32_pmaddubsw512_mask"
| "__builtin_ia32_packssdw512_mask"
| "__builtin_ia32_packsswb512_mask"
| "__builtin_ia32_packusdw512_mask"
| "__builtin_ia32_packuswb512_mask"
| "__builtin_ia32_pavgw512_mask"
| "__builtin_ia32_pavgb512_mask"
| "__builtin_ia32_psllw512_mask"
| "__builtin_ia32_psllwi512_mask"
| "__builtin_ia32_psllv32hi_mask"
| "__builtin_ia32_psrlw512_mask"
| "__builtin_ia32_psrlwi512_mask"
| "__builtin_ia32_psllv16hi_mask"
| "__builtin_ia32_psllv8hi_mask"
| "__builtin_ia32_psrlv32hi_mask"
| "__builtin_ia32_psraw512_mask"
| "__builtin_ia32_psrawi512_mask"
| "__builtin_ia32_psrlv16hi_mask"
| "__builtin_ia32_psrlv8hi_mask"
| "__builtin_ia32_psrav32hi_mask"
| "__builtin_ia32_permvarhi512_mask"
| "__builtin_ia32_pshufb512_mask"
| "__builtin_ia32_psrav16hi_mask"
| "__builtin_ia32_psrav8hi_mask"
| "__builtin_ia32_permvarhi256_mask"
| "__builtin_ia32_permvarhi128_mask" => {
let mut new_args = args.to_vec();
let arg3_type = gcc_func.get_param_type(2);
let vector_type = arg3_type.dyncast_vector().expect("vector type");
let zero = builder.context.new_rvalue_zero(vector_type.get_element_type());
let num_units = vector_type.get_num_units();
let first_arg = builder.context.new_rvalue_from_vector(None, arg3_type, &vec![zero; num_units]);
let first_arg =
builder.context.new_rvalue_from_vector(None, arg3_type, &vec![zero; num_units]);
new_args.push(first_arg);
let arg4_type = gcc_func.get_param_type(3);
let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1);
new_args.push(minus_one);
args = new_args.into();
},
"__builtin_ia32_dbpsadbw512_mask" | "__builtin_ia32_dbpsadbw256_mask" | "__builtin_ia32_dbpsadbw128_mask" => {
}
"__builtin_ia32_dbpsadbw512_mask"
| "__builtin_ia32_dbpsadbw256_mask"
| "__builtin_ia32_dbpsadbw128_mask" => {
let mut new_args = args.to_vec();
let arg4_type = gcc_func.get_param_type(3);
let vector_type = arg4_type.dyncast_vector().expect("vector type");
let zero = builder.context.new_rvalue_zero(vector_type.get_element_type());
let num_units = vector_type.get_num_units();
let first_arg = builder.context.new_rvalue_from_vector(None, arg4_type, &vec![zero; num_units]);
let first_arg =
builder.context.new_rvalue_from_vector(None, arg4_type, &vec![zero; num_units]);
new_args.push(first_arg);
let arg5_type = gcc_func.get_param_type(4);
let minus_one = builder.context.new_rvalue_from_int(arg5_type, -1);
new_args.push(minus_one);
args = new_args.into();
},
"__builtin_ia32_vplzcntd_512_mask" | "__builtin_ia32_vplzcntd_256_mask" | "__builtin_ia32_vplzcntd_128_mask"
| "__builtin_ia32_vplzcntq_512_mask" | "__builtin_ia32_vplzcntq_256_mask" | "__builtin_ia32_vplzcntq_128_mask" => {
}
"__builtin_ia32_vplzcntd_512_mask"
| "__builtin_ia32_vplzcntd_256_mask"
| "__builtin_ia32_vplzcntd_128_mask"
| "__builtin_ia32_vplzcntq_512_mask"
| "__builtin_ia32_vplzcntq_256_mask"
| "__builtin_ia32_vplzcntq_128_mask" => {
let mut new_args = args.to_vec();
// Remove last arg as it doesn't seem to be used in GCC and is always false.
new_args.pop();
@ -98,37 +189,45 @@ pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>(builder: &Builder<'a, 'gcc
let vector_type = arg2_type.dyncast_vector().expect("vector type");
let zero = builder.context.new_rvalue_zero(vector_type.get_element_type());
let num_units = vector_type.get_num_units();
let first_arg = builder.context.new_rvalue_from_vector(None, arg2_type, &vec![zero; num_units]);
let first_arg =
builder.context.new_rvalue_from_vector(None, arg2_type, &vec![zero; num_units]);
new_args.push(first_arg);
let arg3_type = gcc_func.get_param_type(2);
let minus_one = builder.context.new_rvalue_from_int(arg3_type, -1);
new_args.push(minus_one);
args = new_args.into();
},
"__builtin_ia32_vpconflictsi_512_mask" | "__builtin_ia32_vpconflictsi_256_mask"
| "__builtin_ia32_vpconflictsi_128_mask" | "__builtin_ia32_vpconflictdi_512_mask"
| "__builtin_ia32_vpconflictdi_256_mask" | "__builtin_ia32_vpconflictdi_128_mask" => {
}
"__builtin_ia32_vpconflictsi_512_mask"
| "__builtin_ia32_vpconflictsi_256_mask"
| "__builtin_ia32_vpconflictsi_128_mask"
| "__builtin_ia32_vpconflictdi_512_mask"
| "__builtin_ia32_vpconflictdi_256_mask"
| "__builtin_ia32_vpconflictdi_128_mask" => {
let mut new_args = args.to_vec();
let arg2_type = gcc_func.get_param_type(1);
let vector_type = arg2_type.dyncast_vector().expect("vector type");
let zero = builder.context.new_rvalue_zero(vector_type.get_element_type());
let num_units = vector_type.get_num_units();
let first_arg = builder.context.new_rvalue_from_vector(None, arg2_type, &vec![zero; num_units]);
let first_arg =
builder.context.new_rvalue_from_vector(None, arg2_type, &vec![zero; num_units]);
new_args.push(first_arg);
let arg3_type = gcc_func.get_param_type(2);
let minus_one = builder.context.new_rvalue_from_int(arg3_type, -1);
new_args.push(minus_one);
args = new_args.into();
},
"__builtin_ia32_pternlogd512_mask" | "__builtin_ia32_pternlogd256_mask"
| "__builtin_ia32_pternlogd128_mask" | "__builtin_ia32_pternlogq512_mask"
| "__builtin_ia32_pternlogq256_mask" | "__builtin_ia32_pternlogq128_mask" => {
}
"__builtin_ia32_pternlogd512_mask"
| "__builtin_ia32_pternlogd256_mask"
| "__builtin_ia32_pternlogd128_mask"
| "__builtin_ia32_pternlogq512_mask"
| "__builtin_ia32_pternlogq256_mask"
| "__builtin_ia32_pternlogq128_mask" => {
let mut new_args = args.to_vec();
let arg5_type = gcc_func.get_param_type(4);
let minus_one = builder.context.new_rvalue_from_int(arg5_type, -1);
new_args.push(minus_one);
args = new_args.into();
},
}
"__builtin_ia32_vfmaddps512_mask" | "__builtin_ia32_vfmaddpd512_mask" => {
let mut new_args = args.to_vec();
@ -154,24 +253,33 @@ pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>(builder: &Builder<'a, 'gcc
}
args = new_args.into();
},
"__builtin_ia32_addps512_mask" | "__builtin_ia32_addpd512_mask"
| "__builtin_ia32_subps512_mask" | "__builtin_ia32_subpd512_mask"
| "__builtin_ia32_mulps512_mask" | "__builtin_ia32_mulpd512_mask"
| "__builtin_ia32_divps512_mask" | "__builtin_ia32_divpd512_mask"
| "__builtin_ia32_maxps512_mask" | "__builtin_ia32_maxpd512_mask"
| "__builtin_ia32_minps512_mask" | "__builtin_ia32_minpd512_mask" => {
}
"__builtin_ia32_addps512_mask"
| "__builtin_ia32_addpd512_mask"
| "__builtin_ia32_subps512_mask"
| "__builtin_ia32_subpd512_mask"
| "__builtin_ia32_mulps512_mask"
| "__builtin_ia32_mulpd512_mask"
| "__builtin_ia32_divps512_mask"
| "__builtin_ia32_divpd512_mask"
| "__builtin_ia32_maxps512_mask"
| "__builtin_ia32_maxpd512_mask"
| "__builtin_ia32_minps512_mask"
| "__builtin_ia32_minpd512_mask" => {
let mut new_args = args.to_vec();
let last_arg = new_args.pop().expect("last arg");
let arg3_type = gcc_func.get_param_type(2);
let undefined = builder.current_func().new_local(None, arg3_type, "undefined_for_intrinsic").to_rvalue();
let undefined = builder
.current_func()
.new_local(None, arg3_type, "undefined_for_intrinsic")
.to_rvalue();
new_args.push(undefined);
let arg4_type = gcc_func.get_param_type(3);
let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1);
new_args.push(minus_one);
new_args.push(last_arg);
args = new_args.into();
},
}
"__builtin_ia32_vfmaddsubps512_mask" | "__builtin_ia32_vfmaddsubpd512_mask" => {
let mut new_args = args.to_vec();
let last_arg = new_args.pop().expect("last arg");
@ -180,54 +288,72 @@ pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>(builder: &Builder<'a, 'gcc
new_args.push(minus_one);
new_args.push(last_arg);
args = new_args.into();
},
"__builtin_ia32_vpermi2vard512_mask" | "__builtin_ia32_vpermi2vard256_mask"
| "__builtin_ia32_vpermi2vard128_mask" | "__builtin_ia32_vpermi2varq512_mask"
| "__builtin_ia32_vpermi2varq256_mask" | "__builtin_ia32_vpermi2varq128_mask"
| "__builtin_ia32_vpermi2varps512_mask" | "__builtin_ia32_vpermi2varps256_mask"
| "__builtin_ia32_vpermi2varps128_mask" | "__builtin_ia32_vpermi2varpd512_mask"
| "__builtin_ia32_vpermi2varpd256_mask" | "__builtin_ia32_vpermi2varpd128_mask" | "__builtin_ia32_vpmadd52huq512_mask"
| "__builtin_ia32_vpmadd52luq512_mask" | "__builtin_ia32_vpmadd52huq256_mask" | "__builtin_ia32_vpmadd52luq256_mask"
| "__builtin_ia32_vpmadd52huq128_mask"
=> {
}
"__builtin_ia32_vpermi2vard512_mask"
| "__builtin_ia32_vpermi2vard256_mask"
| "__builtin_ia32_vpermi2vard128_mask"
| "__builtin_ia32_vpermi2varq512_mask"
| "__builtin_ia32_vpermi2varq256_mask"
| "__builtin_ia32_vpermi2varq128_mask"
| "__builtin_ia32_vpermi2varps512_mask"
| "__builtin_ia32_vpermi2varps256_mask"
| "__builtin_ia32_vpermi2varps128_mask"
| "__builtin_ia32_vpermi2varpd512_mask"
| "__builtin_ia32_vpermi2varpd256_mask"
| "__builtin_ia32_vpermi2varpd128_mask"
| "__builtin_ia32_vpmadd52huq512_mask"
| "__builtin_ia32_vpmadd52luq512_mask"
| "__builtin_ia32_vpmadd52huq256_mask"
| "__builtin_ia32_vpmadd52luq256_mask"
| "__builtin_ia32_vpmadd52huq128_mask" => {
let mut new_args = args.to_vec();
let arg4_type = gcc_func.get_param_type(3);
let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1);
new_args.push(minus_one);
args = new_args.into();
},
"__builtin_ia32_cvtdq2ps512_mask" | "__builtin_ia32_cvtudq2ps512_mask"
| "__builtin_ia32_sqrtps512_mask" | "__builtin_ia32_sqrtpd512_mask" => {
}
"__builtin_ia32_cvtdq2ps512_mask"
| "__builtin_ia32_cvtudq2ps512_mask"
| "__builtin_ia32_sqrtps512_mask"
| "__builtin_ia32_sqrtpd512_mask" => {
let mut new_args = args.to_vec();
let last_arg = new_args.pop().expect("last arg");
let arg2_type = gcc_func.get_param_type(1);
let undefined = builder.current_func().new_local(None, arg2_type, "undefined_for_intrinsic").to_rvalue();
let undefined = builder
.current_func()
.new_local(None, arg2_type, "undefined_for_intrinsic")
.to_rvalue();
new_args.push(undefined);
let arg3_type = gcc_func.get_param_type(2);
let minus_one = builder.context.new_rvalue_from_int(arg3_type, -1);
new_args.push(minus_one);
new_args.push(last_arg);
args = new_args.into();
},
}
"__builtin_ia32_stmxcsr" => {
args = vec![].into();
},
"__builtin_ia32_addcarryx_u64" | "__builtin_ia32_sbb_u64" | "__builtin_ia32_addcarryx_u32" | "__builtin_ia32_sbb_u32" => {
}
"__builtin_ia32_addcarryx_u64"
| "__builtin_ia32_sbb_u64"
| "__builtin_ia32_addcarryx_u32"
| "__builtin_ia32_sbb_u32" => {
let mut new_args = args.to_vec();
let arg2_type = gcc_func.get_param_type(1);
let variable = builder.current_func().new_local(None, arg2_type, "addcarryResult");
new_args.push(variable.get_address(None));
args = new_args.into();
},
"__builtin_ia32_vpermt2varqi512_mask" | "__builtin_ia32_vpermt2varqi256_mask"
| "__builtin_ia32_vpermt2varqi128_mask" | "__builtin_ia32_vpermt2varhi512_mask"
| "__builtin_ia32_vpermt2varhi256_mask" | "__builtin_ia32_vpermt2varhi128_mask"
=> {
}
"__builtin_ia32_vpermt2varqi512_mask"
| "__builtin_ia32_vpermt2varqi256_mask"
| "__builtin_ia32_vpermt2varqi128_mask"
| "__builtin_ia32_vpermt2varhi512_mask"
| "__builtin_ia32_vpermt2varhi256_mask"
| "__builtin_ia32_vpermt2varhi128_mask" => {
let new_args = args.to_vec();
let arg4_type = gcc_func.get_param_type(3);
let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1);
args = vec![new_args[1], new_args[0], new_args[2], minus_one].into();
},
}
"__builtin_ia32_xrstor" | "__builtin_ia32_xsavec" => {
let new_args = args.to_vec();
let thirty_two = builder.context.new_rvalue_from_int(new_args[1].get_type(), 32);
@ -235,22 +361,25 @@ pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>(builder: &Builder<'a, 'gcc
let arg2_type = gcc_func.get_param_type(1);
let arg2 = builder.context.new_cast(None, arg2, arg2_type);
args = vec![new_args[0], arg2].into();
},
}
// These builtins are sent one more argument than needed.
"__builtin_prefetch" => {
let mut new_args = args.to_vec();
new_args.pop();
args = new_args.into();
},
}
// The GCC version returns one value of the tuple through a pointer.
"__builtin_ia32_rdrand64_step" => {
let arg = builder.current_func().new_local(None, builder.ulonglong_type, "return_rdrand_arg");
let arg = builder.current_func().new_local(
None,
builder.ulonglong_type,
"return_rdrand_arg",
);
args = vec![arg.get_address(None)].into();
},
}
_ => (),
}
}
else {
} else {
match &*func_name {
"__builtin_ia32_rndscaless_mask_round" | "__builtin_ia32_rndscalesd_mask_round" => {
let new_args = args.to_vec();
@ -259,10 +388,10 @@ pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>(builder: &Builder<'a, 'gcc
let arg4_type = gcc_func.get_param_type(3);
let arg4 = builder.context.new_bitcast(None, new_args[2], arg4_type);
args = vec![new_args[0], new_args[1], arg3, arg4, new_args[3], new_args[5]].into();
},
}
// NOTE: the LLVM intrinsic receives 3 floats, but the GCC builtin requires 3 vectors.
// FIXME: the intrinsics like _mm_mask_fmadd_sd should probably directly call the GCC
// instrinsic to avoid this.
// intrinsic to avoid this.
"__builtin_ia32_vfmaddss3_round" => {
let new_args = args.to_vec();
let arg1_type = gcc_func.get_param_type(0);
@ -272,7 +401,7 @@ pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>(builder: &Builder<'a, 'gcc
let b = builder.context.new_rvalue_from_vector(None, arg2_type, &[new_args[1]; 4]);
let c = builder.context.new_rvalue_from_vector(None, arg3_type, &[new_args[2]; 4]);
args = vec![a, b, c, new_args[3]].into();
},
}
"__builtin_ia32_vfmaddsd3_round" => {
let new_args = args.to_vec();
let arg1_type = gcc_func.get_param_type(0);
@ -282,25 +411,34 @@ pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>(builder: &Builder<'a, 'gcc
let b = builder.context.new_rvalue_from_vector(None, arg2_type, &[new_args[1]; 2]);
let c = builder.context.new_rvalue_from_vector(None, arg3_type, &[new_args[2]; 2]);
args = vec![a, b, c, new_args[3]].into();
},
"__builtin_ia32_vfmaddsubpd256" | "__builtin_ia32_vfmaddsubps" | "__builtin_ia32_vfmaddsubps256"
| "__builtin_ia32_vfmaddsubpd" => {
}
"__builtin_ia32_vfmaddsubpd256"
| "__builtin_ia32_vfmaddsubps"
| "__builtin_ia32_vfmaddsubps256"
| "__builtin_ia32_vfmaddsubpd" => {
if let Some(original_function_name) = original_function_name {
match &**original_function_name {
"llvm.x86.fma.vfmsubadd.pd.256" | "llvm.x86.fma.vfmsubadd.ps" | "llvm.x86.fma.vfmsubadd.ps.256"
| "llvm.x86.fma.vfmsubadd.pd" => {
"llvm.x86.fma.vfmsubadd.pd.256"
| "llvm.x86.fma.vfmsubadd.ps"
| "llvm.x86.fma.vfmsubadd.ps.256"
| "llvm.x86.fma.vfmsubadd.pd" => {
// NOTE: since both llvm.x86.fma.vfmsubadd.ps and llvm.x86.fma.vfmaddsub.ps map to
// __builtin_ia32_vfmaddsubps, only add minus if this comes from a
// subadd LLVM intrinsic, e.g. _mm256_fmsubadd_pd.
let mut new_args = args.to_vec();
let arg3 = &mut new_args[2];
*arg3 = builder.context.new_unary_op(None, UnaryOp::Minus, arg3.get_type(), *arg3);
*arg3 = builder.context.new_unary_op(
None,
UnaryOp::Minus,
arg3.get_type(),
*arg3,
);
args = new_args.into();
},
}
_ => (),
}
}
},
}
"__builtin_ia32_ldmxcsr" => {
// The builtin __builtin_ia32_ldmxcsr takes an integer value while llvm.x86.sse.ldmxcsr takes a pointer,
// so dereference the pointer.
@ -309,23 +447,31 @@ pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>(builder: &Builder<'a, 'gcc
let arg1 = builder.context.new_cast(None, args[0], uint_ptr_type);
new_args[0] = arg1.dereference(None).to_rvalue();
args = new_args.into();
},
"__builtin_ia32_rcp14sd_mask" | "__builtin_ia32_rcp14ss_mask" | "__builtin_ia32_rsqrt14sd_mask"
| "__builtin_ia32_rsqrt14ss_mask" => {
}
"__builtin_ia32_rcp14sd_mask"
| "__builtin_ia32_rcp14ss_mask"
| "__builtin_ia32_rsqrt14sd_mask"
| "__builtin_ia32_rsqrt14ss_mask" => {
let new_args = args.to_vec();
args = vec![new_args[1], new_args[0], new_args[2], new_args[3]].into();
},
}
"__builtin_ia32_sqrtsd_mask_round" | "__builtin_ia32_sqrtss_mask_round" => {
let new_args = args.to_vec();
args = vec![new_args[1], new_args[0], new_args[2], new_args[3], new_args[4]].into();
},
"__builtin_ia32_vpshrdv_v8di" | "__builtin_ia32_vpshrdv_v4di" | "__builtin_ia32_vpshrdv_v2di" |
"__builtin_ia32_vpshrdv_v16si" | "__builtin_ia32_vpshrdv_v8si" | "__builtin_ia32_vpshrdv_v4si" |
"__builtin_ia32_vpshrdv_v32hi" | "__builtin_ia32_vpshrdv_v16hi" | "__builtin_ia32_vpshrdv_v8hi" => {
}
"__builtin_ia32_vpshrdv_v8di"
| "__builtin_ia32_vpshrdv_v4di"
| "__builtin_ia32_vpshrdv_v2di"
| "__builtin_ia32_vpshrdv_v16si"
| "__builtin_ia32_vpshrdv_v8si"
| "__builtin_ia32_vpshrdv_v4si"
| "__builtin_ia32_vpshrdv_v32hi"
| "__builtin_ia32_vpshrdv_v16hi"
| "__builtin_ia32_vpshrdv_v8hi" => {
// The first two arguments are reversed, compared to LLVM.
let new_args = args.to_vec();
args = vec![new_args[1], new_args[0], new_args[2]].into();
},
}
_ => (),
}
}
@ -333,16 +479,27 @@ pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>(builder: &Builder<'a, 'gcc
args
}
pub fn adjust_intrinsic_return_value<'a, 'gcc, 'tcx>(builder: &Builder<'a, 'gcc, 'tcx>, mut return_value: RValue<'gcc>, func_name: &str, args: &[RValue<'gcc>], args_adjusted: bool, orig_args: &[RValue<'gcc>]) -> RValue<'gcc> {
pub fn adjust_intrinsic_return_value<'a, 'gcc, 'tcx>(
builder: &Builder<'a, 'gcc, 'tcx>,
mut return_value: RValue<'gcc>,
func_name: &str,
args: &[RValue<'gcc>],
args_adjusted: bool,
orig_args: &[RValue<'gcc>],
) -> RValue<'gcc> {
match func_name {
"__builtin_ia32_vfmaddss3_round" | "__builtin_ia32_vfmaddsd3_round" => {
#[cfg(feature="master")]
#[cfg(feature = "master")]
{
let zero = builder.context.new_rvalue_zero(builder.int_type);
return_value = builder.context.new_vector_access(None, return_value, zero).to_rvalue();
return_value =
builder.context.new_vector_access(None, return_value, zero).to_rvalue();
}
},
"__builtin_ia32_addcarryx_u64" | "__builtin_ia32_sbb_u64" | "__builtin_ia32_addcarryx_u32" | "__builtin_ia32_sbb_u32" => {
}
"__builtin_ia32_addcarryx_u64"
| "__builtin_ia32_sbb_u64"
| "__builtin_ia32_addcarryx_u32"
| "__builtin_ia32_sbb_u32" => {
// Both llvm.x86.addcarry.32 and llvm.x86.addcarryx.u32 point to the same GCC builtin,
// but only the former requires adjusting the return value.
// Those 2 LLVM intrinsics differ by their argument count, that's why we check if the
@ -351,10 +508,16 @@ pub fn adjust_intrinsic_return_value<'a, 'gcc, 'tcx>(builder: &Builder<'a, 'gcc,
let last_arg = args.last().expect("last arg");
let field1 = builder.context.new_field(None, builder.u8_type, "carryFlag");
let field2 = builder.context.new_field(None, args[1].get_type(), "carryResult");
let struct_type = builder.context.new_struct_type(None, "addcarryResult", &[field1, field2]);
return_value = builder.context.new_struct_constructor(None, struct_type.as_type(), None, &[return_value, last_arg.dereference(None).to_rvalue()]);
let struct_type =
builder.context.new_struct_type(None, "addcarryResult", &[field1, field2]);
return_value = builder.context.new_struct_constructor(
None,
struct_type.as_type(),
None,
&[return_value, last_arg.dereference(None).to_rvalue()],
);
}
},
}
"__builtin_ia32_stmxcsr" => {
// The builtin __builtin_ia32_stmxcsr returns a value while llvm.x86.sse.stmxcsr writes
// the result in its pointer argument.
@ -366,20 +529,24 @@ pub fn adjust_intrinsic_return_value<'a, 'gcc, 'tcx>(builder: &Builder<'a, 'gcc,
// The return value was assigned to the result pointer above. In order to not call the
// builtin twice, we overwrite the return value with a dummy value.
return_value = builder.context.new_rvalue_zero(builder.int_type);
},
}
"__builtin_ia32_rdrand64_step" => {
let random_number = args[0].dereference(None).to_rvalue();
let success_variable = builder.current_func().new_local(None, return_value.get_type(), "success");
let success_variable =
builder.current_func().new_local(None, return_value.get_type(), "success");
builder.llbb().add_assignment(None, success_variable, return_value);
let field1 = builder.context.new_field(None, random_number.get_type(), "random_number");
let field2 = builder.context.new_field(None, return_value.get_type(), "success");
let struct_type = builder.context.new_struct_type(None, "rdrand_result", &[field1, field2]);
return_value = builder.context.new_struct_constructor(None, struct_type.as_type(), None, &[
random_number,
success_variable.to_rvalue(),
]);
},
let struct_type =
builder.context.new_struct_type(None, "rdrand_result", &[field1, field2]);
return_value = builder.context.new_struct_constructor(
None,
struct_type.as_type(),
None,
&[random_number, success_variable.to_rvalue()],
);
}
_ => (),
}
@ -391,23 +558,33 @@ pub fn ignore_arg_cast(func_name: &str, index: usize, args_len: usize) -> bool {
match func_name {
// NOTE: these intrinsics have missing parameters before the last one, so ignore the
// last argument type check.
"__builtin_ia32_maxps512_mask" | "__builtin_ia32_maxpd512_mask"
| "__builtin_ia32_minps512_mask" | "__builtin_ia32_minpd512_mask" | "__builtin_ia32_sqrtps512_mask"
| "__builtin_ia32_sqrtpd512_mask" | "__builtin_ia32_addps512_mask" | "__builtin_ia32_addpd512_mask"
| "__builtin_ia32_subps512_mask" | "__builtin_ia32_subpd512_mask"
| "__builtin_ia32_mulps512_mask" | "__builtin_ia32_mulpd512_mask"
| "__builtin_ia32_divps512_mask" | "__builtin_ia32_divpd512_mask"
| "__builtin_ia32_vfmaddsubps512_mask" | "__builtin_ia32_vfmaddsubpd512_mask"
| "__builtin_ia32_cvtdq2ps512_mask" | "__builtin_ia32_cvtudq2ps512_mask" => {
if index == args_len - 1 {
return true;
}
},
"__builtin_ia32_maxps512_mask"
| "__builtin_ia32_maxpd512_mask"
| "__builtin_ia32_minps512_mask"
| "__builtin_ia32_minpd512_mask"
| "__builtin_ia32_sqrtps512_mask"
| "__builtin_ia32_sqrtpd512_mask"
| "__builtin_ia32_addps512_mask"
| "__builtin_ia32_addpd512_mask"
| "__builtin_ia32_subps512_mask"
| "__builtin_ia32_subpd512_mask"
| "__builtin_ia32_mulps512_mask"
| "__builtin_ia32_mulpd512_mask"
| "__builtin_ia32_divps512_mask"
| "__builtin_ia32_divpd512_mask"
| "__builtin_ia32_vfmaddsubps512_mask"
| "__builtin_ia32_vfmaddsubpd512_mask"
| "__builtin_ia32_cvtdq2ps512_mask"
| "__builtin_ia32_cvtudq2ps512_mask" => {
if index == args_len - 1 {
return true;
}
}
"__builtin_ia32_rndscaless_mask_round" | "__builtin_ia32_rndscalesd_mask_round" => {
if index == 2 || index == 3 {
return true;
}
},
}
"__builtin_ia32_vfmaddps512_mask" | "__builtin_ia32_vfmaddpd512_mask" => {
// Since there are two LLVM intrinsics that map to each of these GCC builtins and only
// one of them has a missing parameter before the last one, we check the number of
@ -415,49 +592,50 @@ pub fn ignore_arg_cast(func_name: &str, index: usize, args_len: usize) -> bool {
if args_len == 4 && index == args_len - 1 {
return true;
}
},
}
// NOTE: the LLVM intrinsic receives 3 floats, but the GCC builtin requires 3 vectors.
"__builtin_ia32_vfmaddss3_round" | "__builtin_ia32_vfmaddsd3_round" => return true,
"__builtin_ia32_vplzcntd_512_mask" | "__builtin_ia32_vplzcntd_256_mask" | "__builtin_ia32_vplzcntd_128_mask"
| "__builtin_ia32_vplzcntq_512_mask" | "__builtin_ia32_vplzcntq_256_mask" | "__builtin_ia32_vplzcntq_128_mask" => {
"__builtin_ia32_vplzcntd_512_mask"
| "__builtin_ia32_vplzcntd_256_mask"
| "__builtin_ia32_vplzcntd_128_mask"
| "__builtin_ia32_vplzcntq_512_mask"
| "__builtin_ia32_vplzcntq_256_mask"
| "__builtin_ia32_vplzcntq_128_mask" => {
if index == args_len - 1 {
return true;
}
},
}
_ => (),
}
false
}
#[cfg(not(feature="master"))]
#[cfg(not(feature = "master"))]
pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function<'gcc> {
let gcc_name =
match name {
"llvm.x86.sse2.pause" => {
// NOTE: pause is only a hint, so we use a dummy built-in because target built-ins
// are not supported in libgccjit 12.
"__builtin_inff"
},
"llvm.x86.xgetbv" => {
"__builtin_trap"
},
_ => unimplemented!("unsupported LLVM intrinsic {}", name),
};
let gcc_name = match name {
"llvm.x86.sse2.pause" => {
// NOTE: pause is only a hint, so we use a dummy built-in because target built-ins
// are not supported in libgccjit 12.
"__builtin_inff"
}
"llvm.x86.xgetbv" => "__builtin_trap",
_ => unimplemented!("unsupported LLVM intrinsic {}", name),
};
let func = cx.context.get_builtin_function(gcc_name);
cx.functions.borrow_mut().insert(gcc_name.to_string(), func);
return func;
}
#[cfg(feature="master")]
#[cfg(feature = "master")]
pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function<'gcc> {
match name {
"llvm.prefetch" => {
let gcc_name = "__builtin_prefetch";
let func = cx.context.get_builtin_function(gcc_name);
cx.functions.borrow_mut().insert(gcc_name.to_string(), func);
return func
},
return func;
}
_ => (),
}

File diff suppressed because it is too large

View File

@ -1,3 +1,5 @@
use std::iter::FromIterator;
use gccjit::ToRValue;
use gccjit::{BinaryOp, RValue, Type};
#[cfg(feature = "master")]
@ -19,6 +21,8 @@ use rustc_span::{sym, Span, Symbol};
use rustc_target::abi::Align;
use crate::builder::Builder;
#[cfg(not(feature = "master"))]
use crate::common::SignType;
#[cfg(feature = "master")]
use crate::context::CodegenCx;
@ -156,6 +160,197 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
return Ok(compare_simd_types(bx, arg1, arg2, in_elem, llret_ty, cmp_op));
}
let simd_bswap = |bx: &mut Builder<'a, 'gcc, 'tcx>, vector: RValue<'gcc>| -> RValue<'gcc> {
let v_type = vector.get_type();
let vector_type = v_type.unqualified().dyncast_vector().expect("vector type");
let elem_type = vector_type.get_element_type();
let elem_size_bytes = elem_type.get_size();
if elem_size_bytes == 1 {
return vector;
}
let type_size_bytes = elem_size_bytes as u64 * in_len;
let shuffle_indices = Vec::from_iter(0..type_size_bytes);
let byte_vector_type = bx.context.new_vector_type(bx.type_u8(), type_size_bytes);
let byte_vector = bx.context.new_bitcast(None, args[0].immediate(), byte_vector_type);
#[cfg(not(feature = "master"))]
let shuffled = {
let new_elements: Vec<_> = shuffle_indices
.chunks_exact(elem_size_bytes as _)
.flat_map(|x| x.iter().rev())
.map(|&i| {
let index = bx.context.new_rvalue_from_long(bx.u64_type, i as _);
bx.extract_element(byte_vector, index)
})
.collect();
bx.context.new_rvalue_from_vector(None, byte_vector_type, &new_elements)
};
#[cfg(feature = "master")]
let shuffled = {
let indices: Vec<_> = shuffle_indices
.chunks_exact(elem_size_bytes as _)
.flat_map(|x| x.iter().rev())
.map(|&i| bx.context.new_rvalue_from_int(bx.u8_type, i as _))
.collect();
let mask = bx.context.new_rvalue_from_vector(None, byte_vector_type, &indices);
bx.context.new_rvalue_vector_perm(None, byte_vector, byte_vector, mask)
};
bx.context.new_bitcast(None, shuffled, v_type)
};
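// Editor's note, not part of the diff: to make the shuffle-index computation in
// `simd_bswap` concrete, the byte indices 0..type_size are chunked per element and
// each chunk is reversed, so a byte swap becomes a plain byte shuffle. Sketch:
fn bswap_shuffle_indices(elem_size_bytes: usize, total_bytes: u64) -> Vec<u64> {
    (0..total_bytes)
        .collect::<Vec<_>>()
        .chunks_exact(elem_size_bytes)
        .flat_map(|chunk| chunk.iter().rev().copied())
        .collect()
}
// For a vector of two u32 elements: bswap_shuffle_indices(4, 8) == [3, 2, 1, 0, 7, 6, 5, 4]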
if name == sym::simd_bswap || name == sym::simd_bitreverse {
require!(
bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer,
InvalidMonomorphization::UnsupportedOperation { span, name, in_ty, in_elem }
);
}
if name == sym::simd_bswap {
return Ok(simd_bswap(bx, args[0].immediate()));
}
// We use a different algorithm from non-vector bitreverse to take advantage of most
// processors' vector shuffle units. It works like this:
// 1. Generate pre-reversed low and high nibbles as a vector.
// 2. Byte-swap the input.
// 3. Mask off the low and high nibbles of each byte in the byte-swapped input.
// 4. Shuffle the pre-reversed low and high-nibbles using the masked nibbles as a shuffle mask.
// 5. Combine the results of the shuffle back together and cast back to the original type.
#[cfg(feature = "master")]
if name == sym::simd_bitreverse {
let vector = args[0].immediate();
let v_type = vector.get_type();
let vector_type = v_type.unqualified().dyncast_vector().expect("vector type");
let elem_type = vector_type.get_element_type();
let elem_size_bytes = elem_type.get_size();
let type_size_bytes = elem_size_bytes as u64 * in_len;
// We need to ensure at least 16 entries in our vector type, since the pre-reversed vectors
// we generate below have 16 entries in them. `new_rvalue_vector_perm` requires the mask
// vector to be of the same length as the source vectors.
let byte_vector_type_size = type_size_bytes.max(16);
let byte_vector_type = bx.context.new_vector_type(bx.u8_type, type_size_bytes);
let long_byte_vector_type = bx.context.new_vector_type(bx.u8_type, byte_vector_type_size);
// Step 1: Generate pre-reversed low and high nibbles as a vector.
let zero_byte = bx.context.new_rvalue_zero(bx.u8_type);
let hi_nibble_elements: Vec<_> = (0u8..16)
.map(|x| bx.context.new_rvalue_from_int(bx.u8_type, x.reverse_bits() as _))
.chain((16..byte_vector_type_size).map(|_| zero_byte))
.collect();
let hi_nibble =
bx.context.new_rvalue_from_vector(None, long_byte_vector_type, &hi_nibble_elements);
let lo_nibble_elements: Vec<_> = (0u8..16)
.map(|x| bx.context.new_rvalue_from_int(bx.u8_type, (x.reverse_bits() >> 4) as _))
.chain((16..byte_vector_type_size).map(|_| zero_byte))
.collect();
let lo_nibble =
bx.context.new_rvalue_from_vector(None, long_byte_vector_type, &lo_nibble_elements);
let mask = bx.context.new_rvalue_from_vector(
None,
long_byte_vector_type,
&vec![bx.context.new_rvalue_from_int(bx.u8_type, 0x0f); byte_vector_type_size as _],
);
let four_vec = bx.context.new_rvalue_from_vector(
None,
long_byte_vector_type,
&vec![bx.context.new_rvalue_from_int(bx.u8_type, 4); byte_vector_type_size as _],
);
// Step 2: Byte-swap the input.
let swapped = simd_bswap(bx, args[0].immediate());
let byte_vector = bx.context.new_bitcast(None, swapped, byte_vector_type);
// We're going to need to extend the vector with zeros to make sure that the types are the
// same, since that's what new_rvalue_vector_perm expects.
let byte_vector = if byte_vector_type_size > type_size_bytes {
let mut byte_vector_elements = Vec::with_capacity(byte_vector_type_size as _);
for i in 0..type_size_bytes {
let idx = bx.context.new_rvalue_from_int(bx.u32_type, i as _);
let val = bx.extract_element(byte_vector, idx);
byte_vector_elements.push(val);
}
for _ in type_size_bytes..byte_vector_type_size {
byte_vector_elements.push(zero_byte);
}
bx.context.new_rvalue_from_vector(None, long_byte_vector_type, &byte_vector_elements)
} else {
bx.context.new_bitcast(None, byte_vector, long_byte_vector_type)
};
// Step 3: Mask off the low and high nibbles of each byte in the byte-swapped input.
let masked_hi = (byte_vector >> four_vec) & mask;
let masked_lo = byte_vector & mask;
// Step 4: Shuffle the pre-reversed low and high-nibbles using the masked nibbles as a shuffle mask.
let hi = bx.context.new_rvalue_vector_perm(None, hi_nibble, hi_nibble, masked_lo);
let lo = bx.context.new_rvalue_vector_perm(None, lo_nibble, lo_nibble, masked_hi);
// Step 5: Combine the results of the shuffle back together and cast back to the original type.
let result = hi | lo;
let cast_ty =
bx.context.new_vector_type(elem_type, byte_vector_type_size / (elem_size_bytes as u64));
// we might need to truncate if sizeof(v_type) < sizeof(cast_type)
if type_size_bytes < byte_vector_type_size {
let cast_result = bx.context.new_bitcast(None, result, cast_ty);
let elems: Vec<_> = (0..in_len)
.map(|i| {
let idx = bx.context.new_rvalue_from_int(bx.u32_type, i as _);
bx.extract_element(cast_result, idx)
})
.collect();
return Ok(bx.context.new_rvalue_from_vector(None, v_type, &elems));
} else {
// avoid the unnecessary truncation as an optimization.
return Ok(bx.context.new_bitcast(None, result, v_type));
}
}
// since gcc doesn't have vector shuffle methods available in non-patched builds, fall back
// to component-wise bitreverses.
#[cfg(not(feature = "master"))]
if name == sym::simd_bitreverse {
let vector = args[0].immediate();
let vector_ty = vector.get_type();
let vector_type = vector_ty.unqualified().dyncast_vector().expect("vector type");
let num_elements = vector_type.get_num_units();
let elem_type = vector_type.get_element_type();
let elem_size_bytes = elem_type.get_size();
let num_type = elem_type.to_unsigned(bx.cx);
let new_elements: Vec<_> = (0..num_elements)
.map(|idx| {
let index = bx.context.new_rvalue_from_long(num_type, idx as _);
let extracted_value = bx.extract_element(vector, index).to_rvalue();
bx.bit_reverse(elem_size_bytes as u64 * 8, extracted_value)
})
.collect();
return Ok(bx.context.new_rvalue_from_vector(None, vector_ty, &new_elements));
}
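// Editor's aside, not part of the diff: the table lookup behind the nibble shuffle above
// (steps 1, 3 and 4) can be stated in scalar form. A byte is reversed by looking up its
// two nibbles in a table of pre-reversed 4-bit values and swapping their positions; the
// vector code does the same lookup with a byte shuffle.
fn bitreverse_u8_via_nibble_table(x: u8) -> u8 {
    // TABLE[i] is the 4-bit value i with its bits reversed.
    const TABLE: [u8; 16] =
        [0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE, 0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF];
    let lo = TABLE[(x & 0x0F) as usize]; // reversed low nibble
    let hi = TABLE[(x >> 4) as usize]; // reversed high nibble
    (lo << 4) | hi // the reversed nibbles swap places
}
// bitreverse_u8_via_nibble_table(0b0000_0010) == 0b0100_0000, matching u8::reverse_bits.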
if name == sym::simd_ctlz || name == sym::simd_cttz {
let vector = args[0].immediate();
let elements: Vec<_> = (0..in_len)
.map(|i| {
let index = bx.context.new_rvalue_from_long(bx.i32_type, i as i64);
let value = bx.extract_element(vector, index).to_rvalue();
if name == sym::simd_ctlz {
bx.count_leading_zeroes(value.get_type().get_size() as u64 * 8, value)
} else {
bx.count_trailing_zeroes(value.get_type().get_size() as u64 * 8, value)
}
})
.collect();
return Ok(bx.context.new_rvalue_from_vector(None, vector.get_type(), &elements));
}
if name == sym::simd_shuffle {
// Make sure this is actually an array, since typeck only checks the length-suffixed
// version of this intrinsic.
@ -504,20 +699,15 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
default: RValue<'gcc>,
pointers: RValue<'gcc>,
mask: RValue<'gcc>,
pointer_count: usize,
bx: &mut Builder<'a, 'gcc, 'tcx>,
in_len: u64,
underlying_ty: Ty<'tcx>,
invert: bool,
) -> RValue<'gcc> {
let vector_type = if pointer_count > 1 {
bx.context.new_vector_type(bx.usize_type, in_len)
} else {
vector_ty(bx, underlying_ty, in_len)
};
let elem_type = vector_type.dyncast_vector().expect("vector type").get_element_type();
let vector_type = default.get_type();
let elem_type =
vector_type.unqualified().dyncast_vector().expect("vector type").get_element_type();
let mut values = vec![];
let mut values = Vec::with_capacity(in_len as usize);
for i in 0..in_len {
let index = bx.context.new_rvalue_from_long(bx.i32_type, i as i64);
let int = bx.context.new_vector_access(None, pointers, index).to_rvalue();
@ -530,13 +720,15 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
let vector = bx.context.new_rvalue_from_vector(None, vector_type, &values);
let mut mask_types = vec![];
let mut mask_values = vec![];
let mut mask_types = Vec::with_capacity(in_len as usize);
let mut mask_values = Vec::with_capacity(in_len as usize);
for i in 0..in_len {
let index = bx.context.new_rvalue_from_long(bx.i32_type, i as i64);
mask_types.push(bx.context.new_field(None, bx.i32_type, "m"));
let mask_value = bx.context.new_vector_access(None, mask, index).to_rvalue();
let masked = bx.context.new_rvalue_from_int(bx.i32_type, in_len as i32) & mask_value;
let mask_value_cast = bx.context.new_cast(None, mask_value, bx.i32_type);
let masked =
bx.context.new_rvalue_from_int(bx.i32_type, in_len as i32) & mask_value_cast;
let value = index + masked;
mask_values.push(value);
}
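// Editor's note, not part of the diff and only a model: assuming mask lanes are either
// 0 or all-ones, the arithmetic above builds shuffle indices into the virtual
// concatenation [default ++ gathered], so `index + (in_len & mask_lane)` keeps the
// default lane when the mask is 0 and redirects to the gathered lane otherwise.
// A hypothetical scalar equivalent:
fn select_by_shuffle_index(default: &[i32], gathered: &[i32], mask: &[i32]) -> Vec<i32> {
    let len = default.len() as i32;
    (0..default.len())
        .map(|i| {
            // mask[i] == 0 keeps default[i]; mask[i] == -1 redirects to gathered[i]
            let idx = i as i32 + (len & mask[i]);
            if idx < len { default[idx as usize] } else { gathered[(idx - len) as usize] }
        })
        .collect()
}
// select_by_shuffle_index(&[1, 2], &[9, 8], &[0, -1]) == [1, 8]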
@ -665,10 +857,8 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
args[0].immediate(),
args[1].immediate(),
args[2].immediate(),
pointer_count,
bx,
in_len,
underlying_ty,
false,
));
}
@ -779,16 +969,8 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
}
}
let result = gather(
args[0].immediate(),
args[1].immediate(),
args[2].immediate(),
pointer_count,
bx,
in_len,
underlying_ty,
true,
);
let result =
gather(args[0].immediate(), args[1].immediate(), args[2].immediate(), bx, in_len, true);
let pointers = args[1].immediate();

View File

@ -4,6 +4,7 @@
* TODO(antoyo): support LTO (gcc's equivalent to Full LTO is -flto -flto-partition=one https://documentation.suse.com/sbp/all/html/SBP-GCC-10/index.html).
* For Thin LTO, this might be helpful:
* In gcc 4.6, -fwhopr was removed and its WHOPR behavior became the default with -flto. The non-WHOPR path can still be executed via -flto-partition=none.
* Or the new incremental LTO?
*
* Maybe some missing optimizations enabled by rustc's LTO are in there: https://gcc.gnu.org/onlinedocs/gcc/Optimize-Options.html
* Like -fipa-icf (should be already enabled) and maybe -fdevirtualize-at-ltrans.
@ -24,9 +25,10 @@
hash_raw_entry
)]
#![allow(broken_intra_doc_links)]
#![recursion_limit="256"]
#![recursion_limit = "256"]
#![warn(rust_2018_idioms)]
#![warn(unused_lifetimes)]
#![deny(clippy::pattern_type_mismatch)]
extern crate rustc_apfloat;
extern crate rustc_ast;
@ -37,7 +39,8 @@ extern crate rustc_errors;
extern crate rustc_fluent_macro;
extern crate rustc_fs_util;
extern crate rustc_hir;
#[cfg(feature="master")]
extern crate rustc_index;
#[cfg(feature = "master")]
extern crate rustc_interface;
extern crate rustc_macros;
extern crate rustc_metadata;
@ -77,36 +80,40 @@ mod type_of;
use std::any::Any;
use std::fmt::Debug;
#[cfg(not(feature = "master"))]
use std::sync::atomic::AtomicBool;
#[cfg(not(feature = "master"))]
use std::sync::atomic::Ordering;
use std::sync::Arc;
use std::sync::Mutex;
#[cfg(not(feature="master"))]
use std::sync::atomic::AtomicBool;
#[cfg(not(feature="master"))]
use std::sync::atomic::Ordering;
use gccjit::{Context, OptimizationLevel};
#[cfg(feature="master")]
use gccjit::{TargetInfo, Version};
#[cfg(not(feature="master"))]
use gccjit::CType;
use errors::LTONotSupported;
#[cfg(not(feature = "master"))]
use gccjit::CType;
use gccjit::{Context, OptimizationLevel};
#[cfg(feature = "master")]
use gccjit::{TargetInfo, Version};
use rustc_ast::expand::allocator::AllocatorKind;
use rustc_codegen_ssa::{CodegenResults, CompiledModule, ModuleCodegen};
use rustc_codegen_ssa::base::codegen_crate;
use rustc_codegen_ssa::back::write::{CodegenContext, FatLtoInput, ModuleConfig, TargetMachineFactoryFn};
use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule};
use rustc_codegen_ssa::back::write::{
CodegenContext, FatLtoInput, ModuleConfig, TargetMachineFactoryFn,
};
use rustc_codegen_ssa::base::codegen_crate;
use rustc_codegen_ssa::traits::{
CodegenBackend, ExtraBackendMethods, ThinBufferMethods, WriteBackendMethods,
};
use rustc_codegen_ssa::{CodegenResults, CompiledModule, ModuleCodegen};
use rustc_data_structures::fx::FxIndexMap;
use rustc_data_structures::sync::IntoDynSyncSend;
use rustc_codegen_ssa::traits::{CodegenBackend, ExtraBackendMethods, ThinBufferMethods, WriteBackendMethods};
use rustc_errors::{ErrorGuaranteed, DiagCtxt};
use rustc_errors::{DiagCtxt, ErrorGuaranteed};
use rustc_metadata::EncodedMetadata;
use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
use rustc_middle::util::Providers;
use rustc_middle::ty::TyCtxt;
use rustc_middle::util::Providers;
use rustc_session::config::{Lto, OptLevel, OutputFilenames};
use rustc_session::Session;
use rustc_span::Symbol;
use rustc_span::fatal_error::FatalError;
use rustc_span::Symbol;
use tempfile::TempDir;
use crate::back::lto::ModuleBuffer;
@ -124,13 +131,13 @@ impl<F: Fn() -> String> Drop for PrintOnPanic<F> {
}
}
#[cfg(not(feature="master"))]
#[cfg(not(feature = "master"))]
#[derive(Debug)]
pub struct TargetInfo {
supports_128bit_integers: AtomicBool,
}
#[cfg(not(feature="master"))]
#[cfg(not(feature = "master"))]
impl TargetInfo {
fn cpu_supports(&self, _feature: &str) -> bool {
false
@ -173,26 +180,26 @@ impl CodegenBackend for GccCodegenBackend {
}
fn init(&self, sess: &Session) {
#[cfg(feature="master")]
#[cfg(feature = "master")]
{
let target_cpu = target_cpu(sess);
// Get the second TargetInfo with the correct CPU features by setting the arch.
let context = Context::default();
if target_cpu != "generic" {
context.add_command_line_option(&format!("-march={}", target_cpu));
context.add_command_line_option(format!("-march={}", target_cpu));
}
**self.target_info.info.lock().expect("lock") = context.get_target_info();
}
#[cfg(feature="master")]
#[cfg(feature = "master")]
gccjit::set_global_personality_function_name(b"rust_eh_personality\0");
if sess.lto() == Lto::Thin {
sess.dcx().emit_warn(LTONotSupported {});
}
#[cfg(not(feature="master"))]
#[cfg(not(feature = "master"))]
{
let temp_dir = TempDir::new().expect("cannot create temporary directory");
let temp_file = temp_dir.into_path().join("result.asm");
@ -200,39 +207,62 @@ impl CodegenBackend for GccCodegenBackend {
check_context.set_print_errors_to_stderr(false);
let _int128_ty = check_context.new_c_type(CType::UInt128t);
// NOTE: we cannot just call compile() as this would require files other than libgccjit.so.
check_context.compile_to_file(gccjit::OutputKind::Assembler, temp_file.to_str().expect("path to str"));
self.target_info.info.lock().expect("lock").supports_128bit_integers.store(check_context.get_last_error() == Ok(None), Ordering::SeqCst);
check_context.compile_to_file(
gccjit::OutputKind::Assembler,
temp_file.to_str().expect("path to str"),
);
self.target_info
.info
.lock()
.expect("lock")
.supports_128bit_integers
.store(check_context.get_last_error() == Ok(None), Ordering::SeqCst);
}
}
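For readers who want to try this probe in isolation, here is a self-contained sketch of the same 128-bit integer check performed in init() above; it only uses the gccjit and tempfile calls already shown there and is not part of the source tree.

use gccjit::{CType, Context};
use tempfile::TempDir;

// Sketch of the probe: create a throwaway context, reference __uint128_t and ask
// libgccjit to emit assembly; a reported error means 128-bit integers are unsupported.
fn supports_128bit_integers() -> bool {
    let temp_file =
        TempDir::new().expect("cannot create temporary directory").into_path().join("result.asm");
    let check_context = Context::default();
    check_context.set_print_errors_to_stderr(false);
    let _int128_ty = check_context.new_c_type(CType::UInt128t);
    // compile() would need files other than libgccjit.so, so emit assembly to a file instead.
    check_context.compile_to_file(
        gccjit::OutputKind::Assembler,
        temp_file.to_str().expect("path to str"),
    );
    check_context.get_last_error() == Ok(None)
}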
fn provide(&self, providers: &mut Providers) {
providers.global_backend_features =
|tcx, ()| gcc_util::global_gcc_features(tcx.sess, true)
providers.global_backend_features = |tcx, ()| gcc_util::global_gcc_features(tcx.sess, true)
}
fn codegen_crate<'tcx>(&self, tcx: TyCtxt<'tcx>, metadata: EncodedMetadata, need_metadata_module: bool) -> Box<dyn Any> {
fn codegen_crate(
&self,
tcx: TyCtxt<'_>,
metadata: EncodedMetadata,
need_metadata_module: bool,
) -> Box<dyn Any> {
let target_cpu = target_cpu(tcx.sess);
let res = codegen_crate(self.clone(), tcx, target_cpu.to_string(), metadata, need_metadata_module);
let res = codegen_crate(
self.clone(),
tcx,
target_cpu.to_string(),
metadata,
need_metadata_module,
);
Box::new(res)
}
fn join_codegen(&self, ongoing_codegen: Box<dyn Any>, sess: &Session, _outputs: &OutputFilenames) -> (CodegenResults, FxIndexMap<WorkProductId, WorkProduct>) {
fn join_codegen(
&self,
ongoing_codegen: Box<dyn Any>,
sess: &Session,
_outputs: &OutputFilenames,
) -> (CodegenResults, FxIndexMap<WorkProductId, WorkProduct>) {
ongoing_codegen
.downcast::<rustc_codegen_ssa::back::write::OngoingCodegen<GccCodegenBackend>>()
.expect("Expected GccCodegenBackend's OngoingCodegen, found Box<Any>")
.join(sess)
}
fn link(&self, sess: &Session, codegen_results: CodegenResults, outputs: &OutputFilenames) -> Result<(), ErrorGuaranteed> {
fn link(
&self,
sess: &Session,
codegen_results: CodegenResults,
outputs: &OutputFilenames,
) -> Result<(), ErrorGuaranteed> {
use rustc_codegen_ssa::back::link::link_binary;
link_binary(
sess,
&crate::archive::ArArchiveBuilderBuilder,
&codegen_results,
outputs,
)
link_binary(sess, &crate::archive::ArArchiveBuilderBuilder, &codegen_results, outputs)
}
fn target_features(&self, sess: &Session, allow_unstable: bool) -> Vec<Symbol> {
@ -245,13 +275,15 @@ fn new_context<'gcc, 'tcx>(tcx: TyCtxt<'tcx>) -> Context<'gcc> {
if tcx.sess.target.arch == "x86" || tcx.sess.target.arch == "x86_64" {
context.add_command_line_option("-masm=intel");
}
#[cfg(feature="master")]
#[cfg(feature = "master")]
{
context.set_special_chars_allowed_in_func_names("$.*");
let version = Version::get();
let version = format!("{}.{}.{}", version.major, version.minor, version.patch);
context.set_output_ident(&format!("rustc version {} with libgccjit {}",
rustc_interface::util::rustc_version_str().unwrap_or("unknown version"),
version,
context.set_output_ident(&format!(
"rustc version {} with libgccjit {}",
rustc_interface::util::rustc_version_str().unwrap_or("unknown version"),
version,
));
}
// TODO(antoyo): check if this should only be added when using -Cforce-unwind-tables=n.
@ -260,26 +292,41 @@ fn new_context<'gcc, 'tcx>(tcx: TyCtxt<'tcx>) -> Context<'gcc> {
}
impl ExtraBackendMethods for GccCodegenBackend {
fn codegen_allocator<'tcx>(&self, tcx: TyCtxt<'tcx>, module_name: &str, kind: AllocatorKind, alloc_error_handler_kind: AllocatorKind) -> Self::Module {
fn codegen_allocator(
&self,
tcx: TyCtxt<'_>,
module_name: &str,
kind: AllocatorKind,
alloc_error_handler_kind: AllocatorKind,
) -> Self::Module {
let mut mods = GccContext {
context: new_context(tcx),
should_combine_object_files: false,
temp_dir: None,
};
unsafe { allocator::codegen(tcx, &mut mods, module_name, kind, alloc_error_handler_kind); }
unsafe {
allocator::codegen(tcx, &mut mods, module_name, kind, alloc_error_handler_kind);
}
mods
}
fn compile_codegen_unit(&self, tcx: TyCtxt<'_>, cgu_name: Symbol) -> (ModuleCodegen<Self::Module>, u64) {
fn compile_codegen_unit(
&self,
tcx: TyCtxt<'_>,
cgu_name: Symbol,
) -> (ModuleCodegen<Self::Module>, u64) {
base::compile_codegen_unit(tcx, cgu_name, self.target_info.clone())
}
fn target_machine_factory(&self, _sess: &Session, _opt_level: OptLevel, _features: &[String]) -> TargetMachineFactoryFn<Self> {
fn target_machine_factory(
&self,
_sess: &Session,
_opt_level: OptLevel,
_features: &[String],
) -> TargetMachineFactoryFn<Self> {
// TODO(antoyo): set opt level.
Arc::new(|_| {
Ok(())
})
Arc::new(|_| Ok(()))
}
}
@ -310,11 +357,19 @@ impl WriteBackendMethods for GccCodegenBackend {
type ThinData = ();
type ThinBuffer = ThinBuffer;
fn run_fat_lto(cgcx: &CodegenContext<Self>, modules: Vec<FatLtoInput<Self>>, cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>) -> Result<LtoModuleCodegen<Self>, FatalError> {
fn run_fat_lto(
cgcx: &CodegenContext<Self>,
modules: Vec<FatLtoInput<Self>>,
cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
) -> Result<LtoModuleCodegen<Self>, FatalError> {
back::lto::run_fat(cgcx, modules, cached_modules)
}
fn run_thin_lto(_cgcx: &CodegenContext<Self>, _modules: Vec<(String, Self::ThinBuffer)>, _cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>) -> Result<(Vec<LtoModuleCodegen<Self>>, Vec<WorkProduct>), FatalError> {
fn run_thin_lto(
_cgcx: &CodegenContext<Self>,
_modules: Vec<(String, Self::ThinBuffer)>,
_cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
) -> Result<(Vec<LtoModuleCodegen<Self>>, Vec<WorkProduct>), FatalError> {
unimplemented!();
}
@ -326,21 +381,37 @@ impl WriteBackendMethods for GccCodegenBackend {
unimplemented!()
}
unsafe fn optimize(_cgcx: &CodegenContext<Self>, _dcx: &DiagCtxt, module: &ModuleCodegen<Self::Module>, config: &ModuleConfig) -> Result<(), FatalError> {
unsafe fn optimize(
_cgcx: &CodegenContext<Self>,
_dcx: &DiagCtxt,
module: &ModuleCodegen<Self::Module>,
config: &ModuleConfig,
) -> Result<(), FatalError> {
module.module_llvm.context.set_optimization_level(to_gcc_opt_level(config.opt_level));
Ok(())
}
fn optimize_fat(_cgcx: &CodegenContext<Self>, _module: &mut ModuleCodegen<Self::Module>) -> Result<(), FatalError> {
fn optimize_fat(
_cgcx: &CodegenContext<Self>,
_module: &mut ModuleCodegen<Self::Module>,
) -> Result<(), FatalError> {
// TODO(antoyo)
Ok(())
}
unsafe fn optimize_thin(_cgcx: &CodegenContext<Self>, _thin: ThinModule<Self>) -> Result<ModuleCodegen<Self::Module>, FatalError> {
unsafe fn optimize_thin(
_cgcx: &CodegenContext<Self>,
_thin: ThinModule<Self>,
) -> Result<ModuleCodegen<Self::Module>, FatalError> {
unimplemented!();
}
unsafe fn codegen(cgcx: &CodegenContext<Self>, dcx: &DiagCtxt, module: ModuleCodegen<Self::Module>, config: &ModuleConfig) -> Result<CompiledModule, FatalError> {
unsafe fn codegen(
cgcx: &CodegenContext<Self>,
dcx: &DiagCtxt,
module: ModuleCodegen<Self::Module>,
config: &ModuleConfig,
) -> Result<CompiledModule, FatalError> {
back::write::codegen(cgcx, dcx, module, config)
}
@ -352,7 +423,11 @@ impl WriteBackendMethods for GccCodegenBackend {
unimplemented!();
}
fn run_link(cgcx: &CodegenContext<Self>, dcx: &DiagCtxt, modules: Vec<ModuleCodegen<Self::Module>>) -> Result<ModuleCodegen<Self::Module>, FatalError> {
fn run_link(
cgcx: &CodegenContext<Self>,
dcx: &DiagCtxt,
modules: Vec<ModuleCodegen<Self::Module>>,
) -> Result<ModuleCodegen<Self::Module>, FatalError> {
back::write::link(cgcx, dcx, modules)
}
}
@ -360,56 +435,57 @@ impl WriteBackendMethods for GccCodegenBackend {
/// This is the entrypoint for a hot plugged rustc_codegen_gccjit
#[no_mangle]
pub fn __rustc_codegen_backend() -> Box<dyn CodegenBackend> {
#[cfg(feature="master")]
#[cfg(feature = "master")]
let info = {
// Check whether the target supports 128-bit integers.
let context = Context::default();
Arc::new(Mutex::new(IntoDynSyncSend(context.get_target_info())))
};
#[cfg(not(feature="master"))]
#[cfg(not(feature = "master"))]
let info = Arc::new(Mutex::new(IntoDynSyncSend(TargetInfo {
supports_128bit_integers: AtomicBool::new(false),
})));
Box::new(GccCodegenBackend {
target_info: LockedTargetInfo { info },
})
Box::new(GccCodegenBackend { target_info: LockedTargetInfo { info } })
}
fn to_gcc_opt_level(optlevel: Option<OptLevel>) -> OptimizationLevel {
match optlevel {
None => OptimizationLevel::None,
Some(level) => {
match level {
OptLevel::No => OptimizationLevel::None,
OptLevel::Less => OptimizationLevel::Limited,
OptLevel::Default => OptimizationLevel::Standard,
OptLevel::Aggressive => OptimizationLevel::Aggressive,
OptLevel::Size | OptLevel::SizeMin => OptimizationLevel::Limited,
}
Some(level) => match level {
OptLevel::No => OptimizationLevel::None,
OptLevel::Less => OptimizationLevel::Limited,
OptLevel::Default => OptimizationLevel::Standard,
OptLevel::Aggressive => OptimizationLevel::Aggressive,
OptLevel::Size | OptLevel::SizeMin => OptimizationLevel::Limited,
},
}
}
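As a quick illustration of the mapping above (a sketch, not part of the source): -Os and -Oz fall back to Limited because libgccjit exposes no size-oriented optimization level.

// Sketch: restates the match above; assumes OptLevel and OptimizationLevel are in scope
// as they are in this module.
fn demo_opt_levels() {
    assert!(matches!(to_gcc_opt_level(None), OptimizationLevel::None));
    assert!(matches!(to_gcc_opt_level(Some(OptLevel::Default)), OptimizationLevel::Standard));
    // Size-optimized builds have no direct libgccjit equivalent.
    assert!(matches!(to_gcc_opt_level(Some(OptLevel::SizeMin)), OptimizationLevel::Limited));
}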
pub fn target_features(sess: &Session, allow_unstable: bool, target_info: &LockedTargetInfo) -> Vec<Symbol> {
sess
.target
pub fn target_features(
sess: &Session,
allow_unstable: bool,
target_info: &LockedTargetInfo,
) -> Vec<Symbol> {
sess.target
.supported_target_features()
.iter()
.filter_map(
|&(feature, gate)| {
if sess.is_nightly_build() || allow_unstable || gate.is_stable() { Some(feature) } else { None }
},
)
.filter_map(|&(feature, gate)| {
if sess.is_nightly_build() || allow_unstable || gate.is_stable() {
Some(feature)
} else {
None
}
})
.filter(|_feature| {
target_info.cpu_supports(_feature)
/*
adx, aes, avx, avx2, avx512bf16, avx512bitalg, avx512bw, avx512cd, avx512dq, avx512er, avx512f, avx512fp16, avx512ifma,
avx512pf, avx512vbmi, avx512vbmi2, avx512vl, avx512vnni, avx512vp2intersect, avx512vpopcntdq,
bmi1, bmi2, cmpxchg16b, ermsb, f16c, fma, fxsr, gfni, lzcnt, movbe, pclmulqdq, popcnt, rdrand, rdseed, rtm,
sha, sse, sse2, sse3, sse4.1, sse4.2, sse4a, ssse3, tbm, vaes, vpclmulqdq, xsave, xsavec, xsaveopt, xsaves
*/
adx, aes, avx, avx2, avx512bf16, avx512bitalg, avx512bw, avx512cd, avx512dq, avx512er, avx512f, avx512fp16, avx512ifma,
avx512pf, avx512vbmi, avx512vbmi2, avx512vl, avx512vnni, avx512vp2intersect, avx512vpopcntdq,
bmi1, bmi2, cmpxchg16b, ermsb, f16c, fma, fxsr, gfni, lzcnt, movbe, pclmulqdq, popcnt, rdrand, rdseed, rtm,
sha, sse, sse2, sse3, sse4.1, sse4.2, sse4a, ssse3, tbm, vaes, vpclmulqdq, xsave, xsavec, xsaveopt, xsaves
*/
})
.map(|feature| Symbol::intern(feature))
.map(Symbol::intern)
.collect()
}
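The two-stage filtering in target_features can be pictured on plain data (a toy sketch, independent of Session and the real feature tables): the first stage keeps a feature when the build is nightly, unstable features are allowed, or its gate is stable; the second drops anything the detected CPU lacks.

// Toy sketch of the same two-stage filter; the feature names and the fake CPU
// predicate below are illustrative only.
fn demo_filter(nightly: bool, allow_unstable: bool) -> Vec<&'static str> {
    let supported: &[(&'static str, bool)] = &[("sse2", true), ("avx512f", false)];
    let cpu_supports = |feature: &str| feature == "sse2"; // pretend the CPU only has SSE2
    supported
        .iter()
        .filter_map(|&(feature, stable)| {
            if nightly || allow_unstable || stable { Some(feature) } else { None }
        })
        .filter(|&feature| cpu_supports(feature))
        .collect()
}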

View File

@ -1,11 +1,11 @@
#[cfg(feature="master")]
use gccjit::{VarAttribute, FnAttribute};
#[cfg(feature = "master")]
use gccjit::{FnAttribute, VarAttribute};
use rustc_codegen_ssa::traits::PreDefineMethods;
use rustc_hir::def_id::{DefId, LOCAL_CRATE};
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_middle::mir::mono::{Linkage, Visibility};
use rustc_middle::ty::{self, Instance, TypeVisitableExt};
use rustc_middle::ty::layout::{FnAbiOf, LayoutOf};
use rustc_middle::ty::{self, Instance, TypeVisitableExt};
use crate::attributes;
use crate::base;
@ -13,8 +13,14 @@ use crate::context::CodegenCx;
use crate::type_of::LayoutGccExt;
impl<'gcc, 'tcx> PreDefineMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
#[cfg_attr(not(feature="master"), allow(unused_variables))]
fn predefine_static(&self, def_id: DefId, _linkage: Linkage, visibility: Visibility, symbol_name: &str) {
#[cfg_attr(not(feature = "master"), allow(unused_variables))]
fn predefine_static(
&self,
def_id: DefId,
_linkage: Linkage,
visibility: Visibility,
symbol_name: &str,
) {
let attrs = self.tcx.codegen_fn_attrs(def_id);
let instance = Instance::mono(self.tcx, def_id);
let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all());
@ -22,20 +28,26 @@ impl<'gcc, 'tcx> PreDefineMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
let is_tls = attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL);
let global = self.define_global(symbol_name, gcc_type, is_tls, attrs.link_section);
#[cfg(feature="master")]
global.add_attribute(VarAttribute::Visibility(base::visibility_to_gcc(visibility)));
#[cfg(feature = "master")]
global.add_string_attribute(VarAttribute::Visibility(base::visibility_to_gcc(visibility)));
// TODO(antoyo): set linkage.
self.instances.borrow_mut().insert(instance, global);
}
#[cfg_attr(not(feature="master"), allow(unused_variables))]
fn predefine_fn(&self, instance: Instance<'tcx>, linkage: Linkage, visibility: Visibility, symbol_name: &str) {
#[cfg_attr(not(feature = "master"), allow(unused_variables))]
fn predefine_fn(
&self,
instance: Instance<'tcx>,
linkage: Linkage,
visibility: Visibility,
symbol_name: &str,
) {
assert!(!instance.args.has_infer());
let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty());
self.linkage.set(base::linkage_to_gcc(linkage));
let decl = self.declare_fn(symbol_name, &fn_abi);
let decl = self.declare_fn(symbol_name, fn_abi);
//let attrs = self.tcx.codegen_fn_attrs(instance.def_id());
attributes::from_fn_attrs(self, decl, instance);
@ -48,11 +60,10 @@ impl<'gcc, 'tcx> PreDefineMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
&& linkage != Linkage::Private
&& self.tcx.is_compiler_builtins(LOCAL_CRATE)
{
#[cfg(feature="master")]
#[cfg(feature = "master")]
decl.add_attribute(FnAttribute::Visibility(gccjit::Visibility::Hidden));
}
else {
#[cfg(feature="master")]
} else {
#[cfg(feature = "master")]
decl.add_attribute(FnAttribute::Visibility(base::visibility_to_gcc(visibility)));
}

View File

@ -1,8 +1,8 @@
use gccjit::{RValue, Struct, Type};
use rustc_codegen_ssa::traits::{BaseTypeMethods, DerivedTypeMethods, TypeMembershipMethods};
use rustc_codegen_ssa::common::TypeKind;
use rustc_middle::{bug, ty};
use rustc_codegen_ssa::traits::{BaseTypeMethods, DerivedTypeMethods, TypeMembershipMethods};
use rustc_middle::ty::layout::TyAndLayout;
use rustc_middle::{bug, ty};
use rustc_target::abi::{AddressSpace, Align, Integer, Size};
use crate::common::TypeReflection;
@ -123,7 +123,7 @@ impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
fn type_f16(&self) -> Type<'gcc> {
unimplemented!("f16_f128")
}
fn type_f32(&self) -> Type<'gcc> {
self.float_type
}
@ -143,14 +143,18 @@ impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
fn type_struct(&self, fields: &[Type<'gcc>], packed: bool) -> Type<'gcc> {
let types = fields.to_vec();
if let Some(typ) = self.struct_types.borrow().get(fields) {
return typ.clone();
return *typ;
}
let fields: Vec<_> = fields.iter().enumerate()
.map(|(index, field)| self.context.new_field(None, *field, &format!("field{}_TODO", index)))
let fields: Vec<_> = fields
.iter()
.enumerate()
.map(|(index, field)| {
self.context.new_field(None, *field, format!("field{}_TODO", index))
})
.collect();
let typ = self.context.new_struct_type(None, "struct", &fields).as_type();
if packed {
#[cfg(feature="master")]
#[cfg(feature = "master")]
typ.set_packed();
}
self.struct_types.borrow_mut().insert(types, typ);
@ -160,17 +164,13 @@ impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
fn type_kind(&self, typ: Type<'gcc>) -> TypeKind {
if self.is_int_type_or_bool(typ) {
TypeKind::Integer
}
else if typ.is_compatible_with(self.float_type) {
} else if typ.is_compatible_with(self.float_type) {
TypeKind::Float
}
else if typ.is_compatible_with(self.double_type) {
} else if typ.is_compatible_with(self.double_type) {
TypeKind::Double
}
else if typ.is_vector() {
} else if typ.is_vector() {
TypeKind::Vector
}
else {
} else {
// TODO(antoyo): support other types.
TypeKind::Void
}
@ -187,14 +187,11 @@ impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
fn element_type(&self, ty: Type<'gcc>) -> Type<'gcc> {
if let Some(typ) = ty.dyncast_array() {
typ
}
else if let Some(vector_type) = ty.dyncast_vector() {
} else if let Some(vector_type) = ty.dyncast_vector() {
vector_type.get_element_type()
}
else if let Some(typ) = ty.get_pointee() {
} else if let Some(typ) = ty.get_pointee() {
typ
}
else {
} else {
unreachable!()
}
}
@ -208,11 +205,9 @@ impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
let f64 = self.context.new_type::<f64>();
if typ.is_compatible_with(f32) {
32
}
else if typ.is_compatible_with(f64) {
} else if typ.is_compatible_with(f64) {
64
}
else {
} else {
panic!("Cannot get width of float type {:?}", typ);
}
// TODO(antoyo): support other sizes.
@ -226,9 +221,9 @@ impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
value.get_type()
}
#[cfg_attr(feature="master", allow(unused_mut))]
#[cfg_attr(feature = "master", allow(unused_mut))]
fn type_array(&self, ty: Type<'gcc>, mut len: u64) -> Type<'gcc> {
#[cfg(not(feature="master"))]
#[cfg(not(feature = "master"))]
if let Some(struct_type) = ty.is_struct() {
if struct_type.get_field_count() == 0 {
// NOTE: since gccjit only supports i32 for the array size and libcore's tests use a
@ -252,12 +247,14 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
}
pub fn set_struct_body(&self, typ: Struct<'gcc>, fields: &[Type<'gcc>], packed: bool) {
let fields: Vec<_> = fields.iter().enumerate()
.map(|(index, field)| self.context.new_field(None, *field, &format!("field_{}", index)))
let fields: Vec<_> = fields
.iter()
.enumerate()
.map(|(index, field)| self.context.new_field(None, *field, format!("field_{}", index)))
.collect();
typ.set_fields(None, &fields);
if packed {
#[cfg(feature="master")]
#[cfg(feature = "master")]
typ.as_type().set_packed();
}
}
@ -267,7 +264,10 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
}
}
pub fn struct_fields<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout<'tcx>) -> (Vec<Type<'gcc>>, bool) {
pub fn struct_fields<'gcc, 'tcx>(
cx: &CodegenCx<'gcc, 'tcx>,
layout: TyAndLayout<'tcx>,
) -> (Vec<Type<'gcc>>, bool) {
let field_count = layout.fields.count();
let mut packed = false;
@ -275,7 +275,7 @@ pub fn struct_fields<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout
let mut prev_effective_align = layout.align.abi;
let mut result: Vec<_> = Vec::with_capacity(1 + field_count * 2);
for i in layout.fields.index_by_increasing_offset() {
let target_offset = layout.fields.offset(i as usize);
let target_offset = layout.fields.offset(i);
let field = layout.field(cx, i);
let effective_field_align =
layout.align.abi.min(field.align.abi).restrict_for_offset(target_offset);
@ -305,5 +305,4 @@ pub fn struct_fields<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout
(result, packed)
}
impl<'gcc, 'tcx> TypeMembershipMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
}
impl<'gcc, 'tcx> TypeMembershipMethods<'tcx> for CodegenCx<'gcc, 'tcx> {}

View File

@ -1,13 +1,16 @@
use std::fmt::Write;
use gccjit::{Struct, Type};
use crate::rustc_codegen_ssa::traits::{BaseTypeMethods, DerivedTypeMethods, LayoutTypeMethods};
use gccjit::{Struct, Type};
use rustc_middle::bug;
use rustc_middle::ty::{self, Ty, TypeVisitableExt};
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_target::abi::{self, Abi, Align, F16, F128, F32, F64, FieldsShape, Int, Integer, Pointer, PointeeInfo, Size, TyAbiInterface, Variants};
use rustc_middle::ty::{self, Ty, TypeVisitableExt};
use rustc_target::abi::call::{CastTarget, FnAbi, Reg};
use rustc_target::abi::{
self, Abi, Align, FieldsShape, Int, Integer, PointeeInfo, Pointer, Size, TyAbiInterface,
Variants, F128, F16, F32, F64,
};
use crate::abi::{FnAbiGcc, FnAbiGccExt, GccType};
use crate::context::CodegenCx;
@ -25,7 +28,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
}
}
#[cfg(feature="master")]
#[cfg(feature = "master")]
pub fn type_int_from_ty(&self, t: ty::IntTy) -> Type<'gcc> {
match t {
ty::IntTy::Isize => self.type_isize(),
@ -37,7 +40,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
}
}
#[cfg(feature="master")]
#[cfg(feature = "master")]
pub fn type_uint_from_ty(&self, t: ty::UintTy) -> Type<'gcc> {
match t {
ty::UintTy::Usize => self.type_isize(),
@ -56,7 +59,11 @@ impl<'a, 'tcx> CodegenCx<'a, 'tcx> {
}
}
fn uncached_gcc_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout<'tcx>, defer: &mut Option<(Struct<'gcc>, TyAndLayout<'tcx>)>) -> Type<'gcc> {
fn uncached_gcc_type<'gcc, 'tcx>(
cx: &CodegenCx<'gcc, 'tcx>,
layout: TyAndLayout<'tcx>,
defer: &mut Option<(Struct<'gcc>, TyAndLayout<'tcx>)>,
) -> Type<'gcc> {
match layout.abi {
Abi::Scalar(_) => bug!("handled elsewhere"),
Abi::Vector { ref element, count } => {
@ -70,7 +77,7 @@ fn uncached_gcc_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout
element
};
return cx.context.new_vector_type(element, count);
},
}
Abi::ScalarPair(..) => {
return cx.type_struct(
&[
@ -87,7 +94,12 @@ fn uncached_gcc_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout
// FIXME(eddyb) producing readable type names for trait objects can result
// in problematically distinct types due to HRTB and subtyping (see #47638).
// ty::Dynamic(..) |
ty::Adt(..) | ty::Closure(..) | ty::CoroutineClosure(..) | ty::Foreign(..) | ty::Coroutine(..) | ty::Str
ty::Adt(..)
| ty::Closure(..)
| ty::CoroutineClosure(..)
| ty::Foreign(..)
| ty::Coroutine(..)
| ty::Str
if !cx.sess().fewer_names() =>
{
let mut name = with_no_trimmed_paths!(layout.ty.to_string());
@ -125,22 +137,21 @@ fn uncached_gcc_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout
let gcc_type = cx.type_named_struct(name);
cx.set_struct_body(gcc_type, &[fill], packed);
gcc_type.as_type()
},
}
}
}
FieldsShape::Array { count, .. } => cx.type_array(layout.field(cx, 0).gcc_type(cx), count),
FieldsShape::Arbitrary { .. } =>
match name {
None => {
let (gcc_fields, packed) = struct_fields(cx, layout);
cx.type_struct(&gcc_fields, packed)
},
Some(ref name) => {
let gcc_type = cx.type_named_struct(name);
*defer = Some((gcc_type, layout));
gcc_type.as_type()
},
},
FieldsShape::Arbitrary { .. } => match name {
None => {
let (gcc_fields, packed) = struct_fields(cx, layout);
cx.type_struct(&gcc_fields, packed)
}
Some(ref name) => {
let gcc_type = cx.type_named_struct(name);
*defer = Some((gcc_type, layout));
gcc_type.as_type()
}
},
}
}
@ -149,9 +160,22 @@ pub trait LayoutGccExt<'tcx> {
fn is_gcc_scalar_pair(&self) -> bool;
fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
fn immediate_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
fn scalar_gcc_type_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, scalar: &abi::Scalar, offset: Size) -> Type<'gcc>;
fn scalar_pair_element_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, index: usize) -> Type<'gcc>;
fn pointee_info_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, offset: Size) -> Option<PointeeInfo>;
fn scalar_gcc_type_at<'gcc>(
&self,
cx: &CodegenCx<'gcc, 'tcx>,
scalar: &abi::Scalar,
offset: Size,
) -> Type<'gcc>;
fn scalar_pair_element_gcc_type<'gcc>(
&self,
cx: &CodegenCx<'gcc, 'tcx>,
index: usize,
) -> Type<'gcc>;
fn pointee_info_at<'gcc>(
&self,
cx: &CodegenCx<'gcc, 'tcx>,
offset: Size,
) -> Option<PointeeInfo>;
}
impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
@ -191,24 +215,24 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
if let Some(&ty) = cx.scalar_types.borrow().get(&self.ty) {
return ty;
}
let ty =
match *self.ty.kind() {
// NOTE: we cannot remove this match like in the LLVM codegen because the call
// to fn_ptr_backend_type handles the on-stack attribute.
// TODO(antoyo): find a less hackish way to handle the on-stack attribute.
ty::FnPtr(sig) => cx.fn_ptr_backend_type(&cx.fn_abi_of_fn_ptr(sig, ty::List::empty())),
_ => self.scalar_gcc_type_at(cx, scalar, Size::ZERO),
};
let ty = match *self.ty.kind() {
// NOTE: we cannot remove this match like in the LLVM codegen because the call
// to fn_ptr_backend_type handles the on-stack attribute.
// TODO(antoyo): find a less hackish way to handle the on-stack attribute.
ty::FnPtr(sig) => {
cx.fn_ptr_backend_type(&cx.fn_abi_of_fn_ptr(sig, ty::List::empty()))
}
_ => self.scalar_gcc_type_at(cx, scalar, Size::ZERO),
};
cx.scalar_types.borrow_mut().insert(self.ty, ty);
return ty;
}
// Check the cache.
let variant_index =
match self.variants {
Variants::Single { index } => Some(index),
_ => None,
};
let variant_index = match self.variants {
Variants::Single { index } => Some(index),
_ => None,
};
let cached_type = cx.types.borrow().get(&(self.ty, variant_index)).cloned();
if let Some(ty) = cached_type {
return ty;
@ -221,17 +245,15 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
let normal_ty = cx.tcx.erase_regions(self.ty);
let mut defer = None;
let ty =
if self.ty != normal_ty {
let mut layout = cx.layout_of(normal_ty);
if let Some(v) = variant_index {
layout = layout.for_variant(cx, v);
}
layout.gcc_type(cx)
let ty = if self.ty != normal_ty {
let mut layout = cx.layout_of(normal_ty);
if let Some(v) = variant_index {
layout = layout.for_variant(cx, v);
}
else {
uncached_gcc_type(cx, *self, &mut defer)
};
layout.gcc_type(cx)
} else {
uncached_gcc_type(cx, *self, &mut defer)
};
cx.types.borrow_mut().insert((self.ty, variant_index), ty);
@ -252,7 +274,12 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
self.gcc_type(cx)
}
fn scalar_gcc_type_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, scalar: &abi::Scalar, offset: Size) -> Type<'gcc> {
fn scalar_gcc_type_at<'gcc>(
&self,
cx: &CodegenCx<'gcc, 'tcx>,
scalar: &abi::Scalar,
offset: Size,
) -> Type<'gcc> {
match scalar.primitive() {
Int(i, true) => cx.type_from_integer(i),
Int(i, false) => cx.type_from_unsigned_integer(i),
@ -262,19 +289,21 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
F128 => cx.type_f128(),
Pointer(address_space) => {
// If we know the alignment, pick something better than i8.
let pointee =
if let Some(pointee) = self.pointee_info_at(cx, offset) {
cx.type_pointee_for_align(pointee.align)
}
else {
cx.type_i8()
};
let pointee = if let Some(pointee) = self.pointee_info_at(cx, offset) {
cx.type_pointee_for_align(pointee.align)
} else {
cx.type_i8()
};
cx.type_ptr_to_ext(pointee, address_space)
}
}
}
fn scalar_pair_element_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, index: usize) -> Type<'gcc> {
fn scalar_pair_element_gcc_type<'gcc>(
&self,
cx: &CodegenCx<'gcc, 'tcx>,
index: usize,
) -> Type<'gcc> {
// This must produce the same result for `repr(transparent)` wrappers as for the inner type!
// In other words, this should generally not look at the type at all, but only at the
// layout.
@ -295,13 +324,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
return cx.type_i1();
}
let offset =
if index == 0 {
Size::ZERO
}
else {
a.size(cx).align_to(b.align(cx).abi)
};
let offset = if index == 0 { Size::ZERO } else { a.size(cx).align_to(b.align(cx).abi) };
self.scalar_gcc_type_at(cx, scalar, offset)
}
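The invariant stated in scalar_pair_element_gcc_type above can be illustrated with ordinary Rust (a standalone sketch, not taken from the backend): a #[repr(transparent)] wrapper is laid out exactly like its single non-zero-sized field, so looking only at the layout is guaranteed to give the same answer for the wrapper and for the wrapped type.

#[allow(dead_code)]
#[repr(transparent)]
struct Slice<'a>(&'a [u8]);

fn main() {
    // Same size and alignment as the wrapped wide pointer.
    assert_eq!(core::mem::size_of::<Slice<'static>>(), core::mem::size_of::<&'static [u8]>());
    assert_eq!(core::mem::align_of::<Slice<'static>>(), core::mem::align_of::<&'static [u8]>());
}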
@ -334,7 +357,12 @@ impl<'gcc, 'tcx> LayoutTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
layout.is_gcc_scalar_pair()
}
fn scalar_pair_element_backend_type(&self, layout: TyAndLayout<'tcx>, index: usize, _immediate: bool) -> Type<'gcc> {
fn scalar_pair_element_backend_type(
&self,
layout: TyAndLayout<'tcx>,
index: usize,
_immediate: bool,
) -> Type<'gcc> {
layout.scalar_pair_element_gcc_type(self, index)
}
@ -352,12 +380,7 @@ impl<'gcc, 'tcx> LayoutTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
fn fn_decl_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Type<'gcc> {
// FIXME(antoyo): Should we do something with `FnAbiGcc::fn_attributes`?
let FnAbiGcc {
return_type,
arguments_type,
is_c_variadic,
..
} = fn_abi.gcc_type(self);
let FnAbiGcc { return_type, arguments_type, is_c_variadic, .. } = fn_abi.gcc_type(self);
self.context.new_function_pointer_type(None, return_type, &arguments_type, is_c_variadic)
}
}

View File

@ -1,479 +0,0 @@
#!/usr/bin/env bash
# TODO(antoyo): rewrite with cargo-make (or `just`) or something similar so that the sysroot is only rebuilt when needed?
set -e
#set -x
flags=
gcc_master_branch=1
channel="debug"
funcs=()
build_only=0
nb_parts=0
current_part=0
use_system_gcc=0
use_backend=0
cargo_target_dir=""
export CHANNEL='debug'
while [[ $# -gt 0 ]]; do
case $1 in
--release)
codegen_channel=release
channel="release"
export CHANNEL='release'
shift
;;
--release-sysroot)
sysroot_channel="--release"
shift
;;
--no-default-features)
gcc_master_branch=0
flags="$flags --no-default-features"
shift
;;
--features)
shift
flags="$flags --features $1"
shift
;;
"--test-rustc")
funcs+=(test_rustc)
shift
;;
"--test-successful-rustc")
funcs+=(test_successful_rustc)
shift
;;
"--test-failing-rustc")
funcs+=(test_failing_rustc)
shift
;;
"--test-libcore")
funcs+=(test_libcore)
shift
;;
"--clean-ui-tests")
funcs+=(clean_ui_tests)
shift
;;
"--clean")
funcs+=(clean)
shift
;;
"--std-tests")
funcs+=(std_tests)
shift
;;
"--asm-tests")
funcs+=(asm_tests)
shift
;;
"--extended-tests")
funcs+=(extended_sysroot_tests)
shift
;;
"--extended-rand-tests")
funcs+=(extended_rand_tests)
shift
;;
"--extended-regex-example-tests")
funcs+=(extended_regex_example_tests)
shift
;;
"--extended-regex-tests")
funcs+=(extended_regex_tests)
shift
;;
"--mini-tests")
funcs+=(mini_tests)
shift
;;
"--build-sysroot")
funcs+=(build_sysroot)
shift
;;
"--build")
build_only=1
shift
;;
"--use-system-gcc")
use_system_gcc=1
shift
;;
"--use-backend")
use_backend=1
shift
export BUILTIN_BACKEND=$1
shift
;;
"--out-dir")
shift
export CARGO_TARGET_DIR=$1
cargo_target_dir=$1
shift
;;
"--nb-parts")
shift
nb_parts=$1
shift
;;
"--current-part")
shift
current_part=$1
shift
;;
*)
echo "Unknown option $1"
exit 1
;;
esac
done
if [ -f ./gcc_path ]; then
export GCC_PATH=$(cat gcc_path)
elif (( $use_system_gcc == 1 )); then
echo 'Using system GCC'
else
echo 'Please put the path to your custom build of libgccjit in the file `gcc_path`, see Readme.md for details'
exit 1
fi
export LD_LIBRARY_PATH="$GCC_PATH"
export LIBRARY_PATH="$GCC_PATH"
if [[ $use_backend == 0 ]]; then
if [[ $channel == "release" ]]; then
CARGO_INCREMENTAL=1 cargo rustc --release $flags
else
echo $LD_LIBRARY_PATH
cargo rustc $flags
fi
fi
if (( $build_only == 1 )); then
echo "Since it's 'build-only', exiting..."
exit
fi
source config.sh
function clean() {
rm -r $cargo_target_dir || true
mkdir -p $cargo_target_dir/gccjit
}
function mini_tests() {
echo "[BUILD] mini_core"
crate_types="lib,dylib"
if [[ "$HOST_TRIPLE" != "$TARGET_TRIPLE" ]]; then
crate_types="lib"
fi
$RUST_CMD example/mini_core.rs --crate-name mini_core --crate-type $crate_types --target $TARGET_TRIPLE
echo "[BUILD] example"
$RUST_CMD example/example.rs --crate-type lib --target $TARGET_TRIPLE
echo "[AOT] mini_core_hello_world"
$RUST_CMD example/mini_core_hello_world.rs --crate-name mini_core_hello_world --crate-type bin -g --target $TARGET_TRIPLE
$RUN_WRAPPER $cargo_target_dir/mini_core_hello_world abc bcd
}
function build_sysroot() {
echo "[BUILD] sysroot"
time ./build_sysroot/build_sysroot.sh $sysroot_channel
}
# TODO(GuillaumeGomez): when rewriting in Rust, refactor with the code in tests/lang_tests_common.rs if possible.
function run_in_vm() {
vm_parent_dir=${CG_GCC_VM_DIR:-$(pwd)}
vm_dir=vm
exe=$1
exe_filename=$(basename $exe)
vm_home_dir=$vm_parent_dir/$vm_dir/home
vm_exe_path=$vm_home_dir/$exe_filename
inside_vm_exe_path=/home/$exe_filename
sudo cp $exe $vm_exe_path
shift
pushd $vm_parent_dir
sudo chroot $vm_dir qemu-m68k-static $inside_vm_exe_path $@
popd
}
function std_tests() {
echo "[AOT] arbitrary_self_types_pointers_and_wrappers"
$RUST_CMD example/arbitrary_self_types_pointers_and_wrappers.rs --crate-name arbitrary_self_types_pointers_and_wrappers --crate-type bin --target $TARGET_TRIPLE
$RUN_WRAPPER $cargo_target_dir/arbitrary_self_types_pointers_and_wrappers
echo "[AOT] alloc_system"
$RUST_CMD example/alloc_system.rs --crate-type lib --target "$TARGET_TRIPLE"
# FIXME: doesn't work on m68k.
if [[ "$HOST_TRIPLE" == "$TARGET_TRIPLE" ]]; then
echo "[AOT] alloc_example"
$RUST_CMD example/alloc_example.rs --crate-type bin --target $TARGET_TRIPLE
$RUN_WRAPPER $cargo_target_dir/alloc_example
fi
echo "[AOT] dst_field_align"
# FIXME(antoyo): Re-add -Zmir-opt-level=2 once rust-lang/rust#67529 is fixed.
$RUST_CMD example/dst-field-align.rs --crate-name dst_field_align --crate-type bin --target $TARGET_TRIPLE
$RUN_WRAPPER $cargo_target_dir/dst_field_align || (echo $?; false)
echo "[AOT] std_example"
std_flags="--cfg feature=\"master\""
if (( $gcc_master_branch == 0 )); then
std_flags=""
fi
$RUST_CMD example/std_example.rs --crate-type bin --target $TARGET_TRIPLE $std_flags
$RUN_WRAPPER $cargo_target_dir/std_example --target $TARGET_TRIPLE
echo "[AOT] subslice-patterns-const-eval"
$RUST_CMD example/subslice-patterns-const-eval.rs --crate-type bin $TEST_FLAGS --target $TARGET_TRIPLE
$RUN_WRAPPER $cargo_target_dir/subslice-patterns-const-eval
echo "[AOT] track-caller-attribute"
$RUST_CMD example/track-caller-attribute.rs --crate-type bin $TEST_FLAGS --target $TARGET_TRIPLE
$RUN_WRAPPER $cargo_target_dir/track-caller-attribute
echo "[BUILD] mod_bench"
$RUST_CMD example/mod_bench.rs --crate-type bin --target $TARGET_TRIPLE
}
function setup_rustc() {
rust_toolchain=$(cat rust-toolchain | grep channel | sed 's/channel = "\(.*\)"/\1/')
git clone https://github.com/rust-lang/rust.git || true
cd rust
git fetch
git checkout $($RUSTC -V | cut -d' ' -f3 | tr -d '(')
export RUSTFLAGS=
rm config.toml || true
cat > config.toml <<EOF
change-id = 115898
[rust]
codegen-backends = []
deny-warnings = false
verbose-tests = true
[build]
cargo = "$(rustup which cargo)"
local-rebuild = true
rustc = "$HOME/.rustup/toolchains/$rust_toolchain-$HOST_TRIPLE/bin/rustc"
[target.x86_64-unknown-linux-gnu]
llvm-filecheck = "`which FileCheck-10 || which FileCheck-11 || which FileCheck-12 || which FileCheck-13 || which FileCheck-14`"
[llvm]
download-ci-llvm = false
EOF
$RUSTC -V | cut -d' ' -f3 | tr -d '('
git checkout $($RUSTC -V | cut -d' ' -f3 | tr -d '(') tests
}
function asm_tests() {
setup_rustc
echo "[TEST] rustc asm test suite"
RUSTC_ARGS="-Zpanic-abort-tests -Csymbol-mangling-version=v0 -Zcodegen-backend="$(pwd)"/../target/"$CHANNEL"/librustc_codegen_gcc."$dylib_ext" --sysroot "$(pwd)"/../build_sysroot/sysroot -Cpanic=abort"
COMPILETEST_FORCE_STAGE0=1 ./x.py test --run always --stage 0 tests/assembly/asm --rustc-args "$RUSTC_ARGS"
}
# FIXME(antoyo): linker gives multiple definitions error on Linux
#echo "[BUILD] sysroot in release mode"
#./build_sysroot/build_sysroot.sh --release
function test_libcore() {
pushd build_sysroot/sysroot_src/library/core/tests
echo "[TEST] libcore"
rm -r ./target || true
../../../../../cargo.sh test
popd
}
#echo
#echo "[BENCH COMPILE] mod_bench"
#COMPILE_MOD_BENCH_INLINE="$RUSTC example/mod_bench.rs --crate-type bin -Zmir-opt-level=3 -O --crate-name mod_bench_inline"
#COMPILE_MOD_BENCH_LLVM_0="rustc example/mod_bench.rs --crate-type bin -Copt-level=0 -o $cargo_target_dir/mod_bench_llvm_0 -Cpanic=abort"
#COMPILE_MOD_BENCH_LLVM_1="rustc example/mod_bench.rs --crate-type bin -Copt-level=1 -o $cargo_target_dir/mod_bench_llvm_1 -Cpanic=abort"
#COMPILE_MOD_BENCH_LLVM_2="rustc example/mod_bench.rs --crate-type bin -Copt-level=2 -o $cargo_target_dir/mod_bench_llvm_2 -Cpanic=abort"
#COMPILE_MOD_BENCH_LLVM_3="rustc example/mod_bench.rs --crate-type bin -Copt-level=3 -o $cargo_target_dir/mod_bench_llvm_3 -Cpanic=abort"
## Use 100 runs, because a single compilation doesn't take more than ~150ms, so it isn't very slow
#hyperfine --runs ${COMPILE_RUNS:-100} "$COMPILE_MOD_BENCH_INLINE" "$COMPILE_MOD_BENCH_LLVM_0" "$COMPILE_MOD_BENCH_LLVM_1" "$COMPILE_MOD_BENCH_LLVM_2" "$COMPILE_MOD_BENCH_LLVM_3"
#echo
#echo "[BENCH RUN] mod_bench"
#hyperfine --runs ${RUN_RUNS:-10} $cargo_target_dir/mod_bench{,_inline} $cargo_target_dir/mod_bench_llvm_*
function extended_rand_tests() {
if (( $gcc_master_branch == 0 )); then
return
fi
pushd rand
cargo clean
echo "[TEST] rust-random/rand"
../cargo.sh test --workspace
popd
}
function extended_regex_example_tests() {
if (( $gcc_master_branch == 0 )); then
return
fi
pushd regex
echo "[TEST] rust-lang/regex example shootout-regex-dna"
cargo clean
export CG_RUSTFLAGS="--cap-lints warn" # newer aho_corasick versions throw a deprecation warning
# Make sure `[codegen mono items] start` doesn't poison the diff
../cargo.sh build --example shootout-regex-dna
cat examples/regexdna-input.txt \
| ../cargo.sh run --example shootout-regex-dna \
| grep -v "Spawned thread" > res.txt
diff -u res.txt examples/regexdna-output.txt
popd
}
function extended_regex_tests() {
if (( $gcc_master_branch == 0 )); then
return
fi
pushd regex
echo "[TEST] rust-lang/regex tests"
export CG_RUSTFLAGS="--cap-lints warn" # newer aho_corasick versions throw a deprecation warning
../cargo.sh test --tests -- --exclude-should-panic --test-threads 1 -Zunstable-options -q
popd
}
function extended_sysroot_tests() {
#pushd simple-raytracer
#echo "[BENCH COMPILE] ebobby/simple-raytracer"
#hyperfine --runs "${RUN_RUNS:-10}" --warmup 1 --prepare "cargo clean" \
#"RUSTC=rustc RUSTFLAGS='' cargo build" \
#"../cargo.sh build"
#echo "[BENCH RUN] ebobby/simple-raytracer"
#cp ./target/debug/main ./raytracer_cg_gcc
#hyperfine --runs "${RUN_RUNS:-10}" ./raytracer_cg_llvm ./raytracer_cg_gcc
#popd
extended_rand_tests
extended_regex_example_tests
extended_regex_tests
}
function test_rustc() {
echo
echo "[TEST] rust-lang/rust"
setup_rustc
for test in $(rg -i --files-with-matches "//(\[\w+\])?~|// error-pattern:|// build-fail|// run-fail|-Cllvm-args" tests/ui); do
rm $test
done
rm tests/ui/consts/const_cmp_type_id.rs
rm tests/ui/consts/issue-73976-monomorphic.rs
git checkout -- tests/ui/issues/auxiliary/issue-3136-a.rs # contains //~ERROR, but shouldn't be removed
rm -r tests/ui/{abi*,extern/,unsized-locals/,proc-macro/,threads-sendsync/,borrowck/,test*,consts/issue-miri-1910.rs} || true
rm tests/ui/mir/mir_heavy_promoted.rs # this test is oom-killed in the CI.
# Tests generating errors.
rm tests/ui/consts/issue-94675.rs
for test in $(rg --files-with-matches "thread" tests/ui); do
rm $test
done
git checkout tests/ui/type-alias-impl-trait/auxiliary/cross_crate_ice.rs
git checkout tests/ui/type-alias-impl-trait/auxiliary/cross_crate_ice2.rs
git checkout tests/ui/macros/rfc-2011-nicer-assert-messages/auxiliary/common.rs
git checkout tests/ui/imports/ambiguous-1.rs
git checkout tests/ui/imports/ambiguous-4-extern.rs
git checkout tests/ui/entry-point/auxiliary/bad_main_functions.rs
RUSTC_ARGS="$TEST_FLAGS -Csymbol-mangling-version=v0 -Zcodegen-backend="$(pwd)"/../target/"$CHANNEL"/librustc_codegen_gcc."$dylib_ext" --sysroot "$(pwd)"/../build_sysroot/sysroot"
if [ $# -eq 0 ]; then
# No argument supplied to the function. Doing nothing.
echo "No argument provided. Keeping all UI tests"
elif [ $1 = "0" ]; then
# Removing the failing tests.
xargs -a ../failing-ui-tests.txt -d'\n' rm
else
# Removing all tests.
find tests/ui -type f -name '*.rs' -not -path '*/auxiliary/*' -delete
# Putting back only the failing ones.
xargs -a ../failing-ui-tests.txt -d'\n' git checkout --
fi
if [ $nb_parts -gt 0 ]; then
echo "Splitting ui_test into $nb_parts parts (and running part $current_part)"
find tests/ui -type f -name '*.rs' -not -path "*/auxiliary/*" > ui_tests
# To ensure the resulting sub-files are always the same, we sort the content.
sort ui_tests -o ui_tests
count=$((`wc -l < ui_tests` / $nb_parts))
# Round the per-part count up: integer division truncates, so without the +1 the trailing
# tests would end up in an extra part that is never run (e.g. 103 tests in 4 parts gives
# count=25, bumped to 26, so the parts hold 26, 26, 26 and 25 tests).
count=$((count + 1))
split -d -l $count -a 1 ui_tests ui_tests.split
# Removing all tests.
find tests/ui -type f -name '*.rs' -not -path "*/auxiliary/*" -delete
# Putting back only the ones we want to test.
xargs -a "ui_tests.split$current_part" -d'\n' git checkout --
fi
echo "[TEST] rustc test suite"
COMPILETEST_FORCE_STAGE0=1 ./x.py test --run always --stage 0 tests/ui/ --rustc-args "$RUSTC_ARGS" # --target $TARGET_TRIPLE
}
function test_failing_rustc() {
test_rustc "1"
}
function test_successful_rustc() {
test_rustc "0"
}
function clean_ui_tests() {
find rust/build/x86_64-unknown-linux-gnu/test/ui/ -name stamp -delete
}
function all() {
clean
mini_tests
build_sysroot
std_tests
#asm_tests
test_libcore
extended_sysroot_tests
test_rustc
}
if [ ${#funcs[@]} -eq 0 ]; then
echo "No command passed, running '--all'..."
all
else
for t in ${funcs[@]}; do
$t
done
fi

View File

@ -1,6 +1,6 @@
tests/ui/lint/unsafe_code/forge_unsafe_block.rs
tests/ui/lint/unused-qualification-in-derive-expansion.rs
tests/ui/macro-quote-test.rs
tests/ui/macros/macro-quote-test.rs
tests/ui/macros/proc_macro.rs
tests/ui/panic-runtime/lto-unwind.rs
tests/ui/resolve/derive-macro-1.rs
@ -21,3 +21,12 @@ tests/ui/fmt/format-args-capture-issue-106408.rs
tests/ui/fmt/indoc-issue-106408.rs
tests/ui/hygiene/issue-77523-def-site-async-await.rs
tests/ui/inherent-impls-overlap-check/no-overlap.rs
tests/ui/enum-discriminant/issue-46519.rs
tests/ui/issues/issue-45731.rs
tests/ui/lint/test-allow-dead-extern-static-no-warning.rs
tests/ui/macros/macro-comma-behavior-rpass.rs
tests/ui/macros/rfc-2011-nicer-assert-messages/assert-with-custom-errors-does-not-create-unnecessary-code.rs
tests/ui/macros/rfc-2011-nicer-assert-messages/feature-gate-generic_assert.rs
tests/ui/macros/stringify.rs
tests/ui/reexport-test-harness-main.rs
tests/ui/rfcs/rfc-1937-termination-trait/termination-trait-in-test.rs

View File

@ -5,7 +5,7 @@ tests/ui/lto/lto-many-codegen-units.rs
tests/ui/lto/issue-100772.rs
tests/ui/lto/lto-rustc-loads-linker-plugin.rs
tests/ui/panic-runtime/lto-unwind.rs
tests/ui/sanitize/issue-111184-coroutine-witness.rs
tests/ui/sanitizer/issue-111184-cfi-coroutine-witness.rs
tests/ui/sepcomp/sepcomp-lib-lto.rs
tests/ui/lto/lto-opt-level-s.rs
tests/ui/lto/lto-opt-level-z.rs

View File

@ -13,7 +13,6 @@ tests/ui/sepcomp/sepcomp-extern.rs
tests/ui/sepcomp/sepcomp-fns-backwards.rs
tests/ui/sepcomp/sepcomp-fns.rs
tests/ui/sepcomp/sepcomp-statics.rs
tests/ui/simd/intrinsic/generic-arithmetic-pass.rs
tests/ui/asm/x86_64/may_unwind.rs
tests/ui/backtrace.rs
tests/ui/catch-unwind-bang.rs
@ -49,7 +48,6 @@ tests/ui/rfcs/rfc-1857-stabilize-drop-order/drop-order.rs
tests/ui/rfcs/rfc-2091-track-caller/std-panic-locations.rs
tests/ui/simd/issue-17170.rs
tests/ui/simd/issue-39720.rs
tests/ui/simd/issue-89193.rs
tests/ui/statics/issue-91050-1.rs
tests/ui/statics/issue-91050-2.rs
tests/ui/alloc-error/default-alloc-error-hook.rs
@ -57,7 +55,6 @@ tests/ui/coroutine/panic-safe.rs
tests/ui/issues/issue-14875.rs
tests/ui/issues/issue-29948.rs
tests/ui/panics/nested_panic_caught.rs
tests/ui/simd/intrinsic/generic-bswap-byte.rs
tests/ui/const_prop/ice-issue-111353.rs
tests/ui/process/println-with-broken-pipe.rs
tests/ui/panic-runtime/lto-abort.rs
@ -72,3 +69,8 @@ tests/ui/async-await/deep-futures-are-freeze.rs
tests/ui/closures/capture-unsized-by-ref.rs
tests/ui/coroutine/resume-after-return.rs
tests/ui/macros/rfc-2011-nicer-assert-messages/all-expr-kinds.rs
tests/ui/simd/masked-load-store.rs
tests/ui/simd/repr_packed.rs
tests/ui/async-await/in-trait/dont-project-to-specializable-projection.rs
tests/ui/consts/try-operator.rs
tests/ui/coroutine/unwind-abort-mix.rs

View File

@ -9,6 +9,7 @@ tests/ui/packed/packed-struct-vec.rs
tests/ui/packed/packed-tuple-struct-layout.rs
tests/ui/simd/array-type.rs
tests/ui/simd/intrinsic/float-minmax-pass.rs
tests/ui/simd/intrinsic/generic-arithmetic-pass.rs
tests/ui/simd/intrinsic/generic-arithmetic-saturating-pass.rs
tests/ui/simd/intrinsic/generic-as.rs
tests/ui/simd/intrinsic/generic-cast-pass.rs
@ -32,11 +33,16 @@ tests/ui/coroutine/size-moved-locals.rs
tests/ui/macros/rfc-2011-nicer-assert-messages/all-not-available-cases.rs
tests/ui/simd/intrinsic/generic-gather-pass.rs
tests/ui/simd/issue-85915-simd-ptrs.rs
tests/ui/simd/issue-89193.rs
tests/ui/issues/issue-68010-large-zst-consts.rs
tests/ui/rust-2018/proc-macro-crate-in-paths.rs
tests/ui/target-feature/missing-plusminus.rs
tests/ui/sse2.rs
tests/ui/codegen/issue-79865-llvm-miscompile.rs
tests/ui/intrinsics/intrinsics-integer.rs
tests/ui/std-backtrace.rs
tests/ui/mir/alignment/packed.rs
tests/ui/intrinsics/intrinsics-integer.rs
tests/ui/asm/x86_64/evex512-implicit-feature.rs
tests/ui/packed/dyn-trait.rs
tests/ui/packed/issue-118537-field-offset-ice.rs
tests/ui/stable-mir-print/basic_function.rs

View File

@ -5,6 +5,7 @@ use std::{
process::Command,
};
use boml::Toml;
use lang_tester::LangTester;
use tempfile::TempDir;
@ -20,20 +21,32 @@ pub fn main_inner(profile: Profile) {
let tempdir = TempDir::new().expect("temp dir");
let current_dir = current_dir().expect("current dir");
let current_dir = current_dir.to_str().expect("current dir").to_string();
let gcc_path = include_str!("../gcc_path");
let gcc_path = gcc_path.trim();
let toml = Toml::parse(include_str!("../config.toml")).expect("Failed to parse `config.toml`");
let gcc_path = if let Ok(gcc_path) = toml.get_string("gcc-path") {
PathBuf::from(gcc_path.to_string())
} else {
// then we try to retrieve it from the `target` folder.
let commit = include_str!("../libgccjit.version").trim();
Path::new("build/libgccjit").join(commit)
};
let gcc_path = Path::new(&gcc_path)
.canonicalize()
.expect("failed to get absolute path of `gcc-path`")
.display()
.to_string();
env::set_var("LD_LIBRARY_PATH", gcc_path);
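To make the lookup above concrete, here is a minimal sketch of the config.toml contract (the path below is hypothetical, and the fallback to build/libgccjit/<commit> applies when the key is absent); it only uses the boml calls already shown above.

use boml::Toml;

// Sketch only: a top-level `gcc-path` key pointing at a libgccjit build directory.
fn demo_gcc_path_lookup() {
    let toml = Toml::parse(r#"gcc-path = "/opt/libgccjit/lib""#)
        .expect("Failed to parse `config.toml`");
    assert!(toml.get_string("gcc-path").is_ok());
}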
fn rust_filter(filename: &Path) -> bool {
filename.extension().expect("extension").to_str().expect("to_str") == "rs"
fn rust_filter(path: &Path) -> bool {
path.is_file() && path.extension().expect("extension").to_str().expect("to_str") == "rs"
}
#[cfg(feature="master")]
#[cfg(feature = "master")]
fn filter(filename: &Path) -> bool {
rust_filter(filename)
}
#[cfg(not(feature="master"))]
#[cfg(not(feature = "master"))]
fn filter(filename: &Path) -> bool {
if let Some(filename) = filename.to_str() {
if filename.ends_with("gep.rs") {
@ -45,16 +58,17 @@ pub fn main_inner(profile: Profile) {
LangTester::new()
.test_dir("tests/run")
.test_file_filter(filter)
.test_extract(|source| {
let lines =
source.lines()
.skip_while(|l| !l.starts_with("//"))
.take_while(|l| l.starts_with("//"))
.map(|l| &l[2..])
.collect::<Vec<_>>()
.join("\n");
Some(lines)
.test_path_filter(filter)
.test_extract(|path| {
let lines = std::fs::read_to_string(path)
.expect("read file")
.lines()
.skip_while(|l| !l.starts_with("//"))
.take_while(|l| l.starts_with("//"))
.map(|l| &l[2..])
.collect::<Vec<_>>()
.join("\n");
lines
})
.test_cmds(move |path| {
// Test command 1: Compile `x.rs` into `tempdir/x`.
@ -62,19 +76,22 @@ pub fn main_inner(profile: Profile) {
exe.push(&tempdir);
exe.push(path.file_stem().expect("file_stem"));
let mut compiler = Command::new("rustc");
compiler.args(&[
compiler.args([
&format!("-Zcodegen-backend={}/target/debug/librustc_codegen_gcc.so", current_dir),
"--sysroot", &format!("{}/build_sysroot/sysroot/", current_dir),
"--sysroot",
&format!("{}/build_sysroot/sysroot/", current_dir),
"-Zno-parallel-llvm",
"-C", "link-arg=-lc",
"-o", exe.to_str().expect("to_str"),
"-C",
"link-arg=-lc",
"-o",
exe.to_str().expect("to_str"),
path.to_str().expect("to_str"),
]);
// TODO(antoyo): find a way to send this via a cli argument.
let test_target = std::env::var("CG_GCC_TEST_TARGET");
if let Ok(ref target) = test_target {
compiler.args(&["--target", &target]);
compiler.args(["--target", target]);
let linker = format!("{}-gcc", target);
compiler.args(&[format!("-Clinker={}", linker)]);
let mut env_path = std::env::var("PATH").unwrap_or_default();
@ -85,49 +102,38 @@ pub fn main_inner(profile: Profile) {
if let Some(flags) = option_env!("TEST_FLAGS") {
for flag in flags.split_whitespace() {
compiler.arg(&flag);
compiler.arg(flag);
}
}
match profile {
Profile::Debug => {}
Profile::Release => {
compiler.args(&[
"-C", "opt-level=3",
"-C", "lto=no",
]);
compiler.args(["-C", "opt-level=3", "-C", "lto=no"]);
}
}
// Test command 2: run `tempdir/x`.
if test_target.is_ok() {
let vm_parent_dir = std::env::var("CG_GCC_VM_DIR")
.map(|dir| PathBuf::from(dir))
.map(PathBuf::from)
.unwrap_or_else(|_| std::env::current_dir().unwrap());
let vm_dir = "vm";
let exe_filename = exe.file_name().unwrap();
let vm_home_dir = vm_parent_dir.join(vm_dir).join("home");
let vm_exe_path = vm_home_dir.join(exe_filename);
// FIXME(antoyo): panicking here makes the test pass.
let inside_vm_exe_path = PathBuf::from("/home").join(&exe_filename);
let inside_vm_exe_path = PathBuf::from("/home").join(exe_filename);
let mut copy = Command::new("sudo");
copy.arg("cp");
copy.args(&[&exe, &vm_exe_path]);
copy.args([&exe, &vm_exe_path]);
let mut runtime = Command::new("sudo");
runtime.args(&["chroot", vm_dir, "qemu-m68k-static"]);
runtime.args(["chroot", vm_dir, "qemu-m68k-static"]);
runtime.arg(inside_vm_exe_path);
runtime.current_dir(vm_parent_dir);
vec![
("Compiler", compiler),
("Copy", copy),
("Run-time", runtime),
]
}
else {
vec![("Compiler", compiler), ("Copy", copy), ("Run-time", runtime)]
} else {
let runtime = Command::new(exe);
vec![
("Compiler", compiler),
("Run-time", runtime),
]
vec![("Compiler", compiler), ("Run-time", runtime)]
}
})
.run();

View File

@ -2,7 +2,7 @@
set -e
echo "[BUILD] build system" 1>&2
cd build_system
pushd $(dirname "$0")/build_system > /dev/null
cargo build --release
cd ..
./build_system/target/release/y $@
popd > /dev/null
$(dirname "$0")/build_system/target/release/y $@

View File

@ -424,7 +424,10 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
PassMode::Indirect { attrs, meta_attrs: _, on_stack } => {
assert!(!on_stack);
let i = apply(attrs);
let sret = llvm::CreateStructRetAttr(cx.llcx, self.ret.layout.llvm_type(cx));
let sret = llvm::CreateStructRetAttr(
cx.llcx,
cx.type_array(cx.type_i8(), self.ret.layout.size.bytes()),
);
attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[sret]);
}
PassMode::Cast { cast, pad_i32: _ } => {
@ -437,7 +440,10 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
PassMode::Ignore => {}
PassMode::Indirect { attrs, meta_attrs: None, on_stack: true } => {
let i = apply(attrs);
let byval = llvm::CreateByValAttr(cx.llcx, arg.layout.llvm_type(cx));
let byval = llvm::CreateByValAttr(
cx.llcx,
cx.type_array(cx.type_i8(), arg.layout.size.bytes()),
);
attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[byval]);
}
PassMode::Direct(attrs)
@ -486,7 +492,10 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
PassMode::Indirect { attrs, meta_attrs: _, on_stack } => {
assert!(!on_stack);
let i = apply(bx.cx, attrs);
let sret = llvm::CreateStructRetAttr(bx.cx.llcx, self.ret.layout.llvm_type(bx));
let sret = llvm::CreateStructRetAttr(
bx.cx.llcx,
bx.cx.type_array(bx.cx.type_i8(), self.ret.layout.size.bytes()),
);
attributes::apply_to_callsite(callsite, llvm::AttributePlace::Argument(i), &[sret]);
}
PassMode::Cast { cast, pad_i32: _ } => {
@ -513,7 +522,10 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
PassMode::Ignore => {}
PassMode::Indirect { attrs, meta_attrs: None, on_stack: true } => {
let i = apply(bx.cx, attrs);
let byval = llvm::CreateByValAttr(bx.cx.llcx, arg.layout.llvm_type(bx));
let byval = llvm::CreateByValAttr(
bx.cx.llcx,
bx.cx.type_array(bx.cx.type_i8(), arg.layout.size.bytes()),
);
attributes::apply_to_callsite(
callsite,
llvm::AttributePlace::Argument(i),

View File

@ -77,8 +77,8 @@ pub struct CodegenCx<'ll, 'tcx> {
/// See <https://llvm.org/docs/LangRef.html#the-llvm-compiler-used-global-variable> for details
pub compiler_used_statics: RefCell<Vec<&'ll Value>>,
/// Mapping of non-scalar types to llvm types and field remapping if needed.
pub type_lowering: RefCell<FxHashMap<(Ty<'tcx>, Option<VariantIdx>), TypeLowering<'ll>>>,
/// Mapping of non-scalar types to llvm types.
pub type_lowering: RefCell<FxHashMap<(Ty<'tcx>, Option<VariantIdx>), &'ll Type>>,
/// Mapping of scalar types to llvm types.
pub scalar_lltypes: RefCell<FxHashMap<Ty<'tcx>, &'ll Type>>,
@ -105,15 +105,6 @@ pub struct CodegenCx<'ll, 'tcx> {
pub renamed_statics: RefCell<FxHashMap<DefId, &'ll Value>>,
}
pub struct TypeLowering<'ll> {
/// Associated LLVM type
pub lltype: &'ll Type,
/// If padding is used the slice maps fields from source order
/// to llvm order.
pub field_remapping: Option<SmallVec<[u32; 4]>>,
}
fn to_llvm_tls_model(tls_model: TlsModel) -> llvm::ThreadLocalMode {
match tls_model {
TlsModel::GeneralDynamic => llvm::ThreadLocalMode::GeneralDynamic,
@ -558,11 +549,12 @@ impl<'ll, 'tcx> MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> {
let tcx = self.tcx;
let llfn = match tcx.lang_items().eh_personality() {
Some(def_id) if name.is_none() => self.get_fn_addr(
ty::Instance::resolve(tcx, ty::ParamEnv::reveal_all(), def_id, ty::List::empty())
.unwrap()
.unwrap(),
),
Some(def_id) if name.is_none() => self.get_fn_addr(ty::Instance::expect_resolve(
tcx,
ty::ParamEnv::reveal_all(),
def_id,
ty::List::empty(),
)),
_ => {
let name = name.unwrap_or("rust_eh_personality");
if let Some(llfn) = self.get_declared_value(name) {

View File

@ -4,7 +4,7 @@ use std::path::Path;
use crate::fluent_generated as fluent;
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_errors::{Diag, DiagCtxt, EmissionGuarantee, IntoDiagnostic, Level};
use rustc_errors::{Diag, DiagCtxt, Diagnostic, EmissionGuarantee, Level};
use rustc_macros::{Diagnostic, Subdiagnostic};
use rustc_span::Span;
@ -99,9 +99,9 @@ pub(crate) struct DynamicLinkingWithLTO;
pub(crate) struct ParseTargetMachineConfig<'a>(pub LlvmError<'a>);
impl<G: EmissionGuarantee> IntoDiagnostic<'_, G> for ParseTargetMachineConfig<'_> {
fn into_diagnostic(self, dcx: &'_ DiagCtxt, level: Level) -> Diag<'_, G> {
let diag: Diag<'_, G> = self.0.into_diagnostic(dcx, level);
impl<G: EmissionGuarantee> Diagnostic<'_, G> for ParseTargetMachineConfig<'_> {
fn into_diag(self, dcx: &'_ DiagCtxt, level: Level) -> Diag<'_, G> {
let diag: Diag<'_, G> = self.0.into_diag(dcx, level);
let (message, _) = diag.messages.first().expect("`LlvmError` with no message");
let message = dcx.eagerly_translate_to_string(message.clone(), diag.args.iter());
Diag::new(dcx, level, fluent::codegen_llvm_parse_target_machine_config)
@ -119,8 +119,8 @@ pub(crate) struct TargetFeatureDisableOrEnable<'a> {
#[help(codegen_llvm_missing_features)]
pub(crate) struct MissingFeatures;
impl<G: EmissionGuarantee> IntoDiagnostic<'_, G> for TargetFeatureDisableOrEnable<'_> {
fn into_diagnostic(self, dcx: &'_ DiagCtxt, level: Level) -> Diag<'_, G> {
impl<G: EmissionGuarantee> Diagnostic<'_, G> for TargetFeatureDisableOrEnable<'_> {
fn into_diag(self, dcx: &'_ DiagCtxt, level: Level) -> Diag<'_, G> {
let mut diag = Diag::new(dcx, level, fluent::codegen_llvm_target_feature_disable_or_enable);
if let Some(span) = self.span {
diag.span(span);
@ -179,8 +179,8 @@ pub enum LlvmError<'a> {
pub(crate) struct WithLlvmError<'a>(pub LlvmError<'a>, pub String);
impl<G: EmissionGuarantee> IntoDiagnostic<'_, G> for WithLlvmError<'_> {
fn into_diagnostic(self, dcx: &'_ DiagCtxt, level: Level) -> Diag<'_, G> {
impl<G: EmissionGuarantee> Diagnostic<'_, G> for WithLlvmError<'_> {
fn into_diag(self, dcx: &'_ DiagCtxt, level: Level) -> Diag<'_, G> {
use LlvmError::*;
let msg_with_llvm_err = match &self.0 {
WriteOutput { .. } => fluent::codegen_llvm_write_output_with_llvm_err,
@ -198,7 +198,7 @@ impl<G: EmissionGuarantee> IntoDiagnostic<'_, G> for WithLlvmError<'_> {
ParseBitcode => fluent::codegen_llvm_parse_bitcode_with_llvm_err,
};
self.0
.into_diagnostic(dcx, level)
.into_diag(dcx, level)
.with_primary_message(msg_with_llvm_err)
.with_arg("llvm_err", self.1)
}

View File

@ -5,6 +5,7 @@ use crate::errors::{
};
use crate::llvm;
use libc::c_int;
use rustc_codegen_ssa::base::wants_wasm_eh;
use rustc_codegen_ssa::traits::PrintBackendInfo;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::small_c_str::SmallCStr;
@ -98,6 +99,10 @@ unsafe fn configure_llvm(sess: &Session) {
}
}
if wants_wasm_eh(sess) {
add("-wasm-enable-eh", false);
}
if sess.target.os == "emscripten" && sess.panic_strategy() == PanicStrategy::Unwind {
add("-enable-emscripten-cxx-exceptions", false);
}
@ -523,6 +528,10 @@ pub(crate) fn global_llvm_features(sess: &Session, diagnostics: bool) -> Vec<Str
.map(String::from),
);
if wants_wasm_eh(sess) && sess.panic_strategy() == PanicStrategy::Unwind {
features.push("+exception-handling".into());
}
// -Ctarget-features
let supported_features = sess.target.supported_target_features();
let mut featsmap = FxHashMap::default();

View File

@ -1,5 +1,4 @@
use crate::common::*;
use crate::context::TypeLowering;
use crate::type_::Type;
use rustc_codegen_ssa::traits::*;
use rustc_middle::bug;
@ -10,7 +9,6 @@ use rustc_target::abi::HasDataLayout;
use rustc_target::abi::{Abi, Align, FieldsShape};
use rustc_target::abi::{Int, Pointer, F128, F16, F32, F64};
use rustc_target::abi::{Scalar, Size, Variants};
use smallvec::{smallvec, SmallVec};
use std::fmt::Write;
@ -18,7 +16,6 @@ fn uncached_llvm_type<'a, 'tcx>(
cx: &CodegenCx<'a, 'tcx>,
layout: TyAndLayout<'tcx>,
defer: &mut Option<(&'a Type, TyAndLayout<'tcx>)>,
field_remapping: &mut Option<SmallVec<[u32; 4]>>,
) -> &'a Type {
match layout.abi {
Abi::Scalar(_) => bug!("handled elsewhere"),
@ -71,8 +68,7 @@ fn uncached_llvm_type<'a, 'tcx>(
FieldsShape::Array { count, .. } => cx.type_array(layout.field(cx, 0).llvm_type(cx), count),
FieldsShape::Arbitrary { .. } => match name {
None => {
let (llfields, packed, new_field_remapping) = struct_llfields(cx, layout);
*field_remapping = new_field_remapping;
let (llfields, packed) = struct_llfields(cx, layout);
cx.type_struct(&llfields, packed)
}
Some(ref name) => {
@ -87,7 +83,7 @@ fn uncached_llvm_type<'a, 'tcx>(
fn struct_llfields<'a, 'tcx>(
cx: &CodegenCx<'a, 'tcx>,
layout: TyAndLayout<'tcx>,
) -> (Vec<&'a Type>, bool, Option<SmallVec<[u32; 4]>>) {
) -> (Vec<&'a Type>, bool) {
debug!("struct_llfields: {:#?}", layout);
let field_count = layout.fields.count();
@ -95,7 +91,6 @@ fn struct_llfields<'a, 'tcx>(
let mut offset = Size::ZERO;
let mut prev_effective_align = layout.align.abi;
let mut result: Vec<_> = Vec::with_capacity(1 + field_count * 2);
let mut field_remapping = smallvec![0; field_count];
for i in layout.fields.index_by_increasing_offset() {
let target_offset = layout.fields.offset(i as usize);
let field = layout.field(cx, i);
@ -120,12 +115,10 @@ fn struct_llfields<'a, 'tcx>(
result.push(cx.type_padding_filler(padding, padding_align));
debug!(" padding before: {:?}", padding);
}
field_remapping[i] = result.len() as u32;
result.push(field.llvm_type(cx));
offset = target_offset + field.size;
prev_effective_align = effective_field_align;
}
let padding_used = result.len() > field_count;
if layout.is_sized() && field_count > 0 {
if offset > layout.size {
bug!("layout: {:#?} stride: {:?} offset: {:?}", layout, layout.size, offset);
@ -143,8 +136,7 @@ fn struct_llfields<'a, 'tcx>(
} else {
debug!("struct_llfields: offset: {:?} stride: {:?}", offset, layout.size);
}
let field_remapping = padding_used.then_some(field_remapping);
(result, packed, field_remapping)
(result, packed)
}
impl<'a, 'tcx> CodegenCx<'a, 'tcx> {
@ -224,7 +216,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
_ => None,
};
if let Some(llty) = cx.type_lowering.borrow().get(&(self.ty, variant_index)) {
return llty.lltype;
return llty;
}
debug!("llvm_type({:#?})", self);
@ -236,7 +228,6 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
let normal_ty = cx.tcx.erase_regions(self.ty);
let mut defer = None;
let mut field_remapping = None;
let llty = if self.ty != normal_ty {
let mut layout = cx.layout_of(normal_ty);
if let Some(v) = variant_index {
@ -244,22 +235,15 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
}
layout.llvm_type(cx)
} else {
uncached_llvm_type(cx, *self, &mut defer, &mut field_remapping)
uncached_llvm_type(cx, *self, &mut defer)
};
debug!("--> mapped {:#?} to llty={:?}", self, llty);
cx.type_lowering
.borrow_mut()
.insert((self.ty, variant_index), TypeLowering { lltype: llty, field_remapping });
cx.type_lowering.borrow_mut().insert((self.ty, variant_index), llty);
if let Some((llty, layout)) = defer {
let (llfields, packed, new_field_remapping) = struct_llfields(cx, layout);
let (llfields, packed) = struct_llfields(cx, layout);
cx.set_struct_body(llty, &llfields, packed);
cx.type_lowering
.borrow_mut()
.get_mut(&(self.ty, variant_index))
.unwrap()
.field_remapping = new_field_remapping;
}
llty
}

View File

@ -27,7 +27,7 @@ use crate::errors;
use rustc_ast as ast;
use rustc_data_structures::unord::UnordMap;
use rustc_data_structures::unord::UnordSet;
use rustc_errors::{DiagArgValue, IntoDiagnosticArg};
use rustc_errors::{DiagArgValue, IntoDiagArg};
use rustc_hir::def_id::LOCAL_CRATE;
use rustc_middle::mir::mono::CodegenUnitNameBuilder;
use rustc_middle::ty::TyCtxt;
@ -205,8 +205,8 @@ impl fmt::Display for CguReuse {
}
}
impl IntoDiagnosticArg for CguReuse {
fn into_diagnostic_arg(self) -> DiagArgValue {
impl IntoDiagArg for CguReuse {
fn into_diag_arg(self) -> DiagArgValue {
DiagArgValue::Str(Cow::Owned(self.to_string()))
}
}
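
The same rename pattern applies here: `IntoDiagnosticArg`/`into_diagnostic_arg` becomes `IntoDiagArg`/`into_diag_arg`. A sketch for any other displayable value, where only the wrapper type is hypothetical and the trait, `DiagArgValue`, and `Cow` conversion come from the diff above:

```
// Hypothetical wrapper; not part of this diff.
struct JobCount(usize);

impl IntoDiagArg for JobCount {
    fn into_diag_arg(self) -> DiagArgValue {
        // Same conversion as the CguReuse impl above.
        DiagArgValue::Str(Cow::Owned(self.0.to_string()))
    }
}
```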

View File

@ -24,6 +24,7 @@ use rustc_span::symbol::Symbol;
use rustc_target::spec::crt_objects::CrtObjects;
use rustc_target::spec::LinkSelfContainedComponents;
use rustc_target::spec::LinkSelfContainedDefault;
use rustc_target::spec::LinkerFlavorCli;
use rustc_target::spec::{Cc, LinkOutputKind, LinkerFlavor, Lld, PanicStrategy};
use rustc_target::spec::{RelocModel, RelroLevel, SanitizerSet, SplitDebuginfo};
@ -1350,6 +1351,7 @@ pub fn linker_and_flavor(sess: &Session) -> (PathBuf, LinkerFlavor) {
}
}
LinkerFlavor::Bpf => "bpf-linker",
LinkerFlavor::Llbc => "llvm-bitcode-linker",
LinkerFlavor::Ptx => "rust-ptx-linker",
}),
flavor,
@ -1367,8 +1369,17 @@ pub fn linker_and_flavor(sess: &Session) -> (PathBuf, LinkerFlavor) {
// linker and linker flavor specified via command line have precedence over what the target
// specification specifies
let linker_flavor =
sess.opts.cg.linker_flavor.map(|flavor| sess.target.linker_flavor.with_cli_hints(flavor));
let linker_flavor = match sess.opts.cg.linker_flavor {
// The non-target-specific linker flavors can be translated directly to a LinkerFlavor
Some(LinkerFlavorCli::Llbc) => Some(LinkerFlavor::Llbc),
Some(LinkerFlavorCli::Ptx) => Some(LinkerFlavor::Ptx),
// The linker flavors that correspond to targets need logic that keeps the base LinkerFlavor
_ => sess
.opts
.cg
.linker_flavor
.map(|flavor| sess.target.linker_flavor.with_cli_hints(flavor)),
};
if let Some(ret) = infer_from(sess, sess.opts.cg.linker.clone(), linker_flavor) {
return ret;
}
@ -2338,8 +2349,12 @@ fn add_order_independent_options(
});
}
if flavor == LinkerFlavor::Ptx {
// Provide the linker with fallback to internal `target-cpu`.
if flavor == LinkerFlavor::Llbc {
cmd.arg("--target");
cmd.arg(sess.target.llvm_target.as_ref());
cmd.arg("--target-cpu");
cmd.arg(&codegen_results.crate_info.target_cpu);
} else if flavor == LinkerFlavor::Ptx {
cmd.arg("--fallback-arch");
cmd.arg(&codegen_results.crate_info.target_cpu);
} else if flavor == LinkerFlavor::Bpf {
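
Concretely, the new `Llbc` branch forwards the LLVM target triple and CPU to the linker, so an NVPTX link would gain arguments along these lines (illustrative values; the real ones come from `sess.target.llvm_target` and `crate_info.target_cpu`):

```
--target nvptx64-nvidia-cuda --target-cpu sm_70
```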

View File

@ -153,6 +153,7 @@ pub fn get_linker<'a>(
LinkerFlavor::Msvc(..) => Box::new(MsvcLinker { cmd, sess }) as Box<dyn Linker>,
LinkerFlavor::EmCc => Box::new(EmLinker { cmd, sess }) as Box<dyn Linker>,
LinkerFlavor::Bpf => Box::new(BpfLinker { cmd, sess }) as Box<dyn Linker>,
LinkerFlavor::Llbc => Box::new(LlbcLinker { cmd, sess }) as Box<dyn Linker>,
LinkerFlavor::Ptx => Box::new(PtxLinker { cmd, sess }) as Box<dyn Linker>,
}
}
@ -1824,7 +1825,7 @@ impl<'a> Linker for PtxLinker<'a> {
}
Lto::No => {}
};
}
}
fn output_filename(&mut self, path: &Path) {
@ -1862,6 +1863,104 @@ impl<'a> Linker for PtxLinker<'a> {
fn linker_plugin_lto(&mut self) {}
}
/// The `self-contained` LLVM bitcode linker
pub struct LlbcLinker<'a> {
cmd: Command,
sess: &'a Session,
}
impl<'a> Linker for LlbcLinker<'a> {
fn cmd(&mut self) -> &mut Command {
&mut self.cmd
}
fn set_output_kind(&mut self, _output_kind: LinkOutputKind, _out_filename: &Path) {}
fn link_dylib_by_name(&mut self, _name: &str, _verbatim: bool, _as_needed: bool) {
panic!("external dylibs not supported")
}
fn link_staticlib_by_name(
&mut self,
_name: &str,
_verbatim: bool,
_whole_archive: bool,
_search_paths: &SearchPaths,
) {
panic!("staticlibs not supported")
}
fn link_staticlib_by_path(&mut self, path: &Path, _whole_archive: bool) {
self.cmd.arg(path);
}
fn include_path(&mut self, path: &Path) {
self.cmd.arg("-L").arg(path);
}
fn debuginfo(&mut self, _strip: Strip, _: &[PathBuf]) {
self.cmd.arg("--debug");
}
fn add_object(&mut self, path: &Path) {
self.cmd.arg(path);
}
fn optimize(&mut self) {
match self.sess.opts.optimize {
OptLevel::No => "-O0",
OptLevel::Less => "-O1",
OptLevel::Default => "-O2",
OptLevel::Aggressive => "-O3",
OptLevel::Size => "-Os",
OptLevel::SizeMin => "-Oz",
};
}
fn output_filename(&mut self, path: &Path) {
self.cmd.arg("-o").arg(path);
}
fn framework_path(&mut self, _path: &Path) {
panic!("frameworks not supported")
}
fn full_relro(&mut self) {}
fn partial_relro(&mut self) {}
fn no_relro(&mut self) {}
fn gc_sections(&mut self, _keep_metadata: bool) {}
fn no_gc_sections(&mut self) {}
fn pgo_gen(&mut self) {}
fn no_crt_objects(&mut self) {}
fn no_default_libraries(&mut self) {}
fn control_flow_guard(&mut self) {}
fn ehcont_guard(&mut self) {}
fn export_symbols(&mut self, _tmpdir: &Path, _crate_type: CrateType, symbols: &[String]) {
match _crate_type {
CrateType::Cdylib => {
for sym in symbols {
self.cmd.arg("--export-symbol").arg(sym);
}
}
_ => (),
}
}
fn subsystem(&mut self, _subsystem: &str) {}
fn linker_plugin_lto(&mut self) {}
}
pub struct BpfLinker<'a> {
cmd: Command,
sess: &'a Session,
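
Putting the `LlbcLinker` methods above together, a `cdylib` link of one object file with two exported symbols would assemble an `llvm-bitcode-linker` command roughly like the one below. Every path and symbol name is made up for illustration, and the trailing `--target`/`--target-cpu` pair is appended by `link.rs` in the previous file rather than by this wrapper.

```
llvm-bitcode-linker -L /path/to/lib crate.o \
    --export-symbol kernel_a --export-symbol kernel_b \
    -o crate.out --target nvptx64-nvidia-cuda --target-cpu sm_70
```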

View File

@ -374,6 +374,10 @@ pub struct CodegenContext<B: WriteBackendMethods> {
pub incr_comp_session_dir: Option<PathBuf>,
/// Channel back to the main control thread to send messages to
pub coordinator_send: Sender<Box<dyn Any + Send>>,
/// `true` if the codegen should be run in parallel.
///
/// Depends on [`CodegenBackend::supports_parallel()`] and `-Zno_parallel_backend`.
pub parallel: bool,
}
impl<B: WriteBackendMethods> CodegenContext<B> {
@ -1152,6 +1156,7 @@ fn start_executing_work<B: ExtraBackendMethods>(
target_arch: tcx.sess.target.arch.to_string(),
split_debuginfo: tcx.sess.split_debuginfo(),
split_dwarf_kind: tcx.sess.opts.unstable_opts.split_dwarf_kind,
parallel: backend.supports_parallel() && !sess.opts.unstable_opts.no_parallel_backend,
};
// This is the "main loop" of parallel work happening for parallel codegen.
@ -1422,7 +1427,7 @@ fn start_executing_work<B: ExtraBackendMethods>(
.binary_search_by_key(&cost, |&(_, cost)| cost)
.unwrap_or_else(|e| e);
work_items.insert(insertion_index, (work, cost));
if !cgcx.opts.unstable_opts.no_parallel_llvm {
if cgcx.parallel {
helper.request_token();
}
}
@ -1545,7 +1550,7 @@ fn start_executing_work<B: ExtraBackendMethods>(
};
work_items.insert(insertion_index, (llvm_work_item, cost));
if !cgcx.opts.unstable_opts.no_parallel_llvm {
if cgcx.parallel {
helper.request_token();
}
assert_eq!(main_thread_state, MainThreadState::Codegenning);

Some files were not shown because too many files have changed in this diff