mirror of https://github.com/rust-lang/rust.git
synced 2024-11-01 15:01:51 +00:00

commit bfaf6b07ee
Cargo.lock (14 changed lines)

@@ -593,7 +593,6 @@ dependencies = [
  "syn 2.0.55",
  "tempfile",
  "termize",
- "tester",
  "tokio",
  "toml 0.7.8",
  "ui_test 0.22.2",
@@ -5508,19 +5507,6 @@ dependencies = [
  "std",
 ]
 
-[[package]]
-name = "tester"
-version = "0.9.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "89e8bf7e0eb2dd7b4228cc1b6821fc5114cd6841ae59f652a85488c016091e5f"
-dependencies = [
- "cfg-if",
- "getopts",
- "libc",
- "num_cpus",
- "term",
-]
-
 [[package]]
 name = "thin-vec"
 version = "0.2.13"
README.md (37 changed lines)

@@ -1,27 +1,36 @@
 # The Rust Programming Language
 
+<div align="center">
+  <picture>
+    <source media="(prefers-color-scheme: dark)" srcset="https://raw.githubusercontent.com/rust-lang/www.rust-lang.org/master/static/images/rust-social-wide-dark.svg">
+    <source media="(prefers-color-scheme: light)" srcset="https://raw.githubusercontent.com/rust-lang/www.rust-lang.org/master/static/images/rust-social-wide-light.svg">
+    <img alt="The Rust Programming Language: A language empowering everyone to build reliable and efficient software"
+         src="https://raw.githubusercontent.com/rust-lang/www.rust-lang.org/master/static/images/rust-social-wide-light.svg"
+         width="50%">
+  </picture>
+
 [![Rust Community](https://img.shields.io/badge/Rust_Community%20-Join_us-brightgreen?style=plastic&logo=rust)](https://www.rust-lang.org/community)
+
+[Website][Rust] | [Getting started] | [Learn] | [Documentation] | [Contributing]
+</div>
 
 This is the main source code repository for [Rust]. It contains the compiler,
 standard library, and documentation.
 
 [Rust]: https://www.rust-lang.org/
+[Getting Started]: https://www.rust-lang.org/learn/get-started
+[Learn]: https://www.rust-lang.org/learn
+[Documentation]: https://www.rust-lang.org/learn#learn-use
+[Contributing]: CONTRIBUTING.md
 
 **Note: this README is for _users_ rather than _contributors_.**
 If you wish to _contribute_ to the compiler, you should read
 [CONTRIBUTING.md](CONTRIBUTING.md) instead.
 
-<details>
-<summary>Table of Contents</summary>
+## Why Rust?
 
-- [Quick Start](#quick-start)
-- [Installing from Source](#installing-from-source)
-- [Getting Help](#getting-help)
-- [Contributing](#contributing)
-- [License](#license)
-- [Trademark](#trademark)
+- **Performance:** Fast and memory-efficient, suitable for critical services and embedded devices, and integrates easily with other languages.
 
-</details>
+- **Reliability:** Our rich type system and ownership model ensure memory and thread safety, reducing bugs at compile-time.
+
+- **Productivity:** Comprehensive documentation, a compiler committed to providing great diagnostics, and advanced tooling including package manager and build tool ([Cargo]), auto-formatter ([rustfmt]), linter ([Clippy]) and editor support ([rust-analyzer]).
+
+[Cargo]: https://github.com/rust-lang/cargo
+[rustfmt]: https://github.com/rust-lang/rustfmt
+[Clippy]: https://github.com/rust-lang/rust-clippy
+[rust-analyzer]: https://github.com/rust-lang/rust-analyzer
 
 ## Quick Start
@@ -2484,6 +2484,14 @@ pub enum CoroutineKind {
 }
 
 impl CoroutineKind {
+    pub fn span(self) -> Span {
+        match self {
+            CoroutineKind::Async { span, .. } => span,
+            CoroutineKind::Gen { span, .. } => span,
+            CoroutineKind::AsyncGen { span, .. } => span,
+        }
+    }
+
     pub fn is_async(self) -> bool {
         matches!(self, CoroutineKind::Async { .. })
     }
@@ -19,7 +19,7 @@ struct NodeCollector<'a, 'hir> {
     parenting: LocalDefIdMap<ItemLocalId>,
 
     /// The parent of this node
-    parent_node: hir::ItemLocalId,
+    parent_node: ItemLocalId,
 
     owner: OwnerId,
 }
@@ -31,17 +31,16 @@ pub(super) fn index_hir<'hir>(
     bodies: &SortedMap<ItemLocalId, &'hir Body<'hir>>,
     num_nodes: usize,
 ) -> (IndexVec<ItemLocalId, ParentedNode<'hir>>, LocalDefIdMap<ItemLocalId>) {
-    let zero_id = ItemLocalId::ZERO;
-    let err_node = ParentedNode { parent: zero_id, node: Node::Err(item.span()) };
+    let err_node = ParentedNode { parent: ItemLocalId::ZERO, node: Node::Err(item.span()) };
     let mut nodes = IndexVec::from_elem_n(err_node, num_nodes);
     // This node's parent should never be accessed: the owner's parent is computed by the
     // hir_owner_parent query. Make it invalid (= ItemLocalId::MAX) to force an ICE whenever it is
     // used.
-    nodes[zero_id] = ParentedNode { parent: ItemLocalId::INVALID, node: item.into() };
+    nodes[ItemLocalId::ZERO] = ParentedNode { parent: ItemLocalId::INVALID, node: item.into() };
     let mut collector = NodeCollector {
         tcx,
         owner: item.def_id(),
-        parent_node: zero_id,
+        parent_node: ItemLocalId::ZERO,
         nodes,
         bodies,
         parenting: Default::default(),
@@ -112,7 +111,7 @@ impl<'a, 'hir> NodeCollector<'a, 'hir> {
     }
 
     fn insert_nested(&mut self, item: LocalDefId) {
-        if self.parent_node.as_u32() != 0 {
+        if self.parent_node != ItemLocalId::ZERO {
             self.parenting.insert(item, self.parent_node);
         }
     }
@@ -68,7 +68,7 @@ ast_passes_extern_block_suggestion = if you meant to declare an externally defin
 
 ast_passes_extern_fn_qualifiers = functions in `extern` blocks cannot have qualifiers
     .label = in this `extern` block
-    .suggestion = remove the qualifiers
+    .suggestion = remove this qualifier
 
 ast_passes_extern_item_ascii = items in `extern` blocks cannot use non-ascii identifiers
     .label = in this `extern` block
@@ -514,13 +514,32 @@ impl<'a> AstValidator<'a> {
     }
 
     /// An `fn` in `extern { ... }` cannot have qualifiers, e.g. `async fn`.
-    fn check_foreign_fn_headerless(&self, ident: Ident, span: Span, header: FnHeader) {
-        if header.has_qualifiers() {
+    fn check_foreign_fn_headerless(
+        &self,
+        // Deconstruct to ensure exhaustiveness
+        FnHeader { unsafety, coroutine_kind, constness, ext }: FnHeader,
+    ) {
+        let report_err = |span| {
             self.dcx().emit_err(errors::FnQualifierInExtern {
-                span: ident.span,
+                span: span,
                 block: self.current_extern_span(),
-                sugg_span: span.until(ident.span.shrink_to_lo()),
             });
-        }
+        };
+        match unsafety {
+            Unsafe::Yes(span) => report_err(span),
+            Unsafe::No => (),
+        }
+        match coroutine_kind {
+            Some(knd) => report_err(knd.span()),
+            None => (),
+        }
+        match constness {
+            Const::Yes(span) => report_err(span),
+            Const::No => (),
+        }
+        match ext {
+            Extern::None => (),
+            Extern::Implicit(span) | Extern::Explicit(_, span) => report_err(span),
+        }
     }
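For illustration only (not part of the commit), a minimal sketch of the kind of item this validator rejects; after this change each qualifier is reported at its own span with a "remove this qualifier" suggestion:

```rust
// Intentionally erroneous: qualifiers are not allowed on functions
// declared inside `extern` blocks.
extern "C" {
    async fn f(); // error: functions in `extern` blocks cannot have qualifiers
}
```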
@@ -1145,7 +1164,7 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
             ForeignItemKind::Fn(box Fn { defaultness, sig, body, .. }) => {
                 self.check_defaultness(fi.span, *defaultness);
                 self.check_foreign_fn_bodyless(fi.ident, body.as_deref());
-                self.check_foreign_fn_headerless(fi.ident, fi.span, sig.header);
+                self.check_foreign_fn_headerless(sig.header);
                 self.check_foreign_item_ascii_only(fi.ident);
             }
             ForeignItemKind::TyAlias(box TyAlias {
@@ -270,11 +270,10 @@ pub struct FnBodyInExtern {
 #[diag(ast_passes_extern_fn_qualifiers)]
 pub struct FnQualifierInExtern {
     #[primary_span]
+    #[suggestion(code = "", applicability = "maybe-incorrect")]
     pub span: Span,
     #[label]
     pub block: Span,
-    #[suggestion(code = "fn ", applicability = "maybe-incorrect", style = "verbose")]
-    pub sugg_span: Span,
 }
 
 #[derive(Diagnostic)]
@@ -13,4 +13,7 @@ task:
     - ./y.sh prepare
   test_script:
     - . $HOME/.cargo/env
+    # Disabling incr comp reduces cache size and incr comp doesn't save as much
+    # on CI anyway.
+    - export CARGO_BUILD_INCREMENTAL=false
     - ./y.sh test
@@ -3,6 +3,8 @@ name: Abi-cafe
 on:
   - push
 
+permissions: {}
+
 jobs:
   abi_cafe:
     runs-on: ${{ matrix.os }}
@@ -4,6 +4,20 @@ on:
   - push
   - pull_request
 
+defaults:
+  run:
+    shell: bash
+
+permissions: {}
+
+env:
+  # Disabling incr comp reduces cache size and incr comp doesn't save as much
+  # on CI anyway.
+  CARGO_BUILD_INCREMENTAL: false
+  # Rust's CI denies warnings. Deny them here too to ensure subtree syncs don't
+  # fail because of warnings.
+  RUSTFLAGS: "-Dwarnings"
+
 jobs:
   rustfmt:
     runs-on: ubuntu-latest
@@ -23,15 +37,15 @@ jobs:
           cargo fmt --check
           rustfmt --check build_system/main.rs
           rustfmt --check example/*
+          rustfmt --check scripts/*.rs
 
 
   test:
     runs-on: ${{ matrix.os }}
     timeout-minutes: 60
 
-    defaults:
-      run:
-        shell: bash
+    env:
+      CG_CLIF_EXPENSIVE_CHECKS: 1
 
     strategy:
       fail-fast: false
@@ -47,15 +61,19 @@ jobs:
           - os: ubuntu-latest
            env:
              TARGET_TRIPLE: x86_64-pc-windows-gnu
+           apt_deps: gcc-mingw-w64-x86-64 wine-stable
          - os: ubuntu-latest
            env:
              TARGET_TRIPLE: aarch64-unknown-linux-gnu
+           apt_deps: gcc-aarch64-linux-gnu qemu-user
          - os: ubuntu-latest
            env:
              TARGET_TRIPLE: s390x-unknown-linux-gnu
+           apt_deps: gcc-s390x-linux-gnu qemu-user
          - os: ubuntu-latest
            env:
              TARGET_TRIPLE: riscv64gc-unknown-linux-gnu
+           apt_deps: gcc-riscv64-linux-gnu qemu-user
          - os: windows-latest
            env:
              TARGET_TRIPLE: x86_64-pc-windows-msvc
@ -80,29 +98,11 @@ jobs:
|
||||
if: matrix.os == 'windows-latest' && matrix.env.TARGET_TRIPLE == 'x86_64-pc-windows-gnu'
|
||||
run: rustup set default-host x86_64-pc-windows-gnu
|
||||
|
||||
- name: Install MinGW toolchain and wine
|
||||
if: matrix.os == 'ubuntu-latest' && matrix.env.TARGET_TRIPLE == 'x86_64-pc-windows-gnu'
|
||||
- name: Install toolchain and emulator
|
||||
if: matrix.apt_deps != null
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y gcc-mingw-w64-x86-64 wine-stable
|
||||
|
||||
- name: Install AArch64 toolchain and qemu
|
||||
if: matrix.os == 'ubuntu-latest' && matrix.env.TARGET_TRIPLE == 'aarch64-unknown-linux-gnu'
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y gcc-aarch64-linux-gnu qemu-user
|
||||
|
||||
- name: Install s390x toolchain and qemu
|
||||
if: matrix.env.TARGET_TRIPLE == 's390x-unknown-linux-gnu'
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y gcc-s390x-linux-gnu qemu-user
|
||||
|
||||
- name: Install riscv64gc toolchain and qemu
|
||||
if: matrix.env.TARGET_TRIPLE == 'riscv64gc-unknown-linux-gnu'
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y gcc-riscv64-linux-gnu qemu-user
|
||||
sudo apt-get install -y ${{ matrix.apt_deps }}
|
||||
|
||||
- name: Prepare dependencies
|
||||
run: ./y.sh prepare
|
||||
@@ -142,10 +142,6 @@ jobs:
     runs-on: ubuntu-latest
     timeout-minutes: 60
 
-    defaults:
-      run:
-        shell: bash
-
     steps:
       - uses: actions/checkout@v4
 
@@ -168,10 +164,6 @@ jobs:
     runs-on: ubuntu-latest
     timeout-minutes: 60
 
-    defaults:
-      run:
-        shell: bash
-
     steps:
       - uses: actions/checkout@v4
 
@@ -193,20 +185,16 @@ jobs:
        run: ./y.sh prepare
 
      - name: Build
-        run: CI_OPT=1 ./y.sh build --sysroot none
+        run: ./y.sh build --sysroot none
 
      - name: Benchmark
-        run: CI_OPT=1 ./y.sh bench
+        run: ./y.sh bench
 
 
   dist:
     runs-on: ${{ matrix.os }}
     timeout-minutes: 60
 
-    defaults:
-      run:
-        shell: bash
-
     strategy:
       fail-fast: false
       matrix:
@@ -252,10 +240,10 @@ jobs:
        run: ./y.sh prepare
 
      - name: Build backend
-        run: CI_OPT=1 ./y.sh build --sysroot none
+        run: ./y.sh build --sysroot none
 
      - name: Build sysroot
-        run: CI_OPT=1 ./y.sh build
+        run: ./y.sh build
 
      - name: Package prebuilt cg_clif
        run: tar cvfJ cg_clif.tar.xz dist
@@ -3,6 +3,8 @@ name: Various rustc tests
 on:
   - push
 
+permissions: {}
+
 jobs:
   bootstrap_rustc:
     runs-on: ubuntu-latest
@@ -101,27 +101,7 @@ For additional ways to use rustc_codegen_cranelift like the JIT mode see [usage.
 
 ## Building and testing with changes in rustc code
 
-This is useful when changing code in `rustc_codegen_cranelift` as part of changing [main Rust repository](https://github.com/rust-lang/rust/).
-This can happen, for example, when you are implementing a new compiler intrinsic.
-
-Instruction below uses `$RustCheckoutDir` as substitute for any folder where you cloned Rust repository.
-
-You need to do this steps to successfully compile and use the cranelift backend with your changes in rustc code:
-
-1. `cd $RustCheckoutDir`
-2. Run `python x.py setup` and choose option for compiler (`b`).
-3. Build compiler and necessary tools: `python x.py build --stage=2 compiler library/std src/tools/rustdoc src/tools/rustfmt`
-   * (Optional) You can also build cargo by adding `src/tools/cargo` to previous command.
-4. Copy cargo from a nightly toolchain: `cp $(rustup +nightly which cargo) ./build/host/stage2/bin/cargo`. Note that you would need to do this every time you rebuilt `rust` repository.
-5. Link your new `rustc` to toolchain: `rustup toolchain link stage2 ./build/host/stage2/`.
-6. (Windows only) compile the build system: `rustc +stage2 -O build_system/main.rs -o y.exe`.
-7. You need to prefix every `./y.sh` (or `y` if you built `build_system/main.rs` as `y`) command by `rustup run stage2` to make cg_clif use your local changes in rustc.
-   * `rustup run stage2 ./y.sh prepare`
-   * `rustup run stage2 ./y.sh build`
-   * (Optional) run tests: `rustup run stage2 ./y.sh test`
-8. Now you can use your cg_clif build to compile other Rust programs, e.g. you can open any Rust crate and run commands like `$RustCheckoutDir/compiler/rustc_codegen_cranelift/dist/cargo-clif build --release`.
-
-You can also set `rust-analyzer.rustc.source` to your rust workspace to get rust-analyzer to understand your changes.
+See [rustc_testing.md](docs/rustc_testing.md).
 
 ## Not yet supported
|
@ -1,9 +1,10 @@
|
||||
use std::env;
|
||||
use std::path::PathBuf;
|
||||
|
||||
use crate::path::{Dirs, RelPath};
|
||||
use crate::rustc_info::get_file_name;
|
||||
use crate::shared_utils::{rustflags_from_env, rustflags_to_cmd_env};
|
||||
use crate::utils::{is_ci, is_ci_opt, maybe_incremental, CargoProject, Compiler, LogGroup};
|
||||
use crate::utils::{CargoProject, Compiler, LogGroup};
|
||||
|
||||
pub(crate) static CG_CLIF: CargoProject = CargoProject::new(&RelPath::SOURCE, "cg_clif");
|
||||
|
||||
@ -16,20 +17,15 @@ pub(crate) fn build_backend(
|
||||
let _group = LogGroup::guard("Build backend");
|
||||
|
||||
let mut cmd = CG_CLIF.build(&bootstrap_host_compiler, dirs);
|
||||
maybe_incremental(&mut cmd);
|
||||
|
||||
let mut rustflags = rustflags_from_env("RUSTFLAGS");
|
||||
|
||||
rustflags.push("-Zallow-features=rustc_private".to_owned());
|
||||
|
||||
if is_ci() {
|
||||
// Deny warnings on CI
|
||||
rustflags.push("-Dwarnings".to_owned());
|
||||
|
||||
if !is_ci_opt() {
|
||||
cmd.env("CARGO_PROFILE_RELEASE_DEBUG_ASSERTIONS", "true");
|
||||
cmd.env("CARGO_PROFILE_RELEASE_OVERFLOW_CHECKS", "true");
|
||||
}
|
||||
if env::var("CG_CLIF_EXPENSIVE_CHECKS").is_ok() {
|
||||
// Enabling debug assertions implicitly enables the clif ir verifier
|
||||
cmd.env("CARGO_PROFILE_RELEASE_DEBUG_ASSERTIONS", "true");
|
||||
cmd.env("CARGO_PROFILE_RELEASE_OVERFLOW_CHECKS", "true");
|
||||
}
|
||||
|
||||
if use_unstable_features {
|
||||
|
@@ -6,8 +6,7 @@ use std::process::Command;
 use crate::path::{Dirs, RelPath};
 use crate::rustc_info::get_file_name;
 use crate::utils::{
-    maybe_incremental, remove_dir_if_exists, spawn_and_wait, try_hard_link, CargoProject, Compiler,
-    LogGroup,
+    remove_dir_if_exists, spawn_and_wait, try_hard_link, CargoProject, Compiler, LogGroup,
 };
 use crate::{config, CodegenBackend, SysrootKind};
@@ -270,7 +269,6 @@ fn build_clif_sysroot_for_triple(
     }
     compiler.rustflags.extend(rustflags);
     let mut build_cmd = STANDARD_LIBRARY.build(&compiler, dirs);
-    maybe_incremental(&mut build_cmd);
     if channel == "release" {
         build_cmd.arg("--release");
     }
|
@ -6,7 +6,7 @@ use std::env;
|
||||
use std::path::PathBuf;
|
||||
use std::process;
|
||||
|
||||
use self::utils::{is_ci, is_ci_opt, Compiler};
|
||||
use self::utils::Compiler;
|
||||
|
||||
mod abi_cafe;
|
||||
mod bench;
|
||||
@ -60,14 +60,9 @@ fn main() {
|
||||
}
|
||||
env::set_var("CG_CLIF_DISABLE_INCR_CACHE", "1");
|
||||
|
||||
if is_ci() {
|
||||
// Disabling incr comp reduces cache size and incr comp doesn't save as much on CI anyway
|
||||
env::set_var("CARGO_BUILD_INCREMENTAL", "false");
|
||||
|
||||
if !is_ci_opt() {
|
||||
// Enable the Cranelift verifier
|
||||
env::set_var("CG_CLIF_ENABLE_VERIFIER", "1");
|
||||
}
|
||||
// Force incr comp even in release mode unless in CI or incremental builds are explicitly disabled
|
||||
if env::var_os("CARGO_BUILD_INCREMENTAL").is_none() {
|
||||
env::set_var("CARGO_BUILD_INCREMENTAL", "true");
|
||||
}
|
||||
|
||||
let mut args = env::args().skip(1);
|
||||
|
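The new logic relies on a simple convention: default an environment variable only when it is unset, so an explicit value from the caller (here, the workflows now export `CARGO_BUILD_INCREMENTAL: false` on CI) always wins. A self-contained sketch of that pattern:

```rust
use std::env;

fn main() {
    // Only default the variable when the caller has not set it;
    // an explicit "false" from CI is left untouched.
    if env::var_os("CARGO_BUILD_INCREMENTAL").is_none() {
        env::set_var("CARGO_BUILD_INCREMENTAL", "true");
    }
    println!("{:?}", env::var("CARGO_BUILD_INCREMENTAL"));
}
```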
@@ -15,7 +15,6 @@ pub(crate) fn prepare(dirs: &Dirs) {
     RelPath::DOWNLOAD.ensure_exists(dirs);
     crate::tests::RAND_REPO.fetch(dirs);
     crate::tests::REGEX_REPO.fetch(dirs);
-    crate::tests::PORTABLE_SIMD_REPO.fetch(dirs);
 }
 
 pub(crate) fn prepare_stdlib(dirs: &Dirs, rustc: &Path) {
@@ -130,16 +130,10 @@ pub(crate) static REGEX_REPO: GitRepo = GitRepo::github(
 
 pub(crate) static REGEX: CargoProject = CargoProject::new(&REGEX_REPO.source_dir(), "regex_target");
 
-pub(crate) static PORTABLE_SIMD_REPO: GitRepo = GitRepo::github(
-    "rust-lang",
-    "portable-simd",
-    "5794c837bc605c4cd9dbb884285976dfdb293cce",
-    "a64d8fdd0ed0d9c4",
-    "portable-simd",
-);
+pub(crate) static PORTABLE_SIMD_SRC: RelPath = RelPath::BUILD.join("coretests");
 
 pub(crate) static PORTABLE_SIMD: CargoProject =
-    CargoProject::new(&PORTABLE_SIMD_REPO.source_dir(), "portable-simd_target");
+    CargoProject::new(&PORTABLE_SIMD_SRC, "portable-simd_target");
 
 static LIBCORE_TESTS_SRC: RelPath = RelPath::BUILD.join("coretests");
@@ -221,7 +215,12 @@ const EXTENDED_SYSROOT_SUITE: &[TestCase] = &[
         }
     }),
     TestCase::custom("test.portable-simd", &|runner| {
-        PORTABLE_SIMD_REPO.patch(&runner.dirs);
+        apply_patches(
+            &runner.dirs,
+            "portable-simd",
+            &runner.stdlib_source.join("library/portable-simd"),
+            &PORTABLE_SIMD_SRC.to_path(&runner.dirs),
+        );
 
         PORTABLE_SIMD.clean(&runner.dirs);
@@ -254,14 +254,6 @@ pub(crate) fn copy_dir_recursively(from: &Path, to: &Path) {
     }
 }
 
-pub(crate) fn is_ci() -> bool {
-    env::var("CI").is_ok()
-}
-
-pub(crate) fn is_ci_opt() -> bool {
-    env::var("CI_OPT").is_ok()
-}
-
 static IN_GROUP: AtomicBool = AtomicBool::new(false);
 pub(crate) struct LogGroup {
     is_gha: bool,
@@ -288,13 +280,3 @@ impl Drop for LogGroup {
         IN_GROUP.store(false, Ordering::SeqCst);
     }
 }
-
-pub(crate) fn maybe_incremental(cmd: &mut Command) {
-    if is_ci() || std::env::var("CARGO_BUILD_INCREMENTAL").map_or(false, |val| val == "false") {
-        // Disabling incr comp reduces cache size and incr comp doesn't save as much on CI anyway
-        cmd.env("CARGO_BUILD_INCREMENTAL", "false");
-    } else {
-        // Force incr comp even in release mode unless in CI or incremental builds are explicitly disabled
-        cmd.env("CARGO_BUILD_INCREMENTAL", "true");
-    }
-}
compiler/rustc_codegen_cranelift/docs/rustc_testing.md (new file, 23 lines)

@@ -0,0 +1,23 @@
+# Building and testing with changes in rustc code
+
+This is useful when changing code in `rustc_codegen_cranelift` as part of changing the [main Rust repository](https://github.com/rust-lang/rust/).
+This can happen, for example, when you are implementing a new compiler intrinsic.
+
+The instructions below use `$RustCheckoutDir` as a substitute for any folder where you cloned the Rust repository.
+
+You need to do these steps to successfully compile and use the Cranelift backend with your changes in rustc code:
+
+1. `cd $RustCheckoutDir`
+2. Run `python x.py setup` and choose the option for compiler (`b`).
+3. Build the compiler and necessary tools: `python x.py build --stage=2 compiler library/std src/tools/rustdoc src/tools/rustfmt`
+   * (Optional) You can also build cargo by adding `src/tools/cargo` to the previous command.
+4. Copy cargo from a nightly toolchain: `cp $(rustup +nightly which cargo) ./build/host/stage2/bin/cargo`. Note that you need to do this every time you rebuild the `rust` repository.
+5. Link your new `rustc` to a toolchain: `rustup toolchain link stage2 ./build/host/stage2/`.
+6. (Windows only) compile the build system: `rustc +stage2 -O build_system/main.rs -o y.exe`.
+7. You need to prefix every `./y.sh` (or `y` if you built `build_system/main.rs` as `y`) command with `rustup run stage2` to make cg_clif use your local changes in rustc.
+   * `rustup run stage2 ./y.sh prepare`
+   * `rustup run stage2 ./y.sh build`
+   * (Optional) run tests: `rustup run stage2 ./y.sh test`
+8. Now you can use your cg_clif build to compile other Rust programs, e.g. you can open any Rust crate and run commands like `$RustCheckoutDir/compiler/rustc_codegen_cranelift/dist/cargo-clif build --release`.
+
+You can also set `rust-analyzer.rustc.source` to your rust workspace to get rust-analyzer to understand your changes.
@@ -90,8 +90,9 @@ unsafe impl Sync for i16 {}
 unsafe impl Sync for i32 {}
 unsafe impl Sync for isize {}
 unsafe impl Sync for char {}
+unsafe impl Sync for f32 {}
 unsafe impl<'a, T: ?Sized> Sync for &'a T {}
-unsafe impl Sync for [u8; 16] {}
+unsafe impl<T: Sync, const N: usize> Sync for [T; N] {}
 
 #[lang = "freeze"]
 unsafe auto trait Freeze {}
@@ -467,7 +468,6 @@ pub fn panic(_msg: &'static str) -> ! {
 
 macro_rules! panic_const {
     ($($lang:ident = $message:expr,)+) => {
-        #[cfg(not(bootstrap))]
         pub mod panic_const {
             use super::*;
 
@@ -1,3 +1,3 @@
 [toolchain]
-channel = "nightly-2024-03-28"
+channel = "nightly-2024-04-05"
 components = ["rust-src", "rustc-dev", "llvm-tools"]
@@ -1,5 +1,5 @@
 #!/usr/bin/env bash
-#![forbid(unsafe_code)]/* This line is ignored by bash
+#![rustfmt::skip]/* This line is ignored by bash
 # This block is ignored by rustc
 pushd $(dirname "$0")/../
 RUSTC="$(pwd)/dist/rustc-clif"
@@ -26,11 +26,8 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
     }
     let profile = std::fs::read_to_string(profile_name)
         .map_err(|err| format!("Failed to read profile {}", err))?;
-    let mut output = std::fs::OpenOptions::new()
-        .create(true)
-        .write(true)
-        .truncate(true)
-        .open(output_name)?;
+    let mut output =
+        std::fs::OpenOptions::new().create(true).write(true).truncate(true).open(output_name)?;
 
     for line in profile.lines() {
         let mut stack = &line[..line.rfind(" ").unwrap()];
@@ -130,7 +130,7 @@ pub(crate) fn codegen_int_binop<'tcx>(
     in_lhs: CValue<'tcx>,
     in_rhs: CValue<'tcx>,
 ) -> CValue<'tcx> {
-    if bin_op != BinOp::Shl && bin_op != BinOp::Shr {
+    if !matches!(bin_op, BinOp::Shl | BinOp::ShlUnchecked | BinOp::Shr | BinOp::ShrUnchecked) {
         assert_eq!(
             in_lhs.layout().ty,
             in_rhs.layout().ty,
@@ -79,16 +79,18 @@ dependencies = [
 
 [[package]]
 name = "gccjit"
-version = "1.0.0"
-source = "git+https://github.com/antoyo/gccjit.rs#9f8f67edc006d543b17529a001803ffece48349e"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ecaa4c3da2d74c1a991b4faff75d49ab1d0522d9a99d8e2614b3b04d226417ce"
 dependencies = [
  "gccjit_sys",
 ]
 
 [[package]]
 name = "gccjit_sys"
-version = "0.0.1"
-source = "git+https://github.com/antoyo/gccjit.rs#9f8f67edc006d543b17529a001803ffece48349e"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "406a66fba005f1a02661f2f9443e5693dd3a667b7c58e70aa4ccc4c8b50b4758"
 dependencies = [
  "libc",
 ]
@@ -22,7 +22,7 @@ master = ["gccjit/master"]
 default = ["master"]
 
 [dependencies]
-gccjit = { git = "https://github.com/antoyo/gccjit.rs" }
+gccjit = "2.0"
 
 # Local copy.
 #gccjit = { path = "../gccjit.rs" }
@@ -912,6 +912,7 @@ fn target_is_apple(cgcx: &CodegenContext<LlvmCodegenBackend>) -> bool {
         || cgcx.opts.target_triple.triple().contains("-darwin")
         || cgcx.opts.target_triple.triple().contains("-tvos")
         || cgcx.opts.target_triple.triple().contains("-watchos")
+        || cgcx.opts.target_triple.triple().contains("-visionos")
 }
 
 fn target_is_aix(cgcx: &CodegenContext<LlvmCodegenBackend>) -> bool {
@@ -17,11 +17,11 @@ use rustc_span::Symbol;
 
 /// Generates and exports the Coverage Map.
 ///
-/// Rust Coverage Map generation supports LLVM Coverage Mapping Format version
-/// 6 (zero-based encoded as 5), as defined at
-/// [LLVM Code Coverage Mapping Format](https://github.com/rust-lang/llvm-project/blob/rustc/13.0-2021-09-30/llvm/docs/CoverageMappingFormat.rst#llvm-code-coverage-mapping-format).
-/// These versions are supported by the LLVM coverage tools (`llvm-profdata` and `llvm-cov`)
-/// distributed in the `llvm-tools-preview` rustup component.
+/// Rust Coverage Map generation supports LLVM Coverage Mapping Format versions
+/// 6 and 7 (encoded as 5 and 6 respectively), as described at
+/// [LLVM Code Coverage Mapping Format](https://github.com/rust-lang/llvm-project/blob/rustc/18.0-2024-02-13/llvm/docs/CoverageMappingFormat.rst).
+/// These versions are supported by the LLVM coverage tools (`llvm-profdata` and `llvm-cov`)
+/// bundled with Rust's fork of LLVM.
 ///
 /// Consequently, Rust's bundled version of Clang also generates Coverage Maps compliant with
 /// the same version. Clang's implementation of Coverage Map generation was referenced when
@@ -31,10 +31,21 @@ use rustc_span::Symbol;
 pub fn finalize(cx: &CodegenCx<'_, '_>) {
     let tcx = cx.tcx;
 
-    // Ensure the installed version of LLVM supports Coverage Map Version 6
-    // (encoded as a zero-based value: 5), which was introduced with LLVM 13.
-    let version = coverageinfo::mapping_version();
-    assert_eq!(version, 5, "The `CoverageMappingVersion` exposed by `llvm-wrapper` is out of sync");
+    // Ensure that LLVM is using a version of the coverage mapping format that
+    // agrees with our Rust-side code. Expected versions (encoded as n-1) are:
+    // - `CovMapVersion::Version6` (5) used by LLVM 13-17
+    // - `CovMapVersion::Version7` (6) used by LLVM 18
+    let covmap_version = {
+        let llvm_covmap_version = coverageinfo::mapping_version();
+        let expected_versions = 5..=6;
+        assert!(
+            expected_versions.contains(&llvm_covmap_version),
+            "Coverage mapping version exposed by `llvm-wrapper` is out of sync; \
+            expected {expected_versions:?} but was {llvm_covmap_version}"
+        );
+        // This is the version number that we will embed in the covmap section:
+        llvm_covmap_version
+    };
 
     debug!("Generating coverage map for CodegenUnit: `{}`", cx.codegen_unit.name());
@@ -74,7 +85,7 @@ pub fn finalize(cx: &CodegenCx<'_, '_>) {
 
     // Generate the coverage map header, which contains the filenames used by
     // this CGU's coverage mappings, and store it in a well-known global.
-    let cov_data_val = generate_coverage_map(cx, version, filenames_size, filenames_val);
+    let cov_data_val = generate_coverage_map(cx, covmap_version, filenames_size, filenames_val);
     coverageinfo::save_cov_data_to_mod(cx, cov_data_val);
 
     let mut unused_function_names = Vec::new();
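The negotiation above reduces to asserting the backend's reported version against a closed range and embedding whatever was reported. A standalone, hypothetical sketch of that shape (names are illustrative, not rustc API):

```rust
// Accept coverage mapping versions encoded as 5 (LLVM 13-17) or 6 (LLVM 18),
// and hand back whichever one the wrapper reported for embedding.
fn covmap_version(llvm_covmap_version: u32) -> u32 {
    let expected_versions = 5..=6;
    assert!(
        expected_versions.contains(&llvm_covmap_version),
        "version out of sync; expected {expected_versions:?} but was {llvm_covmap_version}"
    );
    llvm_covmap_version
}

fn main() {
    assert_eq!(covmap_version(6), 6);
}
```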
@@ -24,8 +24,6 @@ pub(crate) mod ffi;
 pub(crate) mod map_data;
 pub mod mapgen;
 
-const VAR_ALIGN: Align = Align::EIGHT;
-
 /// A context object for maintaining all state needed by the coverageinfo module.
 pub struct CrateCoverageContext<'ll, 'tcx> {
     /// Coverage data for each instrumented function identified by DefId.
@@ -226,7 +224,8 @@ pub(crate) fn save_cov_data_to_mod<'ll, 'tcx>(
     llvm::set_global_constant(llglobal, true);
     llvm::set_linkage(llglobal, llvm::Linkage::PrivateLinkage);
     llvm::set_section(llglobal, &covmap_section_name);
-    llvm::set_alignment(llglobal, VAR_ALIGN);
+    // LLVM's coverage mapping format specifies 8-byte alignment for items in this section.
+    llvm::set_alignment(llglobal, Align::EIGHT);
     cx.add_used_global(llglobal);
 }
@@ -256,7 +255,8 @@ pub(crate) fn save_func_record_to_mod<'ll, 'tcx>(
     llvm::set_linkage(llglobal, llvm::Linkage::LinkOnceODRLinkage);
     llvm::set_visibility(llglobal, llvm::Visibility::Hidden);
     llvm::set_section(llglobal, covfun_section_name);
-    llvm::set_alignment(llglobal, VAR_ALIGN);
+    // LLVM's coverage mapping format specifies 8-byte alignment for items in this section.
+    llvm::set_alignment(llglobal, Align::EIGHT);
     llvm::set_comdat(cx.llmod, llglobal, &func_record_var_name);
     cx.add_used_global(llglobal);
 }
@@ -147,7 +147,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
         for options in [
             TypeIdOptions::GENERALIZE_POINTERS,
             TypeIdOptions::NORMALIZE_INTEGERS,
-            TypeIdOptions::NO_SELF_TYPE_ERASURE,
+            TypeIdOptions::USE_CONCRETE_SELF,
         ]
         .into_iter()
         .powerset()
@@ -2946,7 +2946,7 @@ fn add_apple_sdk(cmd: &mut dyn Linker, sess: &Session, flavor: LinkerFlavor) {
     let os = &sess.target.os;
     let llvm_target = &sess.target.llvm_target;
     if sess.target.vendor != "apple"
-        || !matches!(os.as_ref(), "ios" | "tvos" | "watchos" | "macos")
+        || !matches!(os.as_ref(), "ios" | "tvos" | "watchos" | "visionos" | "macos")
         || !matches!(flavor, LinkerFlavor::Darwin(..))
     {
         return;
@@ -2971,6 +2971,8 @@ fn add_apple_sdk(cmd: &mut dyn Linker, sess: &Session, flavor: LinkerFlavor) {
         ("arm64_32", "watchos") => "watchos",
         ("aarch64", "watchos") if llvm_target.ends_with("-simulator") => "watchsimulator",
         ("aarch64", "watchos") => "watchos",
+        ("aarch64", "visionos") if llvm_target.ends_with("-simulator") => "xrsimulator",
+        ("aarch64", "visionos") => "xros",
         ("arm", "watchos") => "watchos",
         (_, "macos") => "macosx",
         _ => {
@@ -3027,6 +3029,10 @@ fn get_apple_sdk_root(sdk_name: &str) -> Result<String, errors::AppleSdkRootErro
                 || sdkroot.contains("MacOSX.platform") => {}
             "watchsimulator"
                 if sdkroot.contains("WatchOS.platform") || sdkroot.contains("MacOSX.platform") => {}
+            "visionos"
+                if sdkroot.contains("XROS.platform") || sdkroot.contains("MacOSX.platform") => {}
+            "visionossimulator"
+                if sdkroot.contains("XROS.platform") || sdkroot.contains("MacOSX.platform") => {}
             // Ignore `SDKROOT` if it's not a valid path.
             _ if !p.is_absolute() || p == Path::new("/") || !p.exists() => {}
             _ => return Ok(sdkroot),
|
@ -567,6 +567,8 @@ declare_features! (
|
||||
(unstable, proc_macro_hygiene, "1.30.0", Some(54727)),
|
||||
/// Allows `&raw const $place_expr` and `&raw mut $place_expr` expressions.
|
||||
(unstable, raw_ref_op, "1.41.0", Some(64490)),
|
||||
/// Allows `&` and `&mut` patterns to consume match-ergonomics-inserted references.
|
||||
(incomplete, ref_pat_everywhere, "CURRENT_RUSTC_VERSION", Some(123076)),
|
||||
/// Allows using the `#[register_tool]` attribute.
|
||||
(unstable, register_tool, "1.41.0", Some(66079)),
|
||||
/// Allows the `#[repr(i128)]` attribute for enums.
|
||||
|
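Going by the doc comment above, a minimal sketch (not from the commit) of what the new gate is meant to permit; on stable, the `&x` below is rejected because the inner expected type is `i32`, not a reference:

```rust
#![feature(ref_pat_everywhere)]

fn main() {
    let opt = &Some(5i32);
    // The default binding mode here is by-reference, and `&x` consumes the
    // match-ergonomics-inserted reference, so `x` binds an `i32` by value.
    if let Some(&x) = opt {
        let _: i32 = x;
    }
}
```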
@@ -380,14 +380,19 @@ impl Definitions {
     pub fn local_def_path_hash_to_def_id(
         &self,
         hash: DefPathHash,
-        err: &mut dyn FnMut() -> !,
+        err_msg: &dyn std::fmt::Debug,
     ) -> LocalDefId {
         debug_assert!(hash.stable_crate_id() == self.table.stable_crate_id);
+        #[cold]
+        #[inline(never)]
+        fn err(err_msg: &dyn std::fmt::Debug) -> ! {
+            panic!("{err_msg:?}")
+        }
         self.table
             .def_path_hash_to_index
             .get(&hash.local_hash())
             .map(|local_def_index| LocalDefId { local_def_index })
-            .unwrap_or_else(|| err())
+            .unwrap_or_else(|| err(err_msg))
     }
 
     pub fn def_path_hash_to_def_index_map(&self) -> &DefPathHashMap {
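The rewrite above swaps a caller-supplied diverging closure for a `#[cold]`, never-inlined error function, keeping the panic machinery out of the hot lookup path. The same pattern in a self-contained sketch:

```rust
use std::collections::HashMap;

// Cold path: never inlined, so the happy path stays small.
#[cold]
#[inline(never)]
fn missing(err_msg: &dyn std::fmt::Debug) -> ! {
    panic!("{err_msg:?}")
}

fn lookup(map: &HashMap<u32, String>, key: u32) -> &String {
    // The diverging call coerces to `&String`, just like `err(err_msg)` above.
    map.get(&key).unwrap_or_else(|| missing(&key))
}

fn main() {
    let mut m = HashMap::new();
    m.insert(1, "one".to_string());
    println!("{}", lookup(&m, 1));
}
```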
@@ -918,7 +918,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
 
         let param = callee_args.const_at(host_effect_index);
         let cause = self.misc(span);
-        match self.at(&cause, self.param_env).eq(infer::DefineOpaqueTypes::No, effect, param) {
+        // We know the type of `effect` to be `bool`, there will be no opaque type inference.
+        match self.at(&cause, self.param_env).eq(infer::DefineOpaqueTypes::Yes, effect, param) {
             Ok(infer::InferOk { obligations, value: () }) => {
                 self.register_predicates(obligations);
             }
@@ -227,11 +227,18 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
                     kind: TypeVariableOriginKind::ClosureSynthetic,
                     span: expr_span,
                 });
-                let closure_kind_ty = self.next_ty_var(TypeVariableOrigin {
-                    // FIXME(eddyb) distinguish closure kind inference variables from the rest.
-                    kind: TypeVariableOriginKind::ClosureSynthetic,
-                    span: expr_span,
-                });
+                let closure_kind_ty = match expected_kind {
+                    Some(kind) => Ty::from_closure_kind(tcx, kind),
+
+                    // Create a type variable (for now) to represent the closure kind.
+                    // It will be unified during the upvar inference phase (`upvar.rs`)
+                    None => self.next_ty_var(TypeVariableOrigin {
+                        // FIXME(eddyb) distinguish closure kind inference variables from the rest.
+                        kind: TypeVariableOriginKind::ClosureSynthetic,
+                        span: expr_span,
+                    }),
+                };
 
                 let coroutine_captures_by_ref_ty = self.next_ty_var(TypeVariableOrigin {
                     kind: TypeVariableOriginKind::ClosureSynthetic,
                     span: expr_span,
@@ -262,10 +269,17 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
             },
         );
 
-        let coroutine_kind_ty = self.next_ty_var(TypeVariableOrigin {
-            kind: TypeVariableOriginKind::ClosureSynthetic,
-            span: expr_span,
-        });
+        let coroutine_kind_ty = match expected_kind {
+            Some(kind) => Ty::from_coroutine_closure_kind(tcx, kind),
+
+            // Create a type variable (for now) to represent the closure kind.
+            // It will be unified during the upvar inference phase (`upvar.rs`)
+            None => self.next_ty_var(TypeVariableOrigin {
+                kind: TypeVariableOriginKind::ClosureSynthetic,
+                span: expr_span,
+            }),
+        };
 
         let coroutine_upvars_ty = self.next_ty_var(TypeVariableOrigin {
             kind: TypeVariableOriginKind::ClosureSynthetic,
             span: expr_span,
@@ -400,7 +400,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
         // what our ideal rcvr ty would look like.
         let _ = self
             .at(&ObligationCause::dummy(), self.param_env)
-            .eq(DefineOpaqueTypes::No, method.sig.inputs()[idx + 1], arg_ty)
+            .eq(DefineOpaqueTypes::Yes, method.sig.inputs()[idx + 1], arg_ty)
             .ok()?;
         self.select_obligations_where_possible(|errs| {
             // Yeet the errors, we're already reporting errors.
@@ -479,7 +479,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
             .and_then(|method| {
                 let _ = self
                     .at(&ObligationCause::dummy(), self.param_env)
-                    .eq(DefineOpaqueTypes::No, ideal_rcvr_ty, expected_ty)
+                    .eq(DefineOpaqueTypes::Yes, ideal_rcvr_ty, expected_ty)
                     .ok()?;
                 Some(method)
             });
@@ -44,10 +44,7 @@ use rustc_infer::infer::InferOk;
 use rustc_infer::traits::query::NoSolution;
 use rustc_infer::traits::ObligationCause;
 use rustc_middle::ty::adjustment::{Adjust, Adjustment, AllowTwoPhase};
-use rustc_middle::ty::error::{
-    ExpectedFound,
-    TypeError::{FieldMisMatch, Sorts},
-};
+use rustc_middle::ty::error::{ExpectedFound, TypeError::Sorts};
 use rustc_middle::ty::GenericArgsRef;
 use rustc_middle::ty::{self, AdtKind, Ty, TypeVisitableExt};
 use rustc_session::errors::ExprParenthesesNeeded;
@@ -1811,7 +1808,10 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
                 let target_ty = self.field_ty(base_expr.span, f, args);
                 let cause = self.misc(base_expr.span);
                 match self.at(&cause, self.param_env).sup(
-                    DefineOpaqueTypes::No,
+                    // We're already using inference variables for any params, and don't allow converting
+                    // between different structs, so there is no way this ever actually defines an opaque type.
+                    // Thus choosing `Yes` is fine.
+                    DefineOpaqueTypes::Yes,
                     target_ty,
                     fru_ty,
                 ) {
@@ -1819,16 +1819,12 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
                         self.register_predicates(obligations)
                     }
                     Err(_) => {
-                        // This should never happen, since we're just subtyping the
-                        // remaining_fields, but it's fine to emit this, I guess.
-                        self.err_ctxt()
-                            .report_mismatched_types(
-                                &cause,
-                                target_ty,
-                                fru_ty,
-                                FieldMisMatch(variant.name, ident.name),
-                            )
-                            .emit();
+                        span_bug!(
+                            cause.span(),
+                            "subtyping remaining fields of type changing FRU failed: {target_ty} != {fru_ty}: {}::{}",
+                            variant.name,
+                            ident.name,
+                        );
                     }
                 }
             }
@@ -461,7 +461,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
     where
         T: TypeVisitable<TyCtxt<'tcx>>,
     {
-        t.has_free_regions() || t.has_projections() || t.has_infer_types()
+        t.has_free_regions() || t.has_aliases() || t.has_infer_types()
     }
 
     pub fn node_ty(&self, id: hir::HirId) -> Ty<'tcx> {
@@ -687,7 +687,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
             // Using probe here, since we don't want this subtyping to affect inference.
             let subtyping_error = self.probe(|_| {
                 self.at(&self.misc(arg_span), self.param_env)
-                    .sup(DefineOpaqueTypes::No, formal_input_ty, coerced_ty)
+                    .sup(DefineOpaqueTypes::Yes, formal_input_ty, coerced_ty)
                     .err()
             });
@@ -130,7 +130,14 @@ enum AdjustMode {
     /// Peel off all immediate reference types.
     Peel,
     /// Reset binding mode to the initial mode.
     /// Used for destructuring assignment, where we don't want any match ergonomics.
     Reset,
+    /// Produced by ref patterns.
+    /// Reset the binding mode to the initial mode,
+    /// and if the old binding mode was by-reference
+    /// with mutability matching the pattern,
+    /// mark the pattern as having consumed this reference.
+    ResetAndConsumeRef(Mutability),
     /// Pass on the input binding mode and expected type.
     Pass,
 }
@@ -174,7 +181,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
             _ => None,
         };
         let adjust_mode = self.calc_adjust_mode(pat, path_res.map(|(res, ..)| res));
-        let (expected, def_bm) = self.calc_default_binding_mode(pat, expected, def_bm, adjust_mode);
+        let (expected, def_bm, ref_pattern_already_consumed) =
+            self.calc_default_binding_mode(pat, expected, def_bm, adjust_mode);
         let pat_info = PatInfo {
             binding_mode: def_bm,
             top_info: ti,
@@ -211,7 +219,14 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
             }
             PatKind::Box(inner) => self.check_pat_box(pat.span, inner, expected, pat_info),
             PatKind::Deref(inner) => self.check_pat_deref(pat.span, inner, expected, pat_info),
-            PatKind::Ref(inner, mutbl) => self.check_pat_ref(pat, inner, mutbl, expected, pat_info),
+            PatKind::Ref(inner, mutbl) => self.check_pat_ref(
+                pat,
+                inner,
+                mutbl,
+                expected,
+                pat_info,
+                ref_pattern_already_consumed,
+            ),
             PatKind::Slice(before, slice, after) => {
                 self.check_pat_slice(pat.span, before, slice, after, expected, pat_info)
             }
@@ -264,17 +279,27 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
 
     /// Compute the new expected type and default binding mode from the old ones
     /// as well as the pattern form we are currently checking.
+    ///
+    /// The last entry is only relevant for ref patterns (`&` and `&mut`);
+    /// if `true`, then the ref pattern consumed a match ergonomics inserted reference
+    /// and so does not need to match against a reference in the scrutinee type.
     fn calc_default_binding_mode(
         &self,
         pat: &'tcx Pat<'tcx>,
         expected: Ty<'tcx>,
         def_bm: BindingAnnotation,
         adjust_mode: AdjustMode,
-    ) -> (Ty<'tcx>, BindingAnnotation) {
+    ) -> (Ty<'tcx>, BindingAnnotation, bool) {
         match adjust_mode {
-            AdjustMode::Pass => (expected, def_bm),
-            AdjustMode::Reset => (expected, INITIAL_BM),
-            AdjustMode::Peel => self.peel_off_references(pat, expected, def_bm),
+            AdjustMode::Pass => (expected, def_bm, false),
+            AdjustMode::Reset => (expected, INITIAL_BM, false),
+            AdjustMode::ResetAndConsumeRef(mutbl) => {
+                (expected, INITIAL_BM, def_bm.0 == ByRef::Yes(mutbl))
+            }
+            AdjustMode::Peel => {
+                let peeled = self.peel_off_references(pat, expected, def_bm);
+                (peeled.0, peeled.1, false)
+            }
         }
     }
@@ -329,7 +354,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
         // ```
         //
         // See issue #46688.
-        PatKind::Ref(..) => AdjustMode::Reset,
+        PatKind::Ref(_, mutbl) => AdjustMode::ResetAndConsumeRef(*mutbl),
         // A `_` pattern works with any expected type, so there's no need to do anything.
         PatKind::Wild
         // A malformed pattern doesn't have an expected type, so let's just accept any type.
@@ -840,8 +865,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
             && let Some(mt) = self.shallow_resolve(expected).builtin_deref(true)
             && let ty::Dynamic(..) = mt.ty.kind()
         {
-            // This is "x = SomeTrait" being reduced from
-            // "let &x = &SomeTrait" or "let box x = Box<SomeTrait>", an error.
+            // This is "x = dyn SomeTrait" being reduced from
+            // "let &x = &dyn SomeTrait" or "let box x = Box<dyn SomeTrait>", an error.
             let type_str = self.ty_to_string(expected);
             let mut err = struct_span_code_err!(
                 self.dcx(),
@@ -2036,6 +2061,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
         mutbl: Mutability,
         expected: Ty<'tcx>,
         pat_info: PatInfo<'tcx, '_>,
+        consumed_inherited_ref: bool,
     ) -> Ty<'tcx> {
         let tcx = self.tcx;
         let expected = self.shallow_resolve(expected);
@@ -2051,26 +2077,37 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
         match *expected.kind() {
             ty::Ref(_, r_ty, r_mutbl) if r_mutbl == mutbl => (expected, r_ty),
             _ => {
-                let inner_ty = self.next_ty_var(TypeVariableOrigin {
-                    kind: TypeVariableOriginKind::TypeInference,
-                    span: inner.span,
-                });
-                let ref_ty = self.new_ref_ty(pat.span, mutbl, inner_ty);
-                debug!("check_pat_ref: demanding {:?} = {:?}", expected, ref_ty);
-                let err = self.demand_eqtype_pat_diag(
-                    pat.span,
-                    expected,
-                    ref_ty,
-                    pat_info.top_info,
-                );
-
-                // Look for a case like `fn foo(&foo: u32)` and suggest
-                // `fn foo(foo: &u32)`
-                if let Some(mut err) = err {
-                    self.borrow_pat_suggestion(&mut err, pat);
-                    err.emit();
-                }
-                (ref_ty, inner_ty)
+                if consumed_inherited_ref && self.tcx.features().ref_pat_everywhere {
+                    // We already matched against a match-ergonomics inserted reference,
+                    // so we don't need to match against a reference from the original type.
+                    // Save this info for use in lowering later
+                    self.typeck_results
+                        .borrow_mut()
+                        .skipped_ref_pats_mut()
+                        .insert(pat.hir_id);
+                    (expected, expected)
+                } else {
+                    let inner_ty = self.next_ty_var(TypeVariableOrigin {
+                        kind: TypeVariableOriginKind::TypeInference,
+                        span: inner.span,
+                    });
+                    let ref_ty = self.new_ref_ty(pat.span, mutbl, inner_ty);
+                    debug!("check_pat_ref: demanding {:?} = {:?}", expected, ref_ty);
+                    let err = self.demand_eqtype_pat_diag(
+                        pat.span,
+                        expected,
+                        ref_ty,
+                        pat_info.top_info,
+                    );
 
+                    // Look for a case like `fn foo(&foo: u32)` and suggest
+                    // `fn foo(foo: &u32)`
+                    if let Some(mut err) = err {
+                        self.borrow_pat_suggestion(&mut err, pat);
+                        err.emit();
+                    }
+                    (ref_ty, inner_ty)
+                }
             }
         }
     }
@@ -166,7 +166,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
         span: Span,
         body_id: hir::BodyId,
         body: &'tcx hir::Body<'tcx>,
-        capture_clause: hir::CaptureBy,
+        mut capture_clause: hir::CaptureBy,
     ) {
         // Extract the type of the closure.
         let ty = self.node_ty(closure_hir_id);
@@ -259,6 +259,28 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
         )
         .consume_body(body);
 
+        // If a coroutine comes from a coroutine-closure that is `move`, but
+        // the coroutine-closure was inferred to be `FnOnce` during signature
+        // inference, then it's still possible that we try to borrow upvars from
+        // the coroutine-closure because they are not used by the coroutine body
+        // in a way that forces a move.
+        //
+        // This would lead to an impossible to satisfy situation, since `AsyncFnOnce`
+        // coroutine bodies can't borrow from their parent closure. To fix this,
+        // we force the inner coroutine to also be `move`. This only matters for
+        // coroutine-closures that are `move` since otherwise they themselves will
+        // be borrowing from the outer environment, so there's no self-borrows occurring.
+        if let UpvarArgs::Coroutine(..) = args
+            && let hir::CoroutineKind::Desugared(_, hir::CoroutineSource::Closure) =
+                self.tcx.coroutine_kind(closure_def_id).expect("coroutine should have kind")
+            && let parent_hir_id =
+                self.tcx.local_def_id_to_hir_id(self.tcx.local_parent(closure_def_id))
+            && let parent_ty = self.node_ty(parent_hir_id)
+            && let Some(ty::ClosureKind::FnOnce) = self.closure_kind(parent_ty)
+        {
+            capture_clause = self.tcx.hir_node(parent_hir_id).expect_closure().capture_clause;
+        }
+
         debug!(
             "For closure={:?}, capture_information={:#?}",
             closure_def_id, delegate.capture_information
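The comment above concerns coroutine-closures, i.e. async closures. A rough, hedged sketch of the surface-level shape of code it is describing (nightly-only; the gate is assumed to be `async_closure` on this toolchain):

```rust
#![feature(async_closure)]

fn main() {
    let s = String::from("upvar");
    // A `move` async closure whose body consumes `s` is inferred `FnOnce`;
    // the coroutine it produces must then also capture `s` by move instead
    // of borrowing it from the (consumed) parent closure.
    let f = async move || drop(s);
    let _ = f;
}
```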
@@ -399,16 +421,22 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
         );
 
         // Additionally, we can now constrain the coroutine's kind type.
-        let ty::Coroutine(_, coroutine_args) =
-            *self.typeck_results.borrow().expr_ty(body.value).kind()
-        else {
-            bug!();
-        };
-        self.demand_eqtype(
-            span,
-            coroutine_args.as_coroutine().kind_ty(),
-            Ty::from_coroutine_closure_kind(self.tcx, closure_kind),
-        );
+        //
+        // We only do this if `infer_kind`, because if we have constrained
+        // the kind from closure signature inference, the kind inferred
+        // for the inner coroutine may actually be more restrictive.
+        if infer_kind {
+            let ty::Coroutine(_, coroutine_args) =
+                *self.typeck_results.borrow().expr_ty(body.value).kind()
+            else {
+                bug!();
+            };
+            self.demand_eqtype(
+                span,
+                coroutine_args.as_coroutine().kind_ty(),
+                Ty::from_coroutine_closure_kind(self.tcx, closure_kind),
+            );
+        }
     }
 
     self.log_closure_min_capture_info(closure_def_id, span);
@@ -345,6 +345,7 @@ impl<'cx, 'tcx> Visitor<'tcx> for WritebackCx<'cx, 'tcx> {
             _ => {}
         };
 
+        self.visit_skipped_ref_pats(p.hir_id);
         self.visit_pat_adjustments(p.span, p.hir_id);
 
         self.visit_node_id(p.span, p.hir_id);
@@ -674,6 +675,14 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> {
         }
     }
 
+    #[instrument(skip(self), level = "debug")]
+    fn visit_skipped_ref_pats(&mut self, hir_id: hir::HirId) {
+        if self.fcx.typeck_results.borrow_mut().skipped_ref_pats_mut().remove(hir_id) {
+            debug!("node is a skipped ref pat");
+            self.typeck_results.skipped_ref_pats_mut().insert(hir_id);
+        }
+    }
+
     fn visit_liberated_fn_sigs(&mut self) {
         let fcx_typeck_results = self.fcx.typeck_results.borrow();
         assert_eq!(fcx_typeck_results.hir_owner, self.typeck_results.hir_owner);
@@ -11,6 +11,7 @@ use rustc_session::config::IncrementalStateAssertion;
 use rustc_session::Session;
 use rustc_span::ErrorGuaranteed;
 use std::path::{Path, PathBuf};
+use std::sync::Arc;
 
 use super::data::*;
 use super::file_format;
@@ -88,7 +89,7 @@ fn delete_dirty_work_product(sess: &Session, swp: SerializedWorkProduct) {
     work_product::delete_workproduct_files(sess, &swp.work_product);
 }
 
-fn load_dep_graph(sess: &Session) -> LoadResult<(SerializedDepGraph, WorkProductMap)> {
+fn load_dep_graph(sess: &Session) -> LoadResult<(Arc<SerializedDepGraph>, WorkProductMap)> {
     let prof = sess.prof.clone();
 
     if sess.opts.incremental.is_none() {
@@ -10,6 +10,7 @@ use rustc_serialize::opaque::{FileEncodeResult, FileEncoder};
 use rustc_serialize::Encodable as RustcEncodable;
 use rustc_session::Session;
 use std::fs;
+use std::sync::Arc;
 
 use super::data::*;
 use super::dirty_clean;
@@ -147,7 +148,7 @@ fn encode_query_cache(tcx: TyCtxt<'_>, encoder: FileEncoder) -> FileEncodeResult
 /// and moves it to the permanent dep-graph path
 pub(crate) fn build_dep_graph(
     sess: &Session,
-    prev_graph: SerializedDepGraph,
+    prev_graph: Arc<SerializedDepGraph>,
     prev_work_products: WorkProductMap,
 ) -> Option<DepGraph> {
     if sess.opts.incremental.is_none() {
@@ -843,7 +843,9 @@ impl<'tcx> InferCtxt<'tcx> {
     {
         let origin = &ObligationCause::dummy();
         self.probe(|_| {
-            self.at(origin, param_env).sub(DefineOpaqueTypes::No, expected, actual).is_ok()
+            // We're only answering whether there could be a subtyping relation, and with
+            // opaque types, "there could be one", via registering a hidden type.
+            self.at(origin, param_env).sub(DefineOpaqueTypes::Yes, expected, actual).is_ok()
         })
     }
@@ -852,7 +854,9 @@ impl<'tcx> InferCtxt<'tcx> {
         T: at::ToTrace<'tcx>,
     {
         let origin = &ObligationCause::dummy();
-        self.probe(|_| self.at(origin, param_env).eq(DefineOpaqueTypes::No, a, b).is_ok())
+        // We're only answering whether the types could be the same, and with
+        // opaque types, "they can be the same", via registering a hidden type.
+        self.probe(|_| self.at(origin, param_env).eq(DefineOpaqueTypes::Yes, a, b).is_ok())
     }
 
     #[instrument(skip(self), level = "debug")]
@@ -443,7 +443,7 @@ lint_non_local_definitions_impl = non-local `impl` definition, they should be av
         [one] `{$body_name}`
        *[other] `{$body_name}` and up {$depth} bodies
     }
-    .non_local = an `impl` definition is non-local if it is nested inside an item and neither the type nor the trait are at the same nesting level as the `impl` block
+    .non_local = an `impl` definition is non-local if it is nested inside an item and may impact type checking outside of that item. This can be the case if neither the trait or the self type are at the same nesting level as the `impl`
    .exception = one exception to the rule are anon-const (`const _: () = {"{"} ... {"}"}`) at top-level module and anon-const at the same nesting as the trait or type
    .const_anon = use a const-anon item to suppress this lint
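For reference, a minimal example (not from the commit) of the kind of `impl` this message describes: it is nested inside a body while both the trait and the self type live at module level, so the lint (made warn-by-default in the next hunk) fires:

```rust
struct Foo;
trait Trait {}

fn main() {
    // warning: non-local `impl` definition
    impl Trait for Foo {}
}
```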
@@ -1,6 +1,18 @@
-use rustc_hir::{def::DefKind, Body, Item, ItemKind, Node, Path, QPath, TyKind};
+use rustc_hir::{def::DefKind, Body, Item, ItemKind, Node, TyKind};
+use rustc_hir::{Path, QPath};
+use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use rustc_infer::infer::InferCtxt;
+use rustc_infer::traits::{Obligation, ObligationCause};
+use rustc_middle::query::Key;
+use rustc_middle::ty::{self, Binder, Ty, TyCtxt, TypeFoldable, TypeFolder};
+use rustc_middle::ty::{EarlyBinder, TraitRef, TypeSuperFoldable};
+use rustc_span::def_id::{DefId, LOCAL_CRATE};
+use rustc_span::Span;
 use rustc_span::{sym, symbol::kw, ExpnKind, MacroKind};
+use rustc_trait_selection::infer::TyCtxtInferExt;
+use rustc_trait_selection::traits::error_reporting::ambiguity::{
+    compute_applicable_impls_for_diagnostics, CandidateSource,
+};
 
 use crate::lints::{NonLocalDefinitionsCargoUpdateNote, NonLocalDefinitionsDiag};
 use crate::{LateContext, LateLintPass, LintContext};
@@ -35,7 +47,7 @@ declare_lint! {
     /// All nested bodies (functions, enum discriminant, array length, consts) (expect for
     /// `const _: Ty = { ... }` in top-level module, which is still undecided) are checked.
     pub NON_LOCAL_DEFINITIONS,
-    Allow,
+    Warn,
     "checks for non-local definitions",
     report_in_external_macro
 }
@@ -66,7 +78,8 @@ impl<'tcx> LateLintPass<'tcx> for NonLocalDefinitions {
             return;
         }
 
-        let parent = cx.tcx.parent(item.owner_id.def_id.into());
+        let def_id = item.owner_id.def_id.into();
+        let parent = cx.tcx.parent(def_id);
         let parent_def_kind = cx.tcx.def_kind(parent);
         let parent_opt_item_name = cx.tcx.opt_item_name(parent);
 
@@ -121,6 +134,7 @@ impl<'tcx> LateLintPass<'tcx> for NonLocalDefinitions {
                     None
                 };
 
+                // Part 1: Is the Self type local?
                 let self_ty_has_local_parent = match impl_.self_ty.kind {
                     TyKind::Path(QPath::Resolved(_, ty_path)) => {
                         path_has_local_parent(ty_path, cx, parent, parent_parent)
@@ -150,41 +164,70 @@ impl<'tcx> LateLintPass<'tcx> for NonLocalDefinitions {
                     | TyKind::Err(_) => false,
                 };
 
+                if self_ty_has_local_parent {
+                    return;
+                }
+
+                // Part 2: Is the Trait local?
                 let of_trait_has_local_parent = impl_
                     .of_trait
                     .map(|of_trait| path_has_local_parent(of_trait.path, cx, parent, parent_parent))
                     .unwrap_or(false);
 
-                // If none of them have a local parent (LOGICAL NOR) this means that
-                // this impl definition is a non-local definition and so we lint on it.
-                if !(self_ty_has_local_parent || of_trait_has_local_parent) {
-                    let const_anon = if self.body_depth == 1
-                        && parent_def_kind == DefKind::Const
-                        && parent_opt_item_name != Some(kw::Underscore)
-                        && let Some(parent) = parent.as_local()
-                        && let Node::Item(item) = cx.tcx.hir_node_by_def_id(parent)
-                        && let ItemKind::Const(ty, _, _) = item.kind
-                        && let TyKind::Tup(&[]) = ty.kind
-                    {
-                        Some(item.ident.span)
-                    } else {
-                        None
-                    };
-
-                    cx.emit_span_lint(
-                        NON_LOCAL_DEFINITIONS,
-                        item.span,
-                        NonLocalDefinitionsDiag::Impl {
-                            depth: self.body_depth,
-                            body_kind_descr: cx.tcx.def_kind_descr(parent_def_kind, parent),
-                            body_name: parent_opt_item_name
-                                .map(|s| s.to_ident_string())
-                                .unwrap_or_else(|| "<unnameable>".to_string()),
-                            cargo_update: cargo_update(),
-                            const_anon,
-                        },
-                    )
-                }
+                if of_trait_has_local_parent {
+                    return;
+                }
+
+                // Part 3: Is the impl definition leaking outside its defining scope?
+                //
+                // We always consider inherent impls to be leaking.
+                let impl_has_enough_non_local_candidates = cx
+                    .tcx
+                    .impl_trait_ref(def_id)
+                    .map(|binder| {
+                        impl_trait_ref_has_enough_non_local_candidates(
+                            cx.tcx,
+                            item.span,
+                            def_id,
+                            binder,
+                            |did| did_has_local_parent(did, cx.tcx, parent, parent_parent),
+                        )
+                    })
+                    .unwrap_or(false);
+
+                if impl_has_enough_non_local_candidates {
+                    return;
+                }
+
+                // Get the span of the parent const item ident (if it's not a const anon).
+                //
+                // Used to suggest changing the const item to a const anon.
+                let span_for_const_anon_suggestion = if self.body_depth == 1
+                    && parent_def_kind == DefKind::Const
+                    && parent_opt_item_name != Some(kw::Underscore)
+                    && let Some(parent) = parent.as_local()
+                    && let Node::Item(item) = cx.tcx.hir_node_by_def_id(parent)
+                    && let ItemKind::Const(ty, _, _) = item.kind
+                    && let TyKind::Tup(&[]) = ty.kind
+                {
+                    Some(item.ident.span)
+                } else {
+                    None
+                };
+
+                cx.emit_span_lint(
+                    NON_LOCAL_DEFINITIONS,
+                    item.span,
+                    NonLocalDefinitionsDiag::Impl {
+                        depth: self.body_depth,
+                        body_kind_descr: cx.tcx.def_kind_descr(parent_def_kind, parent),
+                        body_name: parent_opt_item_name
+                            .map(|s| s.to_ident_string())
+                            .unwrap_or_else(|| "<unnameable>".to_string()),
+                        cargo_update: cargo_update(),
+                        const_anon: span_for_const_anon_suggestion,
+                    },
+                )
             }
             ItemKind::Macro(_macro, MacroKind::Bang)
                 if cx.tcx.has_attr(item.owner_id.def_id, sym::macro_export) =>
@@ -207,6 +250,81 @@ impl<'tcx> LateLintPass<'tcx> for NonLocalDefinitions {
     }
 }
 
+// Detecting if the impl definition is leaking outside of its defining scope.
+//
+// Rule: for each impl, instantiate all local types with inference vars and
+// then assemble candidates for that goal; if there are more than 1 (non-private
+// impls), it does not leak.
+//
+// https://github.com/rust-lang/rust/issues/121621#issuecomment-1976826895
+fn impl_trait_ref_has_enough_non_local_candidates<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    infer_span: Span,
+    trait_def_id: DefId,
+    binder: EarlyBinder<TraitRef<'tcx>>,
+    mut did_has_local_parent: impl FnMut(DefId) -> bool,
+) -> bool {
+    let infcx = tcx.infer_ctxt().build();
+    let trait_ref = binder.instantiate(tcx, infcx.fresh_args_for_item(infer_span, trait_def_id));
|
||||
|
||||
let trait_ref = trait_ref.fold_with(&mut ReplaceLocalTypesWithInfer {
|
||||
infcx: &infcx,
|
||||
infer_span,
|
||||
did_has_local_parent: &mut did_has_local_parent,
|
||||
});
|
||||
|
||||
let poly_trait_obligation = Obligation::new(
|
||||
tcx,
|
||||
ObligationCause::dummy(),
|
||||
ty::ParamEnv::empty(),
|
||||
Binder::dummy(trait_ref),
|
||||
);
|
||||
|
||||
let ambiguities = compute_applicable_impls_for_diagnostics(&infcx, &poly_trait_obligation);
|
||||
|
||||
let mut it = ambiguities.iter().filter(|ambi| match ambi {
|
||||
CandidateSource::DefId(did) => !did_has_local_parent(*did),
|
||||
CandidateSource::ParamEnv(_) => unreachable!(),
|
||||
});
|
||||
|
||||
let _ = it.next();
|
||||
it.next().is_some()
|
||||
}
|
||||
|
||||
/// Replace every local type by inference variable.
|
||||
///
|
||||
/// ```text
|
||||
/// <Global<Local> as std::cmp::PartialEq<Global<Local>>>
|
||||
/// to
|
||||
/// <Global<_> as std::cmp::PartialEq<Global<_>>>
|
||||
/// ```
|
||||
struct ReplaceLocalTypesWithInfer<'a, 'tcx, F: FnMut(DefId) -> bool> {
|
||||
infcx: &'a InferCtxt<'tcx>,
|
||||
did_has_local_parent: F,
|
||||
infer_span: Span,
|
||||
}
|
||||
|
||||
impl<'a, 'tcx, F: FnMut(DefId) -> bool> TypeFolder<TyCtxt<'tcx>>
|
||||
for ReplaceLocalTypesWithInfer<'a, 'tcx, F>
|
||||
{
|
||||
fn interner(&self) -> TyCtxt<'tcx> {
|
||||
self.infcx.tcx
|
||||
}
|
||||
|
||||
fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
|
||||
if let Some(ty_did) = t.ty_def_id()
|
||||
&& (self.did_has_local_parent)(ty_did)
|
||||
{
|
||||
self.infcx.next_ty_var(TypeVariableOrigin {
|
||||
kind: TypeVariableOriginKind::TypeInference,
|
||||
span: self.infer_span,
|
||||
})
|
||||
} else {
|
||||
t.super_fold_with(self)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Given a path and a parent impl def id, this checks if the if parent resolution
|
||||
/// def id correspond to the def id of the parent impl definition.
|
||||
///
|
||||
@ -216,16 +334,29 @@ impl<'tcx> LateLintPass<'tcx> for NonLocalDefinitions {
|
||||
/// std::convert::PartialEq<Foo<Bar>>
|
||||
/// ^^^^^^^^^^^^^^^^^^^^^^^
|
||||
/// ```
|
||||
#[inline]
|
||||
fn path_has_local_parent(
|
||||
path: &Path<'_>,
|
||||
cx: &LateContext<'_>,
|
||||
impl_parent: DefId,
|
||||
impl_parent_parent: Option<DefId>,
|
||||
) -> bool {
|
||||
path.res.opt_def_id().is_some_and(|did| {
|
||||
did.is_local() && {
|
||||
let res_parent = cx.tcx.parent(did);
|
||||
res_parent == impl_parent || Some(res_parent) == impl_parent_parent
|
||||
}
|
||||
})
|
||||
path.res
|
||||
.opt_def_id()
|
||||
.is_some_and(|did| did_has_local_parent(did, cx.tcx, impl_parent, impl_parent_parent))
|
||||
}
|
||||
|
||||
/// Given a def id and a parent impl def id, this checks if the parent
|
||||
/// def id correspond to the def id of the parent impl definition.
|
||||
#[inline]
|
||||
fn did_has_local_parent(
|
||||
did: DefId,
|
||||
tcx: TyCtxt<'_>,
|
||||
impl_parent: DefId,
|
||||
impl_parent_parent: Option<DefId>,
|
||||
) -> bool {
|
||||
did.is_local() && {
|
||||
let res_parent = tcx.parent(did);
|
||||
res_parent == impl_parent || Some(res_parent) == impl_parent_parent
|
||||
}
|
||||
}
|
||||
|
@ -1,8 +1,8 @@
|
||||
#include "LLVMWrapper.h"
|
||||
#include "llvm/ADT/ArrayRef.h"
|
||||
#include "llvm/ProfileData/Coverage/CoverageMapping.h"
|
||||
#include "llvm/ProfileData/Coverage/CoverageMappingWriter.h"
|
||||
#include "llvm/ProfileData/InstrProf.h"
|
||||
#include "llvm/ADT/ArrayRef.h"
|
||||
|
||||
#include <iostream>
|
||||
|
||||
@ -103,35 +103,30 @@ fromRust(LLVMRustCounterExprKind Kind) {
|
||||
}
|
||||
|
||||
extern "C" void LLVMRustCoverageWriteFilenamesSectionToBuffer(
|
||||
const char *const Filenames[],
|
||||
size_t FilenamesLen,
|
||||
const size_t *const Lengths,
|
||||
size_t LengthsLen,
|
||||
const char *const Filenames[], size_t FilenamesLen, // String start pointers
|
||||
const size_t *const Lengths, size_t LengthsLen, // Corresponding lengths
|
||||
RustStringRef BufferOut) {
|
||||
if (FilenamesLen != LengthsLen) {
|
||||
report_fatal_error(
|
||||
"Mismatched lengths in LLVMRustCoverageWriteFilenamesSectionToBuffer");
|
||||
}
|
||||
|
||||
SmallVector<std::string,32> FilenameRefs;
|
||||
SmallVector<std::string, 32> FilenameRefs;
|
||||
FilenameRefs.reserve(FilenamesLen);
|
||||
for (size_t i = 0; i < FilenamesLen; i++) {
|
||||
FilenameRefs.emplace_back(Filenames[i], Lengths[i]);
|
||||
}
|
||||
auto FilenamesWriter =
|
||||
coverage::CoverageFilenamesSectionWriter(ArrayRef<std::string>(FilenameRefs));
|
||||
auto FilenamesWriter = coverage::CoverageFilenamesSectionWriter(
|
||||
ArrayRef<std::string>(FilenameRefs));
|
||||
auto OS = RawRustStringOstream(BufferOut);
|
||||
FilenamesWriter.write(OS);
|
||||
}
|
||||
|
||||
extern "C" void LLVMRustCoverageWriteMappingToBuffer(
|
||||
const unsigned *VirtualFileMappingIDs,
|
||||
unsigned NumVirtualFileMappingIDs,
|
||||
const LLVMRustCounterExpression *RustExpressions,
|
||||
unsigned NumExpressions,
|
||||
const unsigned *VirtualFileMappingIDs, unsigned NumVirtualFileMappingIDs,
|
||||
const LLVMRustCounterExpression *RustExpressions, unsigned NumExpressions,
|
||||
const LLVMRustCounterMappingRegion *RustMappingRegions,
|
||||
unsigned NumMappingRegions,
|
||||
RustStringRef BufferOut) {
|
||||
unsigned NumMappingRegions, RustStringRef BufferOut) {
|
||||
// Convert from FFI representation to LLVM representation.
|
||||
SmallVector<coverage::CounterMappingRegion, 0> MappingRegions;
|
||||
MappingRegions.reserve(NumMappingRegions);
|
||||
@ -142,7 +137,7 @@ extern "C" void LLVMRustCoverageWriteMappingToBuffer(
|
||||
#if LLVM_VERSION_GE(18, 0) && LLVM_VERSION_LT(19, 0)
|
||||
coverage::CounterMappingRegion::MCDCParameters{},
|
||||
#endif
|
||||
Region.FileID, Region.ExpandedFileID,
|
||||
Region.FileID, Region.ExpandedFileID, // File IDs, then region info.
|
||||
Region.LineStart, Region.ColumnStart, Region.LineEnd, Region.ColumnEnd,
|
||||
fromRust(Region.Kind));
|
||||
}
|
||||
@ -158,29 +153,25 @@ extern "C" void LLVMRustCoverageWriteMappingToBuffer(
|
||||
|
||||
auto CoverageMappingWriter = coverage::CoverageMappingWriter(
|
||||
ArrayRef<unsigned>(VirtualFileMappingIDs, NumVirtualFileMappingIDs),
|
||||
Expressions,
|
||||
MappingRegions);
|
||||
Expressions, MappingRegions);
|
||||
auto OS = RawRustStringOstream(BufferOut);
|
||||
CoverageMappingWriter.write(OS);
|
||||
}
|
||||
|
||||
extern "C" LLVMValueRef LLVMRustCoverageCreatePGOFuncNameVar(
|
||||
LLVMValueRef F,
|
||||
const char *FuncName,
|
||||
size_t FuncNameLen) {
|
||||
extern "C" LLVMValueRef
|
||||
LLVMRustCoverageCreatePGOFuncNameVar(LLVMValueRef F, const char *FuncName,
|
||||
size_t FuncNameLen) {
|
||||
auto FuncNameRef = StringRef(FuncName, FuncNameLen);
|
||||
return wrap(createPGOFuncNameVar(*cast<Function>(unwrap(F)), FuncNameRef));
|
||||
}
|
||||
|
||||
extern "C" uint64_t LLVMRustCoverageHashByteArray(
|
||||
const char *Bytes,
|
||||
size_t NumBytes) {
|
||||
extern "C" uint64_t LLVMRustCoverageHashByteArray(const char *Bytes,
|
||||
size_t NumBytes) {
|
||||
auto StrRef = StringRef(Bytes, NumBytes);
|
||||
return IndexedInstrProf::ComputeHash(StrRef);
|
||||
}
|
||||
|
||||
static void WriteSectionNameToString(LLVMModuleRef M,
|
||||
InstrProfSectKind SK,
|
||||
static void WriteSectionNameToString(LLVMModuleRef M, InstrProfSectKind SK,
|
||||
RustStringRef Str) {
|
||||
auto TargetTriple = Triple(unwrap(M)->getTargetTriple());
|
||||
auto name = getInstrProfSectionName(SK, TargetTriple.getObjectFormat());
|
||||
@ -193,8 +184,9 @@ extern "C" void LLVMRustCoverageWriteMapSectionNameToString(LLVMModuleRef M,
|
||||
WriteSectionNameToString(M, IPSK_covmap, Str);
|
||||
}
|
||||
|
||||
extern "C" void LLVMRustCoverageWriteFuncSectionNameToString(LLVMModuleRef M,
|
||||
RustStringRef Str) {
|
||||
extern "C" void
|
||||
LLVMRustCoverageWriteFuncSectionNameToString(LLVMModuleRef M,
|
||||
RustStringRef Str) {
|
||||
WriteSectionNameToString(M, IPSK_covfun, Str);
|
||||
}
|
||||
|
||||
@ -205,5 +197,8 @@ extern "C" void LLVMRustCoverageWriteMappingVarNameToString(RustStringRef Str) {
|
||||
}
|
||||
|
||||
extern "C" uint32_t LLVMRustCoverageMappingVersion() {
|
||||
return coverage::CovMapVersion::Version6;
|
||||
// This should always be `CurrentVersion`, because that's the version LLVM
|
||||
// will use when encoding the data we give it. If for some reason we ever
|
||||
// want to override the version number we _emit_, do it on the Rust side.
|
||||
return coverage::CovMapVersion::CurrentVersion;
|
||||
}
|
||||
|
@ -194,9 +194,10 @@ impl DepNodeExt for DepNode {
|
||||
/// has been removed.
|
||||
fn extract_def_id(&self, tcx: TyCtxt<'_>) -> Option<DefId> {
|
||||
if tcx.fingerprint_style(self.kind) == FingerprintStyle::DefPathHash {
|
||||
Some(tcx.def_path_hash_to_def_id(DefPathHash(self.hash.into()), &mut || {
|
||||
panic!("Failed to extract DefId: {:?} {}", self.kind, self.hash)
|
||||
}))
|
||||
Some(tcx.def_path_hash_to_def_id(
|
||||
DefPathHash(self.hash.into()),
|
||||
&("Failed to extract DefId", self.kind, self.hash),
|
||||
))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
@ -390,9 +391,10 @@ impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for HirId {
|
||||
let (local_hash, local_id) = Fingerprint::from(dep_node.hash).split();
|
||||
let def_path_hash = DefPathHash::new(tcx.stable_crate_id(LOCAL_CRATE), local_hash);
|
||||
let def_id = tcx
|
||||
.def_path_hash_to_def_id(def_path_hash, &mut || {
|
||||
panic!("Failed to extract HirId: {:?} {}", dep_node.kind, dep_node.hash)
|
||||
})
|
||||
.def_path_hash_to_def_id(
|
||||
def_path_hash,
|
||||
&("Failed to extract HirId", dep_node.kind, dep_node.hash),
|
||||
)
|
||||
.expect_local();
|
||||
let local_id = local_id
|
||||
.as_u64()
|
||||
|
@ -179,7 +179,7 @@ pub fn provide(providers: &mut Providers) {
|
||||
.parenting
|
||||
.get(&owner_id.def_id)
|
||||
.copied()
|
||||
.unwrap_or(ItemLocalId::from_u32(0)),
|
||||
.unwrap_or(ItemLocalId::ZERO),
|
||||
}
|
||||
})
|
||||
};
|
||||
|
@ -33,10 +33,6 @@ rustc_index::newtype_index! {
|
||||
pub struct CounterId {}
|
||||
}
|
||||
|
||||
impl CounterId {
|
||||
pub const START: Self = Self::ZERO;
|
||||
}
|
||||
|
||||
rustc_index::newtype_index! {
|
||||
/// ID of a coverage-counter expression. Values ascend from 0.
|
||||
///
|
||||
@ -55,10 +51,6 @@ rustc_index::newtype_index! {
|
||||
pub struct ExpressionId {}
|
||||
}
|
||||
|
||||
impl ExpressionId {
|
||||
pub const START: Self = Self::ZERO;
|
||||
}
|
||||
|
||||
/// Enum that can hold a constant zero value, the ID of an physical coverage
|
||||
/// counter, or the ID of a coverage-counter expression.
|
||||
///
|
||||
|
@ -341,7 +341,7 @@ macro_rules! make_mir_visitor {
|
||||
|
||||
ty::InstanceDef::Intrinsic(_def_id) |
|
||||
ty::InstanceDef::VTableShim(_def_id) |
|
||||
ty::InstanceDef::ReifyShim(_def_id) |
|
||||
ty::InstanceDef::ReifyShim(_def_id, _) |
|
||||
ty::InstanceDef::Virtual(_def_id, _) |
|
||||
ty::InstanceDef::ThreadLocalShim(_def_id) |
|
||||
ty::InstanceDef::ClosureOnceShim { call_once: _def_id, track_caller: _ } |
|
||||
|
@ -737,9 +737,10 @@ impl<'a, 'tcx> SpanDecoder for CacheDecoder<'a, 'tcx> {
|
||||
// If we get to this point, then all of the query inputs were green,
|
||||
// which means that the definition with this hash is guaranteed to
|
||||
// still exist in the current compilation session.
|
||||
self.tcx.def_path_hash_to_def_id(def_path_hash, &mut || {
|
||||
panic!("Failed to convert DefPathHash {def_path_hash:?}")
|
||||
})
|
||||
self.tcx.def_path_hash_to_def_id(
|
||||
def_path_hash,
|
||||
&("Failed to convert DefPathHash", def_path_hash),
|
||||
)
|
||||
}
|
||||
|
||||
fn decode_attr_id(&mut self) -> rustc_span::AttrId {
|
||||
|
@ -121,8 +121,6 @@ pub enum ProbeStep<'tcx> {
|
||||
/// used whenever there are multiple candidates to prove the
|
||||
/// current goalby .
|
||||
NestedProbe(Probe<'tcx>),
|
||||
CommitIfOkStart,
|
||||
CommitIfOkSuccess,
|
||||
}
|
||||
|
||||
/// What kind of probe we're in. In case the probe represents a candidate, or
|
||||
@ -132,6 +130,8 @@ pub enum ProbeStep<'tcx> {
|
||||
pub enum ProbeKind<'tcx> {
|
||||
/// The root inference context while proving a goal.
|
||||
Root { result: QueryResult<'tcx> },
|
||||
/// Trying to normalize an alias by at least one step in `NormalizesTo`.
|
||||
TryNormalizeNonRigid { result: QueryResult<'tcx> },
|
||||
/// Probe entered when normalizing the self ty during candidate assembly
|
||||
NormalizedSelfTyAssembly,
|
||||
/// Some candidate to prove the current goal.
|
||||
@ -143,9 +143,6 @@ pub enum ProbeKind<'tcx> {
|
||||
/// Used in the probe that wraps normalizing the non-self type for the unsize
|
||||
/// trait, which is also structurally matched on.
|
||||
UnsizeAssembly,
|
||||
/// A call to `EvalCtxt::commit_if_ok` which failed, causing the work
|
||||
/// to be discarded.
|
||||
CommitIfOk,
|
||||
/// During upcasting from some source object to target object type, used to
|
||||
/// do a probe to find out what projection type(s) may be used to prove that
|
||||
/// the source type upholds all of the target type's object bounds.
|
||||
|
@ -100,6 +100,9 @@ impl<'a, 'b> ProofTreeFormatter<'a, 'b> {
|
||||
ProbeKind::Root { result } => {
|
||||
write!(self.f, "ROOT RESULT: {result:?}")
|
||||
}
|
||||
ProbeKind::TryNormalizeNonRigid { result } => {
|
||||
write!(self.f, "TRY NORMALIZE NON-RIGID: {result:?}")
|
||||
}
|
||||
ProbeKind::NormalizedSelfTyAssembly => {
|
||||
write!(self.f, "NORMALIZING SELF TY FOR ASSEMBLY:")
|
||||
}
|
||||
@ -109,9 +112,6 @@ impl<'a, 'b> ProofTreeFormatter<'a, 'b> {
|
||||
ProbeKind::UpcastProjectionCompatibility => {
|
||||
write!(self.f, "PROBING FOR PROJECTION COMPATIBILITY FOR UPCASTING:")
|
||||
}
|
||||
ProbeKind::CommitIfOk => {
|
||||
write!(self.f, "COMMIT_IF_OK:")
|
||||
}
|
||||
ProbeKind::MiscCandidate { name, result } => {
|
||||
write!(self.f, "CANDIDATE {name}: {result:?}")
|
||||
}
|
||||
@ -132,8 +132,6 @@ impl<'a, 'b> ProofTreeFormatter<'a, 'b> {
|
||||
}
|
||||
ProbeStep::EvaluateGoals(eval) => this.format_added_goals_evaluation(eval)?,
|
||||
ProbeStep::NestedProbe(probe) => this.format_probe(probe)?,
|
||||
ProbeStep::CommitIfOkStart => writeln!(this.f, "COMMIT_IF_OK START")?,
|
||||
ProbeStep::CommitIfOkSuccess => writeln!(this.f, "COMMIT_IF_OK SUCCESS")?,
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
|
@ -1121,7 +1121,11 @@ impl<'tcx> TyCtxt<'tcx> {
|
||||
/// Converts a `DefPathHash` to its corresponding `DefId` in the current compilation
|
||||
/// session, if it still exists. This is used during incremental compilation to
|
||||
/// turn a deserialized `DefPathHash` into its current `DefId`.
|
||||
pub fn def_path_hash_to_def_id(self, hash: DefPathHash, err: &mut dyn FnMut() -> !) -> DefId {
|
||||
pub fn def_path_hash_to_def_id(
|
||||
self,
|
||||
hash: DefPathHash,
|
||||
err_msg: &dyn std::fmt::Debug,
|
||||
) -> DefId {
|
||||
debug!("def_path_hash_to_def_id({:?})", hash);
|
||||
|
||||
let stable_crate_id = hash.stable_crate_id();
|
||||
@ -1129,7 +1133,11 @@ impl<'tcx> TyCtxt<'tcx> {
|
||||
// If this is a DefPathHash from the local crate, we can look up the
|
||||
// DefId in the tcx's `Definitions`.
|
||||
if stable_crate_id == self.stable_crate_id(LOCAL_CRATE) {
|
||||
self.untracked.definitions.read().local_def_path_hash_to_def_id(hash, err).to_def_id()
|
||||
self.untracked
|
||||
.definitions
|
||||
.read()
|
||||
.local_def_path_hash_to_def_id(hash, err_msg)
|
||||
.to_def_id()
|
||||
} else {
|
||||
// If this is a DefPathHash from an upstream crate, let the CrateStore map
|
||||
// it to a DefId.
|
||||
|
@ -31,6 +31,28 @@ pub struct Instance<'tcx> {
|
||||
pub args: GenericArgsRef<'tcx>,
|
||||
}
|
||||
|
||||
/// Describes why a `ReifyShim` was created. This is needed to distingish a ReifyShim created to
|
||||
/// adjust for things like `#[track_caller]` in a vtable from a `ReifyShim` created to produce a
|
||||
/// function pointer from a vtable entry.
|
||||
/// Currently, this is only used when KCFI is enabled, as only KCFI needs to treat those two
|
||||
/// `ReifyShim`s differently.
|
||||
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
|
||||
#[derive(TyEncodable, TyDecodable, HashStable)]
|
||||
pub enum ReifyReason {
|
||||
/// The `ReifyShim` was created to produce a function pointer. This happens when:
|
||||
/// * A vtable entry is directly converted to a function call (e.g. creating a fn ptr from a
|
||||
/// method on a `dyn` object).
|
||||
/// * A function with `#[track_caller]` is converted to a function pointer
|
||||
/// * If KCFI is enabled, creating a function pointer from a method on an object-safe trait.
|
||||
/// This includes the case of converting `::call`-like methods on closure-likes to function
|
||||
/// pointers.
|
||||
FnPtr,
|
||||
/// This `ReifyShim` was created to populate a vtable. Currently, this happens when a
|
||||
/// `#[track_caller]` mismatch occurs between the implementation of a method and the method.
|
||||
/// This includes the case of `::call`-like methods in closure-likes' vtables.
|
||||
Vtable,
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
|
||||
#[derive(TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable, Lift)]
|
||||
pub enum InstanceDef<'tcx> {
|
||||
@ -67,7 +89,13 @@ pub enum InstanceDef<'tcx> {
|
||||
/// Because this is a required part of the function's ABI but can't be tracked
|
||||
/// as a property of the function pointer, we use a single "caller location"
|
||||
/// (the definition of the function itself).
|
||||
ReifyShim(DefId),
|
||||
///
|
||||
/// The second field encodes *why* this shim was created. This allows distinguishing between
|
||||
/// a `ReifyShim` that appears in a vtable vs one that appears as a function pointer.
|
||||
///
|
||||
/// This field will only be populated if we are compiling in a mode that needs these shims
|
||||
/// to be separable, currently only when KCFI is enabled.
|
||||
ReifyShim(DefId, Option<ReifyReason>),
|
||||
|
||||
/// `<fn() as FnTrait>::call_*` (generated `FnTrait` implementation for `fn()` pointers).
|
||||
///
|
||||
@ -194,7 +222,7 @@ impl<'tcx> InstanceDef<'tcx> {
|
||||
match self {
|
||||
InstanceDef::Item(def_id)
|
||||
| InstanceDef::VTableShim(def_id)
|
||||
| InstanceDef::ReifyShim(def_id)
|
||||
| InstanceDef::ReifyShim(def_id, _)
|
||||
| InstanceDef::FnPtrShim(def_id, _)
|
||||
| InstanceDef::Virtual(def_id, _)
|
||||
| InstanceDef::Intrinsic(def_id)
|
||||
@ -354,7 +382,9 @@ fn fmt_instance(
|
||||
match instance.def {
|
||||
InstanceDef::Item(_) => Ok(()),
|
||||
InstanceDef::VTableShim(_) => write!(f, " - shim(vtable)"),
|
||||
InstanceDef::ReifyShim(_) => write!(f, " - shim(reify)"),
|
||||
InstanceDef::ReifyShim(_, None) => write!(f, " - shim(reify)"),
|
||||
InstanceDef::ReifyShim(_, Some(ReifyReason::FnPtr)) => write!(f, " - shim(reify-fnptr)"),
|
||||
InstanceDef::ReifyShim(_, Some(ReifyReason::Vtable)) => write!(f, " - shim(reify-vtable)"),
|
||||
InstanceDef::ThreadLocalShim(_) => write!(f, " - shim(tls)"),
|
||||
InstanceDef::Intrinsic(_) => write!(f, " - intrinsic"),
|
||||
InstanceDef::Virtual(_, num) => write!(f, " - virtual#{num}"),
|
||||
@ -476,15 +506,34 @@ impl<'tcx> Instance<'tcx> {
|
||||
debug!("resolve(def_id={:?}, args={:?})", def_id, args);
|
||||
// Use either `resolve_closure` or `resolve_for_vtable`
|
||||
assert!(!tcx.is_closure_like(def_id), "Called `resolve_for_fn_ptr` on closure: {def_id:?}");
|
||||
let reason = tcx.sess.is_sanitizer_kcfi_enabled().then_some(ReifyReason::FnPtr);
|
||||
Instance::resolve(tcx, param_env, def_id, args).ok().flatten().map(|mut resolved| {
|
||||
match resolved.def {
|
||||
InstanceDef::Item(def) if resolved.def.requires_caller_location(tcx) => {
|
||||
debug!(" => fn pointer created for function with #[track_caller]");
|
||||
resolved.def = InstanceDef::ReifyShim(def);
|
||||
resolved.def = InstanceDef::ReifyShim(def, reason);
|
||||
}
|
||||
InstanceDef::Virtual(def_id, _) => {
|
||||
debug!(" => fn pointer created for virtual call");
|
||||
resolved.def = InstanceDef::ReifyShim(def_id);
|
||||
resolved.def = InstanceDef::ReifyShim(def_id, reason);
|
||||
}
|
||||
// Reify `Trait::method` implementations if KCFI is enabled
|
||||
// FIXME(maurer) only reify it if it is a vtable-safe function
|
||||
_ if tcx.sess.is_sanitizer_kcfi_enabled()
|
||||
&& tcx.associated_item(def_id).trait_item_def_id.is_some() =>
|
||||
{
|
||||
// If this function could also go in a vtable, we need to `ReifyShim` it with
|
||||
// KCFI because it can only attach one type per function.
|
||||
resolved.def = InstanceDef::ReifyShim(resolved.def_id(), reason)
|
||||
}
|
||||
// Reify `::call`-like method implementations if KCFI is enabled
|
||||
_ if tcx.sess.is_sanitizer_kcfi_enabled()
|
||||
&& tcx.is_closure_like(resolved.def_id()) =>
|
||||
{
|
||||
// Reroute through a reify via the *unresolved* instance. The resolved one can't
|
||||
// be directly reified because it's closure-like. The reify can handle the
|
||||
// unresolved instance.
|
||||
resolved = Instance { def: InstanceDef::ReifyShim(def_id, reason), args }
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
@ -508,6 +557,7 @@ impl<'tcx> Instance<'tcx> {
|
||||
debug!(" => associated item with unsizeable self: Self");
|
||||
Some(Instance { def: InstanceDef::VTableShim(def_id), args })
|
||||
} else {
|
||||
let reason = tcx.sess.is_sanitizer_kcfi_enabled().then_some(ReifyReason::Vtable);
|
||||
Instance::resolve(tcx, param_env, def_id, args).ok().flatten().map(|mut resolved| {
|
||||
match resolved.def {
|
||||
InstanceDef::Item(def) => {
|
||||
@ -544,18 +594,18 @@ impl<'tcx> Instance<'tcx> {
|
||||
// Create a shim for the `FnOnce/FnMut/Fn` method we are calling
|
||||
// - unlike functions, invoking a closure always goes through a
|
||||
// trait.
|
||||
resolved = Instance { def: InstanceDef::ReifyShim(def_id), args };
|
||||
resolved = Instance { def: InstanceDef::ReifyShim(def_id, reason), args };
|
||||
} else {
|
||||
debug!(
|
||||
" => vtable fn pointer created for function with #[track_caller]: {:?}", def
|
||||
);
|
||||
resolved.def = InstanceDef::ReifyShim(def);
|
||||
resolved.def = InstanceDef::ReifyShim(def, reason);
|
||||
}
|
||||
}
|
||||
}
|
||||
InstanceDef::Virtual(def_id, _) => {
|
||||
debug!(" => vtable fn pointer created for virtual call");
|
||||
resolved.def = InstanceDef::ReifyShim(def_id);
|
||||
resolved.def = InstanceDef::ReifyShim(def_id, reason)
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
|
@ -88,7 +88,7 @@ pub use self::context::{
|
||||
tls, CtxtInterners, CurrentGcx, DeducedParamAttrs, Feed, FreeRegionInfo, GlobalCtxt, Lift,
|
||||
TyCtxt, TyCtxtFeed,
|
||||
};
|
||||
pub use self::instance::{Instance, InstanceDef, ShortInstance, UnusedGenericParams};
|
||||
pub use self::instance::{Instance, InstanceDef, ReifyReason, ShortInstance, UnusedGenericParams};
|
||||
pub use self::list::List;
|
||||
pub use self::parameterized::ParameterizedOverTcx;
|
||||
pub use self::predicate::{
|
||||
|
@ -49,7 +49,7 @@ impl<'tcx> TyCtxt<'tcx> {
|
||||
let value = self.erase_regions(value);
|
||||
debug!(?value);
|
||||
|
||||
if !value.has_projections() {
|
||||
if !value.has_aliases() {
|
||||
value
|
||||
} else {
|
||||
value.fold_with(&mut NormalizeAfterErasingRegionsFolder { tcx: self, param_env })
|
||||
@ -81,7 +81,7 @@ impl<'tcx> TyCtxt<'tcx> {
|
||||
let value = self.erase_regions(value);
|
||||
debug!(?value);
|
||||
|
||||
if !value.has_projections() {
|
||||
if !value.has_aliases() {
|
||||
Ok(value)
|
||||
} else {
|
||||
let mut folder = TryNormalizeAfterErasingRegionsFolder::new(self, param_env);
|
||||
|
@ -449,6 +449,7 @@ TrivialTypeTraversalAndLiftImpls! {
|
||||
crate::ty::ClosureKind,
|
||||
crate::ty::ParamConst,
|
||||
crate::ty::ParamTy,
|
||||
crate::ty::instance::ReifyReason,
|
||||
interpret::AllocId,
|
||||
interpret::CtfeProvenance,
|
||||
interpret::Scalar,
|
||||
|
@ -96,6 +96,10 @@ pub struct TypeckResults<'tcx> {
|
||||
/// <https://github.com/rust-lang/rfcs/blob/master/text/2005-match-ergonomics.md#definitions>
|
||||
pat_adjustments: ItemLocalMap<Vec<Ty<'tcx>>>,
|
||||
|
||||
/// Set of reference patterns that match against a match-ergonomics inserted reference
|
||||
/// (as opposed to against a reference in the scrutinee type).
|
||||
skipped_ref_pats: ItemLocalSet,
|
||||
|
||||
/// Records the reasons that we picked the kind of each closure;
|
||||
/// not all closures are present in the map.
|
||||
closure_kind_origins: ItemLocalMap<(Span, HirPlace<'tcx>)>,
|
||||
@ -228,6 +232,7 @@ impl<'tcx> TypeckResults<'tcx> {
|
||||
adjustments: Default::default(),
|
||||
pat_binding_modes: Default::default(),
|
||||
pat_adjustments: Default::default(),
|
||||
skipped_ref_pats: Default::default(),
|
||||
closure_kind_origins: Default::default(),
|
||||
liberated_fn_sigs: Default::default(),
|
||||
fru_field_types: Default::default(),
|
||||
@ -435,6 +440,14 @@ impl<'tcx> TypeckResults<'tcx> {
|
||||
LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.pat_adjustments }
|
||||
}
|
||||
|
||||
pub fn skipped_ref_pats(&self) -> LocalSetInContext<'_> {
|
||||
LocalSetInContext { hir_owner: self.hir_owner, data: &self.skipped_ref_pats }
|
||||
}
|
||||
|
||||
pub fn skipped_ref_pats_mut(&mut self) -> LocalSetInContextMut<'_> {
|
||||
LocalSetInContextMut { hir_owner: self.hir_owner, data: &mut self.skipped_ref_pats }
|
||||
}
|
||||
|
||||
/// Does the pattern recursively contain a `ref mut` binding in it?
|
||||
///
|
||||
/// This is used to determined whether a `deref` pattern should emit a `Deref`
|
||||
@ -629,6 +642,49 @@ impl<'a, V> LocalTableInContextMut<'a, V> {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug)]
|
||||
pub struct LocalSetInContext<'a> {
|
||||
hir_owner: OwnerId,
|
||||
data: &'a ItemLocalSet,
|
||||
}
|
||||
|
||||
impl<'a> LocalSetInContext<'a> {
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.data.is_empty()
|
||||
}
|
||||
|
||||
pub fn contains(&self, id: hir::HirId) -> bool {
|
||||
validate_hir_id_for_typeck_results(self.hir_owner, id);
|
||||
self.data.contains(&id.local_id)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct LocalSetInContextMut<'a> {
|
||||
hir_owner: OwnerId,
|
||||
data: &'a mut ItemLocalSet,
|
||||
}
|
||||
|
||||
impl<'a> LocalSetInContextMut<'a> {
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.data.is_empty()
|
||||
}
|
||||
|
||||
pub fn contains(&self, id: hir::HirId) -> bool {
|
||||
validate_hir_id_for_typeck_results(self.hir_owner, id);
|
||||
self.data.contains(&id.local_id)
|
||||
}
|
||||
pub fn insert(&mut self, id: hir::HirId) -> bool {
|
||||
validate_hir_id_for_typeck_results(self.hir_owner, id);
|
||||
self.data.insert(id.local_id)
|
||||
}
|
||||
|
||||
pub fn remove(&mut self, id: hir::HirId) -> bool {
|
||||
validate_hir_id_for_typeck_results(self.hir_owner, id);
|
||||
self.data.remove(&id.local_id)
|
||||
}
|
||||
}
|
||||
|
||||
rustc_index::newtype_index! {
|
||||
#[derive(HashStable)]
|
||||
#[encodable]
|
||||
|
@ -214,12 +214,77 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
|
||||
///
|
||||
/// ## False edges
|
||||
///
|
||||
/// We don't want to have the exact structure of the decision tree be
|
||||
/// visible through borrow checking. False edges ensure that the CFG as
|
||||
/// seen by borrow checking doesn't encode this. False edges are added:
|
||||
/// We don't want to have the exact structure of the decision tree be visible through borrow
|
||||
/// checking. Specifically we want borrowck to think that:
|
||||
/// - at any point, any or none of the patterns and guards seen so far may have been tested;
|
||||
/// - after the match, any of the patterns may have matched.
|
||||
///
|
||||
/// * From each pre-binding block to the next pre-binding block.
|
||||
/// * From each otherwise block to the next pre-binding block.
|
||||
/// For example, all of these would fail to error if borrowck could see the real CFG (examples
|
||||
/// taken from `tests/ui/nll/match-cfg-fake-edges.rs`):
|
||||
/// ```ignore (too many errors, this is already in the test suite)
|
||||
/// let x = String::new();
|
||||
/// let _ = match true {
|
||||
/// _ => {},
|
||||
/// _ => drop(x),
|
||||
/// };
|
||||
/// // Borrowck must not know the second arm is never run.
|
||||
/// drop(x); //~ ERROR use of moved value
|
||||
///
|
||||
/// let x;
|
||||
/// # let y = true;
|
||||
/// match y {
|
||||
/// _ if { x = 2; true } => {},
|
||||
/// // Borrowck must not know the guard is always run.
|
||||
/// _ => drop(x), //~ ERROR used binding `x` is possibly-uninitialized
|
||||
/// };
|
||||
///
|
||||
/// let x = String::new();
|
||||
/// # let y = true;
|
||||
/// match y {
|
||||
/// false if { drop(x); true } => {},
|
||||
/// // Borrowck must not know the guard is not run in the `true` case.
|
||||
/// true => drop(x), //~ ERROR use of moved value: `x`
|
||||
/// false => {},
|
||||
/// };
|
||||
///
|
||||
/// # let mut y = (true, true);
|
||||
/// let r = &mut y.1;
|
||||
/// match y {
|
||||
/// //~^ ERROR cannot use `y.1` because it was mutably borrowed
|
||||
/// (false, true) => {}
|
||||
/// // Borrowck must not know we don't test `y.1` when `y.0` is `true`.
|
||||
/// (true, _) => drop(r),
|
||||
/// (false, _) => {}
|
||||
/// };
|
||||
/// ```
|
||||
///
|
||||
/// We add false edges to act as if we were naively matching each arm in order. What we need is
|
||||
/// a (fake) path from each candidate to the next, specifically from candidate C's pre-binding
|
||||
/// block to next candidate D's pre-binding block. For maximum precision (needed for deref
|
||||
/// patterns), we choose the earliest node on D's success path that doesn't also lead to C (to
|
||||
/// avoid loops).
|
||||
///
|
||||
/// This turns out to be easy to compute: that block is the `start_block` of the first call to
|
||||
/// `match_candidates` where D is the first candidate in the list.
|
||||
///
|
||||
/// For example:
|
||||
/// ```rust
|
||||
/// # let (x, y) = (true, true);
|
||||
/// match (x, y) {
|
||||
/// (true, true) => 1,
|
||||
/// (false, true) => 2,
|
||||
/// (true, false) => 3,
|
||||
/// _ => 4,
|
||||
/// }
|
||||
/// # ;
|
||||
/// ```
|
||||
/// In this example, the pre-binding block of arm 1 has a false edge to the block for result
|
||||
/// `false` of the first test on `x`. The other arms have false edges to the pre-binding blocks
|
||||
/// of the next arm.
|
||||
///
|
||||
/// On top of this, we also add a false edge from the otherwise_block of each guard to the
|
||||
/// aforementioned start block of the next candidate, to ensure borrock doesn't rely on which
|
||||
/// guards may have run.
|
||||
#[instrument(level = "debug", skip(self, arms))]
|
||||
pub(crate) fn match_expr(
|
||||
&mut self,
|
||||
@ -365,7 +430,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
|
||||
for candidate in candidates {
|
||||
candidate.visit_leaves(|leaf_candidate| {
|
||||
if let Some(ref mut prev) = previous_candidate {
|
||||
prev.next_candidate_pre_binding_block = leaf_candidate.pre_binding_block;
|
||||
assert!(leaf_candidate.false_edge_start_block.is_some());
|
||||
prev.next_candidate_start_block = leaf_candidate.false_edge_start_block;
|
||||
}
|
||||
previous_candidate = Some(leaf_candidate);
|
||||
});
|
||||
@ -1010,8 +1076,12 @@ struct Candidate<'pat, 'tcx> {
|
||||
|
||||
/// The block before the `bindings` have been established.
|
||||
pre_binding_block: Option<BasicBlock>,
|
||||
/// The pre-binding block of the next candidate.
|
||||
next_candidate_pre_binding_block: Option<BasicBlock>,
|
||||
|
||||
/// The earliest block that has only candidates >= this one as descendents. Used for false
|
||||
/// edges, see the doc for [`Builder::match_expr`].
|
||||
false_edge_start_block: Option<BasicBlock>,
|
||||
/// The `false_edge_start_block` of the next candidate.
|
||||
next_candidate_start_block: Option<BasicBlock>,
|
||||
}
|
||||
|
||||
impl<'tcx, 'pat> Candidate<'pat, 'tcx> {
|
||||
@ -1033,7 +1103,8 @@ impl<'tcx, 'pat> Candidate<'pat, 'tcx> {
|
||||
or_span: None,
|
||||
otherwise_block: None,
|
||||
pre_binding_block: None,
|
||||
next_candidate_pre_binding_block: None,
|
||||
false_edge_start_block: None,
|
||||
next_candidate_start_block: None,
|
||||
}
|
||||
}
|
||||
|
||||
@ -1325,6 +1396,12 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
|
||||
otherwise_block: BasicBlock,
|
||||
candidates: &mut [&mut Candidate<'_, 'tcx>],
|
||||
) {
|
||||
if let [first, ..] = candidates {
|
||||
if first.false_edge_start_block.is_none() {
|
||||
first.false_edge_start_block = Some(start_block);
|
||||
}
|
||||
}
|
||||
|
||||
match candidates {
|
||||
[] => {
|
||||
// If there are no candidates that still need testing, we're done. Since all matches are
|
||||
@ -1545,6 +1622,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
|
||||
.into_iter()
|
||||
.map(|flat_pat| Candidate::from_flat_pat(flat_pat, candidate.has_guard))
|
||||
.collect();
|
||||
candidate.subcandidates[0].false_edge_start_block = candidate.false_edge_start_block;
|
||||
}
|
||||
|
||||
/// Try to merge all of the subcandidates of the given candidate into one. This avoids
|
||||
@ -1564,6 +1642,10 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
|
||||
let any_matches = self.cfg.start_new_block();
|
||||
let or_span = candidate.or_span.take().unwrap();
|
||||
let source_info = self.source_info(or_span);
|
||||
if candidate.false_edge_start_block.is_none() {
|
||||
candidate.false_edge_start_block =
|
||||
candidate.subcandidates[0].false_edge_start_block;
|
||||
}
|
||||
for subcandidate in mem::take(&mut candidate.subcandidates) {
|
||||
let or_block = subcandidate.pre_binding_block.unwrap();
|
||||
self.cfg.goto(or_block, source_info, any_matches);
|
||||
@ -1979,12 +2061,12 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
|
||||
|
||||
let mut block = candidate.pre_binding_block.unwrap();
|
||||
|
||||
if candidate.next_candidate_pre_binding_block.is_some() {
|
||||
if candidate.next_candidate_start_block.is_some() {
|
||||
let fresh_block = self.cfg.start_new_block();
|
||||
self.false_edges(
|
||||
block,
|
||||
fresh_block,
|
||||
candidate.next_candidate_pre_binding_block,
|
||||
candidate.next_candidate_start_block,
|
||||
candidate_source_info,
|
||||
);
|
||||
block = fresh_block;
|
||||
@ -2132,7 +2214,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
|
||||
self.false_edges(
|
||||
otherwise_post_guard_block,
|
||||
otherwise_block,
|
||||
candidate.next_candidate_pre_binding_block,
|
||||
candidate.next_candidate_start_block,
|
||||
source_info,
|
||||
);
|
||||
|
||||
|
@ -65,7 +65,14 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
|
||||
// we wrap the unadjusted pattern in `PatKind::Deref` repeatedly, consuming the
|
||||
// adjustments in *reverse order* (last-in-first-out, so that the last `Deref` inserted
|
||||
// gets the least-dereferenced type).
|
||||
let unadjusted_pat = self.lower_pattern_unadjusted(pat);
|
||||
let unadjusted_pat = match pat.kind {
|
||||
hir::PatKind::Ref(inner, _)
|
||||
if self.typeck_results.skipped_ref_pats().contains(pat.hir_id) =>
|
||||
{
|
||||
self.lower_pattern_unadjusted(inner)
|
||||
}
|
||||
_ => self.lower_pattern_unadjusted(pat),
|
||||
};
|
||||
self.typeck_results.pat_adjustments().get(pat.hir_id).unwrap_or(&vec![]).iter().rev().fold(
|
||||
unadjusted_pat,
|
||||
|pat: Box<_>, ref_ty| {
|
||||
|
@ -91,15 +91,17 @@ impl<'tcx> MirPass<'tcx> for ByMoveBody {
|
||||
return;
|
||||
}
|
||||
|
||||
let ty::Coroutine(_, coroutine_args) = *coroutine_ty.kind() else { bug!("{body:#?}") };
|
||||
// We don't need to generate a by-move coroutine if the kind of the coroutine is
|
||||
// already `FnOnce` -- that means that any upvars that the closure consumes have
|
||||
// already been taken by-value.
|
||||
let coroutine_kind = coroutine_args.as_coroutine().kind_ty().to_opt_closure_kind().unwrap();
|
||||
if coroutine_kind == ty::ClosureKind::FnOnce {
|
||||
// We don't need to generate a by-move coroutine if the coroutine body was
|
||||
// produced by the `CoroutineKindShim`, since it's already by-move.
|
||||
if matches!(body.source.instance, ty::InstanceDef::CoroutineKindShim { .. }) {
|
||||
return;
|
||||
}
|
||||
|
||||
let ty::Coroutine(_, args) = *coroutine_ty.kind() else { bug!("{body:#?}") };
|
||||
let args = args.as_coroutine();
|
||||
|
||||
let coroutine_kind = args.kind_ty().to_opt_closure_kind().unwrap();
|
||||
|
||||
let parent_def_id = tcx.local_parent(coroutine_def_id);
|
||||
let ty::CoroutineClosure(_, parent_args) =
|
||||
*tcx.type_of(parent_def_id).instantiate_identity().kind()
|
||||
@ -128,6 +130,12 @@ impl<'tcx> MirPass<'tcx> for ByMoveBody {
|
||||
// the outer closure body -- we need to change the coroutine to take the
|
||||
// upvar by value.
|
||||
if coroutine_capture.is_by_ref() && !parent_capture.is_by_ref() {
|
||||
assert_ne!(
|
||||
coroutine_kind,
|
||||
ty::ClosureKind::FnOnce,
|
||||
"`FnOnce` coroutine-closures return coroutines that capture from \
|
||||
their body; it will always result in a borrowck error!"
|
||||
);
|
||||
by_ref_fields.insert(FieldIdx::from_usize(num_args + idx));
|
||||
}
|
||||
|
||||
|
@ -59,7 +59,7 @@ fn coverage_ids_info<'tcx>(
|
||||
_ => None,
|
||||
})
|
||||
.max()
|
||||
.unwrap_or(CounterId::START);
|
||||
.unwrap_or(CounterId::ZERO);
|
||||
|
||||
CoverageIdsInfo { max_counter_id }
|
||||
}
|
||||
|
@ -324,7 +324,7 @@ impl<'tcx> Inliner<'tcx> {
|
||||
// do not need to catch this here, we can wait until the inliner decides to continue
|
||||
// inlining a second time.
|
||||
InstanceDef::VTableShim(_)
|
||||
| InstanceDef::ReifyShim(_)
|
||||
| InstanceDef::ReifyShim(..)
|
||||
| InstanceDef::FnPtrShim(..)
|
||||
| InstanceDef::ClosureOnceShim { .. }
|
||||
| InstanceDef::ConstructCoroutineInClosureShim { .. }
|
||||
@ -1077,7 +1077,7 @@ fn try_instance_mir<'tcx>(
|
||||
let fields = def.all_fields();
|
||||
for field in fields {
|
||||
let field_ty = field.ty(tcx, args);
|
||||
if field_ty.has_param() && field_ty.has_projections() {
|
||||
if field_ty.has_param() && field_ty.has_aliases() {
|
||||
return Err("cannot build drop shim for polymorphic type");
|
||||
}
|
||||
}
|
||||
|
@ -85,7 +85,7 @@ pub(crate) fn mir_callgraph_reachable<'tcx>(
|
||||
// again, a function item can end up getting inlined. Thus we'll be able to cause
|
||||
// a cycle that way
|
||||
InstanceDef::VTableShim(_)
|
||||
| InstanceDef::ReifyShim(_)
|
||||
| InstanceDef::ReifyShim(..)
|
||||
| InstanceDef::FnPtrShim(..)
|
||||
| InstanceDef::ClosureOnceShim { .. }
|
||||
| InstanceDef::ConstructCoroutineInClosureShim { .. }
|
||||
|
@ -55,7 +55,7 @@ fn make_shim<'tcx>(tcx: TyCtxt<'tcx>, instance: ty::InstanceDef<'tcx>) -> Body<'
|
||||
// a virtual call, or a direct call to a function for which
|
||||
// indirect calls must be codegen'd differently than direct ones
|
||||
// (such as `#[track_caller]`).
|
||||
ty::InstanceDef::ReifyShim(def_id) => {
|
||||
ty::InstanceDef::ReifyShim(def_id, _) => {
|
||||
build_call_shim(tcx, instance, None, CallKind::Direct(def_id))
|
||||
}
|
||||
ty::InstanceDef::ClosureOnceShim { call_once: _, track_caller: _ } => {
|
||||
|
@ -13,6 +13,7 @@ use std::fmt::Debug;
|
||||
use std::hash::Hash;
|
||||
use std::marker::PhantomData;
|
||||
use std::sync::atomic::Ordering;
|
||||
use std::sync::Arc;
|
||||
|
||||
use super::query::DepGraphQuery;
|
||||
use super::serialized::{GraphEncoder, SerializedDepGraph, SerializedDepNodeIndex};
|
||||
@ -81,7 +82,7 @@ pub(crate) struct DepGraphData<D: Deps> {
|
||||
|
||||
/// The dep-graph from the previous compilation session. It contains all
|
||||
/// nodes and edges as well as all fingerprints of nodes that have them.
|
||||
previous: SerializedDepGraph,
|
||||
previous: Arc<SerializedDepGraph>,
|
||||
|
||||
colors: DepNodeColorMap,
|
||||
|
||||
@ -113,7 +114,7 @@ where
|
||||
impl<D: Deps> DepGraph<D> {
|
||||
pub fn new(
|
||||
profiler: &SelfProfilerRef,
|
||||
prev_graph: SerializedDepGraph,
|
||||
prev_graph: Arc<SerializedDepGraph>,
|
||||
prev_work_products: WorkProductMap,
|
||||
encoder: FileEncoder,
|
||||
record_graph: bool,
|
||||
@ -127,6 +128,7 @@ impl<D: Deps> DepGraph<D> {
|
||||
encoder,
|
||||
record_graph,
|
||||
record_stats,
|
||||
prev_graph.clone(),
|
||||
);
|
||||
|
||||
let colors = DepNodeColorMap::new(prev_graph_node_count);
|
||||
@ -1084,6 +1086,7 @@ impl<D: Deps> CurrentDepGraph<D> {
|
||||
encoder: FileEncoder,
|
||||
record_graph: bool,
|
||||
record_stats: bool,
|
||||
previous: Arc<SerializedDepGraph>,
|
||||
) -> Self {
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
@ -1116,6 +1119,7 @@ impl<D: Deps> CurrentDepGraph<D> {
|
||||
record_graph,
|
||||
record_stats,
|
||||
profiler,
|
||||
previous,
|
||||
),
|
||||
new_node_to_index: Sharded::new(|| {
|
||||
FxHashMap::with_capacity_and_hasher(
|
||||
@ -1236,16 +1240,14 @@ impl<D: Deps> CurrentDepGraph<D> {
|
||||
match prev_index_to_index[prev_index] {
|
||||
Some(dep_node_index) => dep_node_index,
|
||||
None => {
|
||||
let key = prev_graph.index_to_node(prev_index);
|
||||
let edges = prev_graph
|
||||
.edge_targets_from(prev_index)
|
||||
.map(|i| prev_index_to_index[i].unwrap())
|
||||
.collect();
|
||||
let fingerprint = prev_graph.fingerprint_by_index(prev_index);
|
||||
let dep_node_index = self.encoder.send(key, fingerprint, edges);
|
||||
let dep_node_index = self.encoder.send_promoted(prev_index, &*prev_index_to_index);
|
||||
prev_index_to_index[prev_index] = Some(dep_node_index);
|
||||
#[cfg(debug_assertions)]
|
||||
self.record_edge(dep_node_index, key, fingerprint);
|
||||
self.record_edge(
|
||||
dep_node_index,
|
||||
prev_graph.index_to_node(prev_index),
|
||||
prev_graph.fingerprint_by_index(prev_index),
|
||||
);
|
||||
dep_node_index
|
||||
}
|
||||
}
|
||||
|
@ -41,6 +41,7 @@ use crate::dep_graph::edges::EdgesVec;
|
||||
use rustc_data_structures::fingerprint::Fingerprint;
|
||||
use rustc_data_structures::fingerprint::PackedFingerprint;
|
||||
use rustc_data_structures::fx::FxHashMap;
|
||||
use rustc_data_structures::outline;
|
||||
use rustc_data_structures::profiling::SelfProfilerRef;
|
||||
use rustc_data_structures::sync::Lock;
|
||||
use rustc_data_structures::unhash::UnhashMap;
|
||||
@ -49,6 +50,7 @@ use rustc_serialize::opaque::{FileEncodeResult, FileEncoder, IntEncodedWithFixed
|
||||
use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
|
||||
use std::iter;
|
||||
use std::marker::PhantomData;
|
||||
use std::sync::Arc;
|
||||
|
||||
// The maximum value of `SerializedDepNodeIndex` leaves the upper two bits
|
||||
// unused so that we can store multiple index types in `CompressedHybridIndex`,
|
||||
@ -94,7 +96,7 @@ impl SerializedDepGraph {
|
||||
pub fn edge_targets_from(
|
||||
&self,
|
||||
source: SerializedDepNodeIndex,
|
||||
) -> impl Iterator<Item = SerializedDepNodeIndex> + '_ {
|
||||
) -> impl Iterator<Item = SerializedDepNodeIndex> + Clone + '_ {
|
||||
let header = self.edge_list_indices[source];
|
||||
let mut raw = &self.edge_list_data[header.start()..];
|
||||
// Figure out where the edge list for `source` ends by getting the start index of the next
|
||||
@ -176,7 +178,7 @@ fn mask(bits: usize) -> usize {
|
||||
|
||||
impl SerializedDepGraph {
|
||||
#[instrument(level = "debug", skip(d))]
|
||||
pub fn decode<D: Deps>(d: &mut MemDecoder<'_>) -> SerializedDepGraph {
|
||||
pub fn decode<D: Deps>(d: &mut MemDecoder<'_>) -> Arc<SerializedDepGraph> {
|
||||
// The last 16 bytes are the node count and edge count.
|
||||
debug!("position: {:?}", d.position());
|
||||
let (node_count, edge_count, graph_size) =
|
||||
@ -254,7 +256,13 @@ impl SerializedDepGraph {
|
||||
index[node.kind.as_usize()].insert(node.hash, idx);
|
||||
}
|
||||
|
||||
SerializedDepGraph { nodes, fingerprints, edge_list_indices, edge_list_data, index }
|
||||
Arc::new(SerializedDepGraph {
|
||||
nodes,
|
||||
fingerprints,
|
||||
edge_list_indices,
|
||||
edge_list_data,
|
||||
index,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@ -299,21 +307,24 @@ impl<D: Deps> SerializedNodeHeader<D> {
|
||||
const MAX_INLINE_LEN: usize = (u16::MAX as usize >> (Self::TOTAL_BITS - Self::LEN_BITS)) - 1;
|
||||
|
||||
#[inline]
|
||||
fn new(node_info: &NodeInfo) -> Self {
|
||||
fn new(
|
||||
node: DepNode,
|
||||
fingerprint: Fingerprint,
|
||||
edge_max_index: u32,
|
||||
edge_count: usize,
|
||||
) -> Self {
|
||||
debug_assert_eq!(Self::TOTAL_BITS, Self::LEN_BITS + Self::WIDTH_BITS + Self::KIND_BITS);
|
||||
|
||||
let NodeInfo { node, fingerprint, edges } = node_info;
|
||||
|
||||
let mut head = node.kind.as_inner();
|
||||
|
||||
let free_bytes = edges.max_index().leading_zeros() as usize / 8;
|
||||
let free_bytes = edge_max_index.leading_zeros() as usize / 8;
|
||||
let bytes_per_index = (DEP_NODE_SIZE - free_bytes).saturating_sub(1);
|
||||
head |= (bytes_per_index as u16) << Self::KIND_BITS;
|
||||
|
||||
// Encode number of edges + 1 so that we can reserve 0 to indicate that the len doesn't fit
|
||||
// in this bitfield.
|
||||
if edges.len() <= Self::MAX_INLINE_LEN {
|
||||
head |= (edges.len() as u16 + 1) << (Self::KIND_BITS + Self::WIDTH_BITS);
|
||||
if edge_count <= Self::MAX_INLINE_LEN {
|
||||
head |= (edge_count as u16 + 1) << (Self::KIND_BITS + Self::WIDTH_BITS);
|
||||
}
|
||||
|
||||
let hash: Fingerprint = node.hash.into();
|
||||
@ -327,10 +338,10 @@ impl<D: Deps> SerializedNodeHeader<D> {
|
||||
#[cfg(debug_assertions)]
|
||||
{
|
||||
let res = Self { bytes, _marker: PhantomData };
|
||||
assert_eq!(node_info.fingerprint, res.fingerprint());
|
||||
assert_eq!(node_info.node, res.node());
|
||||
assert_eq!(fingerprint, res.fingerprint());
|
||||
assert_eq!(node, res.node());
|
||||
if let Some(len) = res.len() {
|
||||
assert_eq!(node_info.edges.len(), len);
|
||||
assert_eq!(edge_count, len);
|
||||
}
|
||||
}
|
||||
Self { bytes, _marker: PhantomData }
|
||||
@ -393,21 +404,61 @@ struct NodeInfo {
|
||||
|
||||
impl NodeInfo {
|
||||
fn encode<D: Deps>(&self, e: &mut FileEncoder) {
|
||||
let header = SerializedNodeHeader::<D>::new(self);
|
||||
let NodeInfo { node, fingerprint, ref edges } = *self;
|
||||
let header =
|
||||
SerializedNodeHeader::<D>::new(node, fingerprint, edges.max_index(), edges.len());
|
||||
e.write_array(header.bytes);
|
||||
|
||||
if header.len().is_none() {
|
||||
e.emit_usize(self.edges.len());
|
||||
e.emit_usize(edges.len());
|
||||
}
|
||||
|
||||
let bytes_per_index = header.bytes_per_index();
|
||||
for node_index in self.edges.iter() {
|
||||
for node_index in edges.iter() {
|
||||
e.write_with(|dest| {
|
||||
*dest = node_index.as_u32().to_le_bytes();
|
||||
bytes_per_index
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/// Encode a node that was promoted from the previous graph. It reads the edges directly from
|
||||
/// the previous dep graph and expects all edges to already have a new dep node index assigned.
|
||||
/// This avoids the overhead of constructing `EdgesVec`, which would be needed to call `encode`.
|
||||
#[inline]
|
||||
fn encode_promoted<D: Deps>(
|
||||
e: &mut FileEncoder,
|
||||
node: DepNode,
|
||||
fingerprint: Fingerprint,
|
||||
prev_index: SerializedDepNodeIndex,
|
||||
prev_index_to_index: &IndexVec<SerializedDepNodeIndex, Option<DepNodeIndex>>,
|
||||
previous: &SerializedDepGraph,
|
||||
) -> usize {
|
||||
let edges = previous.edge_targets_from(prev_index);
|
||||
let edge_count = edges.size_hint().0;
|
||||
|
||||
// Find the highest edge in the new dep node indices
|
||||
let edge_max =
|
||||
edges.clone().map(|i| prev_index_to_index[i].unwrap().as_u32()).max().unwrap_or(0);
|
||||
|
||||
let header = SerializedNodeHeader::<D>::new(node, fingerprint, edge_max, edge_count);
|
||||
e.write_array(header.bytes);
|
||||
|
||||
if header.len().is_none() {
|
||||
e.emit_usize(edge_count);
|
||||
}
|
||||
|
||||
let bytes_per_index = header.bytes_per_index();
|
||||
for node_index in edges {
|
||||
let node_index = prev_index_to_index[node_index].unwrap();
|
||||
e.write_with(|dest| {
|
||||
*dest = node_index.as_u32().to_le_bytes();
|
||||
bytes_per_index
|
||||
});
|
||||
}
|
||||
|
||||
edge_count
|
||||
}
|
||||
}
|
||||
|
||||
struct Stat {
|
||||
@ -417,6 +468,7 @@ struct Stat {
|
||||
}
|
||||
|
||||
struct EncoderState<D: Deps> {
|
||||
previous: Arc<SerializedDepGraph>,
|
||||
encoder: FileEncoder,
|
||||
total_node_count: usize,
|
||||
total_edge_count: usize,
|
||||
@ -428,8 +480,9 @@ struct EncoderState<D: Deps> {
|
||||
}
|
||||
|
||||
impl<D: Deps> EncoderState<D> {
|
||||
fn new(encoder: FileEncoder, record_stats: bool) -> Self {
|
||||
fn new(encoder: FileEncoder, record_stats: bool, previous: Arc<SerializedDepGraph>) -> Self {
|
||||
Self {
|
||||
previous,
|
||||
encoder,
|
||||
total_edge_count: 0,
|
||||
total_node_count: 0,
|
||||
@ -439,36 +492,99 @@ impl<D: Deps> EncoderState<D> {
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn record(
|
||||
&mut self,
|
||||
node: DepNode,
|
||||
edge_count: usize,
|
||||
edges: impl FnOnce(&mut Self) -> Vec<DepNodeIndex>,
|
||||
record_graph: &Option<Lock<DepGraphQuery>>,
|
||||
) -> DepNodeIndex {
|
||||
let index = DepNodeIndex::new(self.total_node_count);
|
||||
|
||||
self.total_node_count += 1;
|
||||
self.kind_stats[node.kind.as_usize()] += 1;
|
||||
self.total_edge_count += edge_count;
|
||||
|
||||
if let Some(record_graph) = &record_graph {
|
||||
// Call `edges` before the outlined code to allow the closure to be optimized out.
|
||||
let edges = edges(self);
|
||||
|
||||
// Outline the build of the full dep graph as it's typically disabled and cold.
|
||||
outline(move || {
|
||||
// Do not ICE when a query is called from within `with_query`.
|
                if let Some(record_graph) = &mut record_graph.try_lock() {
                    record_graph.push(index, node, &edges);
                }
            });
        }

        if let Some(stats) = &mut self.stats {
            let kind = node.kind;

            // Outline the stats code as it's typically disabled and cold.
            outline(move || {
                let stat =
                    stats.entry(kind).or_insert(Stat { kind, node_counter: 0, edge_counter: 0 });
                stat.node_counter += 1;
                stat.edge_counter += edge_count as u64;
            });
        }

        index
    }

    /// Encodes a node to the current graph.
    fn encode_node(
        &mut self,
        node: &NodeInfo,
        record_graph: &Option<Lock<DepGraphQuery>>,
    ) -> DepNodeIndex {
        let index = DepNodeIndex::new(self.total_node_count);
        self.total_node_count += 1;
        self.kind_stats[node.node.kind.as_usize()] += 1;
        node.encode::<D>(&mut self.encoder);
        self.record(
            node.node,
            node.edges.len(),
            |_| node.edges[..].iter().copied().collect(),
            record_graph,
        )
    }

    let edge_count = node.edges.len();
    self.total_edge_count += edge_count;

    /// Encodes a node that was promoted from the previous graph. It reads the information
    /// directly from the previous dep graph for performance reasons.
    ///
    /// This differs from `encode_node` where you have to explicitly provide the relevant
    /// `NodeInfo`.
    ///
    /// It expects all edges to already have a new dep node index assigned.
    #[inline]
    fn encode_promoted_node(
        &mut self,
        prev_index: SerializedDepNodeIndex,
        record_graph: &Option<Lock<DepGraphQuery>>,
        prev_index_to_index: &IndexVec<SerializedDepNodeIndex, Option<DepNodeIndex>>,
    ) -> DepNodeIndex {
        let node = self.previous.index_to_node(prev_index);

        if let Some(record_graph) = &record_graph {
            // Do not ICE when a query is called from within `with_query`.
            if let Some(record_graph) = &mut record_graph.try_lock() {
                record_graph.push(index, node.node, &node.edges);
            }
        }
        let fingerprint = self.previous.fingerprint_by_index(prev_index);
        let edge_count = NodeInfo::encode_promoted::<D>(
            &mut self.encoder,
            node,
            fingerprint,
            prev_index,
            prev_index_to_index,
            &self.previous,
        );

        if let Some(stats) = &mut self.stats {
            let kind = node.node.kind;

            let stat = stats.entry(kind).or_insert(Stat { kind, node_counter: 0, edge_counter: 0 });
            stat.node_counter += 1;
            stat.edge_counter += edge_count as u64;
        }

        let encoder = &mut self.encoder;
        node.encode::<D>(encoder);
        index
        self.record(
            node,
            edge_count,
            |this| {
                this.previous
                    .edge_targets_from(prev_index)
                    .map(|i| prev_index_to_index[i].unwrap())
                    .collect()
            },
            record_graph,
        )
    }

    fn finish(self, profiler: &SelfProfilerRef) -> FileEncodeResult {
@@ -479,6 +595,7 @@ impl<D: Deps> EncoderState<D> {
            stats: _,
            kind_stats,
            marker: _,
            previous: _,
        } = self;

        let node_count = total_node_count.try_into().unwrap();
@@ -520,9 +637,10 @@ impl<D: Deps> GraphEncoder<D> {
        record_graph: bool,
        record_stats: bool,
        profiler: &SelfProfilerRef,
        previous: Arc<SerializedDepGraph>,
    ) -> Self {
        let record_graph = record_graph.then(|| Lock::new(DepGraphQuery::new(prev_node_count)));
        let status = Lock::new(Some(EncoderState::new(encoder, record_stats)));
        let status = Lock::new(Some(EncoderState::new(encoder, record_stats, previous)));
        GraphEncoder { status, record_graph, profiler: profiler.clone() }
    }

@@ -596,6 +714,22 @@ impl<D: Deps> GraphEncoder<D> {
        self.status.lock().as_mut().unwrap().encode_node(&node, &self.record_graph)
    }

    /// Encodes a node that was promoted from the previous graph. It reads the information
    /// directly from the previous dep graph and expects all edges to already have a new dep
    /// node index assigned.
    #[inline]
    pub(crate) fn send_promoted(
        &self,
        prev_index: SerializedDepNodeIndex,
        prev_index_to_index: &IndexVec<SerializedDepNodeIndex, Option<DepNodeIndex>>,
    ) -> DepNodeIndex {
        let _prof_timer = self.profiler.generic_activity("incr_comp_encode_dep_graph");
        self.status.lock().as_mut().unwrap().encode_promoted_node(
            prev_index,
            &self.record_graph,
            prev_index_to_index,
        )
    }

    pub fn finish(&self) -> FileEncodeResult {
        let _prof_timer = self.profiler.generic_activity("incr_comp_encode_dep_graph_finish");
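The promotion path above hinges on one invariant: edges recorded against previous-session node indices must be remapped to this session's indices before anything is written out. A minimal sketch of that remapping step, using plain `Vec`s instead of rustc's `IndexVec` (all names here are illustrative, not rustc's):

```rust
// Illustrative sketch only: `prev_index_to_index` plays the same role as the
// parameter of `encode_promoted_node` above, mapping previous-session node
// indices to indices in the graph currently being encoded.
fn remap_edges(prev_edges: &[usize], prev_index_to_index: &[Option<u32>]) -> Vec<u32> {
    prev_edges
        .iter()
        // Promotion assumes every edge target already has a new index assigned.
        .map(|&prev| prev_index_to_index[prev].expect("edge promoted before its target"))
        .collect()
}

fn main() {
    // Previous node 0 became new node 5, previous node 2 became new node 6;
    // previous node 1 was not promoted.
    let map = vec![Some(5), None, Some(6)];
    assert_eq!(remap_edges(&[0, 2], &map), vec![5, 6]);
}
```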
@@ -605,6 +605,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
                && !this.tcx.features().f16
                && !ident.span.allows_unstable(sym::f16)
                && finalize.is_some()
                && innermost_result.is_none()
            {
                feature_err(
                    this.tcx.sess,
@@ -618,6 +619,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
                && !this.tcx.features().f128
                && !ident.span.allows_unstable(sym::f128)
                && finalize.is_some()
                && innermost_result.is_none()
            {
                feature_err(
                    this.tcx.sess,
@@ -1456,6 +1456,7 @@ symbols! {
        receiver,
        recursion_limit,
        reexport_test_harness_main,
        ref_pat_everywhere,
        ref_unwind_safe_trait,
        reference,
        reflect,
@@ -2,7 +2,7 @@ use rustc_data_structures::stable_hasher::{Hash64, HashStable, StableHasher};
use rustc_hir::def_id::CrateNum;
use rustc_hir::definitions::{DefPathData, DisambiguatedDefPathData};
use rustc_middle::ty::print::{PrettyPrinter, Print, PrintError, Printer};
use rustc_middle::ty::{self, Instance, Ty, TyCtxt, TypeVisitableExt};
use rustc_middle::ty::{self, Instance, ReifyReason, Ty, TyCtxt, TypeVisitableExt};
use rustc_middle::ty::{GenericArg, GenericArgKind};

use std::fmt::{self, Write};
@@ -71,8 +71,14 @@ pub(super) fn mangle<'tcx>(
        ty::InstanceDef::VTableShim(..) => {
            printer.write_str("{{vtable-shim}}").unwrap();
        }
        ty::InstanceDef::ReifyShim(..) => {
            printer.write_str("{{reify-shim}}").unwrap();
        ty::InstanceDef::ReifyShim(_, reason) => {
            printer.write_str("{{reify-shim").unwrap();
            match reason {
                Some(ReifyReason::FnPtr) => printer.write_str("-fnptr").unwrap(),
                Some(ReifyReason::Vtable) => printer.write_str("-vtable").unwrap(),
                None => (),
            }
            printer.write_str("}}").unwrap();
        }
        // FIXME(async_closures): This shouldn't be needed when we fix
        // `Instance::ty`/`Instance::def_id`.
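The legacy mangler now records *why* a reify shim exists by appending a suffix to the `{{reify-shim}}` marker. A standalone sketch of just that suffix scheme, with a local stand-in for rustc's `ReifyReason`:

```rust
// Local stand-in for rustc's `ReifyReason`, for illustration only.
enum ReifyReason {
    FnPtr,
    Vtable,
}

// Builds the shim label the same way the mangler above writes it out.
fn shim_label(reason: Option<ReifyReason>) -> String {
    let mut s = String::from("{{reify-shim");
    match reason {
        Some(ReifyReason::FnPtr) => s.push_str("-fnptr"),
        Some(ReifyReason::Vtable) => s.push_str("-vtable"),
        None => {}
    }
    s.push_str("}}");
    s
}

fn main() {
    assert_eq!(shim_label(Some(ReifyReason::FnPtr)), "{{reify-shim-fnptr}}");
    assert_eq!(shim_label(None), "{{reify-shim}}");
}
```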
@@ -4,7 +4,7 @@
/// For more information about LLVM CFI and cross-language LLVM CFI support for the Rust compiler,
/// see design document in the tracking issue #89653.
use bitflags::bitflags;
use rustc_middle::ty::{Instance, Ty, TyCtxt};
use rustc_middle::ty::{Instance, InstanceDef, ReifyReason, Ty, TyCtxt};
use rustc_target::abi::call::FnAbi;
use std::hash::Hasher;
use twox_hash::XxHash64;
@@ -26,7 +26,12 @@ bitflags! {
        const NORMALIZE_INTEGERS = 4;
        /// Do not perform self type erasure for attaching a secondary type id to methods with their
        /// concrete self so they can be used as function pointers.
        const NO_SELF_TYPE_ERASURE = 8;
        ///
        /// (This applies to typeid_for_instance only and should be used to attach a secondary type
        /// id to methods during their declaration/definition so they match the type ids returned by
        /// either typeid_for_instance or typeid_for_fnabi at call sites during code generation for
        /// type membership tests when methods are used as function pointers.)
        const USE_CONCRETE_SELF = 8;
    }
}

@@ -67,8 +72,26 @@ pub fn kcfi_typeid_for_fnabi<'tcx>(
pub fn kcfi_typeid_for_instance<'tcx>(
    tcx: TyCtxt<'tcx>,
    instance: Instance<'tcx>,
    options: TypeIdOptions,
    mut options: TypeIdOptions,
) -> u32 {
    // KCFI support for Rust shares most of its implementation with the CFI support, with some key
    // differences:
    //
    // 1. KCFI performs type tests differently, and they are implemented as different LLVM passes
    //    than CFI to not require LTO.
    // 2. KCFI has the limitation that a function or method may have only one type id assigned.
    //
    // Because of the limitation listed above (2), the current KCFI implementation (not CFI) does
    // reifying of types (i.e., adds shims/trampolines for indirect calls in these cases) for:
    //
    // * Supporting casting between function items, closures, and Fn trait objects.
    // * Supporting methods being cast as function pointers.
    //
    // This was implemented for KCFI support in #123106 and #123052 (which introduced the
    // ReifyReason). The tracking issue for KCFI support for Rust is #123479.
    if matches!(instance.def, InstanceDef::ReifyShim(_, Some(ReifyReason::FnPtr))) {
        options.insert(TypeIdOptions::USE_CONCRETE_SELF);
    }
    // A KCFI type metadata identifier is a 32-bit constant produced by taking the lower half of
    // the xxHash64 of the type metadata identifier. (See llvm/llvm-project@cff5bef.)
    let mut hash: XxHash64 = Default::default();
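That last comment fully determines the shape of a KCFI identifier: hash the mangled type string with xxHash64 and keep the low 32 bits. A minimal sketch, assuming the `twox_hash` crate and a made-up input string (the real input is the Itanium-mangled type produced by `typeid_for_instance`/`typeid_for_fnabi`):

```rust
use std::hash::Hasher;
use twox_hash::XxHash64;

// Lower half of the xxHash64 of a type metadata identifier string, as the
// comment above describes. The input string below is only a placeholder.
fn kcfi_typeid(mangled: &str) -> u32 {
    let mut hash = XxHash64::default();
    hash.write(mangled.as_bytes());
    hash.finish() as u32
}

fn main() {
    // "_ZTSFvvE" stands in for a real Itanium-mangled `fn()` signature.
    println!("{:#010x}", kcfi_typeid("_ZTSFvvE"));
}
```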
@@ -11,13 +11,14 @@ use rustc_data_structures::base_n;
use rustc_data_structures::fx::FxHashMap;
use rustc_hir as hir;
use rustc_hir::lang_items::LangItem;
use rustc_middle::ty::fold::{TypeFolder, TypeSuperFoldable};
use rustc_middle::ty::layout::IntegerExt;
use rustc_middle::ty::TypeVisitableExt;
use rustc_middle::ty::{
    self, Const, ExistentialPredicate, FloatTy, FnSig, Instance, IntTy, List, Region, RegionKind,
    TermKind, Ty, TyCtxt, UintTy,
};
use rustc_middle::ty::{GenericArg, GenericArgKind, GenericArgsRef};
use rustc_middle::ty::{TypeFoldable, TypeVisitableExt};
use rustc_span::def_id::DefId;
use rustc_span::sym;
use rustc_target::abi::call::{Conv, FnAbi, PassMode};
@@ -182,14 +183,15 @@ fn encode_fnsig<'tcx>(
    // Encode the return type
    let transform_ty_options = TransformTyOptions::from_bits(options.bits())
        .unwrap_or_else(|| bug!("encode_fnsig: invalid option(s) `{:?}`", options.bits()));
    let ty = transform_ty(tcx, fn_sig.output(), &mut Vec::new(), transform_ty_options);
    let mut type_folder = TransformTy::new(tcx, transform_ty_options);
    let ty = fn_sig.output().fold_with(&mut type_folder);
    s.push_str(&encode_ty(tcx, ty, dict, encode_ty_options));

    // Encode the parameter types
    let tys = fn_sig.inputs();
    if !tys.is_empty() {
        for ty in tys {
            let ty = transform_ty(tcx, *ty, &mut Vec::new(), transform_ty_options);
            let ty = ty.fold_with(&mut type_folder);
            s.push_str(&encode_ty(tcx, ty, dict, encode_ty_options));
        }

@@ -523,15 +525,9 @@ fn encode_ty<'tcx>(

        ty::Array(ty0, len) => {
            // A<array-length><element-type>
            let len = len.eval_target_usize(tcx, ty::ParamEnv::reveal_all());
            let mut s = String::from("A");
            let _ = write!(
                s,
                "{}",
                &len.try_to_scalar()
                    .unwrap()
                    .to_target_usize(&tcx.data_layout)
                    .expect("Array lens are defined in usize")
            );
            let _ = write!(s, "{}", &len);
            s.push_str(&encode_ty(tcx, *ty0, dict, options));
            compress(dict, DictKey::Ty(ty, TyQ::None), &mut s);
            typeid.push_str(&s);
@@ -756,278 +752,208 @@ fn encode_ty<'tcx>(
    typeid
}

/// Transforms predicates for being encoded and used in the substitution dictionary.
fn transform_predicates<'tcx>(
struct TransformTy<'tcx> {
    tcx: TyCtxt<'tcx>,
    predicates: &List<ty::PolyExistentialPredicate<'tcx>>,
) -> &'tcx List<ty::PolyExistentialPredicate<'tcx>> {
    tcx.mk_poly_existential_predicates_from_iter(predicates.iter().filter_map(|predicate| {
        match predicate.skip_binder() {
            ty::ExistentialPredicate::Trait(trait_ref) => {
                let trait_ref = ty::TraitRef::identity(tcx, trait_ref.def_id);
                Some(ty::Binder::dummy(ty::ExistentialPredicate::Trait(
                    ty::ExistentialTraitRef::erase_self_ty(tcx, trait_ref),
                )))
            }
            ty::ExistentialPredicate::Projection(..) => None,
            ty::ExistentialPredicate::AutoTrait(..) => Some(predicate),
        }
    }))
    options: TransformTyOptions,
    parents: Vec<Ty<'tcx>>,
}

/// Transforms args for being encoded and used in the substitution dictionary.
fn transform_args<'tcx>(
    tcx: TyCtxt<'tcx>,
    args: GenericArgsRef<'tcx>,
    parents: &mut Vec<Ty<'tcx>>,
    options: TransformTyOptions,
) -> GenericArgsRef<'tcx> {
    let args = args.iter().map(|arg| match arg.unpack() {
        GenericArgKind::Type(ty) if ty.is_c_void(tcx) => Ty::new_unit(tcx).into(),
        GenericArgKind::Type(ty) => transform_ty(tcx, ty, parents, options).into(),
        _ => arg,
    });
    tcx.mk_args_from_iter(args)
impl<'tcx> TransformTy<'tcx> {
    fn new(tcx: TyCtxt<'tcx>, options: TransformTyOptions) -> Self {
        TransformTy { tcx, options, parents: Vec::new() }
    }
}

// Transforms a ty::Ty for being encoded and used in the substitution dictionary. It transforms all
// c_void types into unit types unconditionally, generalizes pointers if
// TransformTyOptions::GENERALIZE_POINTERS option is set, and normalizes integers if
// TransformTyOptions::NORMALIZE_INTEGERS option is set.
fn transform_ty<'tcx>(
    tcx: TyCtxt<'tcx>,
    mut ty: Ty<'tcx>,
    parents: &mut Vec<Ty<'tcx>>,
    options: TransformTyOptions,
) -> Ty<'tcx> {
    match ty.kind() {
        ty::Float(..) | ty::Str | ty::Never | ty::Foreign(..) | ty::CoroutineWitness(..) => {}
impl<'tcx> TypeFolder<TyCtxt<'tcx>> for TransformTy<'tcx> {
    // Transforms a ty::Ty for being encoded and used in the substitution dictionary. It transforms
    // all c_void types into unit types unconditionally, generalizes pointers if
    // TransformTyOptions::GENERALIZE_POINTERS option is set, and normalizes integers if
    // TransformTyOptions::NORMALIZE_INTEGERS option is set.
    fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
        match t.kind() {
            ty::Array(..)
            | ty::Closure(..)
            | ty::Coroutine(..)
            | ty::CoroutineClosure(..)
            | ty::CoroutineWitness(..)
            | ty::Float(..)
            | ty::FnDef(..)
            | ty::Foreign(..)
            | ty::Never
            | ty::Slice(..)
            | ty::Str
            | ty::Tuple(..) => t.super_fold_with(self),

        ty::Bool => {
            if options.contains(EncodeTyOptions::NORMALIZE_INTEGERS) {
                // Note: on all platforms that Rust currently supports, its size and alignment are
                // 1, and its ABI class is INTEGER - see Rust Layout and ABIs.
                //
                // (See https://rust-lang.github.io/unsafe-code-guidelines/layout/scalars.html#bool.)
                //
                // Clang represents bool as an 8-bit unsigned integer.
                ty = tcx.types.u8;
            }
        }

        ty::Char => {
            if options.contains(EncodeTyOptions::NORMALIZE_INTEGERS) {
                // Since #118032, char is guaranteed to have the same size, alignment, and function
                // call ABI as u32 on all platforms.
                ty = tcx.types.u32;
            }
        }

        ty::Int(..) | ty::Uint(..) => {
            if options.contains(EncodeTyOptions::NORMALIZE_INTEGERS) {
                // Note: C99 7.18.2.4 requires uintptr_t and intptr_t to be at least 16-bit wide.
                // All platforms we currently support have a C platform, and as a consequence,
                // isize/usize are at least 16-bit wide for all of them.
                //
                // (See https://rust-lang.github.io/unsafe-code-guidelines/layout/scalars.html#isize-and-usize.)
                match ty.kind() {
                    ty::Int(IntTy::Isize) => match tcx.sess.target.pointer_width {
                        16 => ty = tcx.types.i16,
                        32 => ty = tcx.types.i32,
                        64 => ty = tcx.types.i64,
                        128 => ty = tcx.types.i128,
                        _ => bug!(
                            "transform_ty: unexpected pointer width `{}`",
                            tcx.sess.target.pointer_width
                        ),
                    },
                    ty::Uint(UintTy::Usize) => match tcx.sess.target.pointer_width {
                        16 => ty = tcx.types.u16,
                        32 => ty = tcx.types.u32,
                        64 => ty = tcx.types.u64,
                        128 => ty = tcx.types.u128,
                        _ => bug!(
                            "transform_ty: unexpected pointer width `{}`",
                            tcx.sess.target.pointer_width
                        ),
                    },
                    _ => (),
            ty::Bool => {
                if self.options.contains(EncodeTyOptions::NORMALIZE_INTEGERS) {
                    // Note: on all platforms that Rust currently supports, its size and alignment
                    // are 1, and its ABI class is INTEGER - see Rust Layout and ABIs.
                    //
                    // (See https://rust-lang.github.io/unsafe-code-guidelines/layout/scalars.html#bool.)
                    //
                    // Clang represents bool as an 8-bit unsigned integer.
                    self.tcx.types.u8
                } else {
                    t
                }
            }
        }

        _ if ty.is_unit() => {}

        ty::Tuple(tys) => {
            ty = Ty::new_tup_from_iter(
                tcx,
                tys.iter().map(|ty| transform_ty(tcx, ty, parents, options)),
            );
        }

        ty::Array(ty0, len) => {
            let len = len.eval_target_usize(tcx, ty::ParamEnv::reveal_all());

            ty = Ty::new_array(tcx, transform_ty(tcx, *ty0, parents, options), len);
        }

        ty::Slice(ty0) => {
            ty = Ty::new_slice(tcx, transform_ty(tcx, *ty0, parents, options));
        }

        ty::Adt(adt_def, args) => {
            if ty.is_c_void(tcx) {
                ty = Ty::new_unit(tcx);
            } else if options.contains(TransformTyOptions::GENERALIZE_REPR_C) && adt_def.repr().c()
            {
                ty = Ty::new_adt(tcx, *adt_def, ty::List::empty());
            } else if adt_def.repr().transparent() && adt_def.is_struct() && !parents.contains(&ty)
            {
                // Don't transform repr(transparent) types with a user-defined CFI encoding to
                // preserve the user-defined CFI encoding.
                if let Some(_) = tcx.get_attr(adt_def.did(), sym::cfi_encoding) {
                    return ty;
            ty::Char => {
                if self.options.contains(EncodeTyOptions::NORMALIZE_INTEGERS) {
                    // Since #118032, char is guaranteed to have the same size, alignment, and
                    // function call ABI as u32 on all platforms.
                    self.tcx.types.u32
                } else {
                    t
                }
                let variant = adt_def.non_enum_variant();
                let param_env = tcx.param_env(variant.def_id);
                let field = variant.fields.iter().find(|field| {
                    let ty = tcx.type_of(field.did).instantiate_identity();
                    let is_zst =
                        tcx.layout_of(param_env.and(ty)).is_ok_and(|layout| layout.is_zst());
                    !is_zst
                });
                if let Some(field) = field {
                    let ty0 = tcx.type_of(field.did).instantiate(tcx, args);
                    // Generalize any repr(transparent) user-defined type that is either a pointer
                    // or reference, and either references itself or any other type that contains or
                    // references itself, to avoid a reference cycle.
            }

            // If the self reference is not through a pointer, for example, due
            // to using `PhantomData`, need to skip normalizing it if we hit it again.
            parents.push(ty);
            if ty0.is_any_ptr() && ty0.contains(ty) {
                ty = transform_ty(
                    tcx,
                    ty0,
                    parents,
                    options | TransformTyOptions::GENERALIZE_POINTERS,
                );
            } else {
                ty = transform_ty(tcx, ty0, parents, options);
            ty::Int(..) | ty::Uint(..) => {
                if self.options.contains(EncodeTyOptions::NORMALIZE_INTEGERS) {
                    // Note: C99 7.18.2.4 requires uintptr_t and intptr_t to be at least 16-bit
                    // wide. All platforms we currently support have a C platform, and as a
                    // consequence, isize/usize are at least 16-bit wide for all of them.
                    //
                    // (See https://rust-lang.github.io/unsafe-code-guidelines/layout/scalars.html#isize-and-usize.)
                    match t.kind() {
                        ty::Int(IntTy::Isize) => match self.tcx.sess.target.pointer_width {
                            16 => self.tcx.types.i16,
                            32 => self.tcx.types.i32,
                            64 => self.tcx.types.i64,
                            128 => self.tcx.types.i128,
                            _ => bug!(
                                "fold_ty: unexpected pointer width `{}`",
                                self.tcx.sess.target.pointer_width
                            ),
                        },
                        ty::Uint(UintTy::Usize) => match self.tcx.sess.target.pointer_width {
                            16 => self.tcx.types.u16,
                            32 => self.tcx.types.u32,
                            64 => self.tcx.types.u64,
                            128 => self.tcx.types.u128,
                            _ => bug!(
                                "fold_ty: unexpected pointer width `{}`",
                                self.tcx.sess.target.pointer_width
                            ),
                        },
                        _ => t,
                    }
                parents.pop();
            } else {
                // Transform repr(transparent) types without non-ZST field into ()
                ty = Ty::new_unit(tcx);
            }
        } else {
            ty = Ty::new_adt(tcx, *adt_def, transform_args(tcx, args, parents, options));
        }
    }

    ty::FnDef(def_id, args) => {
        ty = Ty::new_fn_def(tcx, *def_id, transform_args(tcx, args, parents, options));
    }

    ty::Closure(def_id, args) => {
        ty = Ty::new_closure(tcx, *def_id, transform_args(tcx, args, parents, options));
    }

    ty::CoroutineClosure(def_id, args) => {
        ty = Ty::new_coroutine_closure(
            tcx,
            *def_id,
            transform_args(tcx, args, parents, options),
        );
    }

    ty::Coroutine(def_id, args) => {
        ty = Ty::new_coroutine(tcx, *def_id, transform_args(tcx, args, parents, options));
    }

    ty::Ref(region, ty0, ..) => {
        if options.contains(TransformTyOptions::GENERALIZE_POINTERS) {
            if ty.is_mutable_ptr() {
                ty = Ty::new_mut_ref(tcx, tcx.lifetimes.re_static, Ty::new_unit(tcx));
            } else {
                ty = Ty::new_imm_ref(tcx, tcx.lifetimes.re_static, Ty::new_unit(tcx));
            }
        } else {
            if ty.is_mutable_ptr() {
                ty = Ty::new_mut_ref(tcx, *region, transform_ty(tcx, *ty0, parents, options));
            } else {
                ty = Ty::new_imm_ref(tcx, *region, transform_ty(tcx, *ty0, parents, options));
                t
            }
        }
    }

    ty::RawPtr(ptr_ty, _) => {
        if options.contains(TransformTyOptions::GENERALIZE_POINTERS) {
            if ty.is_mutable_ptr() {
                ty = Ty::new_mut_ptr(tcx, Ty::new_unit(tcx));
            ty::Adt(..) if t.is_c_void(self.tcx) => self.tcx.types.unit,

            ty::Adt(adt_def, args) => {
                if adt_def.repr().transparent() && adt_def.is_struct() && !self.parents.contains(&t)
                {
                    // Don't transform repr(transparent) types with a user-defined CFI encoding to
                    // preserve the user-defined CFI encoding.
                    if let Some(_) = self.tcx.get_attr(adt_def.did(), sym::cfi_encoding) {
                        return t;
                    }
                    let variant = adt_def.non_enum_variant();
                    let param_env = self.tcx.param_env(variant.def_id);
                    let field = variant.fields.iter().find(|field| {
                        let ty = self.tcx.type_of(field.did).instantiate_identity();
                        let is_zst = self
                            .tcx
                            .layout_of(param_env.and(ty))
                            .is_ok_and(|layout| layout.is_zst());
                        !is_zst
                    });
                    if let Some(field) = field {
                        let ty0 = self.tcx.type_of(field.did).instantiate(self.tcx, args);
                        // Generalize any repr(transparent) user-defined type that is either a
                        // pointer or reference, and either references itself or any other type that
                        // contains or references itself, to avoid a reference cycle.

                        // If the self reference is not through a pointer, for example, due
                        // to using `PhantomData`, need to skip normalizing it if we hit it again.
                        self.parents.push(t);
                        let ty = if ty0.is_any_ptr() && ty0.contains(t) {
                            let options = self.options;
                            self.options |= TransformTyOptions::GENERALIZE_POINTERS;
                            let ty = ty0.fold_with(self);
                            self.options = options;
                            ty
                        } else {
                            ty0.fold_with(self)
                        };
                        self.parents.pop();
                        ty
                    } else {
                        // Transform repr(transparent) types without non-ZST field into ()
                        self.tcx.types.unit
                    }
                } else {
            ty = Ty::new_imm_ptr(tcx, Ty::new_unit(tcx));
            }
        } else {
            if ty.is_mutable_ptr() {
                ty = Ty::new_mut_ptr(tcx, transform_ty(tcx, *ptr_ty, parents, options));
            } else {
                ty = Ty::new_imm_ptr(tcx, transform_ty(tcx, *ptr_ty, parents, options));
                    t.super_fold_with(self)
                }
            }

    ty::FnPtr(fn_sig) => {
        if options.contains(TransformTyOptions::GENERALIZE_POINTERS) {
            ty = Ty::new_imm_ptr(tcx, Ty::new_unit(tcx));
        } else {
            let parameters: Vec<Ty<'tcx>> = fn_sig
                .skip_binder()
                .inputs()
                .iter()
                .map(|ty| transform_ty(tcx, *ty, parents, options))
                .collect();
            let output = transform_ty(tcx, fn_sig.skip_binder().output(), parents, options);
            ty = Ty::new_fn_ptr(
                tcx,
                ty::Binder::bind_with_vars(
                    tcx.mk_fn_sig(
                        parameters,
                        output,
                        fn_sig.c_variadic(),
                        fn_sig.unsafety(),
                        fn_sig.abi(),
                    ),
                    fn_sig.bound_vars(),
                ),
            ty::Ref(..) => {
                if self.options.contains(TransformTyOptions::GENERALIZE_POINTERS) {
                    if t.is_mutable_ptr() {
                        Ty::new_mut_ref(self.tcx, self.tcx.lifetimes.re_static, self.tcx.types.unit)
                    } else {
                        Ty::new_imm_ref(self.tcx, self.tcx.lifetimes.re_static, self.tcx.types.unit)
                    }
                } else {
                    t.super_fold_with(self)
                }
            }

            ty::RawPtr(..) => {
                if self.options.contains(TransformTyOptions::GENERALIZE_POINTERS) {
                    if t.is_mutable_ptr() {
                        Ty::new_mut_ptr(self.tcx, self.tcx.types.unit)
                    } else {
                        Ty::new_imm_ptr(self.tcx, self.tcx.types.unit)
                    }
                } else {
                    t.super_fold_with(self)
                }
            }

            ty::FnPtr(..) => {
                if self.options.contains(TransformTyOptions::GENERALIZE_POINTERS) {
                    Ty::new_imm_ptr(self.tcx, self.tcx.types.unit)
                } else {
                    t.super_fold_with(self)
                }
            }

            ty::Dynamic(predicates, _region, kind) => {
                let predicates = self.tcx.mk_poly_existential_predicates_from_iter(
                    predicates.iter().filter_map(|predicate| match predicate.skip_binder() {
                        ty::ExistentialPredicate::Trait(trait_ref) => {
                            let trait_ref = ty::TraitRef::identity(self.tcx, trait_ref.def_id);
                            Some(ty::Binder::dummy(ty::ExistentialPredicate::Trait(
                                ty::ExistentialTraitRef::erase_self_ty(self.tcx, trait_ref),
                            )))
                        }
                        ty::ExistentialPredicate::Projection(..) => None,
                        ty::ExistentialPredicate::AutoTrait(..) => Some(predicate),
                    }),
                );

                Ty::new_dynamic(self.tcx, predicates, self.tcx.lifetimes.re_erased, *kind)
            }

    ty::Dynamic(predicates, _region, kind) => {
        ty = Ty::new_dynamic(
            tcx,
            transform_predicates(tcx, predicates),
            tcx.lifetimes.re_erased,
            *kind,
        );
    }
            ty::Alias(..) => {
                self.fold_ty(self.tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), t))
            }

    ty::Alias(..) => {
        ty = transform_ty(
            tcx,
            tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), ty),
            parents,
            options,
        );
    }

    ty::Bound(..) | ty::Error(..) | ty::Infer(..) | ty::Param(..) | ty::Placeholder(..) => {
        bug!("transform_ty: unexpected `{:?}`", ty.kind());
            ty::Bound(..) | ty::Error(..) | ty::Infer(..) | ty::Param(..) | ty::Placeholder(..) => {
                bug!("fold_ty: unexpected `{:?}`", t.kind());
            }
        }
    }

    ty
    fn interner(&self) -> TyCtxt<'tcx> {
        self.tcx
    }
}

/// Returns a type metadata identifier for the specified FnAbi using the Itanium C++ ABI with vendor
@@ -1068,7 +994,8 @@ pub fn typeid_for_fnabi<'tcx>(
    // Encode the return type
    let transform_ty_options = TransformTyOptions::from_bits(options.bits())
        .unwrap_or_else(|| bug!("typeid_for_fnabi: invalid option(s) `{:?}`", options.bits()));
    let ty = transform_ty(tcx, fn_abi.ret.layout.ty, &mut Vec::new(), transform_ty_options);
    let mut type_folder = TransformTy::new(tcx, transform_ty_options);
    let ty = fn_abi.ret.layout.ty.fold_with(&mut type_folder);
    typeid.push_str(&encode_ty(tcx, ty, &mut dict, encode_ty_options));

    // Encode the parameter types
@@ -1080,7 +1007,7 @@ pub fn typeid_for_fnabi<'tcx>(
        let mut pushed_arg = false;
        for arg in fn_abi.args.iter().filter(|arg| arg.mode != PassMode::Ignore) {
            pushed_arg = true;
            let ty = transform_ty(tcx, arg.layout.ty, &mut Vec::new(), transform_ty_options);
            let ty = arg.layout.ty.fold_with(&mut type_folder);
            typeid.push_str(&encode_ty(tcx, ty, &mut dict, encode_ty_options));
        }
        if !pushed_arg {
@@ -1093,8 +1020,7 @@ pub fn typeid_for_fnabi<'tcx>(
            if fn_abi.args[n].mode == PassMode::Ignore {
                continue;
            }
            let ty =
                transform_ty(tcx, fn_abi.args[n].layout.ty, &mut Vec::new(), transform_ty_options);
            let ty = fn_abi.args[n].layout.ty.fold_with(&mut type_folder);
            typeid.push_str(&encode_ty(tcx, ty, &mut dict, encode_ty_options));
        }

@@ -1172,7 +1098,7 @@ pub fn typeid_for_instance<'tcx>(
        instance.args = tcx.mk_args_trait(invoke_ty, trait_ref.args.into_iter().skip(1));
    }

    if !options.contains(EncodeTyOptions::NO_SELF_TYPE_ERASURE) {
    if !options.contains(EncodeTyOptions::USE_CONCRETE_SELF) {
        if let Some(impl_id) = tcx.impl_of_method(instance.def_id())
            && let Some(trait_ref) = tcx.impl_trait_ref(impl_id)
        {
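Stepping back from the diff: the old `transform_ty` threaded `tcx`, `parents`, and `options` through every recursive call, while the new `TransformTy` folder keeps that state on `self` and lets `super_fold_with` handle recursion. A toy model of the same shape, with a hand-rolled type enum standing in for rustc's `Ty` (all names here are illustrative, not rustc's):

```rust
// Toy model of the TypeFolder refactor above. State lives on the folder
// struct instead of being passed to a free function on every call.
#[derive(Clone, Debug, PartialEq)]
enum Ty {
    Bool,
    U8,
    Isize,
    I64,
    Tuple(Vec<Ty>),
}

struct TransformTy {
    normalize_integers: bool,
    pointer_width: u32,
}

impl TransformTy {
    fn fold_ty(&mut self, t: Ty) -> Ty {
        match t {
            // bool is ABI-compatible with an 8-bit unsigned integer.
            Ty::Bool if self.normalize_integers => Ty::U8,
            // isize normalizes to the pointer-width integer type.
            Ty::Isize if self.normalize_integers && self.pointer_width == 64 => Ty::I64,
            // Compound types fold their children, mirroring `super_fold_with`.
            Ty::Tuple(tys) => Ty::Tuple(tys.into_iter().map(|ty| self.fold_ty(ty)).collect()),
            other => other,
        }
    }
}

fn main() {
    let mut folder = TransformTy { normalize_integers: true, pointer_width: 64 };
    let folded = folder.fold_ty(Ty::Tuple(vec![Ty::Bool, Ty::Isize]));
    assert_eq!(folded, Ty::Tuple(vec![Ty::U8, Ty::I64]));
}
```

One payoff visible in the real diff is the `GENERALIZE_POINTERS` trick: a flag can be flipped on `self` for one subtree and restored afterwards, instead of constructing a fresh `options` value for every recursive call.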
@@ -8,8 +8,8 @@ use rustc_hir::definitions::{DefPathData, DisambiguatedDefPathData};
use rustc_middle::ty::layout::IntegerExt;
use rustc_middle::ty::print::{Print, PrintError, Printer};
use rustc_middle::ty::{
    self, EarlyBinder, FloatTy, Instance, IntTy, Ty, TyCtxt, TypeVisitable, TypeVisitableExt,
    UintTy,
    self, EarlyBinder, FloatTy, Instance, IntTy, ReifyReason, Ty, TyCtxt, TypeVisitable,
    TypeVisitableExt, UintTy,
};
use rustc_middle::ty::{GenericArg, GenericArgKind};
use rustc_span::symbol::kw;
@@ -44,7 +44,9 @@ pub(super) fn mangle<'tcx>(
    let shim_kind = match instance.def {
        ty::InstanceDef::ThreadLocalShim(_) => Some("tls"),
        ty::InstanceDef::VTableShim(_) => Some("vtable"),
        ty::InstanceDef::ReifyShim(_) => Some("reify"),
        ty::InstanceDef::ReifyShim(_, None) => Some("reify"),
        ty::InstanceDef::ReifyShim(_, Some(ReifyReason::FnPtr)) => Some("reify-fnptr"),
        ty::InstanceDef::ReifyShim(_, Some(ReifyReason::Vtable)) => Some("reify-vtable"),

        ty::InstanceDef::ConstructCoroutineInClosureShim { .. }
        | ty::InstanceDef::CoroutineKindShim { .. } => Some("fn_once"),
@@ -102,6 +102,7 @@ fn pre_link_args(os: &'static str, arch: Arch, abi: &'static str) -> LinkArgs {
        "ios" => ios_deployment_target(arch, abi),
        "tvos" => tvos_deployment_target(),
        "watchos" => watchos_deployment_target(),
        "visionos" => visionos_deployment_target(),
        "macos" => macos_deployment_target(arch),
        _ => unreachable!(),
    };
@@ -202,6 +203,8 @@ pub fn sdk_version(platform: u32) -> Option<(u32, u32)> {
        | object::macho::PLATFORM_TVOSSIMULATOR
        | object::macho::PLATFORM_MACCATALYST => Some((16, 2)),
        object::macho::PLATFORM_WATCHOS | object::macho::PLATFORM_WATCHOSSIMULATOR => Some((9, 1)),
        // FIXME: Upgrade to `object-rs` 0.33+ implementation with visionOS platform definition
        11 | 12 => Some((1, 0)),
        _ => None,
    }
}
@@ -216,6 +219,9 @@ pub fn platform(target: &Target) -> Option<u32> {
        ("watchos", _) => object::macho::PLATFORM_WATCHOS,
        ("tvos", "sim") => object::macho::PLATFORM_TVOSSIMULATOR,
        ("tvos", _) => object::macho::PLATFORM_TVOS,
        // FIXME: Upgrade to `object-rs` 0.33+ implementation with visionOS platform definition
        ("visionos", "sim") => 12,
        ("visionos", _) => 11,
        _ => return None,
    })
}
@@ -240,6 +246,7 @@ pub fn deployment_target(target: &Target) -> Option<(u32, u32)> {
        }
        "watchos" => watchos_deployment_target(),
        "tvos" => tvos_deployment_target(),
        "visionos" => visionos_deployment_target(),
        _ => return None,
    };

@@ -290,6 +297,7 @@ fn link_env_remove(os: &'static str) -> StaticCow<[StaticCow<str>]> {
                || sdkroot.contains("AppleTVSimulator.platform")
                || sdkroot.contains("WatchOS.platform")
                || sdkroot.contains("WatchSimulator.platform")
                || sdkroot.contains("XROS.platform")
            {
                env_remove.push("SDKROOT".into())
            }
@@ -299,6 +307,7 @@ fn link_env_remove(os: &'static str) -> StaticCow<[StaticCow<str>]> {
        // although this is apparently ignored when using the linker at "/usr/bin/ld".
        env_remove.push("IPHONEOS_DEPLOYMENT_TARGET".into());
        env_remove.push("TVOS_DEPLOYMENT_TARGET".into());
        env_remove.push("XROS_DEPLOYMENT_TARGET".into());
        env_remove.into()
    } else {
        // Otherwise if cross-compiling for a different OS/SDK (including Mac Catalyst), remove any part
@@ -363,3 +372,18 @@ pub fn watchos_sim_llvm_target(arch: Arch) -> String {
    let (major, minor) = watchos_deployment_target();
    format!("{}-apple-watchos{}.{}.0-simulator", arch.target_name(), major, minor)
}

fn visionos_deployment_target() -> (u32, u32) {
    // If you are looking for the default deployment target, prefer `rustc --print deployment-target`.
    from_set_deployment_target("XROS_DEPLOYMENT_TARGET").unwrap_or((1, 0))
}

pub fn visionos_llvm_target(arch: Arch) -> String {
    let (major, minor) = visionos_deployment_target();
    format!("{}-apple-visionos{}.{}.0", arch.target_name(), major, minor)
}

pub fn visionos_sim_llvm_target(arch: Arch) -> String {
    let (major, minor) = visionos_deployment_target();
    format!("{}-apple-visionos{}.{}.0-simulator", arch.target_name(), major, minor)
}
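`from_set_deployment_target` (defined earlier in this file) is what actually reads these environment variables; the sketch below shows the general pattern of resolving a deployment target with a fallback, under the assumption that the value is a plain `major.minor` string:

```rust
use std::env;

// Illustrative stand-in for the real `from_set_deployment_target`: parse a
// "major.minor" deployment target from an environment variable.
fn deployment_target_from_env(var: &str) -> Option<(u32, u32)> {
    let value = env::var(var).ok()?;
    let (major, minor) = value.split_once('.')?;
    Some((major.parse().ok()?, minor.parse().ok()?))
}

fn main() {
    // visionOS defaults to 1.0 when XROS_DEPLOYMENT_TARGET is unset.
    let (major, minor) = deployment_target_from_env("XROS_DEPLOYMENT_TARGET").unwrap_or((1, 0));
    println!("arm64-apple-visionos{major}.{minor}.0");
}
```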
@@ -1,6 +1,7 @@
use crate::spec::targets::{
    aarch64_apple_darwin, aarch64_apple_ios_sim, aarch64_apple_watchos_sim, i686_apple_darwin,
    x86_64_apple_darwin, x86_64_apple_ios, x86_64_apple_tvos, x86_64_apple_watchos_sim,
    aarch64_apple_darwin, aarch64_apple_ios_sim, aarch64_apple_visionos_sim,
    aarch64_apple_watchos_sim, i686_apple_darwin, x86_64_apple_darwin, x86_64_apple_ios,
    x86_64_apple_tvos, x86_64_apple_watchos_sim,
};

#[test]
@@ -12,6 +13,7 @@ fn simulator_targets_set_abi() {
        aarch64_apple_ios_sim::target(),
        // Note: There is currently no ARM64 tvOS simulator target
        aarch64_apple_watchos_sim::target(),
        aarch64_apple_visionos_sim::target(),
    ];

    for target in &all_sim_targets {
@@ -32,7 +34,11 @@ fn macos_link_environment_unmodified() {
        // for the host.
        assert_eq!(
            target.link_env_remove,
            crate::spec::cvs!["IPHONEOS_DEPLOYMENT_TARGET", "TVOS_DEPLOYMENT_TARGET"],
            crate::spec::cvs![
                "IPHONEOS_DEPLOYMENT_TARGET",
                "TVOS_DEPLOYMENT_TARGET",
                "XROS_DEPLOYMENT_TARGET"
            ],
        );
    }
}
@@ -1557,6 +1557,9 @@ supported_targets! {
    ("aarch64-apple-watchos", aarch64_apple_watchos),
    ("aarch64-apple-watchos-sim", aarch64_apple_watchos_sim),

    ("aarch64-apple-visionos", aarch64_apple_visionos),
    ("aarch64-apple-visionos-sim", aarch64_apple_visionos_sim),

    ("armebv7r-none-eabi", armebv7r_none_eabi),
    ("armebv7r-none-eabihf", armebv7r_none_eabihf),
    ("armv7r-none-eabi", armv7r_none_eabi),
@@ -0,0 +1,27 @@
use crate::spec::base::apple::{opts, visionos_llvm_target, Arch};
use crate::spec::{FramePointer, SanitizerSet, Target, TargetOptions};

pub fn target() -> Target {
    let arch = Arch::Arm64;
    let mut base = opts("visionos", arch);
    base.supported_sanitizers = SanitizerSet::ADDRESS | SanitizerSet::THREAD;

    Target {
        llvm_target: visionos_llvm_target(arch).into(),
        metadata: crate::spec::TargetMetadata {
            description: Some("ARM64 Apple visionOS".into()),
            tier: Some(3),
            host_tools: Some(false),
            std: Some(false),
        },
        pointer_width: 64,
        data_layout: "e-m:o-i64:64-i128:128-n32:64-S128".into(),
        arch: arch.target_arch(),
        options: TargetOptions {
            features: "+neon,+fp-armv8,+apple-a16".into(),
            max_atomic_width: Some(128),
            frame_pointer: FramePointer::NonLeaf,
            ..base
        },
    }
}
@@ -0,0 +1,27 @@
use crate::spec::base::apple::{opts, visionos_sim_llvm_target, Arch};
use crate::spec::{FramePointer, SanitizerSet, Target, TargetOptions};

pub fn target() -> Target {
    let arch = Arch::Arm64_sim;
    let mut base = opts("visionos", arch);
    base.supported_sanitizers = SanitizerSet::ADDRESS | SanitizerSet::THREAD;

    Target {
        llvm_target: visionos_sim_llvm_target(arch).into(),
        metadata: crate::spec::TargetMetadata {
            description: Some("ARM64 Apple visionOS simulator".into()),
            tier: Some(3),
            host_tools: Some(false),
            std: Some(false),
        },
        pointer_width: 64,
        data_layout: "e-m:o-i64:64-i128:128-n32:64-S128".into(),
        arch: arch.target_arch(),
        options: TargetOptions {
            features: "+neon,+fp-armv8,+apple-a16".into(),
            max_atomic_width: Some(128),
            frame_pointer: FramePointer::NonLeaf,
            ..base
        },
    }
}
@@ -22,7 +22,7 @@ pub fn target() -> Target {
        linker: Some("rust-lld".into()),
        relocation_model: RelocModel::Static,
        panic_strategy: PanicStrategy::Abort,
        features: "+vfp3,-d32,-fp16".into(),
        features: "+vfp3d16".into(),
        max_atomic_width: Some(64),
        emit_debug_gdb_scripts: false,
        // GCC defaults to 8 for arm-none here.

@@ -21,7 +21,7 @@ pub fn target() -> Target {
        linker: Some("rust-lld".into()),
        relocation_model: RelocModel::Static,
        panic_strategy: PanicStrategy::Abort,
        features: "+vfp3,-d32,-fp16".into(),
        features: "+vfp3d16".into(),
        max_atomic_width: Some(64),
        emit_debug_gdb_scripts: false,
        // GCC defaults to 8 for arm-none here.
@@ -25,16 +25,15 @@ pub fn target() -> Target {

        options: TargetOptions {
            abi: "eabihf".into(),
            // `+vfp4` is the lowest common denominator between the Cortex-M4 (vfp4-16) and the
            // Cortex-M7 (vfp5)
            // `-d32` both the Cortex-M4 and the Cortex-M7 only have 16 double-precision registers
            // available
            // `-fp64` The Cortex-M4 only supports single precision floating point operations
            // whereas in the Cortex-M7 double precision is optional
            // vfp4 is the lowest common denominator between the Cortex-M4F (vfp4) and the
            // Cortex-M7 (vfp5).
            // Both the Cortex-M4 and the Cortex-M7 only have 16 double-precision registers
            // available, and the Cortex-M4 only supports single-precision floating point operations
            // whereas in the Cortex-M7 double-precision is optional.
            //
            // Reference:
            // ARMv7-M Architecture Reference Manual - A2.5 The optional floating-point extension
            features: "+vfp4,-d32,-fp64".into(),
            features: "+vfp4d16sp".into(),
            max_atomic_width: Some(32),
            ..base::thumb::opts()
        },

@@ -22,8 +22,7 @@ pub fn target() -> Target {
            // processor, the Cortex-M33 Technical Reference Manual states that
            // the FPU uses the FPv5 architecture, single-precision instructions
            // and 16 D registers.
            // These parameters map to the following LLVM features.
            features: "+fp-armv8,-fp64,-d32".into(),
            features: "+fp-armv8d16sp".into(),
            max_atomic_width: Some(32),
            ..base::thumb::opts()
        },
@@ -2,8 +2,8 @@
//! Doing this via a separate goal is called "deferred alias relation" and part
//! of our more general approach to "lazy normalization".
//!
//! This is done by first normalizing both sides of the goal, ending up in
//! either a concrete type, rigid alias, or an infer variable.
//! This is done by first structurally normalizing both sides of the goal, ending
//! up in either a concrete type, rigid alias, or an infer variable.
//! These are related further according to the rules below:
//!
//! (1.) If we end up with two rigid aliases, then we relate them structurally.
@@ -14,18 +14,10 @@
//!
//! (3.) Otherwise, if we end with two rigid (non-projection) or infer types,
//! relate them structurally.
//!
//! Subtle: when relating an opaque to another type, we emit a
//! `NormalizesTo(opaque, ?fresh_var)` goal when trying to normalize the opaque.
//! This nested goal starts out as ambiguous and does not actually define the opaque.
//! However, if `?fresh_var` ends up getting equated to another type, we retry the
//! `NormalizesTo` goal, at which point the opaque is actually defined.

use super::EvalCtxt;
use rustc_infer::traits::query::NoSolution;
use rustc_infer::traits::solve::GoalSource;
use rustc_middle::traits::solve::{Certainty, Goal, QueryResult};
use rustc_middle::ty::{self, Ty};
use rustc_middle::ty;

impl<'tcx> EvalCtxt<'_, 'tcx> {
    #[instrument(level = "debug", skip(self), ret)]
@@ -36,21 +28,34 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
        let tcx = self.tcx();
        let Goal { param_env, predicate: (lhs, rhs, direction) } = goal;

        let Some(lhs) = self.try_normalize_term(param_env, lhs)? else {
            return self
                .evaluate_added_goals_and_make_canonical_response(Certainty::overflow(true));
        // Structurally normalize the lhs.
        let lhs = if let Some(alias) = lhs.to_alias_ty(self.tcx()) {
            let term = self.next_term_infer_of_kind(lhs);
            self.add_normalizes_to_goal(goal.with(tcx, ty::NormalizesTo { alias, term }));
            term
        } else {
            lhs
        };

        let Some(rhs) = self.try_normalize_term(param_env, rhs)? else {
            return self
                .evaluate_added_goals_and_make_canonical_response(Certainty::overflow(true));
        // Structurally normalize the rhs.
        let rhs = if let Some(alias) = rhs.to_alias_ty(self.tcx()) {
            let term = self.next_term_infer_of_kind(rhs);
            self.add_normalizes_to_goal(goal.with(tcx, ty::NormalizesTo { alias, term }));
            term
        } else {
            rhs
        };

        // Apply the constraints.
        self.try_evaluate_added_goals()?;
        let lhs = self.resolve_vars_if_possible(lhs);
        let rhs = self.resolve_vars_if_possible(rhs);
        debug!(?lhs, ?rhs);

        let variance = match direction {
            ty::AliasRelationDirection::Equate => ty::Variance::Invariant,
            ty::AliasRelationDirection::Subtype => ty::Variance::Covariant,
        };

        match (lhs.to_alias_ty(tcx), rhs.to_alias_ty(tcx)) {
            (None, None) => {
                self.relate(param_env, lhs, variance, rhs)?;
@@ -58,14 +63,18 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
            }

            (Some(alias), None) => {
                self.relate_rigid_alias_non_alias(param_env, alias, variance, rhs)
                self.relate_rigid_alias_non_alias(param_env, alias, variance, rhs)?;
                self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
            }
            (None, Some(alias)) => {
                self.relate_rigid_alias_non_alias(
                    param_env,
                    alias,
                    variance.xform(ty::Variance::Contravariant),
                    lhs,
                )?;
                self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
            }
            (None, Some(alias)) => self.relate_rigid_alias_non_alias(
                param_env,
                alias,
                variance.xform(ty::Variance::Contravariant),
                lhs,
            ),

            (Some(alias_lhs), Some(alias_rhs)) => {
                self.relate(param_env, alias_lhs, variance, alias_rhs)?;
@@ -73,104 +82,4 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
            }
        }
    }

    /// Relate a rigid alias with another type. This is the same as
    /// an ordinary relate except that we treat the outermost alias
    /// constructor as rigid.
    #[instrument(level = "debug", skip(self, param_env), ret)]
    fn relate_rigid_alias_non_alias(
        &mut self,
        param_env: ty::ParamEnv<'tcx>,
        alias: ty::AliasTy<'tcx>,
        variance: ty::Variance,
        term: ty::Term<'tcx>,
    ) -> QueryResult<'tcx> {
        // NOTE: this check is purely an optimization, the structural eq would
        // always fail if the term is not an inference variable.
        if term.is_infer() {
            let tcx = self.tcx();
            // We need to relate `alias` to `term` treating only the outermost
            // constructor as rigid, relating any contained generic arguments as
            // normal. We do this by first structurally equating the `term`
            // with the alias constructor instantiated with unconstrained infer vars,
            // and then relate this with the whole `alias`.
            //
            // Alternatively we could modify `Equate` for this case by adding another
            // variant to `StructurallyRelateAliases`.
            let identity_args = self.fresh_args_for_item(alias.def_id);
            let rigid_ctor = ty::AliasTy::new(tcx, alias.def_id, identity_args);
            self.eq_structurally_relating_aliases(param_env, term, rigid_ctor.to_ty(tcx).into())?;
            self.eq(param_env, alias, rigid_ctor)?;
            self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
        } else {
            Err(NoSolution)
        }
    }

    // FIXME: This needs a name that reflects that it's okay to bottom out with an inference var.
    /// Normalize the `term` to equate it later.
    #[instrument(level = "debug", skip(self, param_env), ret)]
    fn try_normalize_term(
        &mut self,
        param_env: ty::ParamEnv<'tcx>,
        term: ty::Term<'tcx>,
    ) -> Result<Option<ty::Term<'tcx>>, NoSolution> {
        match term.unpack() {
            ty::TermKind::Ty(ty) => {
                Ok(self.try_normalize_ty_recur(param_env, 0, ty).map(Into::into))
            }
            ty::TermKind::Const(_) => {
                if let Some(alias) = term.to_alias_ty(self.tcx()) {
                    let term = self.next_term_infer_of_kind(term);
                    self.add_normalizes_to_goal(Goal::new(
                        self.tcx(),
                        param_env,
                        ty::NormalizesTo { alias, term },
                    ));
                    self.try_evaluate_added_goals()?;
                    Ok(Some(self.resolve_vars_if_possible(term)))
                } else {
                    Ok(Some(term))
                }
            }
        }
    }

    #[instrument(level = "debug", skip(self, param_env), ret)]
    fn try_normalize_ty_recur(
        &mut self,
        param_env: ty::ParamEnv<'tcx>,
        depth: usize,
        ty: Ty<'tcx>,
    ) -> Option<Ty<'tcx>> {
        if !self.tcx().recursion_limit().value_within_limit(depth) {
            return None;
        }

        let ty::Alias(kind, alias) = *ty.kind() else {
            return Some(ty);
        };

        match self.commit_if_ok(|this| {
            let tcx = this.tcx();
            let normalized_ty = this.next_ty_infer();
            let normalizes_to = ty::NormalizesTo { alias, term: normalized_ty.into() };
            match kind {
                ty::AliasKind::Opaque => {
                    // HACK: Unlike for associated types, `normalizes-to` for opaques
                    // is currently not treated as a function. We do not erase the
                    // expected term.
                    this.add_goal(GoalSource::Misc, Goal::new(tcx, param_env, normalizes_to));
                }
                ty::AliasKind::Projection | ty::AliasKind::Inherent | ty::AliasKind::Weak => {
                    this.add_normalizes_to_goal(Goal::new(tcx, param_env, normalizes_to))
                }
            }
            this.try_evaluate_added_goals()?;
            Ok(this.resolve_vars_if_possible(normalized_ty))
        }) {
            Ok(ty) => self.try_normalize_ty_recur(param_env, depth + 1, ty),
            Err(NoSolution) => Some(ty),
        }
    }
}
|
||||
fn forced_ambiguity(&mut self, cause: MaybeCause) -> Vec<Candidate<'tcx>> {
|
||||
let source = CandidateSource::BuiltinImpl(BuiltinImplSource::Misc);
|
||||
let certainty = Certainty::Maybe(cause);
|
||||
let result = self.evaluate_added_goals_and_make_canonical_response(certainty).unwrap();
|
||||
// This may fail if `try_evaluate_added_goals` overflows because it
|
||||
// fails to reach a fixpoint but ends up getting an error after
|
||||
// running for some additional step.
|
||||
//
|
||||
// FIXME: Add a test for this. It seems to be necessary for typenum but
|
||||
// is incredibly hard to minimize as it may rely on being inside of a
|
||||
// trait solver cycle.
|
||||
let result = self.evaluate_added_goals_and_make_canonical_response(certainty);
|
||||
let mut dummy_probe = self.inspect.new_probe();
|
||||
dummy_probe.probe_kind(ProbeKind::TraitCandidate { source, result: Ok(result) });
|
||||
dummy_probe.probe_kind(ProbeKind::TraitCandidate { source, result });
|
||||
self.inspect.finish_probe(dummy_probe);
|
||||
vec![Candidate { source, result }]
|
||||
if let Ok(result) = result { vec![Candidate { source, result }] } else { vec![] }
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip_all)]
|
||||
|
@@ -332,7 +332,7 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
/// whether an alias is rigid by using the trait solver. When instantiating a response
/// from the solver we assume that the solver correctly handled aliases and therefore
/// always relate them structurally here.
#[instrument(level = "debug", skip(infcx), ret)]
#[instrument(level = "debug", skip(infcx))]
fn unify_query_var_values(
    infcx: &InferCtxt<'tcx>,
    param_env: ty::ParamEnv<'tcx>,
@@ -1,47 +0,0 @@
use super::{EvalCtxt, NestedGoals};
use crate::solve::inspect;
use rustc_middle::traits::query::NoSolution;

impl<'a, 'tcx> EvalCtxt<'a, 'tcx> {
    pub(in crate::solve) fn commit_if_ok<T>(
        &mut self,
        f: impl FnOnce(&mut EvalCtxt<'_, 'tcx>) -> Result<T, NoSolution>,
    ) -> Result<T, NoSolution> {
        let mut nested_ecx = EvalCtxt {
            infcx: self.infcx,
            variables: self.variables,
            var_values: self.var_values,
            is_normalizes_to_goal: self.is_normalizes_to_goal,
            predefined_opaques_in_body: self.predefined_opaques_in_body,
            max_input_universe: self.max_input_universe,
            search_graph: self.search_graph,
            nested_goals: NestedGoals::new(),
            tainted: self.tainted,
            inspect: self.inspect.new_probe(),
        };

        let result = nested_ecx.infcx.commit_if_ok(|_| f(&mut nested_ecx));
        if result.is_ok() {
            let EvalCtxt {
                infcx: _,
                variables: _,
                var_values: _,
                is_normalizes_to_goal: _,
                predefined_opaques_in_body: _,
                max_input_universe: _,
                search_graph: _,
                nested_goals,
                tainted,
                inspect,
            } = nested_ecx;
            self.nested_goals.extend(nested_goals);
            self.tainted = tainted;
            self.inspect.integrate_snapshot(inspect);
        } else {
            nested_ecx.inspect.probe_kind(inspect::ProbeKind::CommitIfOk);
            self.inspect.finish_probe(nested_ecx.inspect);
        }

        result
    }
}
@@ -24,7 +24,6 @@ use rustc_middle::ty::{
use rustc_session::config::DumpSolverProofTree;
use rustc_span::DUMMY_SP;
use std::io::Write;
use std::iter;
use std::ops::ControlFlow;

use crate::traits::vtable::{count_own_vtable_entries, prepare_vtable_segments, VtblSegment};
@@ -36,7 +35,6 @@ use super::{GoalSource, SolverMode};
pub use select::InferCtxtSelectExt;

mod canonical;
mod commit_if_ok;
mod probe;
mod select;

@@ -124,11 +122,6 @@ impl<'tcx> NestedGoals<'tcx> {
    pub(super) fn is_empty(&self) -> bool {
        self.normalizes_to_goals.is_empty() && self.goals.is_empty()
    }

    pub(super) fn extend(&mut self, other: NestedGoals<'tcx>) {
        self.normalizes_to_goals.extend(other.normalizes_to_goals);
        self.goals.extend(other.goals)
    }
}

#[derive(PartialEq, Eq, Debug, Hash, HashStable, Clone, Copy)]
@@ -511,12 +504,6 @@ impl<'a, 'tcx> EvalCtxt<'a, 'tcx> {

        self.inspect.evaluate_added_goals_loop_start();

        fn with_misc_source<'tcx>(
            it: impl IntoIterator<Item = Goal<'tcx, ty::Predicate<'tcx>>>,
        ) -> impl Iterator<Item = (GoalSource, Goal<'tcx, ty::Predicate<'tcx>>)> {
            iter::zip(iter::repeat(GoalSource::Misc), it)
        }

        // If this loop did not result in any progress, what's our final certainty.
        let mut unchanged_certainty = Some(Certainty::Yes);
        for goal in goals.normalizes_to_goals {
@@ -534,16 +521,28 @@ impl<'a, 'tcx> EvalCtxt<'a, 'tcx> {
                unconstrained_goal,
            )?;
            // Add the nested goals from normalization to our own nested goals.
            debug!(?nested_goals);
            goals.goals.extend(nested_goals);

            // Finally, equate the goal's RHS with the unconstrained var.
            // We put the nested goals from this into goals instead of
            // next_goals to avoid needing to process the loop one extra
            // time if this goal returns something -- I don't think this
            // matters in practice, though.
            let eq_goals =
                self.eq_and_get_goals(goal.param_env, goal.predicate.term, unconstrained_rhs)?;
            goals.goals.extend(with_misc_source(eq_goals));
            //
            // SUBTLE:
            // We structurally relate aliases here. This is necessary
            // as we otherwise emit a nested `AliasRelate` goal in case the
            // returned term is a rigid alias, resulting in overflow.
            //
            // It is correct as both `goal.predicate.term` and `unconstrained_rhs`
            // start out as an unconstrained inference variable so any aliases get
            // fully normalized when instantiating it.
            //
            // FIXME: Strictly speaking this may be incomplete if the normalized-to
            // type contains an ambiguous alias referencing bound regions. We should
            // consider changing this to only use "shallow structural equality".
            self.eq_structurally_relating_aliases(
                goal.param_env,
                goal.predicate.term,
                unconstrained_rhs,
            )?;

            // We only look at the `projection_ty` part here rather than
            // looking at the "has changed" return from evaluate_goal,
@@ -720,7 +719,8 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
    ) -> Result<(), NoSolution> {
        self.infcx
            .at(&ObligationCause::dummy(), param_env)
            .eq(DefineOpaqueTypes::No, lhs, rhs)
            // New solver ignores DefineOpaqueTypes, so choose Yes for consistency
            .eq(DefineOpaqueTypes::Yes, lhs, rhs)
            .map(|InferOk { value: (), obligations }| {
                self.add_goals(GoalSource::Misc, obligations.into_iter().map(|o| o.into()));
            })
@@ -730,6 +730,46 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
            })
    }

    /// This should be used when relating a rigid alias with another type.
    ///
    /// Normally we emit a nested `AliasRelate` when equating an inference
    /// variable and an alias. This function instead constrains the inference
    /// variable to the alias without emitting a nested alias relate goal.
    #[instrument(level = "debug", skip(self, param_env), ret)]
    pub(super) fn relate_rigid_alias_non_alias(
        &mut self,
        param_env: ty::ParamEnv<'tcx>,
        alias: ty::AliasTy<'tcx>,
        variance: ty::Variance,
        term: ty::Term<'tcx>,
    ) -> Result<(), NoSolution> {
        // NOTE: this check is purely an optimization, the structural eq would
        // always fail if the term is not an inference variable.
        if term.is_infer() {
            let tcx = self.tcx();
            // We need to relate `alias` to `term` treating only the outermost
            // constructor as rigid, relating any contained generic arguments as
            // normal. We do this by first structurally equating the `term`
            // with the alias constructor instantiated with unconstrained infer vars,
            // and then relate this with the whole `alias`.
            //
            // Alternatively we could modify `Equate` for this case by adding another
            // variant to `StructurallyRelateAliases`.
            let identity_args = self.fresh_args_for_item(alias.def_id);
            let rigid_ctor = ty::AliasTy::new(tcx, alias.def_id, identity_args);
            let ctor_ty = rigid_ctor.to_ty(tcx);
            let InferOk { value: (), obligations } = self
                .infcx
                .at(&ObligationCause::dummy(), param_env)
                .trace(term, ctor_ty.into())
                .eq_structurally_relating_aliases(term, ctor_ty.into())?;
            debug_assert!(obligations.is_empty());
            self.relate(param_env, alias, variance, rigid_ctor)
        } else {
            Err(NoSolution)
        }
    }

    /// This should only be used when we're either instantiating a previously
    /// unconstrained "return value" or when we're sure that all aliases in
    /// the types are rigid.
@@ -759,7 +799,8 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
    ) -> Result<(), NoSolution> {
        self.infcx
            .at(&ObligationCause::dummy(), param_env)
            .sub(DefineOpaqueTypes::No, sub, sup)
            // New solver ignores DefineOpaqueTypes, so choose Yes for consistency
            .sub(DefineOpaqueTypes::Yes, sub, sup)
            .map(|InferOk { value: (), obligations }| {
                self.add_goals(GoalSource::Misc, obligations.into_iter().map(|o| o.into()));
            })
@@ -779,7 +820,8 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
    ) -> Result<(), NoSolution> {
        self.infcx
            .at(&ObligationCause::dummy(), param_env)
            .relate(DefineOpaqueTypes::No, lhs, variance, rhs)
            // New solver ignores DefineOpaqueTypes, so choose Yes for consistency
            .relate(DefineOpaqueTypes::Yes, lhs, variance, rhs)
            .map(|InferOk { value: (), obligations }| {
                self.add_goals(GoalSource::Misc, obligations.into_iter().map(|o| o.into()));
            })
@@ -803,7 +845,8 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
    ) -> Result<Vec<Goal<'tcx, ty::Predicate<'tcx>>>, NoSolution> {
        self.infcx
            .at(&ObligationCause::dummy(), param_env)
            .eq(DefineOpaqueTypes::No, lhs, rhs)
            // New solver ignores DefineOpaqueTypes, so choose Yes for consistency
            .eq(DefineOpaqueTypes::Yes, lhs, rhs)
            .map(|InferOk { value: (), obligations }| {
                obligations.into_iter().map(|o| o.into()).collect()
            })
@@ -182,7 +182,8 @@ fn rematch_impl<'tcx>(

    let mut nested = infcx
        .at(&ObligationCause::dummy(), goal.param_env)
        .eq(DefineOpaqueTypes::No, goal.predicate.trait_ref, impl_trait_ref)
        // New solver ignores DefineOpaqueTypes, so choose Yes for consistency
        .eq(DefineOpaqueTypes::Yes, goal.predicate.trait_ref, impl_trait_ref)
        .map_err(|_| SelectionError::Unimplemented)?
        .into_obligations();

@@ -257,7 +258,8 @@ fn rematch_unsize<'tcx>(
            nested.extend(
                infcx
                    .at(&ObligationCause::dummy(), goal.param_env)
                    .eq(DefineOpaqueTypes::No, a_elem_ty, b_elem_ty)
                    // New solver ignores DefineOpaqueTypes, so choose Yes for consistency
                    .eq(DefineOpaqueTypes::Yes, a_elem_ty, b_elem_ty)
                    .expect("expected rematch to succeed")
                    .into_obligations(),
            );
@@ -300,7 +302,8 @@ fn rematch_unsize<'tcx>(
            nested.extend(
                infcx
                    .at(&ObligationCause::dummy(), goal.param_env)
                    .eq(DefineOpaqueTypes::No, unsized_a_ty, b_ty)
                    // New solver ignores DefineOpaqueTypes, so choose Yes for consistency
                    .eq(DefineOpaqueTypes::Yes, unsized_a_ty, b_ty)
                    .expect("expected rematch to succeed")
                    .into_obligations(),
            );
@@ -329,7 +332,8 @@ fn rematch_unsize<'tcx>(
            nested.extend(
                infcx
                    .at(&ObligationCause::dummy(), goal.param_env)
                    .eq(DefineOpaqueTypes::No, unsized_a_ty, b_ty)
                    // New solver ignores DefineOpaqueTypes, so choose Yes for consistency
                    .eq(DefineOpaqueTypes::Yes, unsized_a_ty, b_ty)
                    .expect("expected rematch to succeed")
                    .into_obligations(),
            );
@@ -130,17 +130,14 @@ impl<'a, 'tcx> InspectGoal<'a, 'tcx> {
                     self.candidates_recur(candidates, nested_goals, probe);
                     nested_goals.truncate(num_goals);
                 }
-                inspect::ProbeStep::EvaluateGoals(_)
-                | inspect::ProbeStep::CommitIfOkStart
-                | inspect::ProbeStep::CommitIfOkSuccess => (),
+                inspect::ProbeStep::EvaluateGoals(_) => (),
             }
         }

         match probe.kind {
             inspect::ProbeKind::NormalizedSelfTyAssembly
             | inspect::ProbeKind::UnsizeAssembly
-            | inspect::ProbeKind::UpcastProjectionCompatibility
-            | inspect::ProbeKind::CommitIfOk => (),
+            | inspect::ProbeKind::UpcastProjectionCompatibility => (),
             // We add a candidate for the root evaluation if there
             // is only one way to prove a given goal, e.g. for `WellFormed`.
             //

@@ -157,7 +154,8 @@ impl<'a, 'tcx> InspectGoal<'a, 'tcx> {
                 });
             }
         }
-            inspect::ProbeKind::MiscCandidate { name: _, result }
+            inspect::ProbeKind::TryNormalizeNonRigid { result }
+            | inspect::ProbeKind::MiscCandidate { name: _, result }
             | inspect::ProbeKind::TraitCandidate { source: _, result } => {
                 candidates.push(InspectCandidate {
                     goal: self,
@@ -220,8 +220,6 @@ enum WipProbeStep<'tcx> {
     AddGoal(GoalSource, inspect::CanonicalState<'tcx, Goal<'tcx, ty::Predicate<'tcx>>>),
     EvaluateGoals(WipAddedGoalsEvaluation<'tcx>),
     NestedProbe(WipProbe<'tcx>),
-    CommitIfOkStart,
-    CommitIfOkSuccess,
 }

 impl<'tcx> WipProbeStep<'tcx> {

@@ -230,8 +228,6 @@ impl<'tcx> WipProbeStep<'tcx> {
             WipProbeStep::AddGoal(source, goal) => inspect::ProbeStep::AddGoal(source, goal),
             WipProbeStep::EvaluateGoals(eval) => inspect::ProbeStep::EvaluateGoals(eval.finalize()),
             WipProbeStep::NestedProbe(probe) => inspect::ProbeStep::NestedProbe(probe.finalize()),
-            WipProbeStep::CommitIfOkStart => inspect::ProbeStep::CommitIfOkStart,
-            WipProbeStep::CommitIfOkSuccess => inspect::ProbeStep::CommitIfOkSuccess,
         }
     }
 }
@@ -467,29 +463,6 @@ impl<'tcx> ProofTreeBuilder<'tcx> {
         }
     }

-    /// Used by `EvalCtxt::commit_if_ok` to flatten the work done inside
-    /// of the probe into the parent.
-    pub fn integrate_snapshot(&mut self, probe: ProofTreeBuilder<'tcx>) {
-        if let Some(this) = self.as_mut() {
-            match (this, *probe.state.unwrap()) {
-                (
-                    DebugSolver::Probe(WipProbe { steps, .. })
-                    | DebugSolver::GoalEvaluationStep(WipGoalEvaluationStep {
-                        evaluation: WipProbe { steps, .. },
-                        ..
-                    }),
-                    DebugSolver::Probe(probe),
-                ) => {
-                    steps.push(WipProbeStep::CommitIfOkStart);
-                    assert_eq!(probe.kind, None);
-                    steps.extend(probe.steps);
-                    steps.push(WipProbeStep::CommitIfOkSuccess);
-                }
-                _ => unreachable!(),
-            }
-        }
-    }
-
     pub fn new_evaluate_added_goals(&mut self) -> ProofTreeBuilder<'tcx> {
         self.nested(|| WipAddedGoalsEvaluation { evaluations: vec![], result: None })
     }
@@ -177,7 +177,7 @@ impl<'tcx> FallibleTypeFolder<TyCtxt<'tcx>> for NormalizationFolder<'_, 'tcx> {
     fn try_fold_ty(&mut self, ty: Ty<'tcx>) -> Result<Ty<'tcx>, Self::Error> {
         let infcx = self.at.infcx;
         debug_assert_eq!(ty, infcx.shallow_resolve(ty));
-        if !ty.has_projections() {
+        if !ty.has_aliases() {
             return Ok(ty);
         }

@@ -204,7 +204,7 @@ impl<'tcx> FallibleTypeFolder<TyCtxt<'tcx>> for NormalizationFolder<'_, 'tcx> {
     fn try_fold_const(&mut self, ct: ty::Const<'tcx>) -> Result<ty::Const<'tcx>, Self::Error> {
         let infcx = self.at.infcx;
         debug_assert_eq!(ct, infcx.shallow_resolve(ct));
-        if !ct.has_projections() {
+        if !ct.has_aliases() {
             return Ok(ct);
         }
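`has_projections` only accounts for associated-type projections, while `has_aliases` also covers the other alias kinds the solver can normalize, such as opaque types and weak type aliases (my reading of the rename, based on rustc's `ty::AliasKind` variants). For instance, both of these return types contain an alias, but only the second is a projection:

```rust
trait Trait {
    type Assoc;
}

impl Trait for u8 {
    type Assoc = i32;
}

// The return type is an opaque alias (`impl Sized`), not a projection.
fn opaque() -> impl Sized {
    0u8
}

// `<u8 as Trait>::Assoc` is a projection alias.
fn projection() -> <u8 as Trait>::Assoc {
    0
}

fn main() {
    let _ = opaque();
    let _ = projection();
}
```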
@@ -7,6 +7,7 @@ use rustc_hir::def::DefKind;
 use rustc_hir::def_id::DefId;
 use rustc_hir::LangItem;
 use rustc_infer::traits::query::NoSolution;
+use rustc_infer::traits::solve::inspect::ProbeKind;
 use rustc_infer::traits::specialization_graph::LeafDef;
 use rustc_infer::traits::Reveal;
 use rustc_middle::traits::solve::{

@@ -30,14 +31,41 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
         &mut self,
         goal: Goal<'tcx, NormalizesTo<'tcx>>,
     ) -> QueryResult<'tcx> {
-        let def_id = goal.predicate.def_id();
-        let def_kind = self.tcx().def_kind(def_id);
-        match def_kind {
-            DefKind::OpaqueTy => return self.normalize_opaque_type(goal),
-            _ => self.set_is_normalizes_to_goal(),
-        }
-
+        self.set_is_normalizes_to_goal();
         debug_assert!(self.term_is_fully_unconstrained(goal));
+        let normalize_result = self
+            .probe(|&result| ProbeKind::TryNormalizeNonRigid { result })
+            .enter(|this| this.normalize_at_least_one_step(goal));
+
+        match normalize_result {
+            Ok(res) => Ok(res),
+            Err(NoSolution) => {
+                let Goal { param_env, predicate: NormalizesTo { alias, term } } = goal;
+                if alias.opt_kind(self.tcx()).is_some() {
+                    self.relate_rigid_alias_non_alias(
+                        param_env,
+                        alias,
+                        ty::Variance::Invariant,
+                        term,
+                    )?;
+                    self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
+                } else {
+                    // FIXME(generic_const_exprs): we currently do not support rigid
+                    // unevaluated constants.
+                    Err(NoSolution)
+                }
+            }
+        }
+    }
+
+    /// Normalize the given alias by at least one step. If the alias is rigid, this
+    /// returns `NoSolution`.
+    #[instrument(level = "debug", skip(self), ret)]
+    fn normalize_at_least_one_step(
+        &mut self,
+        goal: Goal<'tcx, NormalizesTo<'tcx>>,
+    ) -> QueryResult<'tcx> {
+        let def_id = goal.predicate.def_id();
+        match self.tcx().def_kind(def_id) {
             DefKind::AssocTy | DefKind::AssocConst => {
                 match self.tcx().associated_item(def_id).container {
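The fallback arm above is what lets `NormalizesTo` goals succeed for aliases that are already rigid. A plain-Rust sketch of the situation it handles (runnable as-is):

```rust
trait Trait {
    type Assoc;
}

// Inside this function, `<T as Trait>::Assoc` cannot be normalized any
// further: there is no impl to project through while `T` is generic. The
// code above handles this by relating the alias rigidly instead of
// returning `NoSolution` for the whole goal.
fn id_assoc<T: Trait>(x: T::Assoc) -> T::Assoc {
    x
}

impl Trait for u8 {
    type Assoc = i32;
}

fn main() {
    // With a concrete `T`, the same alias normalizes by one step to `i32`.
    println!("{}", id_assoc::<u8>(-1));
}
```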
@@ -52,35 +80,22 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
                 }
             DefKind::AnonConst => self.normalize_anon_const(goal),
             DefKind::TyAlias => self.normalize_weak_type(goal),
+            DefKind::OpaqueTy => self.normalize_opaque_type(goal),
             kind => bug!("unknown DefKind {} in normalizes-to goal: {goal:#?}", kind.descr(def_id)),
         }
     }

-    /// When normalizing an associated item, constrain the result to `term`.
+    /// When normalizing an associated item, constrain the expected term to `term`.
     ///
     /// While `NormalizesTo` goals have the normalized-to term as an argument,
     /// this argument is always fully unconstrained for associated items.
     /// It is therefore appropriate to instead think of these `NormalizesTo` goals
     /// as functions returning a term after normalizing.
     ///
-    /// When equating an inference variable and an alias, we tend to emit `alias-relate`
-    /// goals and only actually instantiate the inference variable with an alias if the
-    /// alias is rigid. However, this means that constraining the expected term of
-    /// such goals ends up fully structurally normalizing the resulting type instead of
-    /// normalizing it only by one step. To avoid this we instead use structural equality
-    /// here, resulting in each `NormalizesTo` goal only projecting by a single step.
-    ///
-    /// Not doing so currently causes issues because trying to normalize an opaque type
-    /// during alias-relate doesn't actually constrain the opaque if the concrete type
-    /// is an inference variable. This means that `NormalizesTo` for associated types
-    /// normalizing to an opaque type always resulted in ambiguity, breaking tests e.g.
-    /// tests/ui/type-alias-impl-trait/issue-78450.rs.
+    /// We know `term` to always be a fully unconstrained inference variable, so
+    /// `eq` should never fail here. However, in case `term` contains aliases, we
+    /// emit nested `AliasRelate` goals to structurally normalize the alias.
     pub fn instantiate_normalizes_to_term(
         &mut self,
         goal: Goal<'tcx, NormalizesTo<'tcx>>,
         term: ty::Term<'tcx>,
     ) {
-        self.eq_structurally_relating_aliases(goal.param_env, goal.predicate.term, term)
+        self.eq(goal.param_env, goal.predicate.term, term)
             .expect("expected goal term to be fully unconstrained");
     }
 }
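The doc comment above says each `NormalizesTo` goal instantiates `term` with the result of a single projection step, with any remaining aliases handled by nested `AliasRelate` goals. A small runnable illustration of what "one step at a time" means for a nested alias:

```rust
trait Id {
    type This;
}

impl<T> Id for T {
    type This = T;
}

fn main() {
    // Normalizing `<<u8 as Id>::This as Id>::This` takes two single steps:
    // the outer alias first normalizes to `<u8 as Id>::This`, which itself
    // normalizes to `u8`.
    let x: <<u8 as Id>::This as Id>::This = 5u8;
    println!("{x}");
}
```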
@@ -58,12 +58,6 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
             }
         }

-        let expected = self.structurally_normalize_ty(goal.param_env, expected)?;
-        if expected.is_ty_var() {
-            return self
-                .evaluate_added_goals_and_make_canonical_response(Certainty::AMBIGUOUS);
-        }
-
         // Otherwise, define a new opaque type
         self.insert_hidden_type(opaque_type_key, goal.param_env, expected)?;
         self.add_item_bounds_for_hidden_type(
@@ -477,7 +477,8 @@ fn plug_infer_with_placeholders<'tcx>(
         if ty.is_ty_var() {
             let Ok(InferOk { value: (), obligations }) =
                 self.infcx.at(&ObligationCause::dummy(), ty::ParamEnv::empty()).eq(
-                    DefineOpaqueTypes::No,
+                    // Comparing against a type variable never registers hidden types anyway
+                    DefineOpaqueTypes::Yes,
                     ty,
                     Ty::new_placeholder(
                         self.infcx.tcx,

@@ -504,7 +505,9 @@ fn plug_infer_with_placeholders<'tcx>(
         if ct.is_ct_infer() {
             let Ok(InferOk { value: (), obligations }) =
                 self.infcx.at(&ObligationCause::dummy(), ty::ParamEnv::empty()).eq(
-                    DefineOpaqueTypes::No,
+                    // The types of the constants are the same, so there is no hidden type
+                    // registration happening anyway.
+                    DefineOpaqueTypes::Yes,
                     ct,
                     ty::Const::new_placeholder(
                         self.infcx.tcx,

@@ -532,7 +535,8 @@ fn plug_infer_with_placeholders<'tcx>(
         if r.is_var() {
             let Ok(InferOk { value: (), obligations }) =
                 self.infcx.at(&ObligationCause::dummy(), ty::ParamEnv::empty()).eq(
-                    DefineOpaqueTypes::No,
+                    // Lifetimes don't contain opaque types (or any types for that matter).
+                    DefineOpaqueTypes::Yes,
                     r,
                     ty::Region::new_placeholder(
                         self.infcx.tcx,
@@ -7,18 +7,21 @@ use rustc_span::{Span, DUMMY_SP};

 use crate::traits::ObligationCtxt;

-pub enum Ambiguity {
+#[derive(Debug)]
+pub enum CandidateSource {
     DefId(DefId),
     ParamEnv(Span),
 }

-pub fn recompute_applicable_impls<'tcx>(
+pub fn compute_applicable_impls_for_diagnostics<'tcx>(
     infcx: &InferCtxt<'tcx>,
     obligation: &PolyTraitObligation<'tcx>,
-) -> Vec<Ambiguity> {
+) -> Vec<CandidateSource> {
     let tcx = infcx.tcx;
     let param_env = obligation.param_env;

+    let predicate_polarity = obligation.predicate.skip_binder().polarity;
+
     let impl_may_apply = |impl_def_id| {
         let ocx = ObligationCtxt::new(infcx);
         infcx.enter_forall(obligation.predicate, |placeholder_obligation| {

@@ -40,6 +43,15 @@ pub fn recompute_applicable_impls<'tcx>(
             return false;
         }

+        let impl_trait_header = tcx.impl_trait_header(impl_def_id).unwrap();
+        let impl_polarity = impl_trait_header.polarity;
+
+        match (impl_polarity, predicate_polarity) {
+            (ty::ImplPolarity::Positive, ty::PredicatePolarity::Positive)
+            | (ty::ImplPolarity::Negative, ty::PredicatePolarity::Negative) => {}
+            _ => return false,
+        }
+
         let impl_predicates = tcx.predicates_of(impl_def_id).instantiate(tcx, impl_args);
         ocx.register_obligations(impl_predicates.predicates.iter().map(|&predicate| {
             Obligation::new(tcx, ObligationCause::dummy(), param_env, predicate)

@@ -86,7 +98,7 @@ pub fn recompute_applicable_impls<'tcx>(
             obligation.predicate.skip_binder().trait_ref.self_ty(),
             |impl_def_id| {
                 if infcx.probe(|_| impl_may_apply(impl_def_id)) {
-                    ambiguities.push(Ambiguity::DefId(impl_def_id))
+                    ambiguities.push(CandidateSource::DefId(impl_def_id))
                 }
             },
         );

@@ -101,9 +113,9 @@ pub fn recompute_applicable_impls<'tcx>(
             if kind.rebind(trait_pred.trait_ref)
                 == ty::Binder::dummy(ty::TraitRef::identity(tcx, trait_pred.def_id()))
             {
-                ambiguities.push(Ambiguity::ParamEnv(tcx.def_span(trait_pred.def_id())))
+                ambiguities.push(CandidateSource::ParamEnv(tcx.def_span(trait_pred.def_id())))
             } else {
-                ambiguities.push(Ambiguity::ParamEnv(span))
+                ambiguities.push(CandidateSource::ParamEnv(span))
             }
         }
     }
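The new polarity check filters out impls whose polarity does not match the goal's, so a negative impl is no longer reported as an applicable candidate for a positive obligation. A sketch of the kind of impl it filters (this assumes the unstable `negative_impls` feature, so it only builds on nightly):

```rust
#![feature(negative_impls)]

trait Tr {}

struct S;

// A negative impl: `S` promises to never implement `Tr`. For a positive
// goal like `S: Tr`, the polarity match above rejects this impl as a
// candidate instead of listing it in the ambiguity diagnostic.
impl !Tr for S {}

fn main() {}
```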
@@ -1,6 +1,6 @@
 // ignore-tidy-filelength :(

-mod ambiguity;
+pub mod ambiguity;
 mod infer_ctxt_ext;
 pub mod on_unimplemented;
 pub mod suggestions;
@@ -3842,7 +3842,9 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
             self.probe(|_| {
                 match self
                     .at(&ObligationCause::misc(expr.span, body_id), param_env)
-                    .eq(DefineOpaqueTypes::No, expected, actual)
+                    // Doesn't actually matter if we define opaque types here, this is just used for
+                    // diagnostics, and the result is never kept around.
+                    .eq(DefineOpaqueTypes::Yes, expected, actual)
                 {
                     Ok(_) => (), // We ignore nested obligations here for now.
                     Err(err) => type_diffs.push(err),
@@ -10,7 +10,7 @@ use crate::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
 use crate::infer::InferCtxtExt as _;
 use crate::infer::{self, InferCtxt};
 use crate::traits::error_reporting::infer_ctxt_ext::InferCtxtExt;
-use crate::traits::error_reporting::{ambiguity, ambiguity::Ambiguity::*};
+use crate::traits::error_reporting::{ambiguity, ambiguity::CandidateSource::*};
 use crate::traits::query::evaluate_obligation::InferCtxtExt as _;
 use crate::traits::specialize::to_pretty_impl_header;
 use crate::traits::NormalizeExt;

@@ -2386,7 +2386,7 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
             )
         };

-        let mut ambiguities = ambiguity::recompute_applicable_impls(
+        let mut ambiguities = ambiguity::compute_applicable_impls_for_diagnostics(
             self.infcx,
             &obligation.with(self.tcx, trait_ref),
         );

@@ -2702,7 +2702,7 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
     fn annotate_source_of_ambiguity(
         &self,
         err: &mut Diag<'_>,
-        ambiguities: &[ambiguity::Ambiguity],
+        ambiguities: &[ambiguity::CandidateSource],
         predicate: ty::Predicate<'tcx>,
     ) {
         let mut spans = vec![];

@@ -2711,7 +2711,7 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
         let mut has_param_env = false;
         for ambiguity in ambiguities {
             match ambiguity {
-                ambiguity::Ambiguity::DefId(impl_def_id) => {
+                ambiguity::CandidateSource::DefId(impl_def_id) => {
                     match self.tcx.span_of_impl(*impl_def_id) {
                         Ok(span) => spans.push(span),
                         Err(name) => {

@@ -2722,7 +2722,7 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
                     }
                 }
             }
-            ambiguity::Ambiguity::ParamEnv(span) => {
+            ambiguity::CandidateSource::ParamEnv(span) => {
                 has_param_env = true;
                 spans.push(*span);
             }
Some files were not shown because too many files have changed in this diff.