Merge branch 'master' into issue-69276

Author: csmoe
Date: 2020-05-18 08:46:24 +08:00
Commit: 2f311b07c8
589 changed files with 7983 additions and 3099 deletions


@@ -34,6 +34,7 @@ jobs:
CI_JOB_NAME: "${{ matrix.name }}"
SCCACHE_BUCKET: rust-lang-gha-caches
TOOLSTATE_REPO: "https://github.com/pietroalbini/rust-toolstate"
+CACHE_DOMAIN: ci-caches-gha.rust-lang.org
if: "github.event_name == 'pull_request'"
strategy:
matrix:
@@ -146,6 +147,7 @@ jobs:
TOOLSTATE_PUBLISH: 1
CACHES_AWS_ACCESS_KEY_ID: AKIA46X5W6CZOMUQATD5
ARTIFACTS_AWS_ACCESS_KEY_ID: AKIA46X5W6CZH5AYXDVF
+CACHE_DOMAIN: ci-caches-gha.rust-lang.org
if: "github.event_name == 'push' && github.ref == 'refs/heads/try' && github.repository == 'rust-lang-ci/rust'"
strategy:
matrix:
@@ -255,6 +257,7 @@ jobs:
TOOLSTATE_PUBLISH: 1
CACHES_AWS_ACCESS_KEY_ID: AKIA46X5W6CZOMUQATD5
ARTIFACTS_AWS_ACCESS_KEY_ID: AKIA46X5W6CZH5AYXDVF
+CACHE_DOMAIN: ci-caches-gha.rust-lang.org
if: "github.event_name == 'push' && github.ref == 'refs/heads/auto' && github.repository == 'rust-lang-ci/rust'"
strategy:
matrix:
@@ -606,6 +609,7 @@ jobs:
TOOLSTATE_PUBLISH: 1
CACHES_AWS_ACCESS_KEY_ID: AKIA46X5W6CZOMUQATD5
ARTIFACTS_AWS_ACCESS_KEY_ID: AKIA46X5W6CZH5AYXDVF
+CACHE_DOMAIN: ci-caches-gha.rust-lang.org
if: "github.event_name == 'push' && github.ref == 'refs/heads/master' && github.repository == 'rust-lang-ci/rust'"
steps:
- name: checkout the source code


@@ -438,7 +438,7 @@ dependencies = [
"proc-macro2 1.0.3",
"quote 1.0.2",
"syn 1.0.11",
-"synstructure 0.12.1",
+"synstructure",
]
[[package]]
@@ -537,7 +537,6 @@ dependencies = [
"compiletest_rs",
"derive-new",
"lazy_static 1.4.0",
-"regex",
"rustc-workspace-hack",
"rustc_tools_util 0.2.0",
"semver",
@@ -938,13 +937,13 @@ checksum = "a0afaad2b26fa326569eb264b1363e8ae3357618c43982b3f285f0774ce76b69"
[[package]]
name = "derive-new"
-version = "0.5.6"
+version = "0.5.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6ca414e896ae072546f4d789f452daaecf60ddee4c9df5dc6d5936d769e3d87c"
+checksum = "71f31892cd5c62e414316f2963c5689242c43d8e7bbcaaeca97e5e28c95d91d9"
dependencies = [
-"proc-macro2 0.4.30",
-"quote 0.6.12",
-"syn 0.15.35",
+"proc-macro2 1.0.3",
+"quote 1.0.2",
+"syn 1.0.11",
]
[[package]]
@@ -1145,14 +1144,14 @@ dependencies = [
[[package]]
name = "failure_derive"
-version = "0.1.5"
+version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ea1063915fd7ef4309e222a5a07cf9c319fb9c7836b1f89b85458672dbb127e1"
+checksum = "aa4da3c766cd7a0db8242e326e9e4e081edd567072893ed320008189715366a4"
dependencies = [
-"proc-macro2 0.4.30",
-"quote 0.6.12",
-"syn 0.15.35",
-"synstructure 0.10.2",
+"proc-macro2 1.0.3",
+"quote 1.0.2",
+"syn 1.0.11",
+"synstructure",
]
[[package]]
@@ -1405,30 +1404,18 @@ dependencies = [
[[package]]
name = "handlebars"
-version = "2.0.1"
+version = "3.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "df044dd42cdb7e32f28557b661406fc0f2494be75199779998810dbc35030e0d"
+checksum = "ba758d094d31274eb49d15da6f326b96bf3185239a6359bf684f3d5321148900"
dependencies = [
-"hashbrown 0.5.0",
-"lazy_static 1.4.0",
"log",
"pest",
"pest_derive",
"quick-error",
-"regex",
"serde",
"serde_json",
]
-[[package]]
-name = "hashbrown"
-version = "0.5.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e1de41fb8dba9714efd92241565cdff73f78508c95697dd56787d3cba27e2353"
-dependencies = [
-"serde",
-]
[[package]]
name = "hashbrown"
version = "0.6.2"
@@ -2055,9 +2042,9 @@ dependencies = [
[[package]]
name = "mdbook"
-version = "0.3.5"
+version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "031bdd9d4893c983e2f69ebc4b59070feee8276a584c4aabdcb351235ea28016"
+checksum = "e7ec525f7ebccc2dd935c263717250cd37f9a4b264a77c5dbc950ea2734d8159"
dependencies = [
"ammonia",
"chrono",
@@ -2557,15 +2544,15 @@ dependencies = [
[[package]]
name = "pest_generator"
-version = "2.1.0"
+version = "2.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "63120576c4efd69615b5537d3d052257328a4ca82876771d6944424ccfd9f646"
+checksum = "99b8db626e31e5b81787b9783425769681b347011cc59471e33ea46d2ea0cf55"
dependencies = [
"pest",
"pest_meta",
-"proc-macro2 0.4.30",
-"quote 0.6.12",
-"syn 0.15.35",
+"proc-macro2 1.0.3",
+"quote 1.0.2",
+"syn 1.0.11",
]
[[package]]
@@ -2785,9 +2772,9 @@ checksum = "6ddd112cca70a4d30883b2d21568a1d376ff8be4758649f64f973c6845128ad3"
[[package]]
name = "quick-error"
-version = "1.2.2"
+version = "1.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9274b940887ce9addde99c4eee6b5c44cc494b182b97e73dc8ffdcb3397fd3f0"
+checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0"
[[package]]
name = "quine-mc_cluskey"
@@ -3449,7 +3436,7 @@ dependencies = [
"proc-macro2 1.0.3",
"quote 1.0.2",
"syn 1.0.11",
-"synstructure 0.12.1",
+"synstructure",
]
[[package]]
@@ -4059,7 +4046,7 @@ dependencies = [
"proc-macro2 1.0.3",
"quote 1.0.2",
"syn 1.0.11",
-"synstructure 0.12.1",
+"synstructure",
]
[[package]]
@@ -4630,13 +4617,13 @@ dependencies = [
[[package]]
name = "serde_derive"
-version = "1.0.81"
+version = "1.0.106"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "477b13b646f5b5b56fc95bedfc3b550d12141ce84f466f6c44b9a17589923885"
+checksum = "9e549e3abf4fb8621bd1609f11dfc9f5e50320802273b12f3811a67e6716ea6c"
dependencies = [
-"proc-macro2 0.4.30",
-"quote 0.6.12",
-"syn 0.15.35",
+"proc-macro2 1.0.3",
+"quote 1.0.2",
+"syn 1.0.11",
]
[[package]]
@@ -4778,9 +4765,9 @@ checksum = "ffbc596e092fe5f598b12ef46cc03754085ac2f4d8c739ad61c4ae266cc3b3fa"
[[package]]
name = "stacker"
-version = "0.1.8"
+version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "32c2467b8abbb417e4e62fd62229719b9c9d77714a7fa989f1afad16ba9c9743"
+checksum = "72dd941b456e1c006d6b9f27c526d5b69281288aeea8cba82c19d3843d8ccdd2"
dependencies = [
"cc",
"cfg-if",
@@ -4800,7 +4787,7 @@ dependencies = [
"core",
"dlmalloc",
"fortanix-sgx-abi",
-"hashbrown 0.6.2",
+"hashbrown",
"hermit-abi",
"libc",
"panic_abort",
@@ -4932,18 +4919,6 @@ dependencies = [
"unicode-xid 0.2.0",
]
-[[package]]
-name = "synstructure"
-version = "0.10.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "02353edf96d6e4dc81aea2d8490a7e9db177bf8acb0e951c24940bf866cb313f"
-dependencies = [
-"proc-macro2 0.4.30",
-"quote 0.6.12",
-"syn 0.15.35",
-"unicode-xid 0.1.0",
-]
[[package]]
name = "synstructure"
version = "0.12.1"


@@ -314,6 +314,10 @@
# library.
#debug-assertions = false
+# Whether or not debug assertions are enabled for the standard library.
+# Overrides the `debug-assertions` option, if defined.
+#debug-assertions-std = false
# Debuginfo level for most of Rust code, corresponds to the `-C debuginfo=N` option of `rustc`.
# `0` - no debug info
# `1` - line tables only
@@ -411,10 +415,6 @@
# sysroot.
#llvm-tools = false
-# Indicates whether LLDB will be made available in the sysroot.
-# This is only built if LLVM is also being built.
-#lldb = false
# Whether to deny warnings in crates
#deny-warnings = true


@@ -180,13 +180,16 @@ def format_build_time(duration):
def default_build_triple():
"""Build triple as in LLVM"""
default_encoding = sys.getdefaultencoding()
-required = not sys.platform == 'win32'
-ostype = require(["uname", "-s"], exit=required).decode(default_encoding)
-cputype = require(['uname', '-m'], exit=required).decode(default_encoding)
+required = sys.platform != 'win32'
+ostype = require(["uname", "-s"], exit=required)
+cputype = require(['uname', '-m'], exit=required)
if ostype is None or cputype is None:
return 'x86_64-pc-windows-msvc'
+ostype = ostype.decode(default_encoding)
+cputype = cputype.decode(default_encoding)
# The goal here is to come up with the same triple as LLVM would,
# at least for the subset of platforms we're willing to target.
ostype_mapper = {


@@ -439,7 +439,6 @@ impl<'a> Builder<'a> {
dist::Clippy,
dist::Miri,
dist::LlvmTools,
-dist::Lldb,
dist::Extended,
dist::HashSign
),
@@ -916,7 +915,14 @@ impl<'a> Builder<'a> {
.env("RUSTC", self.out.join("bootstrap/debug/rustc"))
.env("RUSTC_REAL", self.rustc(compiler))
.env("RUSTC_STAGE", stage.to_string())
-.env("RUSTC_DEBUG_ASSERTIONS", self.config.rust_debug_assertions.to_string())
+.env(
+"RUSTC_DEBUG_ASSERTIONS",
+if mode == Mode::Std {
+self.config.rust_debug_assertions_std.to_string()
+} else {
+self.config.rust_debug_assertions.to_string()
+},
+)
.env("RUSTC_SYSROOT", &sysroot)
.env("RUSTC_LIBDIR", &libdir)
.env("RUSTDOC", self.out.join("bootstrap/debug/rustdoc"))


@@ -85,7 +85,6 @@ pub struct Config {
pub use_lld: bool,
pub lld_enabled: bool,
-pub lldb_enabled: bool,
pub llvm_tools_enabled: bool,
pub llvm_cflags: Option<String>,
@@ -98,6 +97,7 @@ pub struct Config {
pub rust_codegen_units: Option<u32>,
pub rust_codegen_units_std: Option<u32>,
pub rust_debug_assertions: bool,
+pub rust_debug_assertions_std: bool,
pub rust_debuginfo_level_rustc: u32,
pub rust_debuginfo_level_std: u32,
pub rust_debuginfo_level_tools: u32,
@@ -315,6 +315,7 @@ struct Rust {
codegen_units: Option<u32>,
codegen_units_std: Option<u32>,
debug_assertions: Option<bool>,
+debug_assertions_std: Option<bool>,
debuginfo_level: Option<u32>,
debuginfo_level_rustc: Option<u32>,
debuginfo_level_std: Option<u32>,
@@ -337,7 +338,6 @@ struct Rust {
lld: Option<bool>,
use_lld: Option<bool>,
llvm_tools: Option<bool>,
-lldb: Option<bool>,
deny_warnings: Option<bool>,
backtrace_on_ice: Option<bool>,
verify_llvm_ir: Option<bool>,
@@ -520,6 +520,7 @@ impl Config {
let mut llvm_assertions = None;
let mut debug = None;
let mut debug_assertions = None;
+let mut debug_assertions_std = None;
let mut debuginfo_level = None;
let mut debuginfo_level_rustc = None;
let mut debuginfo_level_std = None;
@@ -562,6 +563,7 @@
if let Some(ref rust) = toml.rust {
debug = rust.debug;
debug_assertions = rust.debug_assertions;
+debug_assertions_std = rust.debug_assertions_std;
debuginfo_level = rust.debuginfo_level;
debuginfo_level_rustc = rust.debuginfo_level_rustc;
debuginfo_level_std = rust.debuginfo_level_std;
@@ -585,7 +587,6 @@
}
set(&mut config.use_lld, rust.use_lld);
set(&mut config.lld_enabled, rust.lld);
-set(&mut config.lldb_enabled, rust.lldb);
set(&mut config.llvm_tools_enabled, rust.llvm_tools);
config.rustc_parallel = rust.parallel_compiler.unwrap_or(false);
config.rustc_default_linker = rust.default_linker.clone();
@@ -661,6 +662,8 @@
let default = debug == Some(true);
config.rust_debug_assertions = debug_assertions.unwrap_or(default);
+config.rust_debug_assertions_std =
+debug_assertions_std.unwrap_or(config.rust_debug_assertions);
let with_defaults = |debuginfo_level_specific: Option<u32>| {
debuginfo_level_specific.or(debuginfo_level).unwrap_or(if debug == Some(true) {


@@ -57,7 +57,6 @@ o("cargo-native-static", "build.cargo-native-static", "static native libraries i
o("profiler", "build.profiler", "build the profiler runtime")
o("full-tools", None, "enable all tools")
o("lld", "rust.lld", "build lld")
-o("lldb", "rust.lldb", "build lldb")
o("missing-tools", "dist.missing-tools", "allow failures when building tools")
o("use-libcxx", "llvm.use-libcxx", "build LLVM with libc++")
o("control-flow-guard", "rust.control-flow-guard", "Enable Control Flow Guard")


@@ -38,8 +38,6 @@ pub fn pkgname(builder: &Builder<'_>, component: &str) -> String {
format!("{}-{}", component, builder.rustfmt_package_vers())
} else if component == "llvm-tools" {
format!("{}-{}", component, builder.llvm_tools_package_vers())
-} else if component == "lldb" {
-format!("{}-{}", component, builder.lldb_package_vers())
} else {
assert!(component.starts_with("rust"));
format!("{}-{}", component, builder.rust_package_vers())
@@ -1645,7 +1643,6 @@ impl Step for Extended {
let llvm_tools_installer = builder.ensure(LlvmTools { target });
let clippy_installer = builder.ensure(Clippy { compiler, target });
let miri_installer = builder.ensure(Miri { compiler, target });
-let lldb_installer = builder.ensure(Lldb { target });
let mingw_installer = builder.ensure(Mingw { host: target });
let analysis_installer = builder.ensure(Analysis { compiler, target });
@@ -1681,7 +1678,6 @@ impl Step for Extended {
tarballs.extend(miri_installer.clone());
tarballs.extend(rustfmt_installer.clone());
tarballs.extend(llvm_tools_installer);
-tarballs.extend(lldb_installer);
tarballs.push(analysis_installer);
tarballs.push(std_installer);
if builder.config.docs {
@@ -2222,7 +2218,6 @@ impl Step for HashSign {
cmd.arg(builder.package_vers(&builder.release_num("miri")));
cmd.arg(builder.package_vers(&builder.release_num("rustfmt")));
cmd.arg(builder.llvm_tools_package_vers());
-cmd.arg(builder.lldb_package_vers());
builder.create_dir(&distdir(builder));
@@ -2349,119 +2344,3 @@ impl Step for LlvmTools {
Some(distdir(builder).join(format!("{}-{}.tar.gz", name, target)))
}
}
-#[derive(Clone, Debug, Eq, Hash, PartialEq)]
-pub struct Lldb {
-pub target: Interned<String>,
-}
-impl Step for Lldb {
-type Output = Option<PathBuf>;
-const ONLY_HOSTS: bool = true;
-const DEFAULT: bool = true;
-fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
-run.path("src/llvm-project/lldb").path("src/tools/lldb")
-}
-fn make_run(run: RunConfig<'_>) {
-run.builder.ensure(Lldb { target: run.target });
-}
-fn run(self, builder: &Builder<'_>) -> Option<PathBuf> {
-let target = self.target;
-if builder.config.dry_run {
-return None;
-}
-let bindir = builder.llvm_out(target).join("bin");
-let lldb_exe = bindir.join(exe("lldb", &target));
-if !lldb_exe.exists() {
-return None;
-}
-builder.info(&format!("Dist Lldb ({})", target));
-let src = builder.src.join("src/llvm-project/lldb");
-let name = pkgname(builder, "lldb");
-let tmp = tmpdir(builder);
-let image = tmp.join("lldb-image");
-drop(fs::remove_dir_all(&image));
-// Prepare the image directory
-let root = image.join("lib/rustlib").join(&*target);
-let dst = root.join("bin");
-t!(fs::create_dir_all(&dst));
-for program in &["lldb", "lldb-argdumper", "lldb-mi", "lldb-server"] {
-let exe = bindir.join(exe(program, &target));
-builder.install(&exe, &dst, 0o755);
-}
-// The libraries.
-let libdir = builder.llvm_out(target).join("lib");
-let dst = root.join("lib");
-t!(fs::create_dir_all(&dst));
-for entry in t!(fs::read_dir(&libdir)) {
-let entry = entry.unwrap();
-if let Ok(name) = entry.file_name().into_string() {
-if name.starts_with("liblldb.") && !name.ends_with(".a") {
-if t!(entry.file_type()).is_symlink() {
-builder.copy_to_folder(&entry.path(), &dst);
-} else {
-builder.install(&entry.path(), &dst, 0o755);
-}
-}
-}
-}
-// The lldb scripts might be installed in lib/python$version
-// or in lib64/python$version. If lib64 exists, use it;
-// otherwise lib.
-let libdir = builder.llvm_out(target).join("lib64");
-let (libdir, libdir_name) = if libdir.exists() {
-(libdir, "lib64")
-} else {
-(builder.llvm_out(target).join("lib"), "lib")
-};
-for entry in t!(fs::read_dir(&libdir)) {
-let entry = t!(entry);
-if let Ok(name) = entry.file_name().into_string() {
-if name.starts_with("python") {
-let dst = root.join(libdir_name).join(entry.file_name());
-t!(fs::create_dir_all(&dst));
-builder.cp_r(&entry.path(), &dst);
-break;
-}
-}
-}
-// Prepare the overlay
-let overlay = tmp.join("lldb-overlay");
-drop(fs::remove_dir_all(&overlay));
-builder.create_dir(&overlay);
-builder.install(&src.join("LICENSE.TXT"), &overlay, 0o644);
-builder.create(&overlay.join("version"), &builder.lldb_vers());
-// Generate the installer tarball
-let mut cmd = rust_installer(builder);
-cmd.arg("generate")
-.arg("--product-name=Rust")
-.arg("--rel-manifest-dir=rustlib")
-.arg("--success-message=lldb-installed.")
-.arg("--image-dir")
-.arg(&image)
-.arg("--work-dir")
-.arg(&tmpdir(builder))
-.arg("--output-dir")
-.arg(&distdir(builder))
-.arg("--non-installed-overlay")
-.arg(&overlay)
-.arg(format!("--package-name={}-{}", name, target))
-.arg("--legacy-manifest-dirs=rustlib,cargo")
-.arg("--component-name=lldb-preview");
-builder.run(&mut cmd);
-Some(distdir(builder).join(format!("{}-{}.tar.gz", name, target)))
-}
-}


@@ -503,6 +503,20 @@ Arguments:
}
};
+if let Subcommand::Check { .. } = &cmd {
+if matches.opt_str("stage").is_some() {
+println!("{}", "--stage not supported for x.py check, always treated as stage 0");
+process::exit(1);
+}
+if matches.opt_str("keep-stage").is_some() {
+println!(
+"{}",
+"--keep-stage not supported for x.py check, only one stage available"
+);
+process::exit(1);
+}
+}
Flags {
verbose: matches.opt_count("verbose"),
stage: matches.opt_str("stage").map(|j| j.parse().expect("`stage` should be a number")),


@@ -23,7 +23,7 @@ fn rustfmt(src: &Path, rustfmt: &Path, path: &Path, check: bool) {
if !status.success() {
eprintln!(
"Running `{}` failed.\nIf you're running `tidy`, \
-try again with `--bless` flag. Or, you just want to format \
+try again with `--bless`. Or, if you just want to format \
code, run `./x.py fmt` instead.",
cmd_debug,
);


@@ -1029,14 +1029,6 @@ impl Build {
self.rust_version()
}
-fn lldb_package_vers(&self) -> String {
-self.package_vers(channel::CFG_RELEASE_NUM)
-}
-fn lldb_vers(&self) -> String {
-self.rust_version()
-}
fn llvm_link_tools_dynamically(&self, target: Interned<String>) -> bool {
target.contains("linux-gnu") || target.contains("apple-darwin")
}


@@ -184,7 +184,7 @@ impl Step for Llvm {
}
// For distribution we want the LLVM tools to be *statically* linked to libstdc++
-if builder.config.llvm_tools_enabled || builder.config.lldb_enabled {
+if builder.config.llvm_tools_enabled {
if !target.contains("msvc") {
if target.contains("apple") {
cfg.define("CMAKE_EXE_LINKER_FLAGS", "-static-libstdc++");
@@ -212,17 +212,9 @@ impl Step for Llvm {
enabled_llvm_projects.push("compiler-rt");
}
-if builder.config.lldb_enabled {
-enabled_llvm_projects.push("clang");
-enabled_llvm_projects.push("lldb");
-// For the time being, disable code signing.
-cfg.define("LLDB_CODESIGN_IDENTITY", "");
-cfg.define("LLDB_NO_DEBUGSERVER", "ON");
-} else {
-// LLDB requires libxml2; but otherwise we want it to be disabled.
-// See https://github.com/rust-lang/rust/pull/50104
-cfg.define("LLVM_ENABLE_LIBXML2", "OFF");
-}
+// We want libxml to be disabled.
+// See https://github.com/rust-lang/rust/pull/50104
+cfg.define("LLVM_ENABLE_LIBXML2", "OFF");
if !enabled_llvm_projects.is_empty() {
enabled_llvm_projects.sort();


@@ -117,14 +117,6 @@ pub fn check(build: &mut Build) {
build.config.ninja = true;
}
}
-if build.config.lldb_enabled {
-cmd_finder.must_have("swig");
-let out = output(Command::new("swig").arg("-version"));
-if !out.contains("SWIG Version 3") && !out.contains("SWIG Version 4") {
-panic!("Ensure that Swig 3.x.x or 4.x.x is installed.");
-}
-}
}
build.config.python = build


@@ -436,7 +436,6 @@ impl Step for Miri {
// miri tests need to know about the stage sysroot
cargo.env("MIRI_SYSROOT", miri_sysroot);
-cargo.env("RUSTC_TEST_SUITE", builder.rustc(compiler));
cargo.env("RUSTC_LIB_PATH", builder.rustc_libdir(compiler));
cargo.env("MIRI_PATH", miri);
@@ -1097,20 +1096,15 @@ impl Step for Compiletest {
.to_string()
})
};
-let lldb_exe = if builder.config.lldb_enabled {
-// Test against the lldb that was just built.
-builder.llvm_out(target).join("bin").join("lldb")
-} else {
-PathBuf::from("lldb")
-};
-let lldb_version = Command::new(&lldb_exe)
+let lldb_exe = "lldb";
+let lldb_version = Command::new(lldb_exe)
.arg("--version")
.output()
.map(|output| String::from_utf8_lossy(&output.stdout).to_string())
.ok();
if let Some(ref vers) = lldb_version {
cmd.arg("--lldb-version").arg(vers);
-let lldb_python_dir = run(Command::new(&lldb_exe).arg("-P")).ok();
+let lldb_python_dir = run(Command::new(lldb_exe).arg("-P")).ok();
if let Some(ref dir) = lldb_python_dir {
cmd.arg("--lldb-python-dir").arg(dir);
}


@@ -17,6 +17,8 @@ dist=$objdir/build/dist
source "$ci_dir/shared.sh"
+CACHE_DOMAIN="${CACHE_DOMAIN:-ci-caches.rust-lang.org}"
if [ -f "$docker_dir/$image/Dockerfile" ]; then
if [ "$CI" != "" ]; then
hash_key=/tmp/.docker-hash-key.txt
@@ -38,9 +40,7 @@ if [ -f "$docker_dir/$image/Dockerfile" ]; then
cksum=$(sha512sum $hash_key | \
awk '{print $1}')
-s3url="s3://$SCCACHE_BUCKET/docker/$cksum"
-url="https://$SCCACHE_BUCKET.s3.amazonaws.com/docker/$cksum"
-upload="aws s3 cp - $s3url"
+url="https://$CACHE_DOMAIN/docker/$cksum"
echo "Attempting to download $url"
rm -f /tmp/rustci_docker_cache
@@ -65,7 +65,9 @@ if [ -f "$docker_dir/$image/Dockerfile" ]; then
-f "$dockerfile" \
"$context"
-if [ "$upload" != "" ]; then
+if [ "$CI" != "" ]; then
+s3url="s3://$SCCACHE_BUCKET/docker/$cksum"
+upload="aws s3 cp - $s3url"
digest=$(docker inspect rust-ci --format '{{.Id}}')
echo "Built container $digest"
if ! grep -q "$digest" <(echo "$loaded_images"); then


@@ -37,6 +37,7 @@ x--expand-yaml-anchors--remove:
- &public-variables
SCCACHE_BUCKET: rust-lang-gha-caches
TOOLSTATE_REPO: https://github.com/pietroalbini/rust-toolstate
+CACHE_DOMAIN: ci-caches-gha.rust-lang.org
- &prod-variables
SCCACHE_BUCKET: rust-lang-gha-caches
@@ -51,6 +52,7 @@
# (caches, artifacts...).
CACHES_AWS_ACCESS_KEY_ID: AKIA46X5W6CZOMUQATD5
ARTIFACTS_AWS_ACCESS_KEY_ID: AKIA46X5W6CZH5AYXDVF
+CACHE_DOMAIN: ci-caches-gha.rust-lang.org
- &base-job
env: {}

@@ -1 +1 @@
-Subproject commit e37c0e84e2ef73d3a4ebffda8011db6814a3b02d
+Subproject commit 6247be15a7f7509559f7981ee2209b9e0cc121df

@@ -1 +1 @@
-Subproject commit 8204c1d123472cd17f0c1c5c77300ae802eb0271
+Subproject commit 49270740c7a4bff2763e6bc730b191d45b7d5167

@@ -1 +1 @@
-Subproject commit 40beccdf1bb8eb9184a2e3b42db8b8c6e394247f
+Subproject commit 366c50a03bed928589771eba8a6f18e0c0c01d23

@@ -1 +1 @@
-Subproject commit 4d2d275997746d35eabfc4d992dfbdcce2f626ed
+Subproject commit d1517d4e3f29264c5c67bce2658516bb5202c800

@@ -1 +1 @@
-Subproject commit ed22e6fbfcb6ce436e9ea3b4bb4a55b2fb50a57e
+Subproject commit 892b928b565e35d25b6f9c47faee03b94bc41489

@@ -1 +1 @@
-Subproject commit ffc99581689fe2455908aaef5f5cf50dd03bb8f5
+Subproject commit ab072b14393cbd9e8a1d1d75879bf51e27217bbb


@@ -464,7 +464,15 @@ machine. Each target has a default base CPU.
Individual targets will support different features; this flag lets you control
enabling or disabling a feature. Each feature should be prefixed with a `+` to
-enable it or `-` to disable it. Separate multiple features with commas.
+enable it or `-` to disable it.
+Features from multiple `-C target-feature` options are combined. \
+Multiple features can be specified in a single option by separating them
+with commas - `-C target-feature=+x,-y`. \
+If some feature is specified more than once with both `+` and `-`,
+then values passed later override values passed earlier. \
+For example, `-C target-feature=+x,-y,+z -Ctarget-feature=-x,+y`
+is equivalent to `-C target-feature=-x,+y,+z`.
To see the valid options and an example of use, run `rustc --print
target-features`.


@@ -23,7 +23,7 @@ fn allocate_zeroed() {
}
#[bench]
-#[cfg_attr(miri, ignore)] // Miri does not support benchmarks
+#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
fn alloc_owned_small(b: &mut Bencher) {
b.iter(|| {
let _: Box<_> = box 10;


@@ -215,59 +215,6 @@ impl<K: Clone, V: Clone> Clone for BTreeMap<K, V> {
clone_subtree(self.root.as_ref().unwrap().as_ref())
}
}
-fn clone_from(&mut self, other: &Self) {
-BTreeClone::clone_from(self, other);
-}
-}
-trait BTreeClone {
-fn clone_from(&mut self, other: &Self);
-}
-impl<K: Clone, V: Clone> BTreeClone for BTreeMap<K, V> {
-default fn clone_from(&mut self, other: &Self) {
-*self = other.clone();
-}
-}
-impl<K: Clone + Ord, V: Clone> BTreeClone for BTreeMap<K, V> {
-fn clone_from(&mut self, other: &Self) {
-// This truncates `self` to `other.len()` by calling `split_off` on
-// the first key after `other.len()` elements if it exists.
-let split_off_key = if self.len() > other.len() {
-let diff = self.len() - other.len();
-if diff <= other.len() {
-self.iter().nth_back(diff - 1).map(|pair| (*pair.0).clone())
-} else {
-self.iter().nth(other.len()).map(|pair| (*pair.0).clone())
-}
-} else {
-None
-};
-if let Some(key) = split_off_key {
-self.split_off(&key);
-}
-let mut siter = self.range_mut(..);
-let mut oiter = other.iter();
-// After truncation, `self` is at most as long as `other` so this loop
-// replaces every key-value pair in `self`. Since `oiter` is in sorted
-// order and the structure of the `BTreeMap` stays the same,
-// the BTree invariants are maintained at the end of the loop.
-while !siter.is_empty() {
-if let Some((ok, ov)) = oiter.next() {
-// SAFETY: This is safe because `siter` is nonempty.
-let (sk, sv) = unsafe { siter.next_unchecked() };
-sk.clone_from(ok);
-sv.clone_from(ov);
-} else {
-break;
-}
-}
-// If `other` is longer than `self`, the remaining elements are inserted.
-self.extend(oiter.map(|(k, v)| ((*k).clone(), (*v).clone())));
-}
}
impl<K, Q: ?Sized> super::Recover<Q> for BTreeMap<K, ()>


@@ -182,7 +182,6 @@ fn test_insert_prev() {
#[test]
#[cfg_attr(target_os = "emscripten", ignore)]
-#[cfg_attr(miri, ignore)] // Miri does not support threads
fn test_send() {
let n = list_from(&[1, 2, 3]);
thread::spawn(move || {


@@ -1354,7 +1354,9 @@ impl<T> VecDeque<T> {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn push_front(&mut self, value: T) {
-self.grow_if_necessary();
+if self.is_full() {
+self.grow();
+}
self.tail = self.wrap_sub(self.tail, 1);
let tail = self.tail;
@@ -1377,7 +1379,9 @@ impl<T> VecDeque<T> {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn push_back(&mut self, value: T) {
-self.grow_if_necessary();
+if self.is_full() {
+self.grow();
+}
let head = self.head;
self.head = self.wrap_add(self.head, 1);
@@ -1485,7 +1489,9 @@ impl<T> VecDeque<T> {
#[stable(feature = "deque_extras_15", since = "1.5.0")]
pub fn insert(&mut self, index: usize, value: T) {
assert!(index <= self.len(), "index out of bounds");
-self.grow_if_necessary();
+if self.is_full() {
+self.grow();
+}
// Move the least number of elements in the ring buffer and insert
// the given object
@@ -2003,11 +2009,13 @@ impl<T> VecDeque<T> {
}
// This may panic or abort
-#[inline]
-fn grow_if_necessary(&mut self) {
+#[inline(never)]
+fn grow(&mut self) {
if self.is_full() {
let old_cap = self.cap();
-self.buf.double();
+// Double the buffer size.
+self.buf.reserve_exact(old_cap, old_cap);
+assert!(self.cap() == old_cap * 2);
unsafe {
self.handle_capacity_increase(old_cap);
}
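The hunks above replace the old `grow_if_necessary` helper with an explicit `is_full()` check followed by a cold `grow()` call; callers observe no behavioural change. A minimal, standalone sketch (not part of this diff) of the public API this code backs:

    use std::collections::VecDeque;

    fn main() {
        // Start below the final size so the deque has to grow while we push.
        let mut deq: VecDeque<i32> = VecDeque::with_capacity(4);
        for i in 0..32 {
            deq.push_back(i); // reallocates only when the ring buffer is full
        }
        assert_eq!(deq.len(), 32);
        assert!(deq.capacity() >= 32);
    }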


@@ -3,7 +3,7 @@ use super::*;
use test;
#[bench]
-#[cfg_attr(miri, ignore)] // Miri does not support benchmarks
+#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
fn bench_push_back_100(b: &mut test::Bencher) {
let mut deq = VecDeque::with_capacity(101);
b.iter(|| {
@@ -16,7 +16,7 @@ fn bench_push_back_100(b: &mut test::Bencher) {
}
#[bench]
-#[cfg_attr(miri, ignore)] // Miri does not support benchmarks
+#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
fn bench_push_front_100(b: &mut test::Bencher) {
let mut deq = VecDeque::with_capacity(101);
b.iter(|| {
@@ -29,7 +29,7 @@ fn bench_push_front_100(b: &mut test::Bencher) {
}
#[bench]
-#[cfg_attr(miri, ignore)] // Miri does not support benchmarks
+#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
fn bench_pop_back_100(b: &mut test::Bencher) {
let mut deq = VecDeque::<i32>::with_capacity(101);
@@ -43,7 +43,7 @@ fn bench_pop_back_100(b: &mut test::Bencher) {
}
#[bench]
-#[cfg_attr(miri, ignore)] // Miri does not support benchmarks
+#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
fn bench_pop_front_100(b: &mut test::Bencher) {
let mut deq = VecDeque::<i32>::with_capacity(101);


@@ -109,7 +109,7 @@
#![feature(ptr_offset_from)]
#![feature(rustc_attrs)]
#![feature(receiver_trait)]
-#![feature(specialization)]
+#![feature(min_specialization)]
#![feature(staged_api)]
#![feature(std_internals)]
#![feature(str_internals)]


@@ -1,7 +1,7 @@
#![unstable(feature = "raw_vec_internals", reason = "implementation detail", issue = "none")]
#![doc(hidden)]
-use core::alloc::MemoryBlock;
+use core::alloc::{LayoutErr, MemoryBlock};
use core::cmp;
use core::mem::{self, ManuallyDrop, MaybeUninit};
use core::ops::Drop;
@@ -211,82 +211,6 @@ impl<T, A: AllocRef> RawVec<T, A> {
}
}
-/// Doubles the size of the type's backing allocation. This is common enough
-/// to want to do that it's easiest to just have a dedicated method. Slightly
-/// more efficient logic can be provided for this than the general case.
-///
-/// This function is ideal for when pushing elements one-at-a-time because
-/// you don't need to incur the costs of the more general computations
-/// reserve needs to do to guard against overflow. You do however need to
-/// manually check if your `len == capacity`.
-///
-/// # Panics
-///
-/// * Panics if `T` is zero-sized on the assumption that you managed to exhaust
-/// all `usize::MAX` slots in your imaginary buffer.
-/// * Panics on 32-bit platforms if the requested capacity exceeds
-/// `isize::MAX` bytes.
-///
-/// # Aborts
-///
-/// Aborts on OOM
-///
-/// # Examples
-///
-/// ```
-/// # #![feature(raw_vec_internals)]
-/// # extern crate alloc;
-/// # use std::ptr;
-/// # use alloc::raw_vec::RawVec;
-/// struct MyVec<T> {
-/// buf: RawVec<T>,
-/// len: usize,
-/// }
-///
-/// impl<T> MyVec<T> {
-/// pub fn push(&mut self, elem: T) {
-/// if self.len == self.buf.capacity() { self.buf.double(); }
-/// // double would have aborted or panicked if the len exceeded
-/// // `isize::MAX` so this is safe to do unchecked now.
-/// unsafe {
-/// ptr::write(self.buf.ptr().add(self.len), elem);
-/// }
-/// self.len += 1;
-/// }
-/// }
-/// # fn main() {
-/// # let mut vec = MyVec { buf: RawVec::new(), len: 0 };
-/// # vec.push(1);
-/// # }
-/// ```
-#[inline(never)]
-#[cold]
-pub fn double(&mut self) {
-match self.grow(Double, MayMove, Uninitialized) {
-Err(CapacityOverflow) => capacity_overflow(),
-Err(AllocError { layout, .. }) => handle_alloc_error(layout),
-Ok(()) => { /* yay */ }
-}
-}
-/// Attempts to double the size of the type's backing allocation in place. This is common
-/// enough to want to do that it's easiest to just have a dedicated method. Slightly
-/// more efficient logic can be provided for this than the general case.
-///
-/// Returns `true` if the reallocation attempt has succeeded.
-///
-/// # Panics
-///
-/// * Panics if `T` is zero-sized on the assumption that you managed to exhaust
-/// all `usize::MAX` slots in your imaginary buffer.
-/// * Panics on 32-bit platforms if the requested capacity exceeds
-/// `isize::MAX` bytes.
-#[inline(never)]
-#[cold]
-pub fn double_in_place(&mut self) -> bool {
-self.grow(Double, InPlace, Uninitialized).is_ok()
-}
/// Ensures that the buffer contains at least enough space to hold
/// `used_capacity + needed_extra_capacity` elements. If it doesn't already have
/// enough capacity, will reallocate enough space plus comfortable slack
@@ -354,7 +278,7 @@ impl<T, A: AllocRef> RawVec<T, A> {
needed_extra_capacity: usize,
) -> Result<(), TryReserveError> {
if self.needs_to_grow(used_capacity, needed_extra_capacity) {
-self.grow(Amortized { used_capacity, needed_extra_capacity }, MayMove, Uninitialized)
+self.grow_amortized(used_capacity, needed_extra_capacity, MayMove)
} else {
Ok(())
}
@@ -381,8 +305,7 @@ impl<T, A: AllocRef> RawVec<T, A> {
// This is more readable than putting this in one line:
// `!self.needs_to_grow(...) || self.grow(...).is_ok()`
if self.needs_to_grow(used_capacity, needed_extra_capacity) {
-self.grow(Amortized { used_capacity, needed_extra_capacity }, InPlace, Uninitialized)
-.is_ok()
+self.grow_amortized(used_capacity, needed_extra_capacity, InPlace).is_ok()
} else {
true
}
@@ -423,7 +346,7 @@ impl<T, A: AllocRef> RawVec<T, A> {
needed_extra_capacity: usize,
) -> Result<(), TryReserveError> {
if self.needs_to_grow(used_capacity, needed_extra_capacity) {
-self.grow(Exact { used_capacity, needed_extra_capacity }, MayMove, Uninitialized)
+self.grow_exact(used_capacity, needed_extra_capacity)
} else {
Ok(())
}
@@ -448,14 +371,6 @@ impl<T, A: AllocRef> RawVec<T, A> {
}
}
-#[derive(Copy, Clone)]
-enum Strategy {
-Double,
-Amortized { used_capacity: usize, needed_extra_capacity: usize },
-Exact { used_capacity: usize, needed_extra_capacity: usize },
-}
-use Strategy::*;
impl<T, A: AllocRef> RawVec<T, A> {
/// Returns if the buffer needs to grow to fulfill the needed extra capacity.
/// Mainly used to make inlining reserve-calls possible without inlining `grow`.
@@ -473,68 +388,59 @@ impl<T, A: AllocRef> RawVec<T, A> {
self.cap = Self::capacity_from_bytes(memory.size);
}
-/// Single method to handle all possibilities of growing the buffer.
-fn grow(
+// This method is usually instantiated many times. So we want it to be as
+// small as possible, to improve compile times. But we also want as much of
+// its contents to be statically computable as possible, to make the
+// generated code run faster. Therefore, this method is carefully written
+// so that all of the code that depends on `T` is within it, while as much
+// of the code that doesn't depend on `T` as possible is in functions that
+// are non-generic over `T`.
+fn grow_amortized(
&mut self,
-strategy: Strategy,
+used_capacity: usize,
+needed_extra_capacity: usize,
placement: ReallocPlacement,
-init: AllocInit,
) -> Result<(), TryReserveError> {
-let elem_size = mem::size_of::<T>();
-if elem_size == 0 {
+if mem::size_of::<T>() == 0 {
// Since we return a capacity of `usize::MAX` when `elem_size` is
// 0, getting to here necessarily means the `RawVec` is overfull.
return Err(CapacityOverflow);
}
-let new_layout = match strategy {
-Double => unsafe {
-// Since we guarantee that we never allocate more than `isize::MAX` bytes,
-// `elem_size * self.cap <= isize::MAX` as a precondition, so this can't overflow.
-// Additionally the alignment will never be too large as to "not be satisfiable",
-// so `Layout::from_size_align` will always return `Some`.
-//
-// TL;DR, we bypass runtime checks due to dynamic assertions in this module,
-// allowing us to use `from_size_align_unchecked`.
-let cap = if self.cap == 0 {
-// Skip to 4 because tiny `Vec`'s are dumb; but not if that would cause overflow.
-if elem_size > usize::MAX / 8 { 1 } else { 4 }
-} else {
-self.cap * 2
-};
-Layout::from_size_align_unchecked(cap * elem_size, mem::align_of::<T>())
-},
-Amortized { used_capacity, needed_extra_capacity } => {
-// Nothing we can really do about these checks, sadly.
-let required_cap =
-used_capacity.checked_add(needed_extra_capacity).ok_or(CapacityOverflow)?;
-// Cannot overflow, because `cap <= isize::MAX`, and type of `cap` is `usize`.
-let double_cap = self.cap * 2;
-// `double_cap` guarantees exponential growth.
-let cap = cmp::max(double_cap, required_cap);
-Layout::array::<T>(cap).map_err(|_| CapacityOverflow)?
-}
-Exact { used_capacity, needed_extra_capacity } => {
-let cap =
-used_capacity.checked_add(needed_extra_capacity).ok_or(CapacityOverflow)?;
-Layout::array::<T>(cap).map_err(|_| CapacityOverflow)?
-}
-};
-alloc_guard(new_layout.size())?;
-let memory = if let Some((ptr, old_layout)) = self.current_memory() {
-debug_assert_eq!(old_layout.align(), new_layout.align());
-unsafe {
-self.alloc
-.grow(ptr, old_layout, new_layout.size(), placement, init)
-.map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })?
-}
-} else {
-match placement {
-MayMove => self.alloc.alloc(new_layout, init),
-InPlace => Err(AllocErr),
-}
-.map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })?
-};
+// Nothing we can really do about these checks, sadly.
+let required_cap =
+used_capacity.checked_add(needed_extra_capacity).ok_or(CapacityOverflow)?;
+// Cannot overflow, because `cap <= isize::MAX`, and type of `cap` is `usize`.
+let double_cap = self.cap * 2;
+// `double_cap` guarantees exponential growth.
+let cap = cmp::max(double_cap, required_cap);
+let new_layout = Layout::array::<T>(cap);
+// `finish_grow` is non-generic over `T`.
+let memory = finish_grow(new_layout, placement, self.current_memory(), &mut self.alloc)?;
+self.set_memory(memory);
+Ok(())
+}
+// The constraints on this method are much the same as those on
+// `grow_amortized`, but this method is usually instantiated less often so
+// it's less critical.
+fn grow_exact(
+&mut self,
+used_capacity: usize,
+needed_extra_capacity: usize,
+) -> Result<(), TryReserveError> {
+if mem::size_of::<T>() == 0 {
+// Since we return a capacity of `usize::MAX` when the type size is
+// 0, getting to here necessarily means the `RawVec` is overfull.
+return Err(CapacityOverflow);
+}
+let cap = used_capacity.checked_add(needed_extra_capacity).ok_or(CapacityOverflow)?;
+let new_layout = Layout::array::<T>(cap);
+// `finish_grow` is non-generic over `T`.
+let memory = finish_grow(new_layout, MayMove, self.current_memory(), &mut self.alloc)?;
self.set_memory(memory);
Ok(())
}
@@ -562,6 +468,38 @@ impl<T, A: AllocRef> RawVec<T, A> {
}
}
+// This function is outside `RawVec` to minimize compile times. See the comment
+// above `RawVec::grow_amortized` for details. (The `A` parameter isn't
+// significant, because the number of different `A` types seen in practice is
+// much smaller than the number of `T` types.)
+fn finish_grow<A>(
+new_layout: Result<Layout, LayoutErr>,
+placement: ReallocPlacement,
+current_memory: Option<(NonNull<u8>, Layout)>,
+alloc: &mut A,
+) -> Result<MemoryBlock, TryReserveError>
+where
+A: AllocRef,
+{
+// Check for the error here to minimize the size of `RawVec::grow_*`.
+let new_layout = new_layout.map_err(|_| CapacityOverflow)?;
+alloc_guard(new_layout.size())?;
+let memory = if let Some((ptr, old_layout)) = current_memory {
+debug_assert_eq!(old_layout.align(), new_layout.align());
+unsafe { alloc.grow(ptr, old_layout, new_layout.size(), placement, Uninitialized) }
+} else {
+match placement {
+MayMove => alloc.alloc(new_layout, Uninitialized),
+InPlace => Err(AllocErr),
+}
+}
+.map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })?;
+Ok(memory)
+}
impl<T> RawVec<T, Global> {
/// Converts the entire buffer into `Box<[MaybeUninit<T>]>` with the specified `len`.
///
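The comments introduced in this file describe a compile-time optimization: keep the generic `grow_amortized`/`grow_exact` methods small and push the type-independent work into the non-generic `finish_grow`, so the heavy code is compiled once rather than once per element type. A minimal sketch of that general pattern, with hypothetical names and using the stable allocator API instead of the internal one shown above:

    use std::alloc::{alloc, dealloc, Layout};

    // Generic entry point: only the `Layout` computation depends on `T`.
    fn grow_buffer<T>(cap: usize) -> *mut u8 {
        let layout = Layout::array::<T>(cap).expect("capacity overflow");
        finish_alloc(layout) // non-generic: one copy in the final binary
    }

    // Non-generic helper: monomorphization does not duplicate this body.
    fn finish_alloc(layout: Layout) -> *mut u8 {
        unsafe { alloc(layout) }
    }

    fn main() {
        let layout = Layout::array::<u64>(8).unwrap();
        let p = grow_buffer::<u64>(8);
        assert!(!p.is_null());
        unsafe { dealloc(p, layout) };
    }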


@@ -249,7 +249,7 @@ use core::mem::{self, align_of, align_of_val, forget, size_of_val};
use core::ops::{CoerceUnsized, Deref, DispatchFromDyn, Receiver};
use core::pin::Pin;
use core::ptr::{self, NonNull};
-use core::slice::{self, from_raw_parts_mut};
+use core::slice::from_raw_parts_mut;
use crate::alloc::{box_free, handle_alloc_error, AllocInit, AllocRef, Global, Layout};
use crate::string::String;
@@ -1221,6 +1221,12 @@ impl<T: ?Sized + PartialEq> RcEqIdent<T> for Rc<T> {
}
}
+// Hack to allow specializing on `Eq` even though `Eq` has a method.
+#[rustc_unsafe_specialization_marker]
+pub(crate) trait MarkerEq: PartialEq<Self> {}
+impl<T: Eq> MarkerEq for T {}
/// We're doing this specialization here, and not as a more general optimization on `&T`, because it
/// would otherwise add a cost to all equality checks on refs. We assume that `Rc`s are used to
/// store large values, that are slow to clone, but also heavy to check for equality, causing this
@@ -1229,7 +1235,7 @@ impl<T: ?Sized + PartialEq> RcEqIdent<T> for Rc<T> {
///
/// We can only do this when `T: Eq` as a `PartialEq` might be deliberately irreflexive.
#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized + Eq> RcEqIdent<T> for Rc<T> {
+impl<T: ?Sized + MarkerEq> RcEqIdent<T> for Rc<T> {
#[inline]
fn eq(&self, other: &Rc<T>) -> bool {
Rc::ptr_eq(self, other) || **self == **other
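The `MarkerEq` trait added above is a workaround so that, under `min_specialization`, the `Eq`-based impl can keep specializing `RcEqIdent` and preserve the pointer-identity fast path. A small usage sketch (ordinary stable Rust) of the behaviour being preserved:

    use std::rc::Rc;

    fn main() {
        let a: Rc<Vec<i32>> = Rc::new((0..1_000).collect());
        let b = Rc::clone(&a);
        // Both handles share one allocation, so `==` can be answered by
        // pointer identity alone when the payload type is `Eq` (reflexive).
        assert!(Rc::ptr_eq(&a, &b));
        assert_eq!(a, b);
    }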
@@ -1548,25 +1554,25 @@ impl<T> iter::FromIterator<T> for Rc<[T]> {
/// # assert_eq!(&*evens, &*(0..10).collect::<Vec<_>>());
/// ```
fn from_iter<I: iter::IntoIterator<Item = T>>(iter: I) -> Self {
-RcFromIter::from_iter(iter.into_iter())
+ToRcSlice::to_rc_slice(iter.into_iter())
}
}
/// Specialization trait used for collecting into `Rc<[T]>`.
-trait RcFromIter<T, I> {
-fn from_iter(iter: I) -> Self;
+trait ToRcSlice<T>: Iterator<Item = T> + Sized {
+fn to_rc_slice(self) -> Rc<[T]>;
}
-impl<T, I: Iterator<Item = T>> RcFromIter<T, I> for Rc<[T]> {
-default fn from_iter(iter: I) -> Self {
-iter.collect::<Vec<T>>().into()
+impl<T, I: Iterator<Item = T>> ToRcSlice<T> for I {
+default fn to_rc_slice(self) -> Rc<[T]> {
+self.collect::<Vec<T>>().into()
}
}
-impl<T, I: iter::TrustedLen<Item = T>> RcFromIter<T, I> for Rc<[T]> {
-default fn from_iter(iter: I) -> Self {
+impl<T, I: iter::TrustedLen<Item = T>> ToRcSlice<T> for I {
+fn to_rc_slice(self) -> Rc<[T]> {
// This is the case for a `TrustedLen` iterator.
-let (low, high) = iter.size_hint();
+let (low, high) = self.size_hint();
if let Some(high) = high {
debug_assert_eq!(
low,
@@ -1577,29 +1583,15 @@ impl<T, I: iter::TrustedLen<Item = T>> RcFromIter<T, I> for Rc<[T]> {
unsafe {
// SAFETY: We need to ensure that the iterator has an exact length and we have.
-Rc::from_iter_exact(iter, low)
+Rc::from_iter_exact(self, low)
}
} else {
// Fall back to normal implementation.
-iter.collect::<Vec<T>>().into()
+self.collect::<Vec<T>>().into()
}
}
}
-impl<'a, T: 'a + Clone> RcFromIter<&'a T, slice::Iter<'a, T>> for Rc<[T]> {
-fn from_iter(iter: slice::Iter<'a, T>) -> Self {
-// Delegate to `impl<T: Clone> From<&[T]> for Rc<[T]>`.
-//
-// In the case that `T: Copy`, we get to use `ptr::copy_nonoverlapping`
-// which is even more performant.
-//
-// In the fall-back case we have `T: Clone`. This is still better
-// than the `TrustedLen` implementation as slices have a known length
-// and so we get to avoid calling `size_hint` and avoid the branching.
-iter.as_slice().into()
-}
-}
/// `Weak` is a version of [`Rc`] that holds a non-owning reference to the
/// managed allocation. The allocation is accessed by calling [`upgrade`] on the `Weak`
/// pointer, which returns an [`Option`]`<`[`Rc`]`<T>>`.
@@ -2035,6 +2027,8 @@ trait RcBoxPtr<T: ?Sized> {
// nevertheless, we insert an abort here to hint LLVM at
// an otherwise missed optimization.
if strong == 0 || strong == usize::max_value() {
+// remove `unsafe` on bootstrap bump
+#[cfg_attr(not(bootstrap), allow(unused_unsafe))]
unsafe {
abort();
}
@@ -2061,6 +2055,8 @@ trait RcBoxPtr<T: ?Sized> {
// nevertheless, we insert an abort here to hint LLVM at
// an otherwise missed optimization.
if weak == 0 || weak == usize::max_value() {
+// remove `unsafe` on bootstrap bump
+#[cfg_attr(not(bootstrap), allow(unused_unsafe))]
unsafe {
abort();
}


@@ -20,7 +20,7 @@ use core::mem::{self, align_of, align_of_val, size_of_val};
use core::ops::{CoerceUnsized, Deref, DispatchFromDyn, Receiver};
use core::pin::Pin;
use core::ptr::{self, NonNull};
-use core::slice::{self, from_raw_parts_mut};
+use core::slice::from_raw_parts_mut;
use core::sync::atomic;
use core::sync::atomic::Ordering::{Acquire, Relaxed, Release, SeqCst};
@@ -835,12 +835,14 @@ impl<T: ?Sized> Arc<T> {
///
/// unsafe {
/// let ptr = Arc::into_raw(five);
-/// Arc::decr_strong_count(ptr);
+/// Arc::incr_strong_count(ptr);
///
-/// // This assertion is deterministic because we haven't shared
+/// // Those assertions are deterministic because we haven't shared
/// // the `Arc` between threads.
/// let five = Arc::from_raw(ptr);
-/// assert_eq!(0, Arc::strong_count(&five));
+/// assert_eq!(2, Arc::strong_count(&five));
+/// Arc::decr_strong_count(ptr);
+/// assert_eq!(1, Arc::strong_count(&five));
/// }
/// ```
#[inline]
@@ -1094,6 +1096,8 @@ impl<T: ?Sized> Clone for Arc<T> {
// We abort because such a program is incredibly degenerate, and we
// don't care to support it.
if old_size > MAX_REFCOUNT {
+// remove `unsafe` on bootstrap bump
+#[cfg_attr(not(bootstrap), allow(unused_unsafe))]
unsafe {
abort();
}
@@ -1612,6 +1616,8 @@ impl<T: ?Sized> Weak<T> {
// See comments in `Arc::clone` for why we do this (for `mem::forget`).
if n > MAX_REFCOUNT {
+// remove `unsafe` on bootstrap bump
+#[cfg_attr(not(bootstrap), allow(unused_unsafe))]
unsafe {
abort();
}
@@ -1751,6 +1757,7 @@ impl<T: ?Sized> Clone for Weak<T> {
// See comments in Arc::clone() for why we do this (for mem::forget).
if old_size > MAX_REFCOUNT {
-#[cfg_attr(not(bootstrap), allow(unused_unsafe))]
+// remove `unsafe` on bootstrap bump
+#[cfg_attr(not(bootstrap), allow(unused_unsafe))]
unsafe {
abort();
}
@@ -1852,7 +1859,7 @@ impl<T: ?Sized + PartialEq> ArcEqIdent<T> for Arc<T> {
///
/// We can only do this when `T: Eq` as a `PartialEq` might be deliberately irreflexive.
#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized + Eq> ArcEqIdent<T> for Arc<T> {
+impl<T: ?Sized + crate::rc::MarkerEq> ArcEqIdent<T> for Arc<T> {
#[inline]
fn eq(&self, other: &Arc<T>) -> bool {
Arc::ptr_eq(self, other) || **self == **other
@@ -2178,25 +2185,25 @@ impl<T> iter::FromIterator<T> for Arc<[T]> {
/// # assert_eq!(&*evens, &*(0..10).collect::<Vec<_>>());
/// ```
fn from_iter<I: iter::IntoIterator<Item = T>>(iter: I) -> Self {
-ArcFromIter::from_iter(iter.into_iter())
+ToArcSlice::to_arc_slice(iter.into_iter())
}
}
/// Specialization trait used for collecting into `Arc<[T]>`.
-trait ArcFromIter<T, I> {
-fn from_iter(iter: I) -> Self;
+trait ToArcSlice<T>: Iterator<Item = T> + Sized {
+fn to_arc_slice(self) -> Arc<[T]>;
}
-impl<T, I: Iterator<Item = T>> ArcFromIter<T, I> for Arc<[T]> {
-default fn from_iter(iter: I) -> Self {
-iter.collect::<Vec<T>>().into()
+impl<T, I: Iterator<Item = T>> ToArcSlice<T> for I {
+default fn to_arc_slice(self) -> Arc<[T]> {
+self.collect::<Vec<T>>().into()
}
}
-impl<T, I: iter::TrustedLen<Item = T>> ArcFromIter<T, I> for Arc<[T]> {
-default fn from_iter(iter: I) -> Self {
+impl<T, I: iter::TrustedLen<Item = T>> ToArcSlice<T> for I {
+fn to_arc_slice(self) -> Arc<[T]> {
// This is the case for a `TrustedLen` iterator.
-let (low, high) = iter.size_hint();
+let (low, high) = self.size_hint();
if let Some(high) = high {
debug_assert_eq!(
low,
@@ -2207,29 +2214,15 @@ impl<T, I: iter::TrustedLen<Item = T>> ArcFromIter<T, I> for Arc<[T]> {
unsafe {
// SAFETY: We need to ensure that the iterator has an exact length and we have.
Arc::from_iter_exact(iter, low) Arc::from_iter_exact(self, low)
} }
} else { } else {
// Fall back to normal implementation. // Fall back to normal implementation.
iter.collect::<Vec<T>>().into() self.collect::<Vec<T>>().into()
} }
} }
} }
impl<'a, T: 'a + Clone> ArcFromIter<&'a T, slice::Iter<'a, T>> for Arc<[T]> {
fn from_iter(iter: slice::Iter<'a, T>) -> Self {
// Delegate to `impl<T: Clone> From<&[T]> for Arc<[T]>`.
//
// In the case that `T: Copy`, we get to use `ptr::copy_nonoverlapping`
// which is even more performant.
//
// In the fall-back case we have `T: Clone`. This is still better
// than the `TrustedLen` implementation as slices have a known length
// and so we get to avoid calling `size_hint` and avoid the branching.
iter.as_slice().into()
}
}
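For contrast with the `ToArcSlice` specialization above, a small sketch from the caller's side (illustrative names, stable APIs only) showing both branches: an iterator with a trusted length and one whose length cannot be trusted.

use std::sync::Arc;

fn main() {
    // A plain range has a trusted exact length, so the specialized branch
    // can allocate the slice once, up front.
    let evens: Arc<[u32]> = (0u32..10).map(|x| x * 2).collect();
    assert_eq!(evens.len(), 10);

    // A filtered iterator cannot promise its length, so collection goes
    // through the `Vec<T>` fallback before converting into `Arc<[T]>`.
    let odds: Arc<[u32]> = (0u32..10).filter(|x| x % 2 == 1).collect();
    assert_eq!(&odds[..], &[1, 3, 5, 7, 9][..]);
}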
#[stable(feature = "rust1", since = "1.0.0")] #[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> borrow::Borrow<T> for Arc<T> { impl<T: ?Sized> borrow::Borrow<T> for Arc<T> {
fn borrow(&self) -> &T { fn borrow(&self) -> &T {

View File

@ -32,7 +32,6 @@ impl Drop for Canary {
#[test] #[test]
#[cfg_attr(target_os = "emscripten", ignore)] #[cfg_attr(target_os = "emscripten", ignore)]
#[cfg_attr(miri, ignore)] // Miri does not support threads
fn manually_share_arc() { fn manually_share_arc() {
let v = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; let v = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
let arc_v = Arc::new(v); let arc_v = Arc::new(v);
@ -337,12 +336,13 @@ fn test_ptr_eq() {
#[test] #[test]
#[cfg_attr(target_os = "emscripten", ignore)] #[cfg_attr(target_os = "emscripten", ignore)]
#[cfg_attr(miri, ignore)] // Miri does not support threads
fn test_weak_count_locked() { fn test_weak_count_locked() {
let mut a = Arc::new(atomic::AtomicBool::new(false)); let mut a = Arc::new(atomic::AtomicBool::new(false));
let a2 = a.clone(); let a2 = a.clone();
let t = thread::spawn(move || { let t = thread::spawn(move || {
for _i in 0..1000000 { // Miri is too slow
let count = if cfg!(miri) { 1000 } else { 1000000 };
for _i in 0..count {
Arc::get_mut(&mut a); Arc::get_mut(&mut a);
} }
a.store(true, SeqCst); a.store(true, SeqCst);
@ -351,6 +351,8 @@ fn test_weak_count_locked() {
while !a2.load(SeqCst) { while !a2.load(SeqCst) {
let n = Arc::weak_count(&a2); let n = Arc::weak_count(&a2);
assert!(n < 2, "bad weak count: {}", n); assert!(n < 2, "bad weak count: {}", n);
#[cfg(miri)] // Miri's scheduler does not guarantee liveness, and thus needs this hint.
atomic::spin_loop_hint();
} }
t.join().unwrap(); t.join().unwrap();
} }

View File

@ -1619,8 +1619,8 @@ impl<T: Default> Vec<T> {
#[unstable(feature = "vec_resize_default", issue = "41758")] #[unstable(feature = "vec_resize_default", issue = "41758")]
#[rustc_deprecated( #[rustc_deprecated(
reason = "This is moving towards being removed in favor \ reason = "This is moving towards being removed in favor \
of `.resize_with(Default::default)`. If you disagree, please comment \ of `.resize_with(Default::default)`. If you disagree, please comment \
in the tracking issue.", in the tracking issue.",
since = "1.33.0" since = "1.33.0"
)] )]
pub fn resize_default(&mut self, new_len: usize) { pub fn resize_default(&mut self, new_len: usize) {
@ -1825,6 +1825,7 @@ impl<T: Clone + IsZero> SpecFromElem for T {
} }
} }
#[rustc_specialization_trait]
unsafe trait IsZero { unsafe trait IsZero {
/// Whether this value is zero /// Whether this value is zero
fn is_zero(&self) -> bool; fn is_zero(&self) -> bool;
@ -1874,9 +1875,12 @@ unsafe impl<T> IsZero for *mut T {
} }
} }
// `Option<&T>`, `Option<&mut T>` and `Option<Box<T>>` are guaranteed to represent `None` as null. // `Option<&T>` and `Option<Box<T>>` are guaranteed to represent `None` as null.
// For fat pointers, the bytes that would be the pointer metadata in the `Some` variant // For fat pointers, the bytes that would be the pointer metadata in the `Some`
// are padding in the `None` variant, so ignoring them and zero-initializing instead is ok. // variant are padding in the `None` variant, so ignoring them and
// zero-initializing instead is ok.
// `Option<&mut T>` never implements `Clone`, so there's no need for an impl of
// `SpecFromElem`.
unsafe impl<T: ?Sized> IsZero for Option<&T> { unsafe impl<T: ?Sized> IsZero for Option<&T> {
#[inline] #[inline]
@ -1885,13 +1889,6 @@ unsafe impl<T: ?Sized> IsZero for Option<&T> {
} }
} }
unsafe impl<T: ?Sized> IsZero for Option<&mut T> {
#[inline]
fn is_zero(&self) -> bool {
self.is_none()
}
}
unsafe impl<T: ?Sized> IsZero for Option<Box<T>> { unsafe impl<T: ?Sized> IsZero for Option<Box<T>> {
#[inline] #[inline]
fn is_zero(&self) -> bool { fn is_zero(&self) -> bool {
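A brief illustration of the guarantee the comment above relies on, using only stable code; the zeroed-allocation fast path itself is internal, so this only demonstrates the observable layout facts.

fn main() {
    // `Option<&T>` uses the null niche: `None` is all zero bytes, which is
    // what lets `vec![None; n]` take the zeroed-allocation fast path.
    assert_eq!(
        std::mem::size_of::<Option<&u8>>(),
        std::mem::size_of::<&u8>()
    );
    let v: Vec<Option<&u8>> = vec![None; 4];
    assert!(v.iter().all(|x| x.is_none()));

    // `Option<&mut u8>` is not `Clone`, so `vec![None::<&mut u8>; 4]` would
    // not even compile, which is why no `IsZero` impl is needed for it.
}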

View File

@ -5,8 +5,7 @@
//! of individual objects while the arena itself is still alive. The benefit //! of individual objects while the arena itself is still alive. The benefit
//! of an arena is very fast allocation; just a pointer bump. //! of an arena is very fast allocation; just a pointer bump.
//! //!
//! This crate implements `TypedArena`, a simple arena that can only hold //! This crate implements several kinds of arena.
//! objects of a single type.
#![doc( #![doc(
html_root_url = "https://doc.rust-lang.org/nightly/", html_root_url = "https://doc.rust-lang.org/nightly/",
@ -98,7 +97,13 @@ impl<T> TypedArenaChunk<T> {
} }
} }
// The arenas start with PAGE-sized chunks, and then each new chunk is twice as
// big as its predecessor, up until we reach HUGE_PAGE-sized chunks, whereupon
// we stop growing. This scales well, from arenas that are barely used up to
// arenas that are used for 100s of MiBs. Note also that the chosen sizes match
// the usual sizes of pages and huge pages on Linux.
const PAGE: usize = 4096; const PAGE: usize = 4096;
const HUGE_PAGE: usize = 2 * 1024 * 1024;
impl<T> Default for TypedArena<T> { impl<T> Default for TypedArena<T> {
/// Creates a new `TypedArena`. /// Creates a new `TypedArena`.
@ -211,6 +216,9 @@ impl<T> TypedArena<T> {
#[cold] #[cold]
fn grow(&self, n: usize) { fn grow(&self, n: usize) {
unsafe { unsafe {
// We need the element size to convert chunk sizes (ranging from
// PAGE to HUGE_PAGE bytes) to element counts.
let elem_size = cmp::max(1, mem::size_of::<T>());
let mut chunks = self.chunks.borrow_mut(); let mut chunks = self.chunks.borrow_mut();
let (chunk, mut new_capacity); let (chunk, mut new_capacity);
if let Some(last_chunk) = chunks.last_mut() { if let Some(last_chunk) = chunks.last_mut() {
@ -221,18 +229,20 @@ impl<T> TypedArena<T> {
self.end.set(last_chunk.end()); self.end.set(last_chunk.end());
return; return;
} else { } else {
// If the previous chunk's capacity is less than HUGE_PAGE
// bytes, then this chunk will be at least double the previous
// chunk's size.
new_capacity = last_chunk.storage.capacity(); new_capacity = last_chunk.storage.capacity();
loop { if new_capacity < HUGE_PAGE / elem_size {
new_capacity = new_capacity.checked_mul(2).unwrap(); new_capacity = new_capacity.checked_mul(2).unwrap();
if new_capacity >= currently_used_cap + n {
break;
}
} }
} }
} else { } else {
let elem_size = cmp::max(1, mem::size_of::<T>()); new_capacity = PAGE / elem_size;
new_capacity = cmp::max(n, PAGE / elem_size);
} }
// Also ensure that this chunk can fit `n`.
new_capacity = cmp::max(n, new_capacity);
chunk = TypedArenaChunk::<T>::new(new_capacity); chunk = TypedArenaChunk::<T>::new(new_capacity);
self.ptr.set(chunk.start()); self.ptr.set(chunk.start());
self.end.set(chunk.end()); self.end.set(chunk.end());
@ -347,17 +357,20 @@ impl DroplessArena {
self.end.set(last_chunk.end()); self.end.set(last_chunk.end());
return; return;
} else { } else {
// If the previous chunk's capacity is less than HUGE_PAGE
// bytes, then this chunk will be at least double the previous
// chunk's size.
new_capacity = last_chunk.storage.capacity(); new_capacity = last_chunk.storage.capacity();
loop { if new_capacity < HUGE_PAGE {
new_capacity = new_capacity.checked_mul(2).unwrap(); new_capacity = new_capacity.checked_mul(2).unwrap();
if new_capacity >= used_bytes + needed_bytes {
break;
}
} }
} }
} else { } else {
new_capacity = cmp::max(needed_bytes, PAGE); new_capacity = PAGE;
} }
// Also ensure that this chunk can fit `needed_bytes`.
new_capacity = cmp::max(needed_bytes, new_capacity);
chunk = TypedArenaChunk::<u8>::new(new_capacity); chunk = TypedArenaChunk::<u8>::new(new_capacity);
self.ptr.set(chunk.start()); self.ptr.set(chunk.start());
self.end.set(chunk.end()); self.end.set(chunk.end());
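A standalone sketch of the growth policy described in the comments above: start at PAGE bytes, double each chunk until it reaches HUGE_PAGE bytes, and always make room for the current request. `next_chunk_capacity` is a hypothetical helper, not part of the arena code; the constants mirror the ones in the diff.

const PAGE: usize = 4096;
const HUGE_PAGE: usize = 2 * 1024 * 1024;

// Computes the capacity (in elements) of the next chunk, given the previous
// chunk's capacity, the number of elements needed right now, and the element
// size in bytes.
fn next_chunk_capacity(prev: Option<usize>, needed: usize, elem_size: usize) -> usize {
    let elem_size = elem_size.max(1);
    let grown = match prev {
        // First chunk: one page worth of elements.
        None => PAGE / elem_size,
        // Double until a chunk reaches HUGE_PAGE bytes, then stop growing.
        Some(cap) if cap < HUGE_PAGE / elem_size => cap.checked_mul(2).unwrap(),
        Some(cap) => cap,
    };
    // Also ensure that this chunk can fit the current request.
    grown.max(needed)
}

fn main() {
    // With 8-byte elements the capacities go 512, 1024, ..., 262144 and then
    // stay at 262144 elements (2 MiB per chunk).
    let mut cap = None;
    for _ in 0..12 {
        let next = next_chunk_capacity(cap, 1, 8);
        cap = Some(next);
    }
    assert_eq!(cap, Some(HUGE_PAGE / 8));
}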

View File

@ -446,14 +446,16 @@ impl TypeId {
/// # Note /// # Note
/// ///
/// This is intended for diagnostic use. The exact contents and format of the /// This is intended for diagnostic use. The exact contents and format of the
/// string are not specified, other than being a best-effort description of the /// string returned are not specified, other than being a best-effort
/// type. For example, `type_name::<Option<String>>()` could return the /// description of the type. For example, amongst the strings
/// `"Option<String>"` or `"std::option::Option<std::string::String>"`, but not /// that `type_name::<Option<String>>()` might return are `"Option<String>"` and
/// `"foobar"`. In addition, the output may change between versions of the /// `"std::option::Option<std::string::String>"`.
/// compiler.
/// ///
/// The type name should not be considered a unique identifier of a type; /// The returned string must not be considered to be a unique identifier of a
/// multiple types may share the same type name. /// type as multiple types may map to the same type name. Similarly, there is no
/// guarantee that all parts of a type will appear in the returned string: for
/// example, lifetime specifiers are currently not included. In addition, the
/// output may change between versions of the compiler.
/// ///
/// The current implementation uses the same infrastructure as compiler /// The current implementation uses the same infrastructure as compiler
/// diagnostics and debuginfo, but this is not guaranteed. /// diagnostics and debuginfo, but this is not guaranteed.
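A quick illustration of the caveats described above: because the exact string is unspecified, the assertions only check properties that should hold across compiler versions.

use std::any::type_name;

fn main() {
    let name = type_name::<Option<String>>();
    // Could be "Option<String>" or the fully qualified form, depending on
    // the compiler; either way both type names should appear.
    assert!(name.contains("Option"));
    assert!(name.contains("String"));
    println!("{}", name);
}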

View File

@ -133,10 +133,9 @@
//! `Cell<T>`. //! `Cell<T>`.
//! //!
//! ``` //! ```
//! #![feature(core_intrinsics)]
//! use std::cell::Cell; //! use std::cell::Cell;
//! use std::ptr::NonNull; //! use std::ptr::NonNull;
//! use std::intrinsics::abort; //! use std::process::abort;
//! use std::marker::PhantomData; //! use std::marker::PhantomData;
//! //!
//! struct Rc<T: ?Sized> { //! struct Rc<T: ?Sized> {
@ -173,7 +172,7 @@
//! .strong //! .strong
//! .set(self.strong() //! .set(self.strong()
//! .checked_add(1) //! .checked_add(1)
//! .unwrap_or_else(|| unsafe { abort() })); //! .unwrap_or_else(|| abort()));
//! } //! }
//! } //! }
//! //!

View File

@ -918,7 +918,7 @@ extern "rust-intrinsic" {
/// Aborts the execution of the process. /// Aborts the execution of the process.
/// ///
/// The stabilized version of this intrinsic is /// A more user-friendly and stable version of this operation is
/// [`std::process::abort`](../../std/process/fn.abort.html). /// [`std::process::abort`](../../std/process/fn.abort.html).
pub fn abort() -> !; pub fn abort() -> !;

View File

@ -4,47 +4,182 @@ use crate::ops::{self, Add, Sub, Try};
use super::{FusedIterator, TrustedLen}; use super::{FusedIterator, TrustedLen};
/// Objects that can be stepped over in both directions. /// Objects that have a notion of *successor* and *predecessor* operations.
/// ///
/// The `steps_between` function provides a way to efficiently compare /// The *successor* operation moves towards values that compare greater.
/// two `Step` objects. /// The *predecessor* operation moves towards values that compare lesser.
#[unstable( ///
feature = "step_trait", /// # Safety
reason = "likely to be replaced by finer-grained traits", ///
issue = "42168" /// This trait is `unsafe` because its implementation must be correct for
)] /// the safety of `unsafe trait TrustedLen` implementations, and the results
pub trait Step: Clone + PartialOrd + Sized { /// of using this trait can otherwise be trusted by `unsafe` code to be correct
/// Returns the number of steps between two step objects. The count is /// and fulfill the listed obligations.
/// inclusive of `start` and exclusive of `end`. #[unstable(feature = "step_trait", reason = "recently redesigned", issue = "42168")]
pub unsafe trait Step: Clone + PartialOrd + Sized {
/// Returns the number of *successor* steps required to get from `start` to `end`.
/// ///
/// Returns `None` if it is not possible to calculate `steps_between` /// Returns `None` if the number of steps would overflow `usize`
/// without overflow. /// (or is infinite, or if `end` would never be reached).
///
/// # Invariants
///
/// For any `a`, `b`, and `n`:
///
/// * `steps_between(&a, &b) == Some(n)` if and only if `Step::forward_checked(&a, n) == Some(b)`
/// * `steps_between(&a, &b) == Some(n)` if and only if `Step::backward_checked(&b, n) == Some(a)`
/// * `steps_between(&a, &b) == Some(n)` only if `a <= b`
/// * Corollary: `steps_between(&a, &b) == Some(0)` if and only if `a == b`
/// * Note that `a <= b` does _not_ imply `steps_between(&a, &b) != None`;
/// this is the case when it would require more than `usize::MAX` steps to get to `b`
/// * `steps_between(&a, &b) == None` if `a > b`
fn steps_between(start: &Self, end: &Self) -> Option<usize>; fn steps_between(start: &Self, end: &Self) -> Option<usize>;
/// Replaces this step with `1`, returning a clone of itself. /// Returns the value that would be obtained by taking the *successor*
/// of `self` `count` times.
/// ///
/// The output of this method should always be greater than the output of replace_zero. /// If this would overflow the range of values supported by `Self`, returns `None`.
fn replace_one(&mut self) -> Self;
/// Replaces this step with `0`, returning a clone of itself.
/// ///
/// The output of this method should always be less than the output of replace_one. /// # Invariants
fn replace_zero(&mut self) -> Self; ///
/// For any `a`, `n`, and `m`:
///
/// * `Step::forward_checked(a, n).and_then(|x| Step::forward_checked(x, m)) == Step::forward_checked(a, m).and_then(|x| Step::forward_checked(x, n))`
///
/// For any `a`, `n`, and `m` where `n + m` does not overflow:
///
/// * `Step::forward_checked(a, n).and_then(|x| Step::forward_checked(x, m)) == Step::forward_checked(a, n + m)`
///
/// For any `a` and `n`:
///
/// * `Step::forward_checked(a, n) == (0..n).try_fold(a, |x, _| Step::forward_checked(&x, 1))`
/// * Corollary: `Step::forward_checked(&a, 0) == Some(a)`
#[unstable(feature = "step_trait_ext", reason = "recently added", issue = "42168")]
fn forward_checked(start: Self, count: usize) -> Option<Self>;
/// Adds one to this step, returning the result. /// Returns the value that would be obtained by taking the *successor*
fn add_one(&self) -> Self; /// of `self` `count` times.
///
/// If this would overflow the range of values supported by `Self`,
/// this function is allowed to panic, wrap, or saturate.
/// The suggested behavior is to panic when debug assertions are enabled,
/// and to wrap or saturate otherwise.
///
/// Unsafe code should not rely on the correctness of behavior after overflow.
///
/// # Invariants
///
/// For any `a`, `n`, and `m`, where no overflow occurs:
///
/// * `Step::forward(Step::forward(a, n), m) == Step::forward(a, n + m)`
///
/// For any `a` and `n`, where no overflow occurs:
///
/// * `Step::forward_checked(a, n) == Some(Step::forward(a, n))`
/// * `Step::forward(a, n) == (0..n).fold(a, |x, _| Step::forward(x, 1))`
/// * Corollary: `Step::forward(a, 0) == a`
/// * `Step::forward(a, n) >= a`
/// * `Step::backward(Step::forward(a, n), n) == a`
#[unstable(feature = "step_trait_ext", reason = "recently added", issue = "42168")]
fn forward(start: Self, count: usize) -> Self {
Step::forward_checked(start, count).expect("overflow in `Step::forward`")
}
/// Subtracts one to this step, returning the result. /// Returns the value that would be obtained by taking the *successor*
fn sub_one(&self) -> Self; /// of `self` `count` times.
///
/// # Safety
///
/// It is undefined behavior for this operation to overflow the
/// range of values supported by `Self`. If you cannot guarantee that this
/// will not overflow, use `forward` or `forward_checked` instead.
///
/// # Invariants
///
/// For any `a`:
///
/// * if there exists `b` such that `b > a`, it is safe to call `Step::forward_unchecked(a, 1)`
/// * if there exists `b`, `n` such that `steps_between(&a, &b) == Some(n)`,
/// it is safe to call `Step::forward_unchecked(a, m)` for any `m <= n`.
///
/// For any `a` and `n`, where no overflow occurs:
///
/// * `Step::forward_unchecked(a, n)` is equivalent to `Step::forward(a, n)`
#[unstable(feature = "unchecked_math", reason = "niche optimization path", issue = "none")]
unsafe fn forward_unchecked(start: Self, count: usize) -> Self {
Step::forward(start, count)
}
/// Adds a `usize`, returning `None` on overflow. /// Returns the value that would be obtained by taking the *predecessor*
fn add_usize(&self, n: usize) -> Option<Self>; /// of `self` `count` times.
///
/// If this would overflow the range of values supported by `Self`, returns `None`.
///
/// # Invariants
///
/// For any `a`, `n`, and `m`:
///
/// * `Step::backward_checked(a, n).and_then(|x| Step::backward_checked(x, m)) == n.checked_add(m).and_then(|x| Step::backward_checked(a, x))`
/// * `Step::backward_checked(a, n).and_then(|x| Step::backward_checked(x, m)) == try { Step::backward_checked(a, n.checked_add(m)?) }`
///
/// For any `a` and `n`:
///
/// * `Step::backward_checked(a, n) == (0..n).try_fold(a, |x, _| Step::backward_checked(&x, 1))`
/// * Corollary: `Step::backward_checked(&a, 0) == Some(a)`
#[unstable(feature = "step_trait_ext", reason = "recently added", issue = "42168")]
fn backward_checked(start: Self, count: usize) -> Option<Self>;
/// Subtracts a `usize`, returning `None` on underflow. /// Returns the value that would be obtained by taking the *predecessor*
fn sub_usize(&self, n: usize) -> Option<Self> { /// of `self` `count` times.
// this default implementation makes the addition of `sub_usize` a non-breaking change ///
let _ = n; /// If this would overflow the range of values supported by `Self`,
unimplemented!() /// this function is allowed to panic, wrap, or saturate.
/// The suggested behavior is to panic when debug assertions are enabled,
/// and to wrap or saturate otherwise.
///
/// Unsafe code should not rely on the correctness of behavior after overflow.
///
/// # Invariants
///
/// For any `a`, `n`, and `m`, where no overflow occurs:
///
/// * `Step::backward(Step::backward(a, n), m) == Step::backward(a, n + m)`
///
/// For any `a` and `n`, where no overflow occurs:
///
/// * `Step::backward_checked(a, n) == Some(Step::backward(a, n))`
/// * `Step::backward(a, n) == (0..n).fold(a, |x, _| Step::backward(x, 1))`
/// * Corollary: `Step::backward(a, 0) == a`
/// * `Step::backward(a, n) <= a`
/// * `Step::forward(Step::backward(a, n), n) == a`
#[unstable(feature = "step_trait_ext", reason = "recently added", issue = "42168")]
fn backward(start: Self, count: usize) -> Self {
Step::backward_checked(start, count).expect("overflow in `Step::backward`")
}
/// Returns the value that would be obtained by taking the *predecessor*
/// of `self` `count` times.
///
/// # Safety
///
/// It is undefined behavior for this operation to overflow the
/// range of values supported by `Self`. If you cannot guarantee that this
/// will not overflow, use `backward` or `backward_checked` instead.
///
/// # Invariants
///
/// For any `a`:
///
/// * if there exists `b` such that `b < a`, it is safe to call `Step::backward_unchecked(a, 1)`
/// * if there exists `b`, `n` such that `steps_between(&b, &a) == Some(n)`,
/// it is safe to call `Step::backward_unchecked(a, m)` for any `m <= n`.
///
/// For any `a` and `n`, where no overflow occurs:
///
/// * `Step::backward_unchecked(a, n)` is equivalent to `Step::backward(a, n)`
#[unstable(feature = "unchecked_math", reason = "niche optimization path", issue = "none")]
unsafe fn backward_unchecked(start: Self, count: usize) -> Self {
Step::backward(start, count)
} }
} }
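A small self-contained sketch of the successor/predecessor semantics above, modelled with free functions on `u8` (the trait itself is unstable); the function names mirror the trait methods but are not the real implementations.

use std::convert::TryFrom;

fn forward_checked(start: u8, count: usize) -> Option<u8> {
    u8::try_from(count).ok().and_then(|n| start.checked_add(n))
}

fn backward_checked(start: u8, count: usize) -> Option<u8> {
    u8::try_from(count).ok().and_then(|n| start.checked_sub(n))
}

fn steps_between(start: u8, end: u8) -> Option<usize> {
    if start <= end { Some((end - start) as usize) } else { None }
}

fn main() {
    // `steps_between(a, b) == Some(n)` iff `forward_checked(a, n) == Some(b)`
    // iff `backward_checked(b, n) == Some(a)`.
    assert_eq!(steps_between(10, 13), Some(3));
    assert_eq!(forward_checked(10, 3), Some(13));
    assert_eq!(backward_checked(13, 3), Some(10));

    // Overflowing the type yields `None` instead of wrapping,
    // and going "backwards" (start > end) has no step count.
    assert_eq!(forward_checked(250, 10), None);
    assert_eq!(steps_between(13, 10), None);
}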
@ -52,127 +187,218 @@ pub trait Step: Clone + PartialOrd + Sized {
macro_rules! step_identical_methods { macro_rules! step_identical_methods {
() => { () => {
#[inline] #[inline]
fn replace_one(&mut self) -> Self { unsafe fn forward_unchecked(start: Self, n: usize) -> Self {
mem::replace(self, 1) start.unchecked_add(n as Self)
} }
#[inline] #[inline]
fn replace_zero(&mut self) -> Self { unsafe fn backward_unchecked(start: Self, n: usize) -> Self {
mem::replace(self, 0) start.unchecked_sub(n as Self)
} }
#[inline] #[inline]
fn add_one(&self) -> Self { fn forward(start: Self, n: usize) -> Self {
Add::add(*self, 1) // In debug builds, trigger a panic on overflow.
// This should optimize completely out in release builds.
if Self::forward_checked(start, n).is_none() {
let _ = Add::add(Self::MAX, 1);
}
// Do wrapping math to allow e.g. `Step::forward(-128i8, 255)`.
start.wrapping_add(n as Self)
} }
#[inline] #[inline]
fn sub_one(&self) -> Self { fn backward(start: Self, n: usize) -> Self {
Sub::sub(*self, 1) // In debug builds, trigger a panic on overflow.
// This should optimize completely out in release builds.
if Self::backward_checked(start, n).is_none() {
let _ = Sub::sub(Self::MIN, 1);
}
// Do wrapping math to allow e.g. `Step::backward(127i8, 255)`.
start.wrapping_sub(n as Self)
} }
}; };
} }
macro_rules! step_impl_unsigned { macro_rules! step_integer_impls {
($($t:ty)*) => ($( {
#[unstable(feature = "step_trait", narrower than or same width as usize:
reason = "likely to be replaced by finer-grained traits", $( [ $u_narrower:ident $i_narrower:ident ] ),+;
issue = "42168")] wider than usize:
impl Step for $t { $( [ $u_wider:ident $i_wider:ident ] ),+;
#[inline] } => {
fn steps_between(start: &$t, end: &$t) -> Option<usize> { $(
if *start < *end {
usize::try_from(*end - *start).ok()
} else {
Some(0)
}
}
#[inline]
#[allow(unreachable_patterns)] #[allow(unreachable_patterns)]
fn add_usize(&self, n: usize) -> Option<Self> { #[unstable(feature = "step_trait", reason = "recently redesigned", issue = "42168")]
match <$t>::try_from(n) { unsafe impl Step for $u_narrower {
Ok(n_as_t) => self.checked_add(n_as_t), step_identical_methods!();
Err(_) => None,
}
}
#[inline] #[inline]
#[allow(unreachable_patterns)] fn steps_between(start: &Self, end: &Self) -> Option<usize> {
fn sub_usize(&self, n: usize) -> Option<Self> { if *start <= *end {
match <$t>::try_from(n) { // This relies on $u_narrower <= usize
Ok(n_as_t) => self.checked_sub(n_as_t), Some((*end - *start) as usize)
Err(_) => None, } else {
} None
}
step_identical_methods!();
}
)*)
}
macro_rules! step_impl_signed {
($( [$t:ty : $unsigned:ty] )*) => ($(
#[unstable(feature = "step_trait",
reason = "likely to be replaced by finer-grained traits",
issue = "42168")]
impl Step for $t {
#[inline]
fn steps_between(start: &$t, end: &$t) -> Option<usize> {
if *start < *end {
// Use .wrapping_sub and cast to unsigned to compute the
// difference that may not fit inside the range of $t.
usize::try_from(end.wrapping_sub(*start) as $unsigned).ok()
} else {
Some(0)
}
}
#[inline]
#[allow(unreachable_patterns)]
fn add_usize(&self, n: usize) -> Option<Self> {
match <$unsigned>::try_from(n) {
Ok(n_as_unsigned) => {
// Wrapping in unsigned space handles cases like
// `-120_i8.add_usize(200) == Some(80_i8)`,
// even though 200_usize is out of range for i8.
let wrapped = (*self as $unsigned).wrapping_add(n_as_unsigned) as $t;
if wrapped >= *self {
Some(wrapped)
} else {
None // Addition overflowed
}
} }
Err(_) => None,
} }
}
#[inline] #[inline]
#[allow(unreachable_patterns)] fn forward_checked(start: Self, n: usize) -> Option<Self> {
fn sub_usize(&self, n: usize) -> Option<Self> { match Self::try_from(n) {
match <$unsigned>::try_from(n) { Ok(n) => start.checked_add(n),
Ok(n_as_unsigned) => { Err(_) => None, // if n is out of range, `unsigned_start + n` is too
// Wrapping in unsigned space handles cases like }
// `80_i8.sub_usize(200) == Some(-120_i8)`, }
// even though 200_usize is out of range for i8.
let wrapped = (*self as $unsigned).wrapping_sub(n_as_unsigned) as $t; #[inline]
if wrapped <= *self { fn backward_checked(start: Self, n: usize) -> Option<Self> {
Some(wrapped) match Self::try_from(n) {
} else { Ok(n) => start.checked_sub(n),
None // Subtraction underflowed Err(_) => None, // if n is out of range, `unsigned_start - n` is too
}
} }
Err(_) => None,
} }
} }
step_identical_methods!(); #[allow(unreachable_patterns)]
} #[unstable(feature = "step_trait", reason = "recently redesigned", issue = "42168")]
)*) unsafe impl Step for $i_narrower {
step_identical_methods!();
#[inline]
fn steps_between(start: &Self, end: &Self) -> Option<usize> {
if *start <= *end {
// This relies on $i_narrower <= usize
//
// Casting to isize extends the width but preserves the sign.
// Use wrapping_sub in isize space and cast to usize to compute
// the difference that may not fit inside the range of isize.
Some((*end as isize).wrapping_sub(*start as isize) as usize)
} else {
None
}
}
#[inline]
fn forward_checked(start: Self, n: usize) -> Option<Self> {
match $u_narrower::try_from(n) {
Ok(n) => {
// Wrapping handles cases like
// `Step::forward(-120_i8, 200) == Some(80_i8)`,
// even though 200 is out of range for i8.
let wrapped = start.wrapping_add(n as Self);
if wrapped >= start {
Some(wrapped)
} else {
None // Addition overflowed
}
}
// If n is out of range of e.g. u8,
// then it exceeds the width of i8's entire range
// so `any_i8 + n` necessarily overflows i8.
Err(_) => None,
}
}
#[inline]
fn backward_checked(start: Self, n: usize) -> Option<Self> {
match $u_narrower::try_from(n) {
Ok(n) => {
// Wrapping handles cases like
// `Step::backward(127_i8, 200) == Some(-73_i8)`,
// even though 200 is out of range for i8.
let wrapped = start.wrapping_sub(n as Self);
if wrapped <= start {
Some(wrapped)
} else {
None // Subtraction overflowed
}
}
// If n is out of range of e.g. u8,
// then it exceeds the width of i8's entire range
// so `any_i8 - n` necessarily overflows i8.
Err(_) => None,
}
}
}
)+
$(
#[allow(unreachable_patterns)]
#[unstable(feature = "step_trait", reason = "recently redesigned", issue = "42168")]
unsafe impl Step for $u_wider {
step_identical_methods!();
#[inline]
fn steps_between(start: &Self, end: &Self) -> Option<usize> {
if *start <= *end {
usize::try_from(*end - *start).ok()
} else {
None
}
}
#[inline]
fn forward_checked(start: Self, n: usize) -> Option<Self> {
start.checked_add(n as Self)
}
#[inline]
fn backward_checked(start: Self, n: usize) -> Option<Self> {
start.checked_sub(n as Self)
}
}
#[allow(unreachable_patterns)]
#[unstable(feature = "step_trait", reason = "recently redesigned", issue = "42168")]
unsafe impl Step for $i_wider {
step_identical_methods!();
#[inline]
fn steps_between(start: &Self, end: &Self) -> Option<usize> {
if *start <= *end {
match end.checked_sub(*start) {
Some(result) => usize::try_from(result).ok(),
// If the difference is too big for e.g. i128,
// it's also going to be too big for usize with fewer bits.
None => None,
}
} else {
None
}
}
#[inline]
fn forward_checked(start: Self, n: usize) -> Option<Self> {
start.checked_add(n as Self)
}
#[inline]
fn backward_checked(start: Self, n: usize) -> Option<Self> {
start.checked_sub(n as Self)
}
}
)+
};
} }
step_impl_unsigned!(usize u8 u16 u32 u64 u128); #[cfg(target_pointer_width = "64")]
step_impl_signed!([isize: usize][i8: u8][i16: u16]); step_integer_impls! {
step_impl_signed!([i32: u32][i64: u64][i128: u128]); narrower than or same width as usize: [u8 i8], [u16 i16], [u32 i32], [u64 i64], [usize isize];
wider than usize: [u128 i128];
}
#[cfg(target_pointer_width = "32")]
step_integer_impls! {
narrower than or same width as usize: [u8 i8], [u16 i16], [u32 i32], [usize isize];
wider than usize: [u64 i64], [u128 i128];
}
#[cfg(target_pointer_width = "16")]
step_integer_impls! {
narrower than or same width as usize: [u8 i8], [u16 i16], [usize isize];
wider than usize: [u32 i32], [u64 i64], [u128 i128];
}
macro_rules! range_exact_iter_impl { macro_rules! range_exact_iter_impl {
($($t:ty)*) => ($( ($($t:ty)*) => ($(
@ -188,20 +414,6 @@ macro_rules! range_incl_exact_iter_impl {
)*) )*)
} }
macro_rules! range_trusted_len_impl {
($($t:ty)*) => ($(
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl TrustedLen for ops::Range<$t> { }
)*)
}
macro_rules! range_incl_trusted_len_impl {
($($t:ty)*) => ($(
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl TrustedLen for ops::RangeInclusive<$t> { }
)*)
}
#[stable(feature = "rust1", since = "1.0.0")] #[stable(feature = "rust1", since = "1.0.0")]
impl<A: Step> Iterator for ops::Range<A> { impl<A: Step> Iterator for ops::Range<A> {
type Item = A; type Item = A;
@ -209,16 +421,12 @@ impl<A: Step> Iterator for ops::Range<A> {
#[inline] #[inline]
fn next(&mut self) -> Option<A> { fn next(&mut self) -> Option<A> {
if self.start < self.end { if self.start < self.end {
// We check for overflow here, even though it can't actually // SAFETY: just checked precondition
// happen. Adding this check does however help llvm vectorize loops // We use the unchecked version here, because
// for some ranges that don't get vectorized otherwise, // this helps LLVM vectorize loops for some ranges
// and this won't actually result in an extra check in an optimized build. // that don't get vectorized otherwise.
if let Some(mut n) = self.start.add_usize(1) { let n = unsafe { Step::forward_unchecked(self.start.clone(), 1) };
mem::swap(&mut n, &mut self.start); Some(mem::replace(&mut self.start, n))
Some(n)
} else {
None
}
} else { } else {
None None
} }
@ -226,17 +434,19 @@ impl<A: Step> Iterator for ops::Range<A> {
#[inline] #[inline]
fn size_hint(&self) -> (usize, Option<usize>) { fn size_hint(&self) -> (usize, Option<usize>) {
match Step::steps_between(&self.start, &self.end) { if self.start < self.end {
Some(hint) => (hint, Some(hint)), let hint = Step::steps_between(&self.start, &self.end);
None => (usize::MAX, None), (hint.unwrap_or(usize::MAX), hint)
} else {
(0, Some(0))
} }
} }
#[inline] #[inline]
fn nth(&mut self, n: usize) -> Option<A> { fn nth(&mut self, n: usize) -> Option<A> {
if let Some(plus_n) = self.start.add_usize(n) { if let Some(plus_n) = Step::forward_checked(self.start.clone(), n) {
if plus_n < self.end { if plus_n < self.end {
self.start = plus_n.add_one(); self.start = Step::forward(plus_n.clone(), 1);
return Some(plus_n); return Some(plus_n);
} }
} }
@ -262,25 +472,42 @@ impl<A: Step> Iterator for ops::Range<A> {
} }
// These macros generate `ExactSizeIterator` impls for various range types. // These macros generate `ExactSizeIterator` impls for various range types.
// Range<{u,i}64> and RangeInclusive<{u,i}{32,64,size}> are excluded
// because they cannot guarantee having a length <= usize::MAX, which is
// required by ExactSizeIterator.
range_exact_iter_impl!(usize u8 u16 u32 isize i8 i16 i32);
range_incl_exact_iter_impl!(u8 u16 i8 i16);
// These macros generate `TrustedLen` impls.
// //
// They need to guarantee that .size_hint() is either exact, or that // * `ExactSizeIterator::len` is required to always return an exact `usize`,
// the upper bound is None when it does not fit the type limits. // so no range can be longer than `usize::MAX`.
range_trusted_len_impl!(usize isize u8 i8 u16 i16 u32 i32 u64 i64 u128 i128); // * For integer types in `Range<_>` this is the case for types narrower than or as wide as `usize`.
range_incl_trusted_len_impl!(usize isize u8 i8 u16 i16 u32 i32 u64 i64 u128 i128); // For integer types in `RangeInclusive<_>`
// this is the case for types *strictly narrower* than `usize`
// since e.g. `(0..=u64::MAX).len()` would be `u64::MAX + 1`.
range_exact_iter_impl! {
usize u8 u16
isize i8 i16
// These are incorrect per the reasoning above,
// but removing them would be a breaking change as they were stabilized in Rust 1.0.0.
// So e.g. `(0..66_000_u32).len()` will compile without error or warnings
// on 16-bit platforms, but continue to give a wrong result.
u32
i32
}
range_incl_exact_iter_impl! {
u8
i8
// These are incorrect per the reasoning above,
// but removing them would be a breaking change as they were stabilized in Rust 1.26.0.
// So e.g. `(0..=u16::MAX).len()` will compile without error or warnings
// on 16-bit platforms, but continue to give a wrong result.
u16
i16
}
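A concrete look at the length-bound reasoning above, assuming a 64-bit target; the exact lower bound reported for the oversized inclusive range is an implementation detail, so only the `None` upper bound is asserted.

fn main() {
    // Types no wider than `usize` get an exact `ExactSizeIterator::len`.
    assert_eq!((0u16..1000).len(), 1000);
    // For inclusive ranges only strictly narrower types qualify:
    // `0..=u8::MAX` has 256 elements, which still fits in `usize`.
    assert_eq!((0u8..=u8::MAX).len(), 256);

    // `0..=u64::MAX` would have `u64::MAX + 1` elements, so it has no `len()`;
    // its size hint cannot name an upper bound either.
    let (_, upper) = (0u64..=u64::MAX).size_hint();
    assert_eq!(upper, None);
}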
#[stable(feature = "rust1", since = "1.0.0")] #[stable(feature = "rust1", since = "1.0.0")]
impl<A: Step> DoubleEndedIterator for ops::Range<A> { impl<A: Step> DoubleEndedIterator for ops::Range<A> {
#[inline] #[inline]
fn next_back(&mut self) -> Option<A> { fn next_back(&mut self) -> Option<A> {
if self.start < self.end { if self.start < self.end {
self.end = self.end.sub_one(); self.end = Step::backward(self.end.clone(), 1);
Some(self.end.clone()) Some(self.end.clone())
} else { } else {
None None
@ -289,9 +516,9 @@ impl<A: Step> DoubleEndedIterator for ops::Range<A> {
#[inline] #[inline]
fn nth_back(&mut self, n: usize) -> Option<A> { fn nth_back(&mut self, n: usize) -> Option<A> {
if let Some(minus_n) = self.end.sub_usize(n) { if let Some(minus_n) = Step::backward_checked(self.end.clone(), n) {
if minus_n > self.start { if minus_n > self.start {
self.end = minus_n.sub_one(); self.end = Step::backward(minus_n, 1);
return Some(self.end.clone()); return Some(self.end.clone());
} }
} }
@ -301,6 +528,9 @@ impl<A: Step> DoubleEndedIterator for ops::Range<A> {
} }
} }
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<A: Step> TrustedLen for ops::Range<A> {}
#[stable(feature = "fused", since = "1.26.0")] #[stable(feature = "fused", since = "1.26.0")]
impl<A: Step> FusedIterator for ops::Range<A> {} impl<A: Step> FusedIterator for ops::Range<A> {}
@ -310,9 +540,8 @@ impl<A: Step> Iterator for ops::RangeFrom<A> {
#[inline] #[inline]
fn next(&mut self) -> Option<A> { fn next(&mut self) -> Option<A> {
let mut n = self.start.add_one(); let n = Step::forward(self.start.clone(), 1);
mem::swap(&mut n, &mut self.start); Some(mem::replace(&mut self.start, n))
Some(n)
} }
#[inline] #[inline]
@ -322,8 +551,16 @@ impl<A: Step> Iterator for ops::RangeFrom<A> {
#[inline] #[inline]
fn nth(&mut self, n: usize) -> Option<A> { fn nth(&mut self, n: usize) -> Option<A> {
let plus_n = self.start.add_usize(n).expect("overflow in RangeFrom::nth"); // If we would jump over the maximum value, panic immediately.
self.start = plus_n.add_one(); // This is consistent with behavior before the Step redesign,
// even though it's inconsistent with n `next` calls.
// To get consistent behavior, change it to use `forward` instead.
// This change should go through FCP separately from the redesign, so it is left for now as a
// FIXME: make this consistent
let plus_n =
Step::forward_checked(self.start.clone(), n).expect("overflow in RangeFrom::nth");
// The final step should always be debug-checked.
self.start = Step::forward(plus_n.clone(), 1);
Some(plus_n) Some(plus_n)
} }
} }
@ -345,7 +582,7 @@ impl<A: Step> Iterator for ops::RangeInclusive<A> {
} }
let is_iterating = self.start < self.end; let is_iterating = self.start < self.end;
Some(if is_iterating { Some(if is_iterating {
let n = self.start.add_one(); let n = Step::forward(self.start.clone(), 1);
mem::replace(&mut self.start, n) mem::replace(&mut self.start, n)
} else { } else {
self.exhausted = true; self.exhausted = true;
@ -371,12 +608,12 @@ impl<A: Step> Iterator for ops::RangeInclusive<A> {
return None; return None;
} }
if let Some(plus_n) = self.start.add_usize(n) { if let Some(plus_n) = Step::forward_checked(self.start.clone(), n) {
use crate::cmp::Ordering::*; use crate::cmp::Ordering::*;
match plus_n.partial_cmp(&self.end) { match plus_n.partial_cmp(&self.end) {
Some(Less) => { Some(Less) => {
self.start = plus_n.add_one(); self.start = Step::forward(plus_n.clone(), 1);
return Some(plus_n); return Some(plus_n);
} }
Some(Equal) => { Some(Equal) => {
@ -407,7 +644,7 @@ impl<A: Step> Iterator for ops::RangeInclusive<A> {
let mut accum = init; let mut accum = init;
while self.start < self.end { while self.start < self.end {
let n = self.start.add_one(); let n = Step::forward(self.start.clone(), 1);
let n = mem::replace(&mut self.start, n); let n = mem::replace(&mut self.start, n);
accum = f(accum, n)?; accum = f(accum, n)?;
} }
@ -446,7 +683,7 @@ impl<A: Step> DoubleEndedIterator for ops::RangeInclusive<A> {
} }
let is_iterating = self.start < self.end; let is_iterating = self.start < self.end;
Some(if is_iterating { Some(if is_iterating {
let n = self.end.sub_one(); let n = Step::backward(self.end.clone(), 1);
mem::replace(&mut self.end, n) mem::replace(&mut self.end, n)
} else { } else {
self.exhausted = true; self.exhausted = true;
@ -460,12 +697,12 @@ impl<A: Step> DoubleEndedIterator for ops::RangeInclusive<A> {
return None; return None;
} }
if let Some(minus_n) = self.end.sub_usize(n) { if let Some(minus_n) = Step::backward_checked(self.end.clone(), n) {
use crate::cmp::Ordering::*; use crate::cmp::Ordering::*;
match minus_n.partial_cmp(&self.start) { match minus_n.partial_cmp(&self.start) {
Some(Greater) => { Some(Greater) => {
self.end = minus_n.sub_one(); self.end = Step::backward(minus_n.clone(), 1);
return Some(minus_n); return Some(minus_n);
} }
Some(Equal) => { Some(Equal) => {
@ -496,7 +733,7 @@ impl<A: Step> DoubleEndedIterator for ops::RangeInclusive<A> {
let mut accum = init; let mut accum = init;
while self.start < self.end { while self.start < self.end {
let n = self.end.sub_one(); let n = Step::backward(self.end.clone(), 1);
let n = mem::replace(&mut self.end, n); let n = mem::replace(&mut self.end, n);
accum = f(accum, n)?; accum = f(accum, n)?;
} }
@ -511,5 +748,8 @@ impl<A: Step> DoubleEndedIterator for ops::RangeInclusive<A> {
} }
} }
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<A: Step> TrustedLen for ops::RangeInclusive<A> {}
#[stable(feature = "fused", since = "1.26.0")] #[stable(feature = "fused", since = "1.26.0")]
impl<A: Step> FusedIterator for ops::RangeInclusive<A> {} impl<A: Step> FusedIterator for ops::RangeInclusive<A> {}

View File

@ -333,7 +333,7 @@ pub trait Iterator {
#[inline] #[inline]
#[stable(feature = "rust1", since = "1.0.0")] #[stable(feature = "rust1", since = "1.0.0")]
fn nth(&mut self, mut n: usize) -> Option<Self::Item> { fn nth(&mut self, mut n: usize) -> Option<Self::Item> {
for x in self { while let Some(x) = self.next() {
if n == 0 { if n == 0 {
return Some(x); return Some(x);
} }

View File

@ -13,6 +13,7 @@
/// [`Iterator::fuse`]: ../../std/iter/trait.Iterator.html#method.fuse /// [`Iterator::fuse`]: ../../std/iter/trait.Iterator.html#method.fuse
/// [`Fuse`]: ../../std/iter/struct.Fuse.html /// [`Fuse`]: ../../std/iter/struct.Fuse.html
#[stable(feature = "fused", since = "1.26.0")] #[stable(feature = "fused", since = "1.26.0")]
#[rustc_unsafe_specialization_marker]
pub trait FusedIterator: Iterator {} pub trait FusedIterator: Iterator {}
#[stable(feature = "fused", since = "1.26.0")] #[stable(feature = "fused", since = "1.26.0")]
@ -38,6 +39,7 @@ impl<I: FusedIterator + ?Sized> FusedIterator for &mut I {}
/// [`usize::MAX`]: ../../std/usize/constant.MAX.html /// [`usize::MAX`]: ../../std/usize/constant.MAX.html
/// [`.size_hint`]: ../../std/iter/trait.Iterator.html#method.size_hint /// [`.size_hint`]: ../../std/iter/trait.Iterator.html#method.size_hint
#[unstable(feature = "trusted_len", issue = "37572")] #[unstable(feature = "trusted_len", issue = "37572")]
#[rustc_unsafe_specialization_marker]
pub unsafe trait TrustedLen: Iterator {} pub unsafe trait TrustedLen: Iterator {}
#[unstable(feature = "trusted_len", issue = "37572")] #[unstable(feature = "trusted_len", issue = "37572")]
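The marker attributes above let unsafe specializations rely on what these traits guarantee; as a reminder of the `FusedIterator` guarantee itself, a tiny stable-Rust example (illustrative only):

fn main() {
    let xs = [1, 2];
    let mut it = xs.iter().fuse();
    assert_eq!(it.next(), Some(&1));
    assert_eq!(it.next(), Some(&2));
    assert_eq!(it.next(), None);
    // A fused iterator keeps returning `None` once exhausted, which is the
    // property specializations are allowed to depend on.
    assert_eq!(it.next(), None);
}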

View File

@ -363,6 +363,13 @@ pub trait StructuralEq {
/// [impls]: #implementors /// [impls]: #implementors
#[stable(feature = "rust1", since = "1.0.0")] #[stable(feature = "rust1", since = "1.0.0")]
#[lang = "copy"] #[lang = "copy"]
// FIXME(matthewjasper) This allows copying a type that doesn't implement
// `Copy` because of unsatisfied lifetime bounds (copying `A<'_>` when only
// `A<'static>: Copy` and `A<'_>: Clone`).
// We have this attribute here for now only because there are quite a few
// specializations on `Copy` already in the standard
// library, and there's no way to safely have this behavior right now.
#[rustc_unsafe_specialization_marker]
pub trait Copy: Clone { pub trait Copy: Clone {
// Empty. // Empty.
} }

View File

@ -2,19 +2,23 @@ use crate::ops::{Deref, DerefMut};
use crate::ptr; use crate::ptr;
/// A wrapper to inhibit compiler from automatically calling `T`s destructor. /// A wrapper to inhibit compiler from automatically calling `T`s destructor.
///
/// This wrapper is 0-cost. /// This wrapper is 0-cost.
/// ///
/// `ManuallyDrop<T>` is subject to the same layout optimizations as `T`. /// `ManuallyDrop<T>` is subject to the same layout optimizations as `T`.
/// As a consequence, it has *no effect* on the assumptions that the compiler makes /// As a consequence, it has *no effect* on the assumptions that the compiler makes
/// about all values being initialized at their type. In particular, initializing /// about its contents. For example, initializing a `ManuallyDrop<&mut T>`
/// a `ManuallyDrop<&mut T>` with [`mem::zeroed`] is undefined behavior. /// with [`mem::zeroed`] is undefined behavior.
/// If you need to handle uninitialized data, use [`MaybeUninit<T>`] instead. /// If you need to handle uninitialized data, use [`MaybeUninit<T>`] instead.
/// ///
/// Note that accessing the value inside a `ManuallyDrop<T>` is safe.
/// This means that a `ManuallyDrop<T>` whose content has been dropped must not
/// be exposed through a public safe API.
/// Correspondingly, `ManuallyDrop::drop` is unsafe.
///
/// # Examples /// # Examples
/// ///
/// This wrapper helps with explicitly documenting the drop order dependencies between fields of /// This wrapper can be used to enforce a particular drop order on fields, regardless
/// the type: /// of how they are defined in the struct:
/// ///
/// ```rust /// ```rust
/// use std::mem::ManuallyDrop; /// use std::mem::ManuallyDrop;
@ -43,8 +47,18 @@ use crate::ptr;
/// } /// }
/// ``` /// ```
/// ///
/// However, care should be taken when using this pattern as it can lead to *leak amplification*.
/// In this example, if the `Drop` implementation for `Peach` were to panic, the `banana` field
/// would also be leaked.
///
/// In contrast, the automatically-generated compiler drop implementation would have ensured
/// that all fields are dropped even in the presence of panics. This is especially important when
/// working with [pinned] data, where reusing the memory without calling the destructor could lead
/// to Undefined Behaviour.
///
/// [`mem::zeroed`]: fn.zeroed.html /// [`mem::zeroed`]: fn.zeroed.html
/// [`MaybeUninit<T>`]: union.MaybeUninit.html /// [`MaybeUninit<T>`]: union.MaybeUninit.html
/// [pinned]: ../pin/index.html
#[stable(feature = "manually_drop", since = "1.20.0")] #[stable(feature = "manually_drop", since = "1.20.0")]
#[lang = "manually_drop"] #[lang = "manually_drop"]
#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] #[derive(Copy, Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)]
@ -113,19 +127,28 @@ impl<T> ManuallyDrop<T> {
} }
impl<T: ?Sized> ManuallyDrop<T> { impl<T: ?Sized> ManuallyDrop<T> {
/// Manually drops the contained value. /// Manually drops the contained value. This is exactly equivalent to calling
/// [`ptr::drop_in_place`] with a pointer to the contained value. As such, unless
/// the contained value is a packed struct, the destructor will be called in-place
/// without moving the value, and thus can be used to safely drop [pinned] data.
/// ///
/// If you have ownership of the value, you can use [`ManuallyDrop::into_inner`] instead. /// If you have ownership of the value, you can use [`ManuallyDrop::into_inner`] instead.
/// ///
/// # Safety /// # Safety
/// ///
/// This function runs the destructor of the contained value and thus the wrapped value /// This function runs the destructor of the contained value. Other than changes made by
/// now represents uninitialized data. It is up to the user of this method to ensure the /// the destructor itself, the memory is left unchanged, and so as far as the compiler is
/// uninitialized data is not actually used. /// concerned still holds a bit-pattern which is valid for the type `T`.
/// In particular, this function can only be called at most once ///
/// for a given instance of `ManuallyDrop<T>`. /// should not be called more than once. Using a value after it's been dropped, or dropping
/// should not be called more than once. To use a value after it's been dropped, or drop
/// a value multiple times, can cause Undefined Behavior (depending on what `drop` does).
/// This is normally prevented by the type system, but users of `ManuallyDrop` must
/// uphold those guarantees without assistance from the compiler.
/// ///
/// [`ManuallyDrop::into_inner`]: #method.into_inner /// [`ManuallyDrop::into_inner`]: #method.into_inner
/// [`ptr::drop_in_place`]: ../ptr/fn.drop_in_place.html
/// [pinned]: ../pin/index.html
#[stable(feature = "manually_drop", since = "1.20.0")] #[stable(feature = "manually_drop", since = "1.20.0")]
#[inline] #[inline]
pub unsafe fn drop(slot: &mut ManuallyDrop<T>) { pub unsafe fn drop(slot: &mut ManuallyDrop<T>) {
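A compact sketch of the field-ordering use case described above; `Noisy` and `Pair` are illustrative types, and the `unsafe` block upholds the single-drop requirement spelled out in the safety section.

use std::mem::ManuallyDrop;

struct Noisy(&'static str);

impl Drop for Noisy {
    fn drop(&mut self) {
        println!("dropping {}", self.0);
    }
}

struct Pair {
    first: Noisy,
    last: ManuallyDrop<Noisy>,
}

impl Drop for Pair {
    fn drop(&mut self) {
        // Drop `last` before the compiler automatically drops `first`,
        // reversing the declaration order.
        // SAFETY: `last` is dropped exactly once here and never used again.
        unsafe { ManuallyDrop::drop(&mut self.last) };
    }
}

fn main() {
    let _pair = Pair {
        first: Noisy("first"),
        last: ManuallyDrop::new(Noisy("last")),
    };
    // Prints "dropping last" and then "dropping first".
}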

View File

@ -367,7 +367,7 @@ impl f32 {
/// Infinity (∞). /// Infinity (∞).
#[stable(feature = "assoc_int_consts", since = "1.43.0")] #[stable(feature = "assoc_int_consts", since = "1.43.0")]
pub const INFINITY: f32 = 1.0_f32 / 0.0_f32; pub const INFINITY: f32 = 1.0_f32 / 0.0_f32;
/// Negative infinity (-∞). /// Negative infinity (−∞).
#[stable(feature = "assoc_int_consts", since = "1.43.0")] #[stable(feature = "assoc_int_consts", since = "1.43.0")]
pub const NEG_INFINITY: f32 = -1.0_f32 / 0.0_f32; pub const NEG_INFINITY: f32 = -1.0_f32 / 0.0_f32;

View File

@ -366,7 +366,7 @@ impl f64 {
/// Infinity (∞). /// Infinity (∞).
#[stable(feature = "assoc_int_consts", since = "1.43.0")] #[stable(feature = "assoc_int_consts", since = "1.43.0")]
pub const INFINITY: f64 = 1.0_f64 / 0.0_f64; pub const INFINITY: f64 = 1.0_f64 / 0.0_f64;
/// Negative infinity (-∞). /// Negative infinity (−∞).
#[stable(feature = "assoc_int_consts", since = "1.43.0")] #[stable(feature = "assoc_int_consts", since = "1.43.0")]
pub const NEG_INFINITY: f64 = -1.0_f64 / 0.0_f64; pub const NEG_INFINITY: f64 = -1.0_f64 / 0.0_f64;

View File

@ -749,6 +749,23 @@ $EndFeature, "
} }
} }
doc_comment! {
concat!("Unchecked integer addition. Computes `self + rhs`, assuming overflow
cannot occur. This results in undefined behavior when `self + rhs > ", stringify!($SelfT),
"::max_value()` or `self + rhs < ", stringify!($SelfT), "::min_value()`."),
#[unstable(
feature = "unchecked_math",
reason = "niche optimization path",
issue = "none",
)]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
pub unsafe fn unchecked_add(self, rhs: Self) -> Self {
intrinsics::unchecked_add(self, rhs)
}
}
doc_comment! { doc_comment! {
concat!("Checked integer subtraction. Computes `self - rhs`, returning `None` if concat!("Checked integer subtraction. Computes `self - rhs`, returning `None` if
overflow occurred. overflow occurred.
@ -774,6 +791,23 @@ $EndFeature, "
} }
} }
doc_comment! {
concat!("Unchecked integer subtraction. Computes `self - rhs`, assuming overflow
cannot occur. This results in undefined behavior when `self - rhs > ", stringify!($SelfT),
"::max_value()` or `self - rhs < ", stringify!($SelfT), "::min_value()`."),
#[unstable(
feature = "unchecked_math",
reason = "niche optimization path",
issue = "none",
)]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
pub unsafe fn unchecked_sub(self, rhs: Self) -> Self {
intrinsics::unchecked_sub(self, rhs)
}
}
doc_comment! { doc_comment! {
concat!("Checked integer multiplication. Computes `self * rhs`, returning `None` if concat!("Checked integer multiplication. Computes `self * rhs`, returning `None` if
overflow occurred. overflow occurred.
@ -799,6 +833,23 @@ $EndFeature, "
} }
} }
doc_comment! {
concat!("Unchecked integer multiplication. Computes `self * rhs`, assuming overflow
cannot occur. This results in undefined behavior when `self * rhs > ", stringify!($SelfT),
"::max_value()` or `self * rhs < ", stringify!($SelfT), "::min_value()`."),
#[unstable(
feature = "unchecked_math",
reason = "niche optimization path",
issue = "none",
)]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
pub unsafe fn unchecked_mul(self, rhs: Self) -> Self {
intrinsics::unchecked_mul(self, rhs)
}
}
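A hedged sketch of how a caller might use the unstable method above: prove the no-overflow precondition first, then reach for `unchecked_add`. `add_clamped` is a hypothetical function, and the example needs a nightly compiler with the `unchecked_math` feature.

#![feature(unchecked_math)]

fn add_clamped(a: i32, b: i32) -> i32 {
    // Only take the unchecked path when the precondition is already proven.
    if b >= 0 && a <= i32::MAX - b {
        // SAFETY: we just checked that `a + b` cannot exceed `i32::MAX`,
        // so the addition cannot overflow.
        unsafe { a.unchecked_add(b) }
    } else {
        a.saturating_add(b)
    }
}

fn main() {
    assert_eq!(add_clamped(40, 2), 42);
    assert_eq!(add_clamped(i32::MAX, 1), i32::MAX);
}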
doc_comment! { doc_comment! {
concat!("Checked integer division. Computes `self / rhs`, returning `None` if `rhs == 0` concat!("Checked integer division. Computes `self / rhs`, returning `None` if `rhs == 0`
or the division results in overflow. or the division results in overflow.
@ -1448,8 +1499,8 @@ any high-order bits of `rhs` that would cause the shift to exceed the bitwidth o
Note that this is *not* the same as a rotate-left; the RHS of a wrapping shift-left is restricted to Note that this is *not* the same as a rotate-left; the RHS of a wrapping shift-left is restricted to
the range of the type, rather than the bits shifted out of the LHS being returned to the other end. the range of the type, rather than the bits shifted out of the LHS being returned to the other end.
The primitive integer types all implement a `rotate_left` function, which may be what you want The primitive integer types all implement a [`rotate_left`](#method.rotate_left) function,
instead. which may be what you want instead.
# Examples # Examples
@ -1480,8 +1531,8 @@ removes any high-order bits of `rhs` that would cause the shift to exceed the bi
Note that this is *not* the same as a rotate-right; the RHS of a wrapping shift-right is restricted Note that this is *not* the same as a rotate-right; the RHS of a wrapping shift-right is restricted
to the range of the type, rather than the bits shifted out of the LHS being returned to the other to the range of the type, rather than the bits shifted out of the LHS being returned to the other
end. The primitive integer types all implement a `rotate_right` function, which may be what you want end. The primitive integer types all implement a [`rotate_right`](#method.rotate_right) function,
instead. which may be what you want instead.
# Examples # Examples
@ -2936,6 +2987,23 @@ assert_eq!((", stringify!($SelfT), "::MAX - 2).checked_add(3), None);", $EndFeat
} }
} }
doc_comment! {
concat!("Unchecked integer addition. Computes `self + rhs`, assuming overflow
cannot occur. This results in undefined behavior when `self + rhs > ", stringify!($SelfT),
"::max_value()` or `self + rhs < ", stringify!($SelfT), "::min_value()`."),
#[unstable(
feature = "unchecked_math",
reason = "niche optimization path",
issue = "none",
)]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
pub unsafe fn unchecked_add(self, rhs: Self) -> Self {
intrinsics::unchecked_add(self, rhs)
}
}
doc_comment! { doc_comment! {
concat!("Checked integer subtraction. Computes `self - rhs`, returning concat!("Checked integer subtraction. Computes `self - rhs`, returning
`None` if overflow occurred. `None` if overflow occurred.
@ -2959,6 +3027,23 @@ assert_eq!(0", stringify!($SelfT), ".checked_sub(1), None);", $EndFeature, "
} }
} }
doc_comment! {
concat!("Unchecked integer subtraction. Computes `self - rhs`, assuming overflow
cannot occur. This results in undefined behavior when `self - rhs > ", stringify!($SelfT),
"::max_value()` or `self - rhs < ", stringify!($SelfT), "::min_value()`."),
#[unstable(
feature = "unchecked_math",
reason = "niche optimization path",
issue = "none",
)]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
pub unsafe fn unchecked_sub(self, rhs: Self) -> Self {
intrinsics::unchecked_sub(self, rhs)
}
}
doc_comment! { doc_comment! {
concat!("Checked integer multiplication. Computes `self * rhs`, returning concat!("Checked integer multiplication. Computes `self * rhs`, returning
`None` if overflow occurred. `None` if overflow occurred.
@ -2982,6 +3067,23 @@ assert_eq!(", stringify!($SelfT), "::MAX.checked_mul(2), None);", $EndFeature, "
} }
} }
doc_comment! {
concat!("Unchecked integer multiplication. Computes `self * rhs`, assuming overflow
cannot occur. This results in undefined behavior when `self * rhs > ", stringify!($SelfT),
"::max_value()` or `self * rhs < ", stringify!($SelfT), "::min_value()`."),
#[unstable(
feature = "unchecked_math",
reason = "niche optimization path",
issue = "none",
)]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
pub unsafe fn unchecked_mul(self, rhs: Self) -> Self {
intrinsics::unchecked_mul(self, rhs)
}
}
doc_comment! { doc_comment! {
concat!("Checked integer division. Computes `self / rhs`, returning `None` concat!("Checked integer division. Computes `self / rhs`, returning `None`
if `rhs == 0`. if `rhs == 0`.
@ -3508,8 +3610,8 @@ Note that this is *not* the same as a rotate-left; the
RHS of a wrapping shift-left is restricted to the range RHS of a wrapping shift-left is restricted to the range
of the type, rather than the bits shifted out of the LHS of the type, rather than the bits shifted out of the LHS
being returned to the other end. The primitive integer being returned to the other end. The primitive integer
types all implement a `rotate_left` function, which may types all implement a [`rotate_left`](#method.rotate_left) function,
be what you want instead. which may be what you want instead.
# Examples # Examples
@ -3542,8 +3644,8 @@ Note that this is *not* the same as a rotate-right; the
RHS of a wrapping shift-right is restricted to the range RHS of a wrapping shift-right is restricted to the range
of the type, rather than the bits shifted out of the LHS of the type, rather than the bits shifted out of the LHS
being returned to the other end. The primitive integer being returned to the other end. The primitive integer
types all implement a `rotate_right` function, which may types all implement a [`rotate_right`](#method.rotate_right) function,
be what you want instead. which may be what you want instead.
# Examples # Examples

View File

@ -1357,6 +1357,15 @@ impl<'a, T> IntoIterator for &'a mut Option<T> {
#[stable(since = "1.12.0", feature = "option_from")] #[stable(since = "1.12.0", feature = "option_from")]
impl<T> From<T> for Option<T> { impl<T> From<T> for Option<T> {
/// Moves `val` into a new `Some`.
///
/// # Examples
///
/// ```
/// let o: Option<u8> = Option::from(67);
///
/// assert_eq!(Some(67), o);
/// ```
fn from(val: T) -> Option<T> { fn from(val: T) -> Option<T> {
Some(val) Some(val)
} }
@ -1364,6 +1373,27 @@ impl<T> From<T> for Option<T> {
#[stable(feature = "option_ref_from_ref_option", since = "1.30.0")] #[stable(feature = "option_ref_from_ref_option", since = "1.30.0")]
impl<'a, T> From<&'a Option<T>> for Option<&'a T> { impl<'a, T> From<&'a Option<T>> for Option<&'a T> {
/// Converts from `&Option<T>` to `Option<&T>`.
///
/// # Examples
///
/// Converts an `Option<`[`String`]`>` into an `Option<`[`usize`]`>`, preserving the original.
/// The [`map`] method takes the `self` argument by value, consuming the original,
/// so this technique uses `as_ref` to first take an `Option` to a reference
/// to the value inside the original.
///
/// [`map`]: ../../std/option/enum.Option.html#method.map
/// [`String`]: ../../std/string/struct.String.html
/// [`usize`]: ../../std/primitive.usize.html
///
/// ```
/// let s: Option<String> = Some(String::from("Hello, Rustaceans!"));
/// let o: Option<usize> = Option::from(&s).map(|ss: &String| ss.len());
///
/// println!("Can still print s: {:?}", s);
///
/// assert_eq!(o, Some(18));
/// ```
fn from(o: &'a Option<T>) -> Option<&'a T> { fn from(o: &'a Option<T>) -> Option<&'a T> {
o.as_ref() o.as_ref()
} }
@ -1371,6 +1401,21 @@ impl<'a, T> From<&'a Option<T>> for Option<&'a T> {
#[stable(feature = "option_ref_from_ref_option", since = "1.30.0")] #[stable(feature = "option_ref_from_ref_option", since = "1.30.0")]
impl<'a, T> From<&'a mut Option<T>> for Option<&'a mut T> { impl<'a, T> From<&'a mut Option<T>> for Option<&'a mut T> {
/// Converts from `&mut Option<T>` to `Option<&mut T>`.
///
/// # Examples
///
/// ```
/// let mut s = Some(String::from("Hello"));
/// let o: Option<&mut String> = Option::from(&mut s);
///
/// match o {
/// Some(t) => *t = String::from("Hello, Rustaceans!"),
/// None => (),
/// }
///
/// assert_eq!(s, Some(String::from("Hello, Rustaceans!")));
/// ```
fn from(o: &'a mut Option<T>) -> Option<&'a mut T> { fn from(o: &'a mut Option<T>) -> Option<&'a mut T> {
o.as_mut() o.as_mut()
} }

View File

@ -39,8 +39,12 @@ use crate::panic::{Location, PanicInfo};
#[lang = "panic"] // needed by codegen for panic on overflow and other `Assert` MIR terminators #[lang = "panic"] // needed by codegen for panic on overflow and other `Assert` MIR terminators
pub fn panic(expr: &str) -> ! { pub fn panic(expr: &str) -> ! {
if cfg!(feature = "panic_immediate_abort") { if cfg!(feature = "panic_immediate_abort") {
// remove `unsafe` (and safety comment) on bootstrap bump
#[cfg_attr(not(bootstrap), allow(unused_unsafe))]
// SAFETY: the `abort` intrinsic has no requirements to be called. // SAFETY: the `abort` intrinsic has no requirements to be called.
unsafe { super::intrinsics::abort() } unsafe {
super::intrinsics::abort()
}
} }
// Use Arguments::new_v1 instead of format_args!("{}", expr) to potentially // Use Arguments::new_v1 instead of format_args!("{}", expr) to potentially
@ -58,8 +62,12 @@ pub fn panic(expr: &str) -> ! {
#[lang = "panic_bounds_check"] // needed by codegen for panic on OOB array/slice access #[lang = "panic_bounds_check"] // needed by codegen for panic on OOB array/slice access
fn panic_bounds_check(index: usize, len: usize) -> ! { fn panic_bounds_check(index: usize, len: usize) -> ! {
if cfg!(feature = "panic_immediate_abort") { if cfg!(feature = "panic_immediate_abort") {
// remove `unsafe` (and safety comment) on bootstrap bump
#[cfg_attr(not(bootstrap), allow(unused_unsafe))]
// SAFETY: the `abort` intrinsic has no requirements to be called. // SAFETY: the `abort` intrinsic has no requirements to be called.
unsafe { super::intrinsics::abort() } unsafe {
super::intrinsics::abort()
}
} }
panic!("index out of bounds: the len is {} but the index is {}", len, index) panic!("index out of bounds: the len is {} but the index is {}", len, index)
@ -72,8 +80,12 @@ fn panic_bounds_check(index: usize, len: usize) -> ! {
#[track_caller] #[track_caller]
pub fn panic_fmt(fmt: fmt::Arguments<'_>) -> ! { pub fn panic_fmt(fmt: fmt::Arguments<'_>) -> ! {
if cfg!(feature = "panic_immediate_abort") { if cfg!(feature = "panic_immediate_abort") {
// remove `unsafe` (and safety comment) on bootstrap bump
#[cfg_attr(not(bootstrap), allow(unused_unsafe))]
// SAFETY: the `abort` intrinsic has no requirements to be called. // SAFETY: the `abort` intrinsic has no requirements to be called.
unsafe { super::intrinsics::abort() } unsafe {
super::intrinsics::abort()
}
} }
// NOTE This function never crosses the FFI boundary; it's a Rust-to-Rust call // NOTE This function never crosses the FFI boundary; it's a Rust-to-Rust call

View File

@ -110,11 +110,17 @@ mod mut_ptr;
/// as the compiler doesn't need to prove that it's sound to elide the /// as the compiler doesn't need to prove that it's sound to elide the
/// copy. /// copy.
/// ///
/// * It can be used to drop [pinned] data when `T` is not `repr(packed)`
/// (pinned data must not be moved before it is dropped).
///
/// Unaligned values cannot be dropped in place, they must be copied to an aligned /// Unaligned values cannot be dropped in place, they must be copied to an aligned
/// location first using [`ptr::read_unaligned`]. /// location first using [`ptr::read_unaligned`]. For packed structs, this move is
/// done automatically by the compiler. This means the fields of packed structs
/// are not dropped in-place.
/// ///
/// [`ptr::read`]: ../ptr/fn.read.html /// [`ptr::read`]: ../ptr/fn.read.html
/// [`ptr::read_unaligned`]: ../ptr/fn.read_unaligned.html /// [`ptr::read_unaligned`]: ../ptr/fn.read_unaligned.html
/// [pinned]: ../pin/index.html
/// ///
/// # Safety /// # Safety
/// ///
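A small, hedged sketch (not from the diff) of the aligned, in-place drop the documentation above describes; the slot is overwritten with `ptr::write` afterwards so the destructor never runs twice.

```rust
use std::ptr;

fn main() {
    let mut value = Some(String::from("hello"));
    // SAFETY: `value` is valid and properly aligned. The old contents are
    // dropped exactly once, then the slot is overwritten without running
    // the (already executed) destructor again.
    unsafe {
        ptr::drop_in_place(&mut value);
        ptr::write(&mut value, None);
    }
    assert!(value.is_none());
}
```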

View File

@ -3179,6 +3179,7 @@ macro_rules! is_empty {
$self.ptr.as_ptr() as *const T == $self.end $self.ptr.as_ptr() as *const T == $self.end
}; };
} }
// To get rid of some bounds checks (see `position`), we compute the length in a somewhat // To get rid of some bounds checks (see `position`), we compute the length in a somewhat
// unexpected way. (Tested by `codegen/slice-position-bounds-check`.) // unexpected way. (Tested by `codegen/slice-position-bounds-check`.)
macro_rules! len { macro_rules! len {
@ -3347,40 +3348,127 @@ macro_rules! iterator {
self.next_back() self.next_back()
} }
// We override the default implementation, which uses `try_fold`,
// because this simple implementation generates less LLVM IR and is
// faster to compile.
#[inline]
fn for_each<F>(mut self, mut f: F)
where
Self: Sized,
F: FnMut(Self::Item),
{
while let Some(x) = self.next() {
f(x);
}
}
// We override the default implementation, which uses `try_fold`,
// because this simple implementation generates less LLVM IR and is
// faster to compile.
#[inline]
fn all<F>(&mut self, mut f: F) -> bool
where
Self: Sized,
F: FnMut(Self::Item) -> bool,
{
while let Some(x) = self.next() {
if !f(x) {
return false;
}
}
true
}
// We override the default implementation, which uses `try_fold`,
// because this simple implementation generates less LLVM IR and is
// faster to compile.
#[inline]
fn any<F>(&mut self, mut f: F) -> bool
where
Self: Sized,
F: FnMut(Self::Item) -> bool,
{
while let Some(x) = self.next() {
if f(x) {
return true;
}
}
false
}
// We override the default implementation, which uses `try_fold`,
// because this simple implementation generates less LLVM IR and is
// faster to compile.
#[inline]
fn find<P>(&mut self, mut predicate: P) -> Option<Self::Item>
where
Self: Sized,
P: FnMut(&Self::Item) -> bool,
{
while let Some(x) = self.next() {
if predicate(&x) {
return Some(x);
}
}
None
}
// We override the default implementation, which uses `try_fold`,
// because this simple implementation generates less LLVM IR and is
// faster to compile.
#[inline]
fn find_map<B, F>(&mut self, mut f: F) -> Option<B>
where
Self: Sized,
F: FnMut(Self::Item) -> Option<B>,
{
while let Some(x) = self.next() {
if let Some(y) = f(x) {
return Some(y);
}
}
None
}
// We override the default implementation, which uses `try_fold`,
// because this simple implementation generates less LLVM IR and is
// faster to compile. Also, the `assume` avoids a bounds check.
#[inline] #[inline]
#[rustc_inherit_overflow_checks] #[rustc_inherit_overflow_checks]
fn position<P>(&mut self, mut predicate: P) -> Option<usize> where fn position<P>(&mut self, mut predicate: P) -> Option<usize> where
Self: Sized, Self: Sized,
P: FnMut(Self::Item) -> bool, P: FnMut(Self::Item) -> bool,
{ {
// The addition might panic on overflow.
let n = len!(self); let n = len!(self);
self.try_fold(0, move |i, x| { let mut i = 0;
if predicate(x) { Err(i) } while let Some(x) = self.next() {
else { Ok(i + 1) } if predicate(x) {
}).err()
.map(|i| {
unsafe { assume(i < n) }; unsafe { assume(i < n) };
i return Some(i);
}) }
i += 1;
}
None
} }
// We override the default implementation, which uses `try_fold`,
// because this simple implementation generates less LLVM IR and is
// faster to compile. Also, the `assume` avoids a bounds check.
#[inline] #[inline]
fn rposition<P>(&mut self, mut predicate: P) -> Option<usize> where fn rposition<P>(&mut self, mut predicate: P) -> Option<usize> where
P: FnMut(Self::Item) -> bool, P: FnMut(Self::Item) -> bool,
Self: Sized + ExactSizeIterator + DoubleEndedIterator Self: Sized + ExactSizeIterator + DoubleEndedIterator
{ {
// No need for an overflow check here, because `ExactSizeIterator` guarantees the length fits in a `usize`.
let n = len!(self); let n = len!(self);
self.try_rfold(n, move |i, x| { let mut i = n;
let i = i - 1; while let Some(x) = self.next_back() {
if predicate(x) { Err(i) } i -= 1;
else { Ok(i) } if predicate(x) {
}).err()
.map(|i| {
unsafe { assume(i < n) }; unsafe { assume(i < n) };
i return Some(i);
}) }
}
None
} }
$($extra)* $($extra)*

View File

@ -2139,6 +2139,24 @@ fn test_range_inclusive_nth_back() {
assert_eq!(ExactSizeIterator::is_empty(&r), true); assert_eq!(ExactSizeIterator::is_empty(&r), true);
} }
#[test]
fn test_range_len() {
assert_eq!((0..10_u8).len(), 10);
assert_eq!((9..10_u8).len(), 1);
assert_eq!((10..10_u8).len(), 0);
assert_eq!((11..10_u8).len(), 0);
assert_eq!((100..10_u8).len(), 0);
}
#[test]
fn test_range_inclusive_len() {
assert_eq!((0..=10_u8).len(), 11);
assert_eq!((9..=10_u8).len(), 2);
assert_eq!((10..=10_u8).len(), 1);
assert_eq!((11..=10_u8).len(), 0);
assert_eq!((100..=10_u8).len(), 0);
}
#[test] #[test]
fn test_range_step() { fn test_range_step() {
#![allow(deprecated)] #![allow(deprecated)]
@ -2509,42 +2527,91 @@ fn test_chain_fold() {
} }
#[test] #[test]
fn test_step_replace_unsigned() { fn test_steps_between() {
let mut x = 4u32; assert_eq!(Step::steps_between(&20_u8, &200_u8), Some(180_usize));
let y = x.replace_zero(); assert_eq!(Step::steps_between(&-20_i8, &80_i8), Some(100_usize));
assert_eq!(x, 0); assert_eq!(Step::steps_between(&-120_i8, &80_i8), Some(200_usize));
assert_eq!(y, 4); assert_eq!(Step::steps_between(&20_u32, &4_000_100_u32), Some(4_000_080_usize));
assert_eq!(Step::steps_between(&-20_i32, &80_i32), Some(100_usize));
assert_eq!(Step::steps_between(&-2_000_030_i32, &2_000_050_i32), Some(4_000_080_usize));
x = 5; // Skip u64/i64 to avoid differences with 32-bit vs 64-bit platforms
let y = x.replace_one();
assert_eq!(x, 1); assert_eq!(Step::steps_between(&20_u128, &200_u128), Some(180_usize));
assert_eq!(y, 5); assert_eq!(Step::steps_between(&-20_i128, &80_i128), Some(100_usize));
if cfg!(target_pointer_width = "64") {
assert_eq!(Step::steps_between(&10_u128, &0x1_0000_0000_0000_0009_u128), Some(usize::MAX));
}
assert_eq!(Step::steps_between(&10_u128, &0x1_0000_0000_0000_000a_u128), None);
assert_eq!(Step::steps_between(&10_i128, &0x1_0000_0000_0000_000a_i128), None);
assert_eq!(
Step::steps_between(&-0x1_0000_0000_0000_0000_i128, &0x1_0000_0000_0000_0000_i128,),
None,
);
} }
#[test] #[test]
fn test_step_replace_signed() { fn test_step_forward() {
let mut x = 4i32; assert_eq!(Step::forward_checked(55_u8, 200_usize), Some(255_u8));
let y = x.replace_zero(); assert_eq!(Step::forward_checked(252_u8, 200_usize), None);
assert_eq!(x, 0); assert_eq!(Step::forward_checked(0_u8, 256_usize), None);
assert_eq!(y, 4); assert_eq!(Step::forward_checked(-110_i8, 200_usize), Some(90_i8));
assert_eq!(Step::forward_checked(-110_i8, 248_usize), None);
assert_eq!(Step::forward_checked(-126_i8, 256_usize), None);
x = 5; assert_eq!(Step::forward_checked(35_u16, 100_usize), Some(135_u16));
let y = x.replace_one(); assert_eq!(Step::forward_checked(35_u16, 65500_usize), Some(u16::MAX));
assert_eq!(x, 1); assert_eq!(Step::forward_checked(36_u16, 65500_usize), None);
assert_eq!(y, 5); assert_eq!(Step::forward_checked(-110_i16, 200_usize), Some(90_i16));
assert_eq!(Step::forward_checked(-20_030_i16, 50_050_usize), Some(30_020_i16));
assert_eq!(Step::forward_checked(-10_i16, 40_000_usize), None);
assert_eq!(Step::forward_checked(-10_i16, 70_000_usize), None);
assert_eq!(Step::forward_checked(10_u128, 70_000_usize), Some(70_010_u128));
assert_eq!(Step::forward_checked(10_i128, 70_030_usize), Some(70_040_i128));
assert_eq!(
Step::forward_checked(0xffff_ffff_ffff_ffff__ffff_ffff_ffff_ff00_u128, 0xff_usize),
Some(u128::MAX),
);
assert_eq!(
Step::forward_checked(0xffff_ffff_ffff_ffff__ffff_ffff_ffff_ff00_u128, 0x100_usize),
None
);
assert_eq!(
Step::forward_checked(0x7fff_ffff_ffff_ffff__ffff_ffff_ffff_ff00_i128, 0xff_usize),
Some(i128::MAX),
);
assert_eq!(
Step::forward_checked(0x7fff_ffff_ffff_ffff__ffff_ffff_ffff_ff00_i128, 0x100_usize),
None
);
} }
#[test] #[test]
fn test_step_replace_no_between() { fn test_step_backward() {
let mut x = 4u128; assert_eq!(Step::backward_checked(255_u8, 200_usize), Some(55_u8));
let y = x.replace_zero(); assert_eq!(Step::backward_checked(100_u8, 200_usize), None);
assert_eq!(x, 0); assert_eq!(Step::backward_checked(255_u8, 256_usize), None);
assert_eq!(y, 4); assert_eq!(Step::backward_checked(90_i8, 200_usize), Some(-110_i8));
assert_eq!(Step::backward_checked(110_i8, 248_usize), None);
assert_eq!(Step::backward_checked(127_i8, 256_usize), None);
x = 5; assert_eq!(Step::backward_checked(135_u16, 100_usize), Some(35_u16));
let y = x.replace_one(); assert_eq!(Step::backward_checked(u16::MAX, 65500_usize), Some(35_u16));
assert_eq!(x, 1); assert_eq!(Step::backward_checked(10_u16, 11_usize), None);
assert_eq!(y, 5); assert_eq!(Step::backward_checked(90_i16, 200_usize), Some(-110_i16));
assert_eq!(Step::backward_checked(30_020_i16, 50_050_usize), Some(-20_030_i16));
assert_eq!(Step::backward_checked(-10_i16, 40_000_usize), None);
assert_eq!(Step::backward_checked(-10_i16, 70_000_usize), None);
assert_eq!(Step::backward_checked(70_010_u128, 70_000_usize), Some(10_u128));
assert_eq!(Step::backward_checked(70_020_i128, 70_030_usize), Some(-10_i128));
assert_eq!(Step::backward_checked(10_u128, 7_usize), Some(3_u128));
assert_eq!(Step::backward_checked(10_u128, 11_usize), None);
assert_eq!(
Step::backward_checked(-0x7fff_ffff_ffff_ffff__ffff_ffff_ffff_ff00_i128, 0x100_usize),
Some(i128::MIN)
);
} }
#[test] #[test]

View File

@ -22,6 +22,7 @@
#![feature(slice_partition_at_index)] #![feature(slice_partition_at_index)]
#![feature(specialization)] #![feature(specialization)]
#![feature(step_trait)] #![feature(step_trait)]
#![feature(step_trait_ext)]
#![feature(str_internals)] #![feature(str_internals)]
#![feature(test)] #![feature(test)]
#![feature(trusted_len)] #![feature(trusted_len)]

View File

@ -327,5 +327,8 @@ pub unsafe fn cleanup(payload: *mut u8) -> Box<dyn Any + Send> {
#[lang = "eh_personality"] #[lang = "eh_personality"]
#[cfg(not(test))] #[cfg(not(test))]
fn rust_eh_personality() { fn rust_eh_personality() {
unsafe { core::intrinsics::abort() } #[cfg_attr(not(bootstrap), allow(unused_unsafe))] // remove `unsafe` on bootstrap bump
unsafe {
core::intrinsics::abort()
}
} }

View File

@ -202,10 +202,16 @@ impl Clone for Literal {
} }
} }
// FIXME(eddyb) `Literal` should not expose internal `Debug` impls.
impl fmt::Debug for Literal { impl fmt::Debug for Literal {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(&self.debug()) f.debug_struct("Literal")
// format the kind without quotes, as in `kind: Float`
.field("kind", &format_args!("{}", &self.debug_kind()))
.field("symbol", &self.symbol())
// format `Some("...")` on one line even in {:#?} mode
.field("suffix", &format_args!("{:?}", &self.suffix()))
.field("span", &self.span())
.finish()
} }
} }

View File

@ -103,8 +103,9 @@ macro_rules! with_api {
Literal { Literal {
fn drop($self: $S::Literal); fn drop($self: $S::Literal);
fn clone($self: &$S::Literal) -> $S::Literal; fn clone($self: &$S::Literal) -> $S::Literal;
// FIXME(eddyb) `Literal` should not expose internal `Debug` impls. fn debug_kind($self: &$S::Literal) -> String;
fn debug($self: &$S::Literal) -> String; fn symbol($self: &$S::Literal) -> String;
fn suffix($self: &$S::Literal) -> Option<String>;
fn integer(n: &str) -> $S::Literal; fn integer(n: &str) -> $S::Literal;
fn typed_integer(n: &str, kind: &str) -> $S::Literal; fn typed_integer(n: &str, kind: &str) -> $S::Literal;
fn float(n: &str) -> $S::Literal; fn float(n: &str) -> $S::Literal;

View File

@ -158,6 +158,13 @@ impl fmt::Debug for TokenStream {
} }
} }
#[stable(feature = "proc_macro_token_stream_default", since = "1.45.0")]
impl Default for TokenStream {
fn default() -> Self {
TokenStream::new()
}
}
#[unstable(feature = "proc_macro_quote", issue = "54722")] #[unstable(feature = "proc_macro_quote", issue = "54722")]
pub use quote::{quote, quote_span}; pub use quote::{quote, quote_span};
@ -1134,7 +1141,6 @@ impl fmt::Display for Literal {
#[stable(feature = "proc_macro_lib2", since = "1.29.0")] #[stable(feature = "proc_macro_lib2", since = "1.29.0")]
impl fmt::Debug for Literal { impl fmt::Debug for Literal {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// FIXME(eddyb) `Literal` should not expose internal `Debug` impls.
self.0.fmt(f) self.0.fmt(f)
} }
} }

View File

@ -6,8 +6,7 @@ use crate::tokenstream::TokenTree;
use rustc_data_structures::sync::Lrc; use rustc_data_structures::sync::Lrc;
use rustc_lexer::unescape::{unescape_byte, unescape_char}; use rustc_lexer::unescape::{unescape_byte, unescape_char};
use rustc_lexer::unescape::{unescape_byte_str, unescape_str}; use rustc_lexer::unescape::{unescape_byte_literal, unescape_literal, Mode};
use rustc_lexer::unescape::{unescape_raw_byte_str, unescape_raw_str};
use rustc_span::symbol::{kw, sym, Symbol}; use rustc_span::symbol::{kw, sym, Symbol};
use rustc_span::Span; use rustc_span::Span;
@ -59,45 +58,53 @@ impl LitKind {
// new symbol because the string in the LitKind is different to the // new symbol because the string in the LitKind is different to the
// string in the token. // string in the token.
let s = symbol.as_str(); let s = symbol.as_str();
let symbol = if s.contains(&['\\', '\r'][..]) { let symbol =
let mut buf = String::with_capacity(s.len()); if s.contains(&['\\', '\r'][..]) {
let mut error = Ok(()); let mut buf = String::with_capacity(s.len());
unescape_str(&s, &mut |_, unescaped_char| match unescaped_char { let mut error = Ok(());
Ok(c) => buf.push(c), unescape_literal(&s, Mode::Str, &mut |_, unescaped_char| {
Err(_) => error = Err(LitError::LexerError), match unescaped_char {
}); Ok(c) => buf.push(c),
error?; Err(_) => error = Err(LitError::LexerError),
Symbol::intern(&buf) }
} else { });
symbol error?;
}; Symbol::intern(&buf)
} else {
symbol
};
LitKind::Str(symbol, ast::StrStyle::Cooked) LitKind::Str(symbol, ast::StrStyle::Cooked)
} }
token::StrRaw(n) => { token::StrRaw(n) => {
// Ditto. // Ditto.
let s = symbol.as_str(); let s = symbol.as_str();
let symbol = if s.contains('\r') { let symbol =
let mut buf = String::with_capacity(s.len()); if s.contains('\r') {
let mut error = Ok(()); let mut buf = String::with_capacity(s.len());
unescape_raw_str(&s, &mut |_, unescaped_char| match unescaped_char { let mut error = Ok(());
Ok(c) => buf.push(c), unescape_literal(&s, Mode::RawStr, &mut |_, unescaped_char| {
Err(_) => error = Err(LitError::LexerError), match unescaped_char {
}); Ok(c) => buf.push(c),
error?; Err(_) => error = Err(LitError::LexerError),
buf.shrink_to_fit(); }
Symbol::intern(&buf) });
} else { error?;
symbol buf.shrink_to_fit();
}; Symbol::intern(&buf)
} else {
symbol
};
LitKind::Str(symbol, ast::StrStyle::Raw(n)) LitKind::Str(symbol, ast::StrStyle::Raw(n))
} }
token::ByteStr => { token::ByteStr => {
let s = symbol.as_str(); let s = symbol.as_str();
let mut buf = Vec::with_capacity(s.len()); let mut buf = Vec::with_capacity(s.len());
let mut error = Ok(()); let mut error = Ok(());
unescape_byte_str(&s, &mut |_, unescaped_byte| match unescaped_byte { unescape_byte_literal(&s, Mode::ByteStr, &mut |_, unescaped_byte| {
Ok(c) => buf.push(c), match unescaped_byte {
Err(_) => error = Err(LitError::LexerError), Ok(c) => buf.push(c),
Err(_) => error = Err(LitError::LexerError),
}
}); });
error?; error?;
buf.shrink_to_fit(); buf.shrink_to_fit();
@ -108,9 +115,11 @@ impl LitKind {
let bytes = if s.contains('\r') { let bytes = if s.contains('\r') {
let mut buf = Vec::with_capacity(s.len()); let mut buf = Vec::with_capacity(s.len());
let mut error = Ok(()); let mut error = Ok(());
unescape_raw_byte_str(&s, &mut |_, unescaped_byte| match unescaped_byte { unescape_byte_literal(&s, Mode::RawByteStr, &mut |_, unescaped_byte| {
Ok(c) => buf.push(c), match unescaped_byte {
Err(_) => error = Err(LitError::LexerError), Ok(c) => buf.push(c),
Err(_) => error = Err(LitError::LexerError),
}
}); });
error?; error?;
buf.shrink_to_fit(); buf.shrink_to_fit();
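The callback shape of the new unified entry point, sketched from the hunk above; it assumes the in-tree `rustc_lexer` crate with the `unescape_literal` function and `Mode` enum shown in the diff.

```rust
use rustc_lexer::unescape::{unescape_literal, Mode};

// Cook a (quote-stripped) string literal body, collecting unescaped chars.
fn cook_str(s: &str) -> Result<String, ()> {
    let mut buf = String::with_capacity(s.len());
    let mut error = Ok(());
    unescape_literal(s, Mode::Str, &mut |_, unescaped_char| match unescaped_char {
        Ok(c) => buf.push(c),
        Err(_) => error = Err(()),
    });
    error.map(|()| buf)
}
```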

View File

@ -33,7 +33,7 @@
#![feature(array_value_iter)] #![feature(array_value_iter)]
#![feature(crate_visibility_modifier)] #![feature(crate_visibility_modifier)]
#![feature(marker_trait_attr)] #![feature(marker_trait_attr)]
#![feature(specialization)] #![feature(specialization)] // FIXME: min_specialization does not work
#![feature(or_patterns)] #![feature(or_patterns)]
#![recursion_limit = "256"] #![recursion_limit = "256"]

View File

@ -572,6 +572,35 @@ impl<'a> AstValidator<'a> {
.emit(); .emit();
} }
fn check_nomangle_item_asciionly(&self, ident: Ident, item_span: Span) {
if ident.name.as_str().is_ascii() {
return;
}
let head_span = self.session.source_map().guess_head_span(item_span);
struct_span_err!(
self.session,
head_span,
E0754,
"`#[no_mangle]` requires ASCII identifier"
)
.emit();
}
fn check_mod_file_item_asciionly(&self, ident: Ident) {
if ident.name.as_str().is_ascii() {
return;
}
struct_span_err!(
self.session,
ident.span,
E0754,
"trying to load file for module `{}` with non ascii identifer name",
ident.name
)
.help("consider using `#[path]` attribute to specify filesystem path")
.emit();
}
fn deny_generic_params(&self, generics: &Generics, ident_span: Span) { fn deny_generic_params(&self, generics: &Generics, ident_span: Span) {
if !generics.params.is_empty() { if !generics.params.is_empty() {
struct_span_err!( struct_span_err!(
@ -866,6 +895,10 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
self.has_proc_macro_decls = true; self.has_proc_macro_decls = true;
} }
if attr::contains_name(&item.attrs, sym::no_mangle) {
self.check_nomangle_item_asciionly(item.ident, item.span);
}
match item.kind { match item.kind {
ItemKind::Impl { ItemKind::Impl {
unsafety, unsafety,
@ -992,9 +1025,11 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
walk_list!(self, visit_attribute, &item.attrs); walk_list!(self, visit_attribute, &item.attrs);
return; return;
} }
ItemKind::Mod(_) => { ItemKind::Mod(Mod { inline, .. }) => {
// Ensure that `path` attributes on modules are recorded as used (cf. issue #35584). // Ensure that `path` attributes on modules are recorded as used (cf. issue #35584).
attr::first_attr_value_str_by_name(&item.attrs, sym::path); if !inline && !attr::contains_name(&item.attrs, sym::path) {
self.check_mod_file_item_asciionly(item.ident);
}
} }
ItemKind::Union(ref vdata, _) => { ItemKind::Union(ref vdata, _) => {
if let VariantData::Tuple(..) | VariantData::Unit(..) = vdata { if let VariantData::Tuple(..) | VariantData::Unit(..) = vdata {

View File

@ -1,4 +1,4 @@
// Simply gives a rought count of the number of nodes in an AST. // Simply gives a rough count of the number of nodes in an AST.
use rustc_ast::ast::*; use rustc_ast::ast::*;
use rustc_ast::visit::*; use rustc_ast::visit::*;

View File

@ -634,7 +634,7 @@ pub fn eval_condition(
[NestedMetaItem::Literal(Lit { span, .. }) [NestedMetaItem::Literal(Lit { span, .. })
| NestedMetaItem::MetaItem(MetaItem { span, .. })] => { | NestedMetaItem::MetaItem(MetaItem { span, .. })] => {
sess.span_diagnostic sess.span_diagnostic
.struct_span_err(*span, &*format!("expected a version literal")) .struct_span_err(*span, "expected a version literal")
.emit(); .emit();
return false; return false;
} }

View File

@ -394,6 +394,7 @@ pub(crate) unsafe fn optimize_with_new_llvm_pass_manager(
config.vectorize_slp, config.vectorize_slp,
config.vectorize_loop, config.vectorize_loop,
config.no_builtins, config.no_builtins,
config.emit_lifetime_markers,
sanitizer_options.as_ref(), sanitizer_options.as_ref(),
pgo_gen_path.as_ref().map_or(std::ptr::null(), |s| s.as_ptr()), pgo_gen_path.as_ref().map_or(std::ptr::null(), |s| s.as_ptr()),
pgo_use_path.as_ref().map_or(std::ptr::null(), |s| s.as_ptr()), pgo_use_path.as_ref().map_or(std::ptr::null(), |s| s.as_ptr()),
@ -934,10 +935,10 @@ pub unsafe fn with_llvm_pmb(
llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 25); llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 25);
} }
(llvm::CodeGenOptLevel::None, ..) => { (llvm::CodeGenOptLevel::None, ..) => {
llvm::LLVMRustAddAlwaysInlinePass(builder, false); llvm::LLVMRustAddAlwaysInlinePass(builder, config.emit_lifetime_markers);
} }
(llvm::CodeGenOptLevel::Less, ..) => { (llvm::CodeGenOptLevel::Less, ..) => {
llvm::LLVMRustAddAlwaysInlinePass(builder, true); llvm::LLVMRustAddAlwaysInlinePass(builder, config.emit_lifetime_markers);
} }
(llvm::CodeGenOptLevel::Default, ..) => { (llvm::CodeGenOptLevel::Default, ..) => {
llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 225); llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 225);

View File

@ -18,7 +18,6 @@ use rustc_data_structures::small_c_str::SmallCStr;
use rustc_hir::def_id::DefId; use rustc_hir::def_id::DefId;
use rustc_middle::ty::layout::TyAndLayout; use rustc_middle::ty::layout::TyAndLayout;
use rustc_middle::ty::{self, Ty, TyCtxt}; use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_session::config::{self, Sanitizer};
use rustc_target::abi::{self, Align, Size}; use rustc_target::abi::{self, Align, Size};
use rustc_target::spec::{HasTargetSpec, Target}; use rustc_target::spec::{HasTargetSpec, Target};
use std::borrow::Cow; use std::borrow::Cow;
@ -1243,14 +1242,7 @@ impl Builder<'a, 'll, 'tcx> {
return; return;
} }
let opts = &self.cx.sess().opts; if !self.cx().sess().emit_lifetime_markers() {
let emit = match opts.debugging_opts.sanitizer {
// Some sanitizer use lifetime intrinsics. When they are in use,
// emit lifetime intrinsics regardless of optimization level.
Some(Sanitizer::Address | Sanitizer::Memory) => true,
_ => opts.optimize != config::OptLevel::No,
};
if !emit {
return; return;
} }

View File

@ -2000,6 +2000,7 @@ extern "C" {
SLPVectorize: bool, SLPVectorize: bool,
LoopVectorize: bool, LoopVectorize: bool,
DisableSimplifyLibCalls: bool, DisableSimplifyLibCalls: bool,
EmitLifetimeMarkers: bool,
SanitizerOptions: Option<&SanitizerOptions>, SanitizerOptions: Option<&SanitizerOptions>,
PGOGenPath: *const c_char, PGOGenPath: *const c_char,
PGOUsePath: *const c_char, PGOUsePath: *const c_char,

View File

@ -1179,6 +1179,28 @@ fn add_pre_link_args(
cmd.args(&sess.opts.debugging_opts.pre_link_args); cmd.args(&sess.opts.debugging_opts.pre_link_args);
} }
/// Add a link script embedded in the target, if applicable.
fn add_link_script(cmd: &mut dyn Linker, sess: &Session, tmpdir: &Path, crate_type: CrateType) {
match (crate_type, &sess.target.target.options.link_script) {
(CrateType::Cdylib | CrateType::Executable, Some(script)) => {
if !sess.target.target.options.linker_is_gnu {
sess.fatal("can only use link script when linking with GNU-like linker");
}
let file_name = ["rustc", &sess.target.target.llvm_target, "linkfile.ld"].join("-");
let path = tmpdir.join(file_name);
if let Err(e) = fs::write(&path, script) {
sess.fatal(&format!("failed to write link script to {}: {}", path.display(), e));
}
cmd.arg("--script");
cmd.arg(path);
}
_ => {}
}
}
/// Add arbitrary "user defined" args defined from command line and by `#[link_args]` attributes. /// Add arbitrary "user defined" args defined from command line and by `#[link_args]` attributes.
/// FIXME: Determine where exactly these args need to be inserted. /// FIXME: Determine where exactly these args need to be inserted.
fn add_user_defined_link_args( fn add_user_defined_link_args(
@ -1421,8 +1443,11 @@ fn linker_with_args<'a, B: ArchiveBuilder<'a>>(
// NO-OPT-OUT, OBJECT-FILES-MAYBE, CUSTOMIZATION-POINT // NO-OPT-OUT, OBJECT-FILES-MAYBE, CUSTOMIZATION-POINT
add_pre_link_args(cmd, sess, flavor, crate_type); add_pre_link_args(cmd, sess, flavor, crate_type);
// NO-OPT-OUT
add_link_script(cmd, sess, tmpdir, crate_type);
// NO-OPT-OUT, OBJECT-FILES-NO, AUDIT-ORDER // NO-OPT-OUT, OBJECT-FILES-NO, AUDIT-ORDER
if sess.target.target.options.is_like_fuchsia { if sess.target.target.options.is_like_fuchsia && crate_type == CrateType::Executable {
let prefix = match sess.opts.debugging_opts.sanitizer { let prefix = match sess.opts.debugging_opts.sanitizer {
Some(Sanitizer::Address) => "asan/", Some(Sanitizer::Address) => "asan/",
_ => "", _ => "",

View File

@ -21,7 +21,7 @@ use rustc_errors::{DiagnosticId, FatalError, Handler, Level};
use rustc_fs_util::link_or_copy; use rustc_fs_util::link_or_copy;
use rustc_hir::def_id::{CrateNum, LOCAL_CRATE}; use rustc_hir::def_id::{CrateNum, LOCAL_CRATE};
use rustc_incremental::{ use rustc_incremental::{
copy_cgu_workproducts_to_incr_comp_cache_dir, in_incr_comp_dir, in_incr_comp_dir_sess, copy_cgu_workproduct_to_incr_comp_cache_dir, in_incr_comp_dir, in_incr_comp_dir_sess,
}; };
use rustc_middle::dep_graph::{WorkProduct, WorkProductId}; use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
use rustc_middle::middle::cstore::EncodedMetadata; use rustc_middle::middle::cstore::EncodedMetadata;
@ -110,6 +110,7 @@ pub struct ModuleConfig {
pub merge_functions: bool, pub merge_functions: bool,
pub inline_threshold: Option<usize>, pub inline_threshold: Option<usize>,
pub new_llvm_pass_manager: bool, pub new_llvm_pass_manager: bool,
pub emit_lifetime_markers: bool,
} }
impl ModuleConfig { impl ModuleConfig {
@ -244,6 +245,7 @@ impl ModuleConfig {
inline_threshold: sess.opts.cg.inline_threshold, inline_threshold: sess.opts.cg.inline_threshold,
new_llvm_pass_manager: sess.opts.debugging_opts.new_llvm_pass_manager, new_llvm_pass_manager: sess.opts.debugging_opts.new_llvm_pass_manager,
emit_lifetime_markers: sess.emit_lifetime_markers(),
} }
} }
@ -465,17 +467,13 @@ fn copy_all_cgu_workproducts_to_incr_comp_cache_dir(
return work_products; return work_products;
} }
let _timer = sess.timer("incr_comp_copy_cgu_workproducts"); let _timer = sess.timer("copy_all_cgu_workproducts_to_incr_comp_cache_dir");
for module in compiled_modules.modules.iter().filter(|m| m.kind == ModuleKind::Regular) { for module in compiled_modules.modules.iter().filter(|m| m.kind == ModuleKind::Regular) {
let mut files = vec![]; let path = module.object.as_ref().map(|path| path.clone());
if let Some(ref path) = module.object {
files.push(path.clone());
}
if let Some((id, product)) = if let Some((id, product)) =
copy_cgu_workproducts_to_incr_comp_cache_dir(sess, &module.name, &files) copy_cgu_workproduct_to_incr_comp_cache_dir(sess, &module.name, &path)
{ {
work_products.insert(id, product); work_products.insert(id, product);
} }
@ -817,7 +815,7 @@ fn execute_copy_from_cache_work_item<B: ExtraBackendMethods>(
) -> Result<WorkItemResult<B>, FatalError> { ) -> Result<WorkItemResult<B>, FatalError> {
let incr_comp_session_dir = cgcx.incr_comp_session_dir.as_ref().unwrap(); let incr_comp_session_dir = cgcx.incr_comp_session_dir.as_ref().unwrap();
let mut object = None; let mut object = None;
for saved_file in &module.source.saved_files { if let Some(saved_file) = module.source.saved_file {
let obj_out = cgcx.output_filenames.temp_path(OutputType::Object, Some(&module.name)); let obj_out = cgcx.output_filenames.temp_path(OutputType::Object, Some(&module.name));
object = Some(obj_out.clone()); object = Some(obj_out.clone());
let source_file = in_incr_comp_dir(&incr_comp_session_dir, &saved_file); let source_file = in_incr_comp_dir(&incr_comp_session_dir, &saved_file);

View File

@ -198,7 +198,6 @@ pub fn push_debuginfo_type_name<'tcx>(
ty::Error ty::Error
| ty::Infer(_) | ty::Infer(_)
| ty::Placeholder(..) | ty::Placeholder(..)
| ty::UnnormalizedProjection(..)
| ty::Projection(..) | ty::Projection(..)
| ty::Bound(..) | ty::Bound(..)
| ty::Opaque(..) | ty::Opaque(..)

View File

@ -28,7 +28,7 @@ rustc_index = { path = "../librustc_index", package = "rustc_index" }
bitflags = "1.2.1" bitflags = "1.2.1"
measureme = "0.7.1" measureme = "0.7.1"
libc = "0.2" libc = "0.2"
stacker = "0.1.6" stacker = "0.1.9"
[dependencies.parking_lot] [dependencies.parking_lot]
version = "0.10" version = "0.10"

View File

@ -12,7 +12,7 @@
#![feature(generators)] #![feature(generators)]
#![feature(generator_trait)] #![feature(generator_trait)]
#![feature(fn_traits)] #![feature(fn_traits)]
#![feature(specialization)] #![feature(min_specialization)]
#![feature(optin_builtin_traits)] #![feature(optin_builtin_traits)]
#![feature(nll)] #![feature(nll)]
#![feature(allow_internal_unstable)] #![feature(allow_internal_unstable)]

View File

@ -52,7 +52,7 @@ impl<T: PartialEq> TinyList<T> {
if &e.data == data { if &e.data == data {
return true; return true;
} }
elem = e.next.as_ref().map(|e| &**e); elem = e.next.as_deref();
} }
false false
} }
@ -62,7 +62,7 @@ impl<T: PartialEq> TinyList<T> {
let (mut elem, mut count) = (self.head.as_ref(), 0); let (mut elem, mut count) = (self.head.as_ref(), 0);
while let Some(ref e) = elem { while let Some(ref e) = elem {
count += 1; count += 1;
elem = e.next.as_ref().map(|e| &**e); elem = e.next.as_deref();
} }
count count
} }
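For reference, a hedged one-liner (not from the diff) showing that `as_deref` is exactly the `as_ref().map(|e| &**e)` pattern it replaces here:

```rust
fn main() {
    let head: Option<Box<i32>> = Some(Box::new(5));
    let r: Option<&i32> = head.as_deref();
    assert_eq!(r, Some(&5));
    // Equivalent to the pattern the diff removes:
    assert_eq!(head.as_ref().map(|e| &**e), r);
}
```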

View File

@ -1138,6 +1138,16 @@ pub fn catch_fatal_errors<F: FnOnce() -> R, R>(f: F) -> Result<R, ErrorReported>
}) })
} }
/// Variant of `catch_fatal_errors` for the `interface::Result` return type
/// that also computes the exit code.
pub fn catch_with_exit_code(f: impl FnOnce() -> interface::Result<()>) -> i32 {
let result = catch_fatal_errors(f).and_then(|result| result);
match result {
Ok(()) => EXIT_SUCCESS,
Err(_) => EXIT_FAILURE,
}
}
lazy_static! { lazy_static! {
static ref DEFAULT_HOOK: Box<dyn Fn(&panic::PanicInfo<'_>) + Sync + Send + 'static> = { static ref DEFAULT_HOOK: Box<dyn Fn(&panic::PanicInfo<'_>) + Sync + Send + 'static> = {
let hook = panic::take_hook(); let hook = panic::take_hook();
@ -1228,12 +1238,12 @@ pub fn init_rustc_env_logger() {
env_logger::init_from_env("RUSTC_LOG"); env_logger::init_from_env("RUSTC_LOG");
} }
pub fn main() { pub fn main() -> ! {
let start = Instant::now(); let start = Instant::now();
init_rustc_env_logger(); init_rustc_env_logger();
let mut callbacks = TimePassesCallbacks::default(); let mut callbacks = TimePassesCallbacks::default();
install_ice_hook(); install_ice_hook();
let result = catch_fatal_errors(|| { let exit_code = catch_with_exit_code(|| {
let args = env::args_os() let args = env::args_os()
.enumerate() .enumerate()
.map(|(i, arg)| { .map(|(i, arg)| {
@ -1246,13 +1256,8 @@ pub fn main() {
}) })
.collect::<Vec<_>>(); .collect::<Vec<_>>();
run_compiler(&args, &mut callbacks, None, None) run_compiler(&args, &mut callbacks, None, None)
}) });
.and_then(|result| result);
let exit_code = match result {
Ok(_) => EXIT_SUCCESS,
Err(_) => EXIT_FAILURE,
};
// The extra `\t` is necessary to align this label with the others. // The extra `\t` is necessary to align this label with the others.
print_time_passes_entry(callbacks.time_passes, "\ttotal", start.elapsed()); print_time_passes_entry(callbacks.time_passes, "\ttotal", start.elapsed());
process::exit(exit_code); process::exit(exit_code)
} }

View File

@ -120,6 +120,7 @@ E0223: include_str!("./error_codes/E0223.md"),
E0224: include_str!("./error_codes/E0224.md"), E0224: include_str!("./error_codes/E0224.md"),
E0225: include_str!("./error_codes/E0225.md"), E0225: include_str!("./error_codes/E0225.md"),
E0226: include_str!("./error_codes/E0226.md"), E0226: include_str!("./error_codes/E0226.md"),
E0228: include_str!("./error_codes/E0228.md"),
E0229: include_str!("./error_codes/E0229.md"), E0229: include_str!("./error_codes/E0229.md"),
E0230: include_str!("./error_codes/E0230.md"), E0230: include_str!("./error_codes/E0230.md"),
E0231: include_str!("./error_codes/E0231.md"), E0231: include_str!("./error_codes/E0231.md"),
@ -435,6 +436,7 @@ E0750: include_str!("./error_codes/E0750.md"),
E0751: include_str!("./error_codes/E0751.md"), E0751: include_str!("./error_codes/E0751.md"),
E0752: include_str!("./error_codes/E0752.md"), E0752: include_str!("./error_codes/E0752.md"),
E0753: include_str!("./error_codes/E0753.md"), E0753: include_str!("./error_codes/E0753.md"),
E0754: include_str!("./error_codes/E0754.md"),
E0755: include_str!("./error_codes/E0755.md"), E0755: include_str!("./error_codes/E0755.md"),
; ;
// E0006, // merged with E0005 // E0006, // merged with E0005
@ -483,7 +485,6 @@ E0755: include_str!("./error_codes/E0755.md"),
// E0218, // no associated type defined // E0218, // no associated type defined
// E0219, // associated type defined in higher-ranked supertrait // E0219, // associated type defined in higher-ranked supertrait
E0227, // ambiguous lifetime bound, explicit lifetime bound required E0227, // ambiguous lifetime bound, explicit lifetime bound required
E0228, // explicit lifetime bound required
// E0233, // E0233,
// E0234, // E0234,
// E0235, // structure constructor specifies a structure of type but // E0235, // structure constructor specifies a structure of type but

View File

@ -0,0 +1,40 @@
The lifetime bound for this object type cannot be deduced from context and must
be specified.
Erroneous code example:
```compile_fail,E0228
trait Trait { }
struct TwoBounds<'a, 'b, T: Sized + 'a + 'b> {
x: &'a i32,
y: &'b i32,
z: T,
}
type Foo<'a, 'b> = TwoBounds<'a, 'b, dyn Trait>;
```
When a trait object is used as a type argument of a generic type, Rust will try
to infer its lifetime if unspecified. However, this isn't possible when the
containing type has more than one lifetime bound.
The above example can be resolved by either reducing the number of lifetime
bounds to one or by making the trait object lifetime explicit, like so:
```
trait Trait { }
struct TwoBounds<'a, 'b, T: Sized + 'a + 'b> {
x: &'a i32,
y: &'b i32,
z: T,
}
type Foo<'a, 'b> = TwoBounds<'a, 'b, dyn Trait + 'b>;
```
For more information, see [RFC 599] and its amendment [RFC 1156].
[RFC 599]: https://github.com/rust-lang/rfcs/blob/master/text/0599-default-object-bound.md
[RFC 1156]: https://github.com/rust-lang/rfcs/blob/master/text/1156-adjust-default-object-bounds.md

View File

@ -64,7 +64,7 @@ impl Trait for Foo {
} }
``` ```
The nightly feature [Arbintrary self types][AST] extends the accepted The nightly feature [Arbitrary self types][AST] extends the accepted
set of receiver types to also include any type that can dereference to set of receiver types to also include any type that can dereference to
`Self`: `Self`:

View File

@ -7,7 +7,7 @@ Example of erroneous code:
# fn satisfied(n: usize) -> bool { n % 23 == 0 } # fn satisfied(n: usize) -> bool { n % 23 == 0 }
let result = while true { let result = while true {
if satisfied(i) { if satisfied(i) {
break 2*i; // error: `break` with value from a `while` loop break 2 * i; // error: `break` with value from a `while` loop
} }
i += 1; i += 1;
}; };
@ -22,9 +22,9 @@ Make sure `break value;` statements only occur in `loop` loops:
``` ```
# let mut i = 1; # let mut i = 1;
# fn satisfied(n: usize) -> bool { n % 23 == 0 } # fn satisfied(n: usize) -> bool { n % 23 == 0 }
let result = loop { // ok! let result = loop { // This is now a "loop" loop.
if satisfied(i) { if satisfied(i) {
break 2*i; break 2 * i; // ok!
} }
i += 1; i += 1;
}; };

View File

@ -1,7 +1,4 @@
When matching against an exclusive range, the compiler verifies that the range The lower bound of a range pattern wasn't less than its upper bound.
is non-empty. Exclusive range patterns include the start point but not the end
point, so this is equivalent to requiring the start of the range to be less
than the end of the range.
Erroneous code example: Erroneous code example:
@ -17,3 +14,8 @@ fn main() {
} }
} }
``` ```
When matching against an exclusive range, the compiler verifies that the range
is non-empty. Exclusive range patterns include the start point but not the end
point, so this is equivalent to requiring the start of the range to be less
than the end of the range.

View File

@ -1,4 +1,4 @@
In a `fn` type, a lifetime appears only in the return type, In a `fn` type, a lifetime appears only in the return type
and not in the argument types. and not in the argument types.
Erroneous code example: Erroneous code example:
@ -10,8 +10,11 @@ fn main() {
} }
``` ```
To fix this issue, either use the lifetime in the arguments, or use The problem here is that the lifetime isn't constrained by any of the arguments,
`'static`. Example: making it impossible to determine how long it's supposed to live.
To fix this issue, either use the lifetime in the arguments, or use the
`'static` lifetime. Example:
``` ```
fn main() { fn main() {

View File

@ -1,5 +1,5 @@
A lifetime appears only in an associated-type binding, A lifetime is only present in an associated-type binding, and not in the input
and not in the input types to the trait. types to the trait.
Erroneous code example: Erroneous code example:

View File

@ -1,6 +1,8 @@
The value of `N` that was specified for `repr(align(N))` was not a power The value of `N` that was specified for `repr(align(N))` was not a power
of two, or was greater than 2^29. of two, or was greater than 2^29.
Erroneous code example:
```compile_fail,E0589 ```compile_fail,E0589
#[repr(align(15))] // error: invalid `repr(align)` attribute: not a power of two #[repr(align(15))] // error: invalid `repr(align)` attribute: not a power of two
enum Foo { enum Foo {

View File

@ -0,0 +1,33 @@
A non-ASCII identifier was used in an invalid context.
Erroneous code example:
```compile_fail,E0754
# #![feature(non_ascii_idents)]
mod řųśť;
// ^ error!
fn main() {}
```
```compile_fail,E0754
# #![feature(non_ascii_idents)]
#[no_mangle]
fn řųśť() {}
// ^ error!
fn main() {}
```
Non-ASCII identifiers can be used as module names if the module is inline
or if a #\[path\] attribute is specified. For example:
```
# #![feature(non_ascii_idents)]
mod řųśť {
const IS_GREAT: bool = true;
}
fn main() {}
```

View File

@ -3,7 +3,7 @@ or `Self` that references lifetimes from a parent scope.
Erroneous code example: Erroneous code example:
```compile_fail,E0754,edition2018 ```compile_fail,E0755,edition2018
struct S<'a>(&'a i32); struct S<'a>(&'a i32);
impl<'a> S<'a> { impl<'a> S<'a> {

View File

@ -50,13 +50,13 @@ fn t1() {
assert_eq!(string_reader.next_token(), token::Whitespace); assert_eq!(string_reader.next_token(), token::Whitespace);
// Read another token. // Read another token.
let tok3 = string_reader.next_token(); let tok3 = string_reader.next_token();
assert_eq!(string_reader.pos.clone(), BytePos(28)); assert_eq!(string_reader.pos(), BytePos(28));
let tok4 = Token::new(mk_ident("main"), Span::with_root_ctxt(BytePos(24), BytePos(28))); let tok4 = Token::new(mk_ident("main"), Span::with_root_ctxt(BytePos(24), BytePos(28)));
assert_eq!(tok3.kind, tok4.kind); assert_eq!(tok3.kind, tok4.kind);
assert_eq!(tok3.span, tok4.span); assert_eq!(tok3.span, tok4.span);
assert_eq!(string_reader.next_token(), token::OpenDelim(token::Paren)); assert_eq!(string_reader.next_token(), token::OpenDelim(token::Paren));
assert_eq!(string_reader.pos.clone(), BytePos(29)) assert_eq!(string_reader.pos(), BytePos(29))
}) })
} }

View File

@ -507,9 +507,14 @@ impl server::Ident for Rustc<'_> {
} }
impl server::Literal for Rustc<'_> { impl server::Literal for Rustc<'_> {
// FIXME(eddyb) `Literal` should not expose internal `Debug` impls. fn debug_kind(&mut self, literal: &Self::Literal) -> String {
fn debug(&mut self, literal: &Self::Literal) -> String { format!("{:?}", literal.lit.kind)
format!("{:?}", literal) }
fn symbol(&mut self, literal: &Self::Literal) -> String {
literal.lit.symbol.to_string()
}
fn suffix(&mut self, literal: &Self::Literal) -> Option<String> {
literal.lit.suffix.as_ref().map(Symbol::to_string)
} }
fn integer(&mut self, n: &str) -> Self::Literal { fn integer(&mut self, n: &str) -> Self::Literal {
self.lit(token::Integer, Symbol::intern(n), None) self.lit(token::Integer, Symbol::intern(n), None)

View File

@ -8,7 +8,7 @@
#![feature(const_panic)] #![feature(const_panic)]
#![feature(in_band_lifetimes)] #![feature(in_band_lifetimes)]
#![feature(or_patterns)] #![feature(or_patterns)]
#![feature(specialization)] #![feature(min_specialization)]
#![recursion_limit = "256"] #![recursion_limit = "256"]
#[macro_use] #[macro_use]

View File

@ -15,7 +15,7 @@ pub mod assert_module_sources;
mod persist; mod persist;
pub use assert_dep_graph::assert_dep_graph; pub use assert_dep_graph::assert_dep_graph;
pub use persist::copy_cgu_workproducts_to_incr_comp_cache_dir; pub use persist::copy_cgu_workproduct_to_incr_comp_cache_dir;
pub use persist::delete_workproduct_files; pub use persist::delete_workproduct_files;
pub use persist::dep_graph_tcx_init; pub use persist::dep_graph_tcx_init;
pub use persist::finalize_session_directory; pub use persist::finalize_session_directory;

View File

@ -134,7 +134,7 @@ pub fn load_dep_graph(sess: &Session) -> DepGraphFuture {
for swp in work_products { for swp in work_products {
let mut all_files_exist = true; let mut all_files_exist = true;
for file_name in swp.work_product.saved_files.iter() { if let Some(ref file_name) = swp.work_product.saved_file {
let path = in_incr_comp_dir_sess(sess, file_name); let path = in_incr_comp_dir_sess(sess, file_name);
if !path.exists() { if !path.exists() {
all_files_exist = false; all_files_exist = false;

View File

@ -21,5 +21,5 @@ pub use load::LoadResult;
pub use load::{load_dep_graph, DepGraphFuture}; pub use load::{load_dep_graph, DepGraphFuture};
pub use save::save_dep_graph; pub use save::save_dep_graph;
pub use save::save_work_product_index; pub use save::save_work_product_index;
pub use work_product::copy_cgu_workproducts_to_incr_comp_cache_dir; pub use work_product::copy_cgu_workproduct_to_incr_comp_cache_dir;
pub use work_product::delete_workproduct_files; pub use work_product::delete_workproduct_files;

View File

@ -74,9 +74,9 @@ pub fn save_work_product_index(
if !new_work_products.contains_key(id) { if !new_work_products.contains_key(id) {
work_product::delete_workproduct_files(sess, wp); work_product::delete_workproduct_files(sess, wp);
debug_assert!( debug_assert!(
wp.saved_files wp.saved_file.as_ref().map_or(true, |file_name| {
.iter() !in_incr_comp_dir_sess(sess, &file_name).exists()
.all(|file_name| { !in_incr_comp_dir_sess(sess, file_name).exists() }) })
); );
} }
} }
@ -85,7 +85,7 @@ pub fn save_work_product_index(
debug_assert!({ debug_assert!({
new_work_products new_work_products
.iter() .iter()
.flat_map(|(_, wp)| wp.saved_files.iter()) .flat_map(|(_, wp)| wp.saved_file.iter())
.map(|name| in_incr_comp_dir_sess(sess, name)) .map(|name| in_incr_comp_dir_sess(sess, name))
.all(|path| path.exists()) .all(|path| path.exists())
}); });

View File

@ -7,43 +7,41 @@ use rustc_session::Session;
use std::fs as std_fs; use std::fs as std_fs;
use std::path::PathBuf; use std::path::PathBuf;
pub fn copy_cgu_workproducts_to_incr_comp_cache_dir( pub fn copy_cgu_workproduct_to_incr_comp_cache_dir(
sess: &Session, sess: &Session,
cgu_name: &str, cgu_name: &str,
files: &[PathBuf], path: &Option<PathBuf>,
) -> Option<(WorkProductId, WorkProduct)> { ) -> Option<(WorkProductId, WorkProduct)> {
debug!("copy_cgu_workproducts_to_incr_comp_cache_dir({:?},{:?})", cgu_name, files); debug!("copy_cgu_workproduct_to_incr_comp_cache_dir({:?},{:?})", cgu_name, path);
sess.opts.incremental.as_ref()?; sess.opts.incremental.as_ref()?;
let saved_files = files let saved_file = if let Some(path) = path {
.iter() let file_name = format!("{}.o", cgu_name);
.map(|path| { let path_in_incr_dir = in_incr_comp_dir_sess(sess, &file_name);
let file_name = format!("{}.o", cgu_name); match link_or_copy(path, &path_in_incr_dir) {
let path_in_incr_dir = in_incr_comp_dir_sess(sess, &file_name); Ok(_) => Some(file_name),
match link_or_copy(path, &path_in_incr_dir) { Err(err) => {
Ok(_) => Some(file_name), sess.warn(&format!(
Err(err) => { "error copying object file `{}` to incremental directory as `{}`: {}",
sess.warn(&format!( path.display(),
"error copying object file `{}` \ path_in_incr_dir.display(),
to incremental directory as `{}`: {}", err
path.display(), ));
path_in_incr_dir.display(), return None;
err
));
None
}
} }
}) }
.collect::<Option<Vec<_>>>()?; } else {
None
};
let work_product = WorkProduct { cgu_name: cgu_name.to_string(), saved_files }; let work_product = WorkProduct { cgu_name: cgu_name.to_string(), saved_file };
let work_product_id = WorkProductId::from_cgu_name(cgu_name); let work_product_id = WorkProductId::from_cgu_name(cgu_name);
Some((work_product_id, work_product)) Some((work_product_id, work_product))
} }
pub fn delete_workproduct_files(sess: &Session, work_product: &WorkProduct) { pub fn delete_workproduct_files(sess: &Session, work_product: &WorkProduct) {
for file_name in &work_product.saved_files { if let Some(ref file_name) = work_product.saved_file {
let path = in_incr_comp_dir_sess(sess, file_name); let path = in_incr_comp_dir_sess(sess, file_name);
match std_fs::remove_file(&path) { match std_fs::remove_file(&path) {
Ok(()) => {} Ok(()) => {}

View File

@ -65,7 +65,7 @@ impl Idx for u32 {
/// `u32::MAX`. You can also customize things like the `Debug` impl, /// `u32::MAX`. You can also customize things like the `Debug` impl,
/// what traits are derived, and so forth via the macro. /// what traits are derived, and so forth via the macro.
#[macro_export] #[macro_export]
#[allow_internal_unstable(step_trait, rustc_attrs)] #[allow_internal_unstable(step_trait, step_trait_ext, rustc_attrs)]
macro_rules! newtype_index { macro_rules! newtype_index {
// ---- public rules ---- // ---- public rules ----
@ -181,7 +181,7 @@ macro_rules! newtype_index {
} }
} }
impl ::std::iter::Step for $type { unsafe impl ::std::iter::Step for $type {
#[inline] #[inline]
fn steps_between(start: &Self, end: &Self) -> Option<usize> { fn steps_between(start: &Self, end: &Self) -> Option<usize> {
<usize as ::std::iter::Step>::steps_between( <usize as ::std::iter::Step>::steps_between(
@ -191,33 +191,13 @@ macro_rules! newtype_index {
} }
#[inline] #[inline]
fn replace_one(&mut self) -> Self { fn forward_checked(start: Self, u: usize) -> Option<Self> {
::std::mem::replace(self, Self::from_u32(1)) Self::index(start).checked_add(u).map(Self::from_usize)
} }
#[inline] #[inline]
fn replace_zero(&mut self) -> Self { fn backward_checked(start: Self, u: usize) -> Option<Self> {
::std::mem::replace(self, Self::from_u32(0)) Self::index(start).checked_sub(u).map(Self::from_usize)
}
#[inline]
fn add_one(&self) -> Self {
Self::from_usize(Self::index(*self) + 1)
}
#[inline]
fn sub_one(&self) -> Self {
Self::from_usize(Self::index(*self) - 1)
}
#[inline]
fn add_usize(&self, u: usize) -> Option<Self> {
Self::index(*self).checked_add(u).map(Self::from_usize)
}
#[inline]
fn sub_usize(&self, u: usize) -> Option<Self> {
Self::index(*self).checked_sub(u).map(Self::from_usize)
} }
} }
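A hedged sketch (not from the diff) of the revised unstable `Step` surface the macro now implements, matching the `test_step_forward`/`test_step_backward` cases added earlier; the feature gates mirror the ones named above and may differ on other toolchains.

```rust
#![feature(step_trait)]
#![feature(step_trait_ext)]

use std::iter::Step;

fn main() {
    assert_eq!(Step::forward_checked(250_u8, 5), Some(255)); // still in range
    assert_eq!(Step::forward_checked(252_u8, 10), None);     // would overflow
    assert_eq!(Step::backward_checked(5_u8, 10), None);      // would underflow
    assert_eq!(Step::steps_between(&3_u8, &10_u8), Some(7));
}
```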

View File

@ -415,7 +415,6 @@ impl<'cx, 'tcx> TypeFolder<'tcx> for Canonicalizer<'cx, 'tcx> {
| ty::Never | ty::Never
| ty::Tuple(..) | ty::Tuple(..)
| ty::Projection(..) | ty::Projection(..)
| ty::UnnormalizedProjection(..)
| ty::Foreign(..) | ty::Foreign(..)
| ty::Param(..) | ty::Param(..)
| ty::Opaque(..) => { | ty::Opaque(..) => {

View File

@ -554,7 +554,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
let output = bound_output.skip_binder(); let output = bound_output.skip_binder();
err.span_label(e.span, &format!("this method call resolves to `{:?}`", output)); err.span_label(e.span, &format!("this method call resolves to `{:?}`", output));
let kind = &output.kind; let kind = &output.kind;
if let ty::Projection(proj) | ty::UnnormalizedProjection(proj) = kind { if let ty::Projection(proj) = kind {
if let Some(span) = self.tcx.hir().span_if_local(proj.item_def_id) { if let Some(span) = self.tcx.hir().span_if_local(proj.item_def_id) {
err.span_label(span, &format!("`{:?}` defined here", output)); err.span_label(span, &format!("`{:?}` defined here", output));
} }

View File

@ -204,7 +204,6 @@ impl<'a, 'tcx> TypeFolder<'tcx> for TypeFreshener<'a, 'tcx> {
| ty::Never | ty::Never
| ty::Tuple(..) | ty::Tuple(..)
| ty::Projection(..) | ty::Projection(..)
| ty::UnnormalizedProjection(..)
| ty::Foreign(..) | ty::Foreign(..)
| ty::Param(..) | ty::Param(..)
| ty::Closure(..) | ty::Closure(..)

View File

@ -325,8 +325,21 @@ impl<'cx, 'tcx> LexicalResolver<'cx, 'tcx> {
} }
} }
debug!("enforce_member_constraint: final least choice = {:?}", least_choice); // (#72087) Different `ty::Regions` can be known to be equal, for
if least_choice != member_lower_bound { // example, we know that `'a` and `'static` are equal in a function
// with a parameter of type `&'static &'a ()`.
//
// When we have two equal regions like this `expansion` will use
// `lub_concrete_regions` to pick a canonical representative. The same
// choice is needed here so that we don't end up in a cycle of
// `expansion` changing the region one way and the code here changing
// it back.
let lub = self.lub_concrete_regions(least_choice, member_lower_bound);
debug!(
"enforce_member_constraint: final least choice = {:?}\nlub = {:?}",
least_choice, lub
);
if lub != member_lower_bound {
*var_values.value_mut(member_vid) = VarValue::Value(least_choice); *var_values.value_mut(member_vid) = VarValue::Value(least_choice);
true true
} else { } else {
@ -578,8 +591,7 @@ impl<'cx, 'tcx> LexicalResolver<'cx, 'tcx> {
self.tcx().mk_region(ReScope(lub)) self.tcx().mk_region(ReScope(lub))
} }
(&ReEarlyBound(_), &ReEarlyBound(_) | &ReFree(_)) (&ReEarlyBound(_) | &ReFree(_), &ReEarlyBound(_) | &ReFree(_)) => {
| (&ReFree(_), &ReEarlyBound(_) | &ReFree(_)) => {
self.region_rels.lub_free_regions(a, b) self.region_rels.lub_free_regions(a, b)
} }
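An illustration (not part of the diff) of the fact the new comment above leans on: a parameter of type `&'static &'a ()` implies `'a: 'static`, so the two regions are known to be equal inside the function.

```rust
static UNIT: () = ();
static UNIT_REF: &() = &UNIT;
static N: i32 = 1;

// The `_witness` argument makes `&'static &'a ()` well-formed only if
// `'a: 'static`, so returning `&'a T` where `&'static T` is expected is fine.
fn promote<'a, T>(_witness: &'static &'a (), value: &'a T) -> &'static T {
    value
}

fn main() {
    let r: &'static i32 = promote(&UNIT_REF, &N);
    assert_eq!(*r, 1);
}
```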

View File

@ -112,8 +112,7 @@ pub fn elaborate_predicates<'tcx>(
tcx: TyCtxt<'tcx>, tcx: TyCtxt<'tcx>,
predicates: impl Iterator<Item = ty::Predicate<'tcx>>, predicates: impl Iterator<Item = ty::Predicate<'tcx>>,
) -> Elaborator<'tcx> { ) -> Elaborator<'tcx> {
let obligations = let obligations = predicates.map(|predicate| predicate_obligation(predicate, None)).collect();
predicates.into_iter().map(|predicate| predicate_obligation(predicate, None)).collect();
elaborate_obligations(tcx, obligations) elaborate_obligations(tcx, obligations)
} }
@ -149,7 +148,7 @@ impl Elaborator<'tcx> {
// Get predicates declared on the trait. // Get predicates declared on the trait.
let predicates = tcx.super_predicates_of(data.def_id()); let predicates = tcx.super_predicates_of(data.def_id());
let obligations = predicates.predicates.into_iter().map(|(pred, span)| { let obligations = predicates.predicates.iter().map(|(pred, span)| {
predicate_obligation( predicate_obligation(
pred.subst_supertrait(tcx, &data.to_poly_trait_ref()), pred.subst_supertrait(tcx, &data.to_poly_trait_ref()),
Some(*span), Some(*span),

View File

@ -137,7 +137,7 @@ impl<'tcx> Queries<'tcx> {
let result = passes::register_plugins( let result = passes::register_plugins(
self.session(), self.session(),
&*self.codegen_backend().metadata_loader(), &*self.codegen_backend().metadata_loader(),
self.compiler.register_lints.as_ref().map(|p| &**p).unwrap_or_else(|| empty), self.compiler.register_lints.as_deref().unwrap_or_else(|| empty),
krate, krate,
&crate_name, &crate_name,
); );
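The `as_deref` call above is the idiomatic replacement for the old `as_ref().map(|p| &**p)` reborrow. A standalone sketch of the equivalence (the variable names are illustrative, not from the patch):

    fn main() {
        let maybe_name: Option<Box<str>> = Some("callback".into());

        // Before: reborrow through the Box by hand.
        let manual: &str = maybe_name.as_ref().map(|p| &**p).unwrap_or("empty");
        // After: Option::as_deref performs the same Deref-based reborrow.
        let idiomatic: &str = maybe_name.as_deref().unwrap_or("empty");

        assert_eq!(manual, idiomatic);
    }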

View File

@ -1,5 +1,11 @@
//! Low-level Rust lexer. //! Low-level Rust lexer.
//! //!
//! The idea with `librustc_lexer` is to make a reusable library,
//! by separating out pure lexing and rustc-specific concerns, like spans,
//! error reporting and interning. So, rustc_lexer operates directly on `&str`,
//! produces simple tokens, each a pair of a type tag and a bit of the original text,
//! and does not report errors, instead storing them as flags on the token.
//!
//! Tokens produced by this lexer are not yet ready for parsing the Rust syntax, //! Tokens produced by this lexer are not yet ready for parsing the Rust syntax,
//! for that see `librustc_parse::lexer`, which converts this basic token stream //! for that see `librustc_parse::lexer`, which converts this basic token stream
//! into wide tokens used by actual parser. //! into wide tokens used by actual parser.
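As the new module docs put it, the lexer just walks a `&str` and hands back `(kind, length)` tokens, leaving spans, interning, and diagnostics to the caller. Roughly what driving it looks like (a sketch; it assumes the crate's public `tokenize`/`Token` items as they existed around this change, and a published mirror such as `rustc-ap-rustc_lexer` when used outside the compiler):

    use rustc_lexer::{tokenize, TokenKind};

    fn main() {
        let src = "let x = 42; // answer";
        let mut pos = 0;
        for token in tokenize(src) {
            let text = &src[pos..pos + token.len];
            pos += token.len;
            // No errors are reported here; malformed literals are flagged on the
            // token kind and diagnosed later by `librustc_parse::lexer`.
            if !matches!(token.kind, TokenKind::Whitespace) {
                println!("{:?} {:?}", token.kind, text);
            }
        }
    }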
@ -719,6 +725,9 @@ impl Cursor<'_> {
// Check that amount of closing '#' symbols // Check that amount of closing '#' symbols
// is equal to the amount of opening ones. // is equal to the amount of opening ones.
// Note that this will not consume extra trailing `#` characters:
// `r###"abcde"####` is lexed as a `LexedRawString { n_hashes: 3 }`
// followed by a `#` token.
let mut hashes_left = n_start_hashes; let mut hashes_left = n_start_hashes;
let is_closing_hash = |c| { let is_closing_hash = |c| {
if c == '#' && hashes_left != 0 { if c == '#' && hashes_left != 0 {
@ -739,8 +748,8 @@ impl Cursor<'_> {
possible_terminator_offset: None, possible_terminator_offset: None,
}; };
} else if n_end_hashes > max_hashes { } else if n_end_hashes > max_hashes {
// Keep track of possible terminators to give a hint about where there might be // Keep track of possible terminators to give a hint about
// a missing terminator // where there might be a missing terminator
possible_terminator_offset = possible_terminator_offset =
Some(self.len_consumed() - start_pos - n_end_hashes + prefix_len); Some(self.len_consumed() - start_pos - n_end_hashes + prefix_len);
max_hashes = n_end_hashes; max_hashes = n_end_hashes;

View File

@ -58,6 +58,42 @@ pub enum EscapeError {
NonAsciiCharInByteString, NonAsciiCharInByteString,
} }
/// Takes the contents of a literal (without quotes) and produces a
/// sequence of escaped characters or errors.
/// Values are returned by invoking the provided callback.
pub fn unescape_literal<F>(literal_text: &str, mode: Mode, callback: &mut F)
where
F: FnMut(Range<usize>, Result<char, EscapeError>),
{
match mode {
Mode::Char | Mode::Byte => {
let mut chars = literal_text.chars();
let result = unescape_char_or_byte(&mut chars, mode);
// The Chars iterator moved forward.
callback(0..(literal_text.len() - chars.as_str().len()), result);
}
Mode::Str | Mode::ByteStr => unescape_str_or_byte_str(literal_text, mode, callback),
// NOTE: Raw strings do not perform any explicit character escaping; here we
// only translate CRLF to LF and produce errors on bare CR.
Mode::RawStr | Mode::RawByteStr => {
unescape_raw_str_or_byte_str(literal_text, mode, callback)
}
}
}
/// Takes the contents of a byte, byte string, or raw byte string literal (without quotes)
/// and produces a sequence of bytes or errors.
/// Values are returned by invoking the provided callback.
pub fn unescape_byte_literal<F>(literal_text: &str, mode: Mode, callback: &mut F)
where
F: FnMut(Range<usize>, Result<u8, EscapeError>),
{
assert!(mode.is_bytes());
unescape_literal(literal_text, mode, &mut |range, result| {
callback(range, result.map(byte_from_char));
})
}
/// Takes a contents of a char literal (without quotes), and returns an /// Takes a contents of a char literal (without quotes), and returns an
/// unescaped char or an error /// unescaped char or an error
pub fn unescape_char(literal_text: &str) -> Result<char, (usize, EscapeError)> { pub fn unescape_char(literal_text: &str) -> Result<char, (usize, EscapeError)> {
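The consolidated entry point can be exercised exactly the way the updated tests further down do. A compact sketch (assuming the crate is available as `rustc_lexer` with the `unescape` module shown above):

    use rustc_lexer::unescape::{unescape_literal, EscapeError, Mode};
    use std::ops::Range;

    fn unescape_to_string(literal_text: &str) -> Result<String, (Range<usize>, EscapeError)> {
        let mut buf = Ok(String::with_capacity(literal_text.len()));
        unescape_literal(literal_text, Mode::Str, &mut |range, res| {
            if let Ok(b) = &mut buf {
                match res {
                    Ok(c) => b.push(c),
                    Err(e) => buf = Err((range, e)),
                }
            }
        });
        buf
    }

    fn main() {
        // A literal backslash-n in the source text unescapes to a real newline.
        assert_eq!(unescape_to_string(r"foo\nbar").unwrap(), "foo\nbar");
        // An unknown escape is reported through the callback as an error.
        assert!(unescape_to_string(r"\q").is_err());
    }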
@ -75,54 +111,6 @@ pub fn unescape_byte(literal_text: &str) -> Result<u8, (usize, EscapeError)> {
.map_err(|err| (literal_text.len() - chars.as_str().len(), err)) .map_err(|err| (literal_text.len() - chars.as_str().len(), err))
} }
/// Takes a contents of a string literal (without quotes) and produces a
/// sequence of escaped characters or errors.
/// Values are returned through invoking of the provided callback.
pub fn unescape_str<F>(literal_text: &str, callback: &mut F)
where
F: FnMut(Range<usize>, Result<char, EscapeError>),
{
unescape_str_or_byte_str(literal_text, Mode::Str, callback)
}
/// Takes a contents of a byte string literal (without quotes) and produces a
/// sequence of bytes or errors.
/// Values are returned through invoking of the provided callback.
pub fn unescape_byte_str<F>(literal_text: &str, callback: &mut F)
where
F: FnMut(Range<usize>, Result<u8, EscapeError>),
{
unescape_str_or_byte_str(literal_text, Mode::ByteStr, &mut |range, char| {
callback(range, char.map(byte_from_char))
})
}
/// Takes a contents of a raw string literal (without quotes) and produces a
/// sequence of characters or errors.
/// Values are returned through invoking of the provided callback.
/// NOTE: Raw strings do not perform any explicit character escaping, here we
/// only translate CRLF to LF and produce errors on bare CR.
pub fn unescape_raw_str<F>(literal_text: &str, callback: &mut F)
where
F: FnMut(Range<usize>, Result<char, EscapeError>),
{
unescape_raw_str_or_byte_str(literal_text, Mode::Str, callback)
}
/// Takes a contents of a raw byte string literal (without quotes) and produces a
/// sequence of bytes or errors.
/// Values are returned through invoking of the provided callback.
/// NOTE: Raw strings do not perform any explicit character escaping, here we
/// only translate CRLF to LF and produce errors on bare CR.
pub fn unescape_raw_byte_str<F>(literal_text: &str, callback: &mut F)
where
F: FnMut(Range<usize>, Result<u8, EscapeError>),
{
unescape_raw_str_or_byte_str(literal_text, Mode::ByteStr, &mut |range, char| {
callback(range, char.map(byte_from_char))
})
}
/// What kind of literal do we parse. /// What kind of literal do we parse.
#[derive(Debug, Clone, Copy)] #[derive(Debug, Clone, Copy)]
pub enum Mode { pub enum Mode {
@ -130,13 +118,15 @@ pub enum Mode {
Str, Str,
Byte, Byte,
ByteStr, ByteStr,
RawStr,
RawByteStr,
} }
impl Mode { impl Mode {
pub fn in_single_quotes(self) -> bool { pub fn in_single_quotes(self) -> bool {
match self { match self {
Mode::Char | Mode::Byte => true, Mode::Char | Mode::Byte => true,
Mode::Str | Mode::ByteStr => false, Mode::Str | Mode::ByteStr | Mode::RawStr | Mode::RawByteStr => false,
} }
} }
@ -146,8 +136,8 @@ impl Mode {
pub fn is_bytes(self) -> bool { pub fn is_bytes(self) -> bool {
match self { match self {
Mode::Byte | Mode::ByteStr => true, Mode::Byte | Mode::ByteStr | Mode::RawByteStr => true,
Mode::Char | Mode::Str => false, Mode::Char | Mode::Str | Mode::RawStr => false,
} }
} }
} }
@ -345,7 +335,7 @@ where
fn byte_from_char(c: char) -> u8 { fn byte_from_char(c: char) -> u8 {
let res = c as u32; let res = c as u32;
assert!(res <= u8::max_value() as u32, "guaranteed because of Mode::Byte(Str)"); assert!(res <= u8::max_value() as u32, "guaranteed because of Mode::ByteStr");
res as u8 res as u8
} }

View File

@ -102,7 +102,7 @@ fn test_unescape_char_good() {
fn test_unescape_str_good() { fn test_unescape_str_good() {
fn check(literal_text: &str, expected: &str) { fn check(literal_text: &str, expected: &str) {
let mut buf = Ok(String::with_capacity(literal_text.len())); let mut buf = Ok(String::with_capacity(literal_text.len()));
unescape_str(literal_text, &mut |range, c| { unescape_literal(literal_text, Mode::Str, &mut |range, c| {
if let Ok(b) = &mut buf { if let Ok(b) = &mut buf {
match c { match c {
Ok(c) => b.push(c), Ok(c) => b.push(c),
@ -222,7 +222,7 @@ fn test_unescape_byte_good() {
fn test_unescape_byte_str_good() { fn test_unescape_byte_str_good() {
fn check(literal_text: &str, expected: &[u8]) { fn check(literal_text: &str, expected: &[u8]) {
let mut buf = Ok(Vec::with_capacity(literal_text.len())); let mut buf = Ok(Vec::with_capacity(literal_text.len()));
unescape_byte_str(literal_text, &mut |range, c| { unescape_byte_literal(literal_text, Mode::ByteStr, &mut |range, c| {
if let Ok(b) = &mut buf { if let Ok(b) = &mut buf {
match c { match c {
Ok(c) => b.push(c), Ok(c) => b.push(c),
@ -246,7 +246,7 @@ fn test_unescape_byte_str_good() {
fn test_unescape_raw_str() { fn test_unescape_raw_str() {
fn check(literal: &str, expected: &[(Range<usize>, Result<char, EscapeError>)]) { fn check(literal: &str, expected: &[(Range<usize>, Result<char, EscapeError>)]) {
let mut unescaped = Vec::with_capacity(literal.len()); let mut unescaped = Vec::with_capacity(literal.len());
unescape_raw_str(literal, &mut |range, res| unescaped.push((range, res))); unescape_literal(literal, Mode::RawStr, &mut |range, res| unescaped.push((range, res)));
assert_eq!(unescaped, expected); assert_eq!(unescaped, expected);
} }
@ -258,7 +258,9 @@ fn test_unescape_raw_str() {
fn test_unescape_raw_byte_str() { fn test_unescape_raw_byte_str() {
fn check(literal: &str, expected: &[(Range<usize>, Result<u8, EscapeError>)]) { fn check(literal: &str, expected: &[(Range<usize>, Result<u8, EscapeError>)]) {
let mut unescaped = Vec::with_capacity(literal.len()); let mut unescaped = Vec::with_capacity(literal.len());
unescape_raw_byte_str(literal, &mut |range, res| unescaped.push((range, res))); unescape_byte_literal(literal, Mode::RawByteStr, &mut |range, res| {
unescaped.push((range, res))
});
assert_eq!(unescaped, expected); assert_eq!(unescaped, expected);
} }

View File

@ -28,8 +28,8 @@ use rustc_ast::visit::{FnCtxt, FnKind};
use rustc_ast_pretty::pprust::{self, expr_to_string}; use rustc_ast_pretty::pprust::{self, expr_to_string};
use rustc_data_structures::fx::FxHashSet; use rustc_data_structures::fx::FxHashSet;
use rustc_errors::{Applicability, DiagnosticBuilder}; use rustc_errors::{Applicability, DiagnosticBuilder};
use rustc_feature::Stability;
use rustc_feature::{deprecated_attributes, AttributeGate, AttributeTemplate, AttributeType}; use rustc_feature::{deprecated_attributes, AttributeGate, AttributeTemplate, AttributeType};
use rustc_feature::{GateIssue, Stability};
use rustc_hir as hir; use rustc_hir as hir;
use rustc_hir::def::{DefKind, Res}; use rustc_hir::def::{DefKind, Res};
use rustc_hir::def_id::DefId; use rustc_hir::def_id::DefId;
@ -1817,13 +1817,21 @@ impl EarlyLintPass for IncompleteFeatures {
.map(|(name, span, _)| (name, span)) .map(|(name, span, _)| (name, span))
.chain(features.declared_lib_features.iter().map(|(name, span)| (name, span))) .chain(features.declared_lib_features.iter().map(|(name, span)| (name, span)))
.filter(|(name, _)| rustc_feature::INCOMPLETE_FEATURES.iter().any(|f| name == &f)) .filter(|(name, _)| rustc_feature::INCOMPLETE_FEATURES.iter().any(|f| name == &f))
.for_each(|(name, &span)| { .for_each(|(&name, &span)| {
cx.struct_span_lint(INCOMPLETE_FEATURES, span, |lint| { cx.struct_span_lint(INCOMPLETE_FEATURES, span, |lint| {
lint.build(&format!( let mut builder = lint.build(&format!(
"the feature `{}` is incomplete and may cause the compiler to crash", "the feature `{}` is incomplete and may not be safe to use \
and/or cause compiler crashes",
name, name,
)) ));
.emit() if let Some(n) = rustc_feature::find_feature_issue(name, GateIssue::Language) {
builder.note(&format!(
"see issue #{} <https://github.com/rust-lang/rust/issues/{}> \
for more information",
n, n,
));
}
builder.emit();
}) })
}); });
} }
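With this change the lint also points users at the feature's tracking issue. For instance, enabling an incomplete feature on a nightly crate root (a sketch; `specialization` was one of the features on the `INCOMPLETE_FEATURES` list at the time) now produces the extended warning plus a `note:` naming its issue:

    // Crate root (nightly): triggers the `incomplete_features` lint, which now
    // also emits a note pointing at the feature's tracking issue.
    #![feature(specialization)]

    fn main() {}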

Some files were not shown because too many files have changed in this diff