Mirror of https://github.com/rust-lang/rust.git (synced 2025-05-14 02:49:40 +00:00)
Auto merge of #132586 - workingjubilee:rollup-qrmn49a, r=workingjubilee
Rollup of 4 pull requests

Successful merges:
- #131222 (Generate correct symbols.o for sparc-unknown-none-elf)
- #132423 (remove const-support for align_offset and is_aligned)
- #132565 (Reduce dependence on the target name)
- #132576 (remove attribute ids from hir stats (they're simply not needed))

r? `@ghost`
`@rustbot` modify labels: rollup
commit 56c6a2f9b1
@@ -146,7 +146,7 @@ impl<'gcc, 'tcx> StaticCodegenMethods for CodegenCx<'gcc, 'tcx> {
         // Wasm statics with custom link sections get special treatment as they
         // go into custom sections of the wasm executable.
-        if self.tcx.sess.opts.target_triple.tuple().starts_with("wasm32") {
+        if self.tcx.sess.target.is_like_wasm {
             if let Some(_section) = attrs.link_section {
                 unimplemented!();
             }
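The pattern behind this and several following hunks (#132565): consult the target *spec* rather than the target *name*, since custom target specs may carry arbitrary names. A toy illustration of why name matching is fragile (hypothetical `TargetSpec`, not rustc's real type):

```rust
// Sketch only: a custom target spec can be named anything, so deciding
// behavior from the name misclassifies it, while a spec flag cannot.
struct TargetSpec {
    name: String,       // e.g. "wasm32-unknown-unknown" or a custom "my-wasm"
    is_like_wasm: bool, // set by the spec itself, independent of the name
}

fn needs_wasm_custom_sections(spec: &TargetSpec) -> bool {
    // The old check, `spec.name.starts_with("wasm32")`, would miss "my-wasm".
    spec.is_like_wasm
}

fn main() {
    let custom = TargetSpec { name: "my-wasm".into(), is_like_wasm: true };
    assert!(needs_wasm_custom_sections(&custom));
    assert!(!custom.name.starts_with("wasm32")); // name matching fails here
}
```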
@@ -945,23 +945,10 @@ fn create_section_with_flags_asm(section_name: &str, section_flags: &str, data:
     asm
 }
 
-fn target_is_apple(cgcx: &CodegenContext<LlvmCodegenBackend>) -> bool {
-    let triple = cgcx.opts.target_triple.tuple();
-    triple.contains("-ios")
-        || triple.contains("-darwin")
-        || triple.contains("-tvos")
-        || triple.contains("-watchos")
-        || triple.contains("-visionos")
-}
-
-fn target_is_aix(cgcx: &CodegenContext<LlvmCodegenBackend>) -> bool {
-    cgcx.opts.target_triple.tuple().contains("-aix")
-}
-
 pub(crate) fn bitcode_section_name(cgcx: &CodegenContext<LlvmCodegenBackend>) -> &'static CStr {
-    if target_is_apple(cgcx) {
+    if cgcx.target_is_like_osx {
         c"__LLVM,__bitcode"
-    } else if target_is_aix(cgcx) {
+    } else if cgcx.target_is_like_aix {
         c".ipa"
     } else {
         c".llvmbc"
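As a quick reference, a minimal sketch (names assumed from the hunk above, not rustc's API) of how the embedded-bitcode section name follows the object-format family rather than the triple string:

```rust
// Sketch only: mirrors the selection in `bitcode_section_name` above.
fn bitcode_section(is_like_osx: bool, is_like_aix: bool) -> &'static str {
    if is_like_osx {
        "__LLVM,__bitcode" // Mach-O "segment,section" syntax
    } else if is_like_aix {
        ".ipa" // XCOFF
    } else {
        ".llvmbc" // ELF, COFF, and everything else
    }
}

fn main() {
    assert_eq!(bitcode_section(true, false), "__LLVM,__bitcode");
    assert_eq!(bitcode_section(false, false), ".llvmbc");
}
```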
@@ -1028,10 +1015,12 @@ unsafe fn embed_bitcode(
     // Unfortunately, LLVM provides no way to set custom section flags. For ELF
     // and COFF we emit the sections using module level inline assembly for that
     // reason (see issue #90326 for historical background).
-    let is_aix = target_is_aix(cgcx);
-    let is_apple = target_is_apple(cgcx);
     unsafe {
-        if is_apple || is_aix || cgcx.opts.target_triple.tuple().starts_with("wasm") {
+        if cgcx.target_is_like_osx
+            || cgcx.target_is_like_aix
+            || cgcx.target_arch == "wasm32"
+            || cgcx.target_arch == "wasm64"
+        {
             // We don't need custom section flags, create LLVM globals.
             let llconst = common::bytes_in_context(llcx, bitcode);
             let llglobal = llvm::LLVMAddGlobal(
@@ -1052,9 +1041,9 @@ unsafe fn embed_bitcode(
             c"rustc.embedded.cmdline".as_ptr(),
         );
         llvm::LLVMSetInitializer(llglobal, llconst);
-        let section = if is_apple {
+        let section = if cgcx.target_is_like_osx {
             c"__LLVM,__cmdline"
-        } else if is_aix {
+        } else if cgcx.target_is_like_aix {
             c".info"
         } else {
             c".llvmcmd"
@@ -85,11 +85,7 @@ pub fn link_binary(
     }
 
     if invalid_output_for_target(sess, crate_type) {
-        bug!(
-            "invalid output type `{:?}` for target os `{}`",
-            crate_type,
-            sess.opts.target_triple
-        );
+        bug!("invalid output type `{:?}` for target `{}`", crate_type, sess.opts.target_triple);
     }
 
     sess.time("link_binary_check_files_are_writeable", || {
@@ -996,6 +992,7 @@ fn link_natively(
                 && (code < 1000 || code > 9999)
             {
                 let is_vs_installed = windows_registry::find_vs_version().is_ok();
+                // FIXME(cc-rs#1265) pass only target arch to find_tool()
                 let has_linker = windows_registry::find_tool(
                     sess.opts.target_triple.tuple(),
                     "link.exe",
@@ -47,6 +47,7 @@ pub(crate) fn get_linker<'a>(
     self_contained: bool,
     target_cpu: &'a str,
 ) -> Box<dyn Linker + 'a> {
+    // FIXME(cc-rs#1265) pass only target arch to find_tool()
     let msvc_tool = windows_registry::find_tool(sess.opts.target_triple.tuple(), "link.exe");
 
     // If our linker looks like a batch script on Windows then to execute this
@@ -211,7 +211,15 @@ pub(crate) fn create_object_file(sess: &Session) -> Option<write::Object<'static
         "powerpc64" => (Architecture::PowerPc64, None),
         "riscv32" => (Architecture::Riscv32, None),
         "riscv64" => (Architecture::Riscv64, None),
-        "sparc" => (Architecture::Sparc32Plus, None),
+        "sparc" => {
+            if sess.target.options.cpu == "v9" {
+                // Target uses V8+, aka EM_SPARC32PLUS, aka 64-bit V9 but in 32-bit mode
+                (Architecture::Sparc32Plus, None)
+            } else {
+                // Target uses V7 or V8, aka EM_SPARC
+                (Architecture::Sparc, None)
+            }
+        }
         "sparc64" => (Architecture::Sparc64, None),
         "avr" => (Architecture::Avr, None),
         "msp430" => (Architecture::Msp430, None),
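A standalone restatement of the new rule (hypothetical helper; the real decision lives in the hunk above): 32-bit SPARC code generated for a V9 CPU follows the V8+ ABI and must be tagged `EM_SPARC32PLUS` rather than `EM_SPARC` in the ELF header, which is what #131222 needed for sparc-unknown-none-elf:

```rust
// Sketch of the ELF machine-type choice for 32-bit SPARC objects.
#[derive(Debug, PartialEq)]
enum SparcElfMachine {
    Sparc,       // EM_SPARC: plain V7/V8
    Sparc32Plus, // EM_SPARC32PLUS: V8+, i.e. a 64-bit V9 CPU in 32-bit mode
}

fn sparc_elf_machine(cpu: &str) -> SparcElfMachine {
    if cpu == "v9" { SparcElfMachine::Sparc32Plus } else { SparcElfMachine::Sparc }
}

fn main() {
    assert_eq!(sparc_elf_machine("v9"), SparcElfMachine::Sparc32Plus);
    assert_eq!(sparc_elf_machine("v8"), SparcElfMachine::Sparc);
}
```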
@@ -345,6 +345,8 @@ pub struct CodegenContext<B: WriteBackendMethods> {
     pub is_pe_coff: bool,
     pub target_can_use_split_dwarf: bool,
     pub target_arch: String,
+    pub target_is_like_osx: bool,
+    pub target_is_like_aix: bool,
     pub split_debuginfo: rustc_target::spec::SplitDebuginfo,
     pub split_dwarf_kind: rustc_session::config::SplitDwarfKind,
@@ -1195,6 +1197,8 @@ fn start_executing_work<B: ExtraBackendMethods>(
         is_pe_coff: tcx.sess.target.is_like_windows,
         target_can_use_split_dwarf: tcx.sess.target_can_use_split_dwarf(),
         target_arch: tcx.sess.target.arch.to_string(),
+        target_is_like_osx: tcx.sess.target.is_like_osx,
+        target_is_like_aix: tcx.sess.target.is_like_aix,
         split_debuginfo: tcx.sess.split_debuginfo(),
         split_dwarf_kind: tcx.sess.opts.unstable_opts.split_dwarf_kind,
         parallel: backend.supports_parallel() && !sess.opts.unstable_opts.no_parallel_backend,
@@ -1,9 +1,6 @@
 const_eval_address_space_full =
     there are no more free addresses in the address space
 
-const_eval_align_offset_invalid_align =
-    `align_offset` called with non-power-of-two align: {$target_align}
-
 const_eval_alignment_check_failed =
     {$msg ->
       [AccessedPtr] accessing memory
@@ -1,7 +1,6 @@
 use std::borrow::{Borrow, Cow};
 use std::fmt;
 use std::hash::Hash;
-use std::ops::ControlFlow;
 
 use rustc_abi::{Align, ExternAbi, Size};
 use rustc_ast::Mutability;
@@ -10,7 +9,7 @@ use rustc_hir::def_id::{DefId, LocalDefId};
 use rustc_hir::{self as hir, CRATE_HIR_ID, LangItem};
 use rustc_middle::mir::AssertMessage;
 use rustc_middle::query::TyCtxtAt;
-use rustc_middle::ty::layout::{FnAbiOf, TyAndLayout};
+use rustc_middle::ty::layout::TyAndLayout;
 use rustc_middle::ty::{self, Ty, TyCtxt};
 use rustc_middle::{bug, mir};
 use rustc_span::Span;
@@ -22,9 +21,9 @@ use crate::errors::{LongRunning, LongRunningWarn};
 use crate::fluent_generated as fluent;
 use crate::interpret::{
     self, AllocId, AllocRange, ConstAllocation, CtfeProvenance, FnArg, Frame, GlobalAlloc, ImmTy,
-    InterpCx, InterpResult, MPlaceTy, OpTy, Pointer, PointerArithmetic, RangeSet, Scalar,
-    StackPopCleanup, compile_time_machine, interp_ok, throw_exhaust, throw_inval, throw_ub,
-    throw_ub_custom, throw_unsup, throw_unsup_format,
+    InterpCx, InterpResult, MPlaceTy, OpTy, Pointer, RangeSet, Scalar, compile_time_machine,
+    interp_ok, throw_exhaust, throw_inval, throw_ub, throw_ub_custom, throw_unsup,
+    throw_unsup_format,
 };
 
 /// When hitting this many interpreted terminators we emit a deny by default lint
@@ -226,8 +225,8 @@ impl<'tcx> CompileTimeInterpCx<'tcx> {
         &mut self,
         instance: ty::Instance<'tcx>,
         args: &[FnArg<'tcx>],
-        dest: &MPlaceTy<'tcx>,
-        ret: Option<mir::BasicBlock>,
+        _dest: &MPlaceTy<'tcx>,
+        _ret: Option<mir::BasicBlock>,
     ) -> InterpResult<'tcx, Option<ty::Instance<'tcx>>> {
         let def_id = instance.def_id();
@@ -259,85 +258,10 @@ impl<'tcx> CompileTimeInterpCx<'tcx> {
             );
 
             return interp_ok(Some(new_instance));
-        } else if self.tcx.is_lang_item(def_id, LangItem::AlignOffset) {
-            let args = self.copy_fn_args(args);
-            // For align_offset, we replace the function call if the pointer has no address.
-            match self.align_offset(instance, &args, dest, ret)? {
-                ControlFlow::Continue(()) => return interp_ok(Some(instance)),
-                ControlFlow::Break(()) => return interp_ok(None),
-            }
         }
         interp_ok(Some(instance))
     }
 
-    /// `align_offset(ptr, target_align)` needs special handling in const eval, because the pointer
-    /// may not have an address.
-    ///
-    /// If `ptr` does have a known address, then we return `Continue(())` and the function call should
-    /// proceed as normal.
-    ///
-    /// If `ptr` doesn't have an address, but its underlying allocation's alignment is at most
-    /// `target_align`, then we call the function again with a dummy address relative to the
-    /// allocation.
-    ///
-    /// If `ptr` doesn't have an address and `target_align` is stricter than the underlying
-    /// allocation's alignment, then we return `usize::MAX` immediately.
-    fn align_offset(
-        &mut self,
-        instance: ty::Instance<'tcx>,
-        args: &[OpTy<'tcx>],
-        dest: &MPlaceTy<'tcx>,
-        ret: Option<mir::BasicBlock>,
-    ) -> InterpResult<'tcx, ControlFlow<()>> {
-        assert_eq!(args.len(), 2);
-
-        let ptr = self.read_pointer(&args[0])?;
-        let target_align = self.read_scalar(&args[1])?.to_target_usize(self)?;
-
-        if !target_align.is_power_of_two() {
-            throw_ub_custom!(
-                fluent::const_eval_align_offset_invalid_align,
-                target_align = target_align,
-            );
-        }
-
-        match self.ptr_try_get_alloc_id(ptr, 0) {
-            Ok((alloc_id, offset, _extra)) => {
-                let (_size, alloc_align, _kind) = self.get_alloc_info(alloc_id);
-
-                if target_align <= alloc_align.bytes() {
-                    // Extract the address relative to the allocation base that is definitely
-                    // sufficiently aligned and call `align_offset` again.
-                    let addr = ImmTy::from_uint(offset.bytes(), args[0].layout).into();
-                    let align = ImmTy::from_uint(target_align, args[1].layout).into();
-                    let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty())?;
-
-                    // Push the stack frame with our own adjusted arguments.
-                    self.init_stack_frame(
-                        instance,
-                        self.load_mir(instance.def, None)?,
-                        fn_abi,
-                        &[FnArg::Copy(addr), FnArg::Copy(align)],
-                        /* with_caller_location = */ false,
-                        dest,
-                        StackPopCleanup::Goto { ret, unwind: mir::UnwindAction::Unreachable },
-                    )?;
-                    interp_ok(ControlFlow::Break(()))
-                } else {
-                    // Not alignable in const, return `usize::MAX`.
-                    let usize_max = Scalar::from_target_usize(self.target_usize_max(), self);
-                    self.write_scalar(usize_max, dest)?;
-                    self.return_to_block(ret)?;
-                    interp_ok(ControlFlow::Break(()))
-                }
-            }
-            Err(_addr) => {
-                // The pointer has an address, continue with function call.
-                interp_ok(ControlFlow::Continue(()))
-            }
-        }
-    }
-
     /// See documentation on the `ptr_guaranteed_cmp` intrinsic.
     fn guaranteed_cmp(&mut self, a: Scalar, b: Scalar) -> InterpResult<'tcx, u8> {
         interp_ok(match (a, b) {
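For readers unfamiliar with the deleted hook: `align_offset` answers "how many elements until this pointer becomes aligned", which const eval could not always decide because an allocation's final address is unknown at compile time. A minimal runtime sketch (ordinary stable Rust, not part of the diff):

```rust
fn main() {
    let buf = [0u8; 32];
    // Deliberately misalign by one byte.
    let ptr = unsafe { buf.as_ptr().add(1) };
    // Number of *elements* (here: bytes) to add so `ptr` is 8-aligned;
    // 0 if it already is.
    let off = ptr.align_offset(8);
    assert_eq!((ptr as usize + off) % 8, 0);
    println!("offset to reach 8-byte alignment: {off}");
}
```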
@@ -348,9 +348,6 @@ language_item_table! {
 
     MaybeUninit, sym::maybe_uninit, maybe_uninit, Target::Union, GenericRequirement::None;
 
-    /// Align offset for stride != 1; must not panic.
-    AlignOffset, sym::align_offset, align_offset_fn, Target::Fn, GenericRequirement::None;
-
     Termination, sym::termination, termination, Target::Trait, GenericRequirement::None;
 
     Try, sym::Try, try_trait, Target::Trait, GenericRequirement::None;
@@ -3,7 +3,7 @@
 // completely accurate (some things might be counted twice, others missed).
 
 use rustc_ast::visit::BoundKind;
-use rustc_ast::{self as ast, AttrId, NodeId, visit as ast_visit};
+use rustc_ast::{self as ast, NodeId, visit as ast_visit};
 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
 use rustc_hir as hir;
 use rustc_hir::{HirId, intravisit as hir_visit};
@@ -13,13 +13,6 @@ use rustc_middle::util::common::to_readable_str;
 use rustc_span::Span;
 use rustc_span::def_id::LocalDefId;
 
-#[derive(Copy, Clone, PartialEq, Eq, Hash)]
-enum Id {
-    Node(HirId),
-    Attr(AttrId),
-    None,
-}
-
 struct NodeStats {
     count: usize,
     size: usize,
@@ -62,7 +55,7 @@ impl Node {
 struct StatCollector<'k> {
     krate: Option<Map<'k>>,
     nodes: FxHashMap<&'static str, Node>,
-    seen: FxHashSet<Id>,
+    seen: FxHashSet<HirId>,
 }
 
 pub fn print_hir_stats(tcx: TyCtxt<'_>) {
@@ -87,12 +80,18 @@ pub fn print_ast_stats(krate: &ast::Crate, title: &str, prefix: &str) {
 
 impl<'k> StatCollector<'k> {
     // Record a top-level node.
-    fn record<T>(&mut self, label: &'static str, id: Id, val: &T) {
+    fn record<T>(&mut self, label: &'static str, id: Option<HirId>, val: &T) {
         self.record_inner(label, None, id, val);
     }
 
     // Record a two-level entry, with a top-level enum type and a variant.
-    fn record_variant<T>(&mut self, label1: &'static str, label2: &'static str, id: Id, val: &T) {
+    fn record_variant<T>(
+        &mut self,
+        label1: &'static str,
+        label2: &'static str,
+        id: Option<HirId>,
+        val: &T,
+    ) {
         self.record_inner(label1, Some(label2), id, val);
     }
 
@@ -100,10 +99,10 @@ impl<'k> StatCollector<'k> {
         &mut self,
         label1: &'static str,
         label2: Option<&'static str>,
-        id: Id,
+        id: Option<HirId>,
         val: &T,
     ) {
-        if id != Id::None && !self.seen.insert(id) {
+        if id.is_some_and(|x| !self.seen.insert(x)) {
             return;
         }
 
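The `Option<HirId>` plus `is_some_and` replacement keeps the old dedup semantics: entries without an id are always recorded, entries with an id only the first time. A self-contained sketch of that pattern (toy types, not the compiler's):

```rust
use std::collections::HashSet;

// `None` entries always count; `Some(id)` entries count only once,
// because `insert` returns false for an id that was already seen.
fn should_record(seen: &mut HashSet<u32>, id: Option<u32>) -> bool {
    !id.is_some_and(|x| !seen.insert(x))
}

fn main() {
    let mut seen = HashSet::new();
    assert!(should_record(&mut seen, Some(1)));  // first occurrence
    assert!(!should_record(&mut seen, Some(1))); // duplicate is skipped
    assert!(should_record(&mut seen, None));     // id-less nodes always count
}
```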
@@ -191,7 +190,7 @@ macro_rules! record_variants {
 
 impl<'v> hir_visit::Visitor<'v> for StatCollector<'v> {
     fn visit_param(&mut self, param: &'v hir::Param<'v>) {
-        self.record("Param", Id::Node(param.hir_id), param);
+        self.record("Param", Some(param.hir_id), param);
         hir_visit::walk_param(self, param)
     }
 
@@ -221,7 +220,7 @@ impl<'v> hir_visit::Visitor<'v> for StatCollector<'v> {
     }
 
     fn visit_item(&mut self, i: &'v hir::Item<'v>) {
-        record_variants!((self, i, i.kind, Id::Node(i.hir_id()), hir, Item, ItemKind), [
+        record_variants!((self, i, i.kind, Some(i.hir_id()), hir, Item, ItemKind), [
             ExternCrate,
             Use,
             Static,
@@ -243,47 +242,46 @@ impl<'v> hir_visit::Visitor<'v> for StatCollector<'v> {
     }
 
     fn visit_body(&mut self, b: &hir::Body<'v>) {
-        self.record("Body", Id::None, b);
+        self.record("Body", None, b);
         hir_visit::walk_body(self, b);
     }
 
     fn visit_mod(&mut self, m: &'v hir::Mod<'v>, _s: Span, n: HirId) {
-        self.record("Mod", Id::None, m);
+        self.record("Mod", None, m);
         hir_visit::walk_mod(self, m, n)
     }
 
     fn visit_foreign_item(&mut self, i: &'v hir::ForeignItem<'v>) {
-        record_variants!(
-            (self, i, i.kind, Id::Node(i.hir_id()), hir, ForeignItem, ForeignItemKind),
-            [Fn, Static, Type]
-        );
+        record_variants!((self, i, i.kind, Some(i.hir_id()), hir, ForeignItem, ForeignItemKind), [
+            Fn, Static, Type
+        ]);
         hir_visit::walk_foreign_item(self, i)
     }
 
     fn visit_local(&mut self, l: &'v hir::LetStmt<'v>) {
-        self.record("Local", Id::Node(l.hir_id), l);
+        self.record("Local", Some(l.hir_id), l);
         hir_visit::walk_local(self, l)
     }
 
     fn visit_block(&mut self, b: &'v hir::Block<'v>) {
-        self.record("Block", Id::Node(b.hir_id), b);
+        self.record("Block", Some(b.hir_id), b);
         hir_visit::walk_block(self, b)
     }
 
     fn visit_stmt(&mut self, s: &'v hir::Stmt<'v>) {
-        record_variants!((self, s, s.kind, Id::Node(s.hir_id), hir, Stmt, StmtKind), [
+        record_variants!((self, s, s.kind, Some(s.hir_id), hir, Stmt, StmtKind), [
             Let, Item, Expr, Semi
         ]);
         hir_visit::walk_stmt(self, s)
     }
 
     fn visit_arm(&mut self, a: &'v hir::Arm<'v>) {
-        self.record("Arm", Id::Node(a.hir_id), a);
+        self.record("Arm", Some(a.hir_id), a);
         hir_visit::walk_arm(self, a)
     }
 
     fn visit_pat(&mut self, p: &'v hir::Pat<'v>) {
-        record_variants!((self, p, p.kind, Id::Node(p.hir_id), hir, Pat, PatKind), [
+        record_variants!((self, p, p.kind, Some(p.hir_id), hir, Pat, PatKind), [
             Wild,
             Binding,
             Struct,
@@ -304,12 +302,12 @@ impl<'v> hir_visit::Visitor<'v> for StatCollector<'v> {
     }
 
     fn visit_pat_field(&mut self, f: &'v hir::PatField<'v>) {
-        self.record("PatField", Id::Node(f.hir_id), f);
+        self.record("PatField", Some(f.hir_id), f);
         hir_visit::walk_pat_field(self, f)
     }
 
     fn visit_expr(&mut self, e: &'v hir::Expr<'v>) {
-        record_variants!((self, e, e.kind, Id::Node(e.hir_id), hir, Expr, ExprKind), [
+        record_variants!((self, e, e.kind, Some(e.hir_id), hir, Expr, ExprKind), [
             ConstBlock, Array, Call, MethodCall, Tup, Binary, Unary, Lit, Cast, Type, DropTemps,
             Let, If, Loop, Match, Closure, Block, Assign, AssignOp, Field, Index, Path, AddrOf,
             Break, Continue, Ret, Become, InlineAsm, OffsetOf, Struct, Repeat, Yield, Err
@@ -318,12 +316,12 @@ impl<'v> hir_visit::Visitor<'v> for StatCollector<'v> {
     }
 
     fn visit_expr_field(&mut self, f: &'v hir::ExprField<'v>) {
-        self.record("ExprField", Id::Node(f.hir_id), f);
+        self.record("ExprField", Some(f.hir_id), f);
         hir_visit::walk_expr_field(self, f)
     }
 
     fn visit_ty(&mut self, t: &'v hir::Ty<'v>) {
-        record_variants!((self, t, t.kind, Id::Node(t.hir_id), hir, Ty, TyKind), [
+        record_variants!((self, t, t.kind, Some(t.hir_id), hir, Ty, TyKind), [
             InferDelegation,
             Slice,
             Array,
@@ -345,17 +343,17 @@ impl<'v> hir_visit::Visitor<'v> for StatCollector<'v> {
     }
 
     fn visit_generic_param(&mut self, p: &'v hir::GenericParam<'v>) {
-        self.record("GenericParam", Id::Node(p.hir_id), p);
+        self.record("GenericParam", Some(p.hir_id), p);
         hir_visit::walk_generic_param(self, p)
     }
 
     fn visit_generics(&mut self, g: &'v hir::Generics<'v>) {
-        self.record("Generics", Id::None, g);
+        self.record("Generics", None, g);
         hir_visit::walk_generics(self, g)
     }
 
     fn visit_where_predicate(&mut self, p: &'v hir::WherePredicate<'v>) {
-        record_variants!((self, p, p, Id::None, hir, WherePredicate, WherePredicate), [
+        record_variants!((self, p, p, None, hir, WherePredicate, WherePredicate), [
             BoundPredicate,
             RegionPredicate,
             EqPredicate
@@ -371,66 +369,64 @@ impl<'v> hir_visit::Visitor<'v> for StatCollector<'v> {
         _: Span,
         id: LocalDefId,
     ) {
-        self.record("FnDecl", Id::None, fd);
+        self.record("FnDecl", None, fd);
         hir_visit::walk_fn(self, fk, fd, b, id)
     }
 
     fn visit_use(&mut self, p: &'v hir::UsePath<'v>, hir_id: HirId) {
         // This is `visit_use`, but the type is `Path` so record it that way.
-        self.record("Path", Id::None, p);
+        self.record("Path", None, p);
         hir_visit::walk_use(self, p, hir_id)
     }
 
     fn visit_trait_item(&mut self, ti: &'v hir::TraitItem<'v>) {
-        record_variants!(
-            (self, ti, ti.kind, Id::Node(ti.hir_id()), hir, TraitItem, TraitItemKind),
-            [Const, Fn, Type]
-        );
+        record_variants!((self, ti, ti.kind, Some(ti.hir_id()), hir, TraitItem, TraitItemKind), [
+            Const, Fn, Type
+        ]);
         hir_visit::walk_trait_item(self, ti)
     }
 
     fn visit_trait_item_ref(&mut self, ti: &'v hir::TraitItemRef) {
-        self.record("TraitItemRef", Id::Node(ti.id.hir_id()), ti);
+        self.record("TraitItemRef", Some(ti.id.hir_id()), ti);
         hir_visit::walk_trait_item_ref(self, ti)
     }
 
     fn visit_impl_item(&mut self, ii: &'v hir::ImplItem<'v>) {
-        record_variants!(
-            (self, ii, ii.kind, Id::Node(ii.hir_id()), hir, ImplItem, ImplItemKind),
-            [Const, Fn, Type]
-        );
+        record_variants!((self, ii, ii.kind, Some(ii.hir_id()), hir, ImplItem, ImplItemKind), [
+            Const, Fn, Type
+        ]);
         hir_visit::walk_impl_item(self, ii)
     }
 
     fn visit_foreign_item_ref(&mut self, fi: &'v hir::ForeignItemRef) {
-        self.record("ForeignItemRef", Id::Node(fi.id.hir_id()), fi);
+        self.record("ForeignItemRef", Some(fi.id.hir_id()), fi);
         hir_visit::walk_foreign_item_ref(self, fi)
     }
 
     fn visit_impl_item_ref(&mut self, ii: &'v hir::ImplItemRef) {
-        self.record("ImplItemRef", Id::Node(ii.id.hir_id()), ii);
+        self.record("ImplItemRef", Some(ii.id.hir_id()), ii);
         hir_visit::walk_impl_item_ref(self, ii)
     }
 
     fn visit_param_bound(&mut self, b: &'v hir::GenericBound<'v>) {
-        record_variants!((self, b, b, Id::None, hir, GenericBound, GenericBound), [
+        record_variants!((self, b, b, None, hir, GenericBound, GenericBound), [
             Trait, Outlives, Use
         ]);
         hir_visit::walk_param_bound(self, b)
     }
 
     fn visit_field_def(&mut self, s: &'v hir::FieldDef<'v>) {
-        self.record("FieldDef", Id::Node(s.hir_id), s);
+        self.record("FieldDef", Some(s.hir_id), s);
         hir_visit::walk_field_def(self, s)
     }
 
     fn visit_variant(&mut self, v: &'v hir::Variant<'v>) {
-        self.record("Variant", Id::None, v);
+        self.record("Variant", None, v);
         hir_visit::walk_variant(self, v)
     }
 
     fn visit_generic_arg(&mut self, ga: &'v hir::GenericArg<'v>) {
-        record_variants!((self, ga, ga, Id::Node(ga.hir_id()), hir, GenericArg, GenericArg), [
+        record_variants!((self, ga, ga, Some(ga.hir_id()), hir, GenericArg, GenericArg), [
             Lifetime, Type, Const, Infer
         ]);
         match ga {
@@ -442,50 +438,50 @@ impl<'v> hir_visit::Visitor<'v> for StatCollector<'v> {
     }
 
     fn visit_lifetime(&mut self, lifetime: &'v hir::Lifetime) {
-        self.record("Lifetime", Id::Node(lifetime.hir_id), lifetime);
+        self.record("Lifetime", Some(lifetime.hir_id), lifetime);
         hir_visit::walk_lifetime(self, lifetime)
     }
 
     fn visit_path(&mut self, path: &hir::Path<'v>, _id: HirId) {
-        self.record("Path", Id::None, path);
+        self.record("Path", None, path);
         hir_visit::walk_path(self, path)
     }
 
     fn visit_path_segment(&mut self, path_segment: &'v hir::PathSegment<'v>) {
-        self.record("PathSegment", Id::None, path_segment);
+        self.record("PathSegment", None, path_segment);
         hir_visit::walk_path_segment(self, path_segment)
     }
 
     fn visit_generic_args(&mut self, ga: &'v hir::GenericArgs<'v>) {
-        self.record("GenericArgs", Id::None, ga);
+        self.record("GenericArgs", None, ga);
         hir_visit::walk_generic_args(self, ga)
     }
 
     fn visit_assoc_item_constraint(&mut self, constraint: &'v hir::AssocItemConstraint<'v>) {
-        self.record("AssocItemConstraint", Id::Node(constraint.hir_id), constraint);
+        self.record("AssocItemConstraint", Some(constraint.hir_id), constraint);
         hir_visit::walk_assoc_item_constraint(self, constraint)
     }
 
     fn visit_attribute(&mut self, attr: &'v ast::Attribute) {
-        self.record("Attribute", Id::Attr(attr.id), attr);
+        self.record("Attribute", None, attr);
     }
 
     fn visit_inline_asm(&mut self, asm: &'v hir::InlineAsm<'v>, id: HirId) {
-        self.record("InlineAsm", Id::None, asm);
+        self.record("InlineAsm", None, asm);
         hir_visit::walk_inline_asm(self, asm, id);
     }
 }
 
 impl<'v> ast_visit::Visitor<'v> for StatCollector<'v> {
     fn visit_foreign_item(&mut self, i: &'v ast::ForeignItem) {
-        record_variants!((self, i, i.kind, Id::None, ast, ForeignItem, ForeignItemKind), [
+        record_variants!((self, i, i.kind, None, ast, ForeignItem, ForeignItemKind), [
             Static, Fn, TyAlias, MacCall
         ]);
         ast_visit::walk_item(self, i)
     }
 
     fn visit_item(&mut self, i: &'v ast::Item) {
-        record_variants!((self, i, i.kind, Id::None, ast, Item, ItemKind), [
+        record_variants!((self, i, i.kind, None, ast, Item, ItemKind), [
             ExternCrate,
             Use,
             Static,
@@ -510,34 +506,34 @@ impl<'v> ast_visit::Visitor<'v> for StatCollector<'v> {
     }
 
     fn visit_local(&mut self, l: &'v ast::Local) {
-        self.record("Local", Id::None, l);
+        self.record("Local", None, l);
         ast_visit::walk_local(self, l)
     }
 
     fn visit_block(&mut self, b: &'v ast::Block) {
-        self.record("Block", Id::None, b);
+        self.record("Block", None, b);
         ast_visit::walk_block(self, b)
     }
 
     fn visit_stmt(&mut self, s: &'v ast::Stmt) {
-        record_variants!((self, s, s.kind, Id::None, ast, Stmt, StmtKind), [
+        record_variants!((self, s, s.kind, None, ast, Stmt, StmtKind), [
             Let, Item, Expr, Semi, Empty, MacCall
         ]);
         ast_visit::walk_stmt(self, s)
     }
 
     fn visit_param(&mut self, p: &'v ast::Param) {
-        self.record("Param", Id::None, p);
+        self.record("Param", None, p);
         ast_visit::walk_param(self, p)
     }
 
     fn visit_arm(&mut self, a: &'v ast::Arm) {
-        self.record("Arm", Id::None, a);
+        self.record("Arm", None, a);
         ast_visit::walk_arm(self, a)
     }
 
     fn visit_pat(&mut self, p: &'v ast::Pat) {
-        record_variants!((self, p, p.kind, Id::None, ast, Pat, PatKind), [
+        record_variants!((self, p, p.kind, None, ast, Pat, PatKind), [
             Wild,
             Ident,
             Struct,
@@ -563,7 +559,7 @@ impl<'v> ast_visit::Visitor<'v> for StatCollector<'v> {
     fn visit_expr(&mut self, e: &'v ast::Expr) {
         #[rustfmt::skip]
         record_variants!(
-            (self, e, e.kind, Id::None, ast, Expr, ExprKind),
+            (self, e, e.kind, None, ast, Expr, ExprKind),
             [
                 Array, ConstBlock, Call, MethodCall, Tup, Binary, Unary, Lit, Cast, Type, Let,
                 If, While, ForLoop, Loop, Match, Closure, Block, Await, TryBlock, Assign,
@@ -576,7 +572,7 @@ impl<'v> ast_visit::Visitor<'v> for StatCollector<'v> {
     }
 
     fn visit_ty(&mut self, t: &'v ast::Ty) {
-        record_variants!((self, t, t.kind, Id::None, ast, Ty, TyKind), [
+        record_variants!((self, t, t.kind, None, ast, Ty, TyKind), [
             Slice,
             Array,
             Ptr,
@@ -603,12 +599,12 @@ impl<'v> ast_visit::Visitor<'v> for StatCollector<'v> {
     }
 
     fn visit_generic_param(&mut self, g: &'v ast::GenericParam) {
-        self.record("GenericParam", Id::None, g);
+        self.record("GenericParam", None, g);
         ast_visit::walk_generic_param(self, g)
     }
 
     fn visit_where_predicate(&mut self, p: &'v ast::WherePredicate) {
-        record_variants!((self, p, p, Id::None, ast, WherePredicate, WherePredicate), [
+        record_variants!((self, p, p, None, ast, WherePredicate, WherePredicate), [
             BoundPredicate,
             RegionPredicate,
             EqPredicate
@@ -617,12 +613,12 @@ impl<'v> ast_visit::Visitor<'v> for StatCollector<'v> {
     }
 
     fn visit_fn(&mut self, fk: ast_visit::FnKind<'v>, _: Span, _: NodeId) {
-        self.record("FnDecl", Id::None, fk.decl());
+        self.record("FnDecl", None, fk.decl());
         ast_visit::walk_fn(self, fk)
     }
 
     fn visit_assoc_item(&mut self, i: &'v ast::AssocItem, ctxt: ast_visit::AssocCtxt) {
-        record_variants!((self, i, i.kind, Id::None, ast, AssocItem, AssocItemKind), [
+        record_variants!((self, i, i.kind, None, ast, AssocItem, AssocItemKind), [
             Const,
             Fn,
             Type,
@@ -634,19 +630,19 @@ impl<'v> ast_visit::Visitor<'v> for StatCollector<'v> {
     }
 
     fn visit_param_bound(&mut self, b: &'v ast::GenericBound, _ctxt: BoundKind) {
-        record_variants!((self, b, b, Id::None, ast, GenericBound, GenericBound), [
+        record_variants!((self, b, b, None, ast, GenericBound, GenericBound), [
             Trait, Outlives, Use
         ]);
         ast_visit::walk_param_bound(self, b)
     }
 
     fn visit_field_def(&mut self, s: &'v ast::FieldDef) {
-        self.record("FieldDef", Id::None, s);
+        self.record("FieldDef", None, s);
         ast_visit::walk_field_def(self, s)
     }
 
     fn visit_variant(&mut self, v: &'v ast::Variant) {
-        self.record("Variant", Id::None, v);
+        self.record("Variant", None, v);
         ast_visit::walk_variant(self, v)
     }
 
@@ -660,7 +656,7 @@ impl<'v> ast_visit::Visitor<'v> for StatCollector<'v> {
     // common than the former case, so we implement this visitor and tolerate
     // the double counting in the former case.
     fn visit_path_segment(&mut self, path_segment: &'v ast::PathSegment) {
-        self.record("PathSegment", Id::None, path_segment);
+        self.record("PathSegment", None, path_segment);
         ast_visit::walk_path_segment(self, path_segment)
    }
 
@@ -669,7 +665,7 @@ impl<'v> ast_visit::Visitor<'v> for StatCollector<'v> {
     // common, so we implement `visit_generic_args` and tolerate the double
     // counting in the former case.
     fn visit_generic_args(&mut self, g: &'v ast::GenericArgs) {
-        record_variants!((self, g, g, Id::None, ast, GenericArgs, GenericArgs), [
+        record_variants!((self, g, g, None, ast, GenericArgs, GenericArgs), [
             AngleBracketed,
             Parenthesized,
             ParenthesizedElided
@@ -678,24 +674,24 @@ impl<'v> ast_visit::Visitor<'v> for StatCollector<'v> {
     }
 
     fn visit_attribute(&mut self, attr: &'v ast::Attribute) {
-        record_variants!((self, attr, attr.kind, Id::None, ast, Attribute, AttrKind), [
+        record_variants!((self, attr, attr.kind, None, ast, Attribute, AttrKind), [
             Normal, DocComment
         ]);
         ast_visit::walk_attribute(self, attr)
     }
 
     fn visit_expr_field(&mut self, f: &'v ast::ExprField) {
-        self.record("ExprField", Id::None, f);
+        self.record("ExprField", None, f);
         ast_visit::walk_expr_field(self, f)
     }
 
     fn visit_crate(&mut self, krate: &'v ast::Crate) {
-        self.record("Crate", Id::None, krate);
+        self.record("Crate", None, krate);
         ast_visit::walk_crate(self, krate)
     }
 
     fn visit_inline_asm(&mut self, asm: &'v ast::InlineAsm) {
-        self.record("InlineAsm", Id::None, asm);
+        self.record("InlineAsm", None, asm);
         ast_visit::walk_inline_asm(self, asm)
     }
 }
@@ -378,7 +378,6 @@ symbols! {
         aggregate_raw_ptr,
         alias,
         align,
-        align_offset,
         alignment,
         all,
         alloc,
@@ -1595,11 +1595,10 @@ macro_rules! supported_targets {
         pub const TARGETS: &[&str] = &[$($tuple),+];
 
         fn load_builtin(target: &str) -> Option<Target> {
-            let mut t = match target {
+            let t = match target {
                 $( $tuple => targets::$module::target(), )+
                 _ => return None,
             };
-            t.is_builtin = true;
             debug!("got builtin target: {:?}", t);
             Some(t)
         }
@@ -2128,9 +2127,6 @@ type StaticCow<T> = Cow<'static, T>;
 /// through `Deref` impls.
 #[derive(PartialEq, Clone, Debug)]
 pub struct TargetOptions {
-    /// Whether the target is built-in or loaded from a custom target specification.
-    pub is_builtin: bool,
-
     /// Used as the `target_endian` `cfg` variable. Defaults to little endian.
     pub endian: Endian,
     /// Width of c_int type. Defaults to "32".
@@ -2606,7 +2602,6 @@ impl Default for TargetOptions {
     /// incomplete, and if used for compilation, will certainly not work.
     fn default() -> TargetOptions {
         TargetOptions {
-            is_builtin: false,
             endian: Endian::Little,
             c_int_width: "32".into(),
             os: "none".into(),
@@ -3349,7 +3344,6 @@ impl Target {
             }
         }
 
-        key!(is_builtin, bool);
         key!(c_int_width = "target-c-int-width");
         key!(c_enum_min_bits, Option<u64>); // if None, matches c_int_width
         key!(os);
@@ -3462,10 +3456,6 @@ impl Target {
         key!(entry_abi, Conv)?;
         key!(supports_xray, bool);
 
-        if base.is_builtin {
-            // This can cause unfortunate ICEs later down the line.
-            return Err("may not set is_builtin for targets not built-in".into());
-        }
         base.update_from_cli();
 
         // Each field should have been read using `Json::remove` so any keys remaining are unused.
@@ -3635,7 +3625,6 @@ impl ToJson for Target {
         target_val!(arch);
         target_val!(data_layout);
 
-        target_option_val!(is_builtin);
         target_option_val!(endian, "target-endian");
         target_option_val!(c_int_width, "target-c-int-width");
         target_option_val!(os);
@@ -112,7 +112,6 @@
 #![feature(asm_experimental_arch)]
 #![feature(const_align_of_val)]
 #![feature(const_align_of_val_raw)]
-#![feature(const_align_offset)]
 #![feature(const_alloc_layout)]
 #![feature(const_black_box)]
 #![feature(const_char_encode_utf16)]
@@ -123,7 +122,6 @@
 #![feature(const_nonnull_new)]
 #![feature(const_option_ext)]
 #![feature(const_pin_2)]
-#![feature(const_pointer_is_aligned)]
 #![feature(const_ptr_is_null)]
 #![feature(const_ptr_sub_ptr)]
 #![feature(const_raw_ptr_comparison)]
|
@ -1358,15 +1358,6 @@ impl<T: ?Sized> *const T {
|
||||
/// beyond the allocation that the pointer points into. It is up to the caller to ensure that
|
||||
/// the returned offset is correct in all terms other than alignment.
|
||||
///
|
||||
/// When this is called during compile-time evaluation (which is unstable), the implementation
|
||||
/// may return `usize::MAX` in cases where that can never happen at runtime. This is because the
|
||||
/// actual alignment of pointers is not known yet during compile-time, so an offset with
|
||||
/// guaranteed alignment can sometimes not be computed. For example, a buffer declared as `[u8;
|
||||
/// N]` might be allocated at an odd or an even address, but at compile-time this is not yet
|
||||
/// known, so the execution has to be correct for either choice. It is therefore impossible to
|
||||
/// find an offset that is guaranteed to be 2-aligned. (This behavior is subject to change, as usual
|
||||
/// for unstable APIs.)
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// The function panics if `align` is not a power-of-two.
|
||||
@@ -1395,8 +1386,7 @@ impl<T: ?Sized> *const T {
     #[must_use]
     #[inline]
     #[stable(feature = "align_offset", since = "1.36.0")]
-    #[rustc_const_unstable(feature = "const_align_offset", issue = "90962")]
-    pub const fn align_offset(self, align: usize) -> usize
+    pub fn align_offset(self, align: usize) -> usize
     where
         T: Sized,
     {
@@ -1431,94 +1421,10 @@ impl<T: ?Sized> *const T {
     /// assert!(ptr.is_aligned());
     /// assert!(!ptr.wrapping_byte_add(1).is_aligned());
     /// ```
-    ///
-    /// # At compiletime
-    /// **Note: Alignment at compiletime is experimental and subject to change. See the
-    /// [tracking issue] for details.**
-    ///
-    /// At compiletime, the compiler may not know where a value will end up in memory.
-    /// Calling this function on a pointer created from a reference at compiletime will only
-    /// return `true` if the pointer is guaranteed to be aligned. This means that the pointer
-    /// is never aligned if cast to a type with a stricter alignment than the reference's
-    /// underlying allocation.
-    ///
-    /// ```
-    /// #![feature(const_pointer_is_aligned)]
-    ///
-    /// // On some platforms, the alignment of primitives is less than their size.
-    /// #[repr(align(4))]
-    /// struct AlignedI32(i32);
-    /// #[repr(align(8))]
-    /// struct AlignedI64(i64);
-    ///
-    /// const _: () = {
-    ///     let data = AlignedI32(42);
-    ///     let ptr = &data as *const AlignedI32;
-    ///     assert!(ptr.is_aligned());
-    ///
-    ///     // At runtime either `ptr1` or `ptr2` would be aligned, but at compiletime neither is aligned.
-    ///     let ptr1 = ptr.cast::<AlignedI64>();
-    ///     let ptr2 = ptr.wrapping_add(1).cast::<AlignedI64>();
-    ///     assert!(!ptr1.is_aligned());
-    ///     assert!(!ptr2.is_aligned());
-    /// };
-    /// ```
-    ///
-    /// Due to this behavior, it is possible that a runtime pointer derived from a compiletime
-    /// pointer is aligned, even if the compiletime pointer wasn't aligned.
-    ///
-    /// ```
-    /// #![feature(const_pointer_is_aligned)]
-    ///
-    /// // On some platforms, the alignment of primitives is less than their size.
-    /// #[repr(align(4))]
-    /// struct AlignedI32(i32);
-    /// #[repr(align(8))]
-    /// struct AlignedI64(i64);
-    ///
-    /// // At compiletime, neither `COMPTIME_PTR` nor `COMPTIME_PTR + 1` is aligned.
-    /// const COMPTIME_PTR: *const AlignedI32 = &AlignedI32(42);
-    /// const _: () = assert!(!COMPTIME_PTR.cast::<AlignedI64>().is_aligned());
-    /// const _: () = assert!(!COMPTIME_PTR.wrapping_add(1).cast::<AlignedI64>().is_aligned());
-    ///
-    /// // At runtime, either `runtime_ptr` or `runtime_ptr + 1` is aligned.
-    /// let runtime_ptr = COMPTIME_PTR;
-    /// assert_ne!(
-    ///     runtime_ptr.cast::<AlignedI64>().is_aligned(),
-    ///     runtime_ptr.wrapping_add(1).cast::<AlignedI64>().is_aligned(),
-    /// );
-    /// ```
-    ///
-    /// If a pointer is created from a fixed address, this function behaves the same during
-    /// runtime and compiletime.
-    ///
-    /// ```
-    /// #![feature(const_pointer_is_aligned)]
-    ///
-    /// // On some platforms, the alignment of primitives is less than their size.
-    /// #[repr(align(4))]
-    /// struct AlignedI32(i32);
-    /// #[repr(align(8))]
-    /// struct AlignedI64(i64);
-    ///
-    /// const _: () = {
-    ///     let ptr = 40 as *const AlignedI32;
-    ///     assert!(ptr.is_aligned());
-    ///
-    ///     // For pointers with a known address, runtime and compiletime behavior are identical.
-    ///     let ptr1 = ptr.cast::<AlignedI64>();
-    ///     let ptr2 = ptr.wrapping_add(1).cast::<AlignedI64>();
-    ///     assert!(ptr1.is_aligned());
-    ///     assert!(!ptr2.is_aligned());
-    /// };
-    /// ```
-    ///
-    /// [tracking issue]: https://github.com/rust-lang/rust/issues/104203
     #[must_use]
     #[inline]
     #[stable(feature = "pointer_is_aligned", since = "1.79.0")]
-    #[rustc_const_unstable(feature = "const_pointer_is_aligned", issue = "104203")]
-    pub const fn is_aligned(self) -> bool
+    pub fn is_aligned(self) -> bool
     where
         T: Sized,
     {
@@ -1555,105 +1461,15 @@ impl<T: ?Sized> *const T {
     ///
     /// assert_ne!(ptr.is_aligned_to(8), ptr.wrapping_add(1).is_aligned_to(8));
     /// ```
-    ///
-    /// # At compiletime
-    /// **Note: Alignment at compiletime is experimental and subject to change. See the
-    /// [tracking issue] for details.**
-    ///
-    /// At compiletime, the compiler may not know where a value will end up in memory.
-    /// Calling this function on a pointer created from a reference at compiletime will only
-    /// return `true` if the pointer is guaranteed to be aligned. This means that the pointer
-    /// cannot be stricter aligned than the reference's underlying allocation.
-    ///
-    /// ```
-    /// #![feature(pointer_is_aligned_to)]
-    /// #![feature(const_pointer_is_aligned)]
-    ///
-    /// // On some platforms, the alignment of i32 is less than 4.
-    /// #[repr(align(4))]
-    /// struct AlignedI32(i32);
-    ///
-    /// const _: () = {
-    ///     let data = AlignedI32(42);
-    ///     let ptr = &data as *const AlignedI32;
-    ///
-    ///     assert!(ptr.is_aligned_to(1));
-    ///     assert!(ptr.is_aligned_to(2));
-    ///     assert!(ptr.is_aligned_to(4));
-    ///
-    ///     // At compiletime, we know for sure that the pointer isn't aligned to 8.
-    ///     assert!(!ptr.is_aligned_to(8));
-    ///     assert!(!ptr.wrapping_add(1).is_aligned_to(8));
-    /// };
-    /// ```
-    ///
-    /// Due to this behavior, it is possible that a runtime pointer derived from a compiletime
-    /// pointer is aligned, even if the compiletime pointer wasn't aligned.
-    ///
-    /// ```
-    /// #![feature(pointer_is_aligned_to)]
-    /// #![feature(const_pointer_is_aligned)]
-    ///
-    /// // On some platforms, the alignment of i32 is less than 4.
-    /// #[repr(align(4))]
-    /// struct AlignedI32(i32);
-    ///
-    /// // At compiletime, neither `COMPTIME_PTR` nor `COMPTIME_PTR + 1` is aligned.
-    /// const COMPTIME_PTR: *const AlignedI32 = &AlignedI32(42);
-    /// const _: () = assert!(!COMPTIME_PTR.is_aligned_to(8));
-    /// const _: () = assert!(!COMPTIME_PTR.wrapping_add(1).is_aligned_to(8));
-    ///
-    /// // At runtime, either `runtime_ptr` or `runtime_ptr + 1` is aligned.
-    /// let runtime_ptr = COMPTIME_PTR;
-    /// assert_ne!(
-    ///     runtime_ptr.is_aligned_to(8),
-    ///     runtime_ptr.wrapping_add(1).is_aligned_to(8),
-    /// );
-    /// ```
-    ///
-    /// If a pointer is created from a fixed address, this function behaves the same during
-    /// runtime and compiletime.
-    ///
-    /// ```
-    /// #![feature(pointer_is_aligned_to)]
-    /// #![feature(const_pointer_is_aligned)]
-    ///
-    /// const _: () = {
-    ///     let ptr = 40 as *const u8;
-    ///     assert!(ptr.is_aligned_to(1));
-    ///     assert!(ptr.is_aligned_to(2));
-    ///     assert!(ptr.is_aligned_to(4));
-    ///     assert!(ptr.is_aligned_to(8));
-    ///     assert!(!ptr.is_aligned_to(16));
-    /// };
-    /// ```
-    ///
-    /// [tracking issue]: https://github.com/rust-lang/rust/issues/104203
     #[must_use]
     #[inline]
     #[unstable(feature = "pointer_is_aligned_to", issue = "96284")]
-    #[rustc_const_unstable(feature = "const_pointer_is_aligned", issue = "104203")]
-    pub const fn is_aligned_to(self, align: usize) -> bool {
+    pub fn is_aligned_to(self, align: usize) -> bool {
         if !align.is_power_of_two() {
             panic!("is_aligned_to: align is not a power-of-two");
         }
 
-        #[inline]
-        fn runtime_impl(ptr: *const (), align: usize) -> bool {
-            ptr.addr() & (align - 1) == 0
-        }
-
-        #[inline]
-        #[rustc_const_unstable(feature = "const_pointer_is_aligned", issue = "104203")]
-        const fn const_impl(ptr: *const (), align: usize) -> bool {
-            // We can't use the address of `self` in a `const fn`, so we use `align_offset` instead.
-            ptr.align_offset(align) == 0
-        }
-
-        // The cast to `()` is used to
-        // 1. deal with fat pointers; and
-        // 2. ensure that `align_offset` (in `const_impl`) doesn't actually try to compute an offset.
-        const_eval_select((self.cast::<()>(), align), const_impl, runtime_impl)
+        self.addr() & (align - 1) == 0
     }
 }
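With const support gone, both pointer flavors reduce to the classic power-of-two mask test. A self-contained sketch of that check (plain function over a raw address, not the method itself):

```rust
// An address is `align`-aligned iff its low bits (those below the power
// of two) are all zero, i.e. `addr & (align - 1) == 0`.
fn is_aligned_to(addr: usize, align: usize) -> bool {
    assert!(align.is_power_of_two());
    addr & (align - 1) == 0
}

fn main() {
    assert!(is_aligned_to(40, 8));
    assert!(!is_aligned_to(41, 8));
    assert!(is_aligned_to(0, 4096)); // zero is aligned to everything
}
```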
@@ -1852,9 +1852,7 @@ pub unsafe fn write_volatile<T>(dst: *mut T, src: T) {
 ///
 /// Any questions go to @nagisa.
 #[allow(ptr_to_integer_transmute_in_consts)]
-#[lang = "align_offset"]
-#[rustc_const_unstable(feature = "const_align_offset", issue = "90962")]
-pub(crate) const unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
+pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
     // FIXME(#75598): Direct use of these intrinsics improves codegen significantly at opt-level <=
     // 1, where the method versions of these operations are not inlined.
     use intrinsics::{
@@ -1915,11 +1913,7 @@ pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
 
     let stride = mem::size_of::<T>();
 
-    // SAFETY: This is just an inlined `p.addr()` (which is not
-    // a `const fn` so we cannot call it).
-    // During const eval, we hook this function to ensure that the pointer never
-    // has provenance, making this sound.
-    let addr: usize = unsafe { mem::transmute(p) };
+    let addr: usize = p.addr();
 
     // SAFETY: `a` is a power-of-two, therefore non-zero.
     let a_minus_one = unsafe { unchecked_sub(a, 1) };
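The transmute existed only because `pointer::addr` could not be called from the old `const fn`; once the function is no longer const, the method works directly. A small illustration of the replacement (assumes a toolchain where `pointer::addr` is available, as in current Rust):

```rust
fn main() {
    let x = 5u32;
    let p: *const u32 = &x;
    // `addr()` extracts the address portion of the pointer without a
    // pointer-to-integer transmute, which also interacts cleanly with
    // pointer provenance.
    let addr: usize = p.addr();
    assert_eq!(addr % std::mem::align_of::<u32>(), 0);
}
```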
@@ -1,6 +1,5 @@
 use super::*;
 use crate::cmp::Ordering::{Equal, Greater, Less};
-use crate::intrinsics::const_eval_select;
 use crate::mem::SizedTypeProperties;
 use crate::slice::{self, SliceIndex};
 
@@ -1636,8 +1635,7 @@ impl<T: ?Sized> *mut T {
     #[must_use]
     #[inline]
     #[stable(feature = "align_offset", since = "1.36.0")]
-    #[rustc_const_unstable(feature = "const_align_offset", issue = "90962")]
-    pub const fn align_offset(self, align: usize) -> usize
+    pub fn align_offset(self, align: usize) -> usize
     where
         T: Sized,
     {
@@ -1675,95 +1673,10 @@ impl<T: ?Sized> *mut T {
     /// assert!(ptr.is_aligned());
     /// assert!(!ptr.wrapping_byte_add(1).is_aligned());
     /// ```
-    ///
-    /// # At compiletime
-    /// **Note: Alignment at compiletime is experimental and subject to change. See the
-    /// [tracking issue] for details.**
-    ///
-    /// At compiletime, the compiler may not know where a value will end up in memory.
-    /// Calling this function on a pointer created from a reference at compiletime will only
-    /// return `true` if the pointer is guaranteed to be aligned. This means that the pointer
-    /// is never aligned if cast to a type with a stricter alignment than the reference's
-    /// underlying allocation.
-    ///
-    /// ```
-    /// #![feature(const_pointer_is_aligned)]
-    ///
-    /// // On some platforms, the alignment of primitives is less than their size.
-    /// #[repr(align(4))]
-    /// struct AlignedI32(i32);
-    /// #[repr(align(8))]
-    /// struct AlignedI64(i64);
-    ///
-    /// const _: () = {
-    ///     let mut data = AlignedI32(42);
-    ///     let ptr = &mut data as *mut AlignedI32;
-    ///     assert!(ptr.is_aligned());
-    ///
-    ///     // At runtime either `ptr1` or `ptr2` would be aligned, but at compiletime neither is aligned.
-    ///     let ptr1 = ptr.cast::<AlignedI64>();
-    ///     let ptr2 = ptr.wrapping_add(1).cast::<AlignedI64>();
-    ///     assert!(!ptr1.is_aligned());
-    ///     assert!(!ptr2.is_aligned());
-    /// };
-    /// ```
-    ///
-    /// Due to this behavior, it is possible that a runtime pointer derived from a compiletime
-    /// pointer is aligned, even if the compiletime pointer wasn't aligned.
-    ///
-    /// ```
-    /// #![feature(const_pointer_is_aligned)]
-    ///
-    /// // On some platforms, the alignment of primitives is less than their size.
-    /// #[repr(align(4))]
-    /// struct AlignedI32(i32);
-    /// #[repr(align(8))]
-    /// struct AlignedI64(i64);
-    ///
-    /// // At compiletime, neither `COMPTIME_PTR` nor `COMPTIME_PTR + 1` is aligned.
-    /// // Also, note that mutable references are not allowed in the final value of constants.
-    /// const COMPTIME_PTR: *mut AlignedI32 = (&AlignedI32(42) as *const AlignedI32).cast_mut();
-    /// const _: () = assert!(!COMPTIME_PTR.cast::<AlignedI64>().is_aligned());
-    /// const _: () = assert!(!COMPTIME_PTR.wrapping_add(1).cast::<AlignedI64>().is_aligned());
-    ///
-    /// // At runtime, either `runtime_ptr` or `runtime_ptr + 1` is aligned.
-    /// let runtime_ptr = COMPTIME_PTR;
-    /// assert_ne!(
-    ///     runtime_ptr.cast::<AlignedI64>().is_aligned(),
-    ///     runtime_ptr.wrapping_add(1).cast::<AlignedI64>().is_aligned(),
-    /// );
-    /// ```
-    ///
-    /// If a pointer is created from a fixed address, this function behaves the same during
-    /// runtime and compiletime.
-    ///
-    /// ```
-    /// #![feature(const_pointer_is_aligned)]
-    ///
-    /// // On some platforms, the alignment of primitives is less than their size.
-    /// #[repr(align(4))]
-    /// struct AlignedI32(i32);
-    /// #[repr(align(8))]
-    /// struct AlignedI64(i64);
-    ///
-    /// const _: () = {
-    ///     let ptr = 40 as *mut AlignedI32;
-    ///     assert!(ptr.is_aligned());
-    ///
-    ///     // For pointers with a known address, runtime and compiletime behavior are identical.
-    ///     let ptr1 = ptr.cast::<AlignedI64>();
-    ///     let ptr2 = ptr.wrapping_add(1).cast::<AlignedI64>();
-    ///     assert!(ptr1.is_aligned());
-    ///     assert!(!ptr2.is_aligned());
-    /// };
-    /// ```
-    ///
-    /// [tracking issue]: https://github.com/rust-lang/rust/issues/104203
     #[must_use]
     #[inline]
     #[stable(feature = "pointer_is_aligned", since = "1.79.0")]
-    #[rustc_const_unstable(feature = "const_pointer_is_aligned", issue = "104203")]
-    pub const fn is_aligned(self) -> bool
+    pub fn is_aligned(self) -> bool
     where
         T: Sized,
     {
@@ -1800,106 +1713,15 @@ impl<T: ?Sized> *mut T {
     ///
     /// assert_ne!(ptr.is_aligned_to(8), ptr.wrapping_add(1).is_aligned_to(8));
     /// ```
-    ///
-    /// # At compiletime
-    /// **Note: Alignment at compiletime is experimental and subject to change. See the
-    /// [tracking issue] for details.**
-    ///
-    /// At compiletime, the compiler may not know where a value will end up in memory.
-    /// Calling this function on a pointer created from a reference at compiletime will only
-    /// return `true` if the pointer is guaranteed to be aligned. This means that the pointer
-    /// cannot be stricter aligned than the reference's underlying allocation.
-    ///
-    /// ```
-    /// #![feature(pointer_is_aligned_to)]
-    /// #![feature(const_pointer_is_aligned)]
-    ///
-    /// // On some platforms, the alignment of i32 is less than 4.
-    /// #[repr(align(4))]
-    /// struct AlignedI32(i32);
-    ///
-    /// const _: () = {
-    ///     let mut data = AlignedI32(42);
-    ///     let ptr = &mut data as *mut AlignedI32;
-    ///
-    ///     assert!(ptr.is_aligned_to(1));
-    ///     assert!(ptr.is_aligned_to(2));
-    ///     assert!(ptr.is_aligned_to(4));
-    ///
-    ///     // At compiletime, we know for sure that the pointer isn't aligned to 8.
-    ///     assert!(!ptr.is_aligned_to(8));
-    ///     assert!(!ptr.wrapping_add(1).is_aligned_to(8));
-    /// };
-    /// ```
-    ///
-    /// Due to this behavior, it is possible that a runtime pointer derived from a compiletime
-    /// pointer is aligned, even if the compiletime pointer wasn't aligned.
-    ///
-    /// ```
-    /// #![feature(pointer_is_aligned_to)]
-    /// #![feature(const_pointer_is_aligned)]
-    ///
-    /// // On some platforms, the alignment of i32 is less than 4.
-    /// #[repr(align(4))]
-    /// struct AlignedI32(i32);
-    ///
-    /// // At compiletime, neither `COMPTIME_PTR` nor `COMPTIME_PTR + 1` is aligned.
-    /// // Also, note that mutable references are not allowed in the final value of constants.
-    /// const COMPTIME_PTR: *mut AlignedI32 = (&AlignedI32(42) as *const AlignedI32).cast_mut();
-    /// const _: () = assert!(!COMPTIME_PTR.is_aligned_to(8));
-    /// const _: () = assert!(!COMPTIME_PTR.wrapping_add(1).is_aligned_to(8));
-    ///
-    /// // At runtime, either `runtime_ptr` or `runtime_ptr + 1` is aligned.
-    /// let runtime_ptr = COMPTIME_PTR;
-    /// assert_ne!(
-    ///     runtime_ptr.is_aligned_to(8),
-    ///     runtime_ptr.wrapping_add(1).is_aligned_to(8),
-    /// );
-    /// ```
-    ///
-    /// If a pointer is created from a fixed address, this function behaves the same during
-    /// runtime and compiletime.
-    ///
-    /// ```
-    /// #![feature(pointer_is_aligned_to)]
-    /// #![feature(const_pointer_is_aligned)]
-    ///
-    /// const _: () = {
-    ///     let ptr = 40 as *mut u8;
-    ///     assert!(ptr.is_aligned_to(1));
-    ///     assert!(ptr.is_aligned_to(2));
-    ///     assert!(ptr.is_aligned_to(4));
-    ///     assert!(ptr.is_aligned_to(8));
-    ///     assert!(!ptr.is_aligned_to(16));
-    /// };
-    /// ```
-    ///
-    /// [tracking issue]: https://github.com/rust-lang/rust/issues/104203
     #[must_use]
     #[inline]
     #[unstable(feature = "pointer_is_aligned_to", issue = "96284")]
-    #[rustc_const_unstable(feature = "const_pointer_is_aligned", issue = "104203")]
-    pub const fn is_aligned_to(self, align: usize) -> bool {
+    pub fn is_aligned_to(self, align: usize) -> bool {
         if !align.is_power_of_two() {
             panic!("is_aligned_to: align is not a power-of-two");
         }
 
-        #[inline]
-        fn runtime_impl(ptr: *mut (), align: usize) -> bool {
-            ptr.addr() & (align - 1) == 0
-        }
-
-        #[inline]
-        #[rustc_const_unstable(feature = "const_pointer_is_aligned", issue = "104203")]
-        const fn const_impl(ptr: *mut (), align: usize) -> bool {
-            // We can't use the address of `self` in a `const fn`, so we use `align_offset` instead.
-            ptr.align_offset(align) == 0
-        }
-
-        // The cast to `()` is used to
-        // 1. deal with fat pointers; and
-        // 2. ensure that `align_offset` (in `const_impl`) doesn't actually try to compute an offset.
-        const_eval_select((self.cast::<()>(), align), const_impl, runtime_impl)
+        self.addr() & (align - 1) == 0
     }
 }
@@ -1192,8 +1192,7 @@ impl<T: ?Sized> NonNull<T> {
     #[inline]
     #[must_use]
     #[stable(feature = "non_null_convenience", since = "1.80.0")]
-    #[rustc_const_unstable(feature = "const_align_offset", issue = "90962")]
-    pub const fn align_offset(self, align: usize) -> usize
+    pub fn align_offset(self, align: usize) -> usize
     where
         T: Sized,
     {
@@ -1224,98 +1223,10 @@ impl<T: ?Sized> NonNull<T> {
/// assert!(ptr.is_aligned());
/// assert!(!NonNull::new(ptr.as_ptr().wrapping_byte_add(1)).unwrap().is_aligned());
/// ```
///
/// # At compiletime
/// **Note: Alignment at compiletime is experimental and subject to change. See the
/// [tracking issue] for details.**
///
/// At compiletime, the compiler may not know where a value will end up in memory.
/// Calling this function on a pointer created from a reference at compiletime will only
/// return `true` if the pointer is guaranteed to be aligned. This means that the pointer
/// is never aligned if cast to a type with a stricter alignment than the reference's
/// underlying allocation.
///
/// ```
/// #![feature(const_nonnull_new)]
/// #![feature(const_pointer_is_aligned)]
/// use std::ptr::NonNull;
///
/// // On some platforms, the alignment of primitives is less than their size.
/// #[repr(align(4))]
/// struct AlignedI32(i32);
/// #[repr(align(8))]
/// struct AlignedI64(i64);
///
/// const _: () = {
///     let data = [AlignedI32(42), AlignedI32(42)];
///     let ptr = NonNull::<AlignedI32>::new(&data[0] as *const _ as *mut _).unwrap();
///     assert!(ptr.is_aligned());
///
///     // At runtime either `ptr1` or `ptr2` would be aligned, but at compiletime neither is aligned.
///     let ptr1 = ptr.cast::<AlignedI64>();
///     let ptr2 = unsafe { ptr.add(1).cast::<AlignedI64>() };
///     assert!(!ptr1.is_aligned());
///     assert!(!ptr2.is_aligned());
/// };
/// ```
///
/// Due to this behavior, it is possible that a runtime pointer derived from a compiletime
/// pointer is aligned, even if the compiletime pointer wasn't aligned.
///
/// ```
/// #![feature(const_pointer_is_aligned)]
///
/// // On some platforms, the alignment of primitives is less than their size.
/// #[repr(align(4))]
/// struct AlignedI32(i32);
/// #[repr(align(8))]
/// struct AlignedI64(i64);
///
/// // At compiletime, neither `COMPTIME_PTR` nor `COMPTIME_PTR + 1` is aligned.
/// const COMPTIME_PTR: *const AlignedI32 = &AlignedI32(42);
/// const _: () = assert!(!COMPTIME_PTR.cast::<AlignedI64>().is_aligned());
/// const _: () = assert!(!COMPTIME_PTR.wrapping_add(1).cast::<AlignedI64>().is_aligned());
///
/// // At runtime, either `runtime_ptr` or `runtime_ptr + 1` is aligned.
/// let runtime_ptr = COMPTIME_PTR;
/// assert_ne!(
///     runtime_ptr.cast::<AlignedI64>().is_aligned(),
///     runtime_ptr.wrapping_add(1).cast::<AlignedI64>().is_aligned(),
/// );
/// ```
///
/// If a pointer is created from a fixed address, this function behaves the same during
/// runtime and compiletime.
///
/// ```
/// #![feature(const_pointer_is_aligned)]
/// #![feature(const_nonnull_new)]
/// use std::ptr::NonNull;
///
/// // On some platforms, the alignment of primitives is less than their size.
/// #[repr(align(4))]
/// struct AlignedI32(i32);
/// #[repr(align(8))]
/// struct AlignedI64(i64);
///
/// const _: () = {
///     let ptr = NonNull::new(40 as *mut AlignedI32).unwrap();
///     assert!(ptr.is_aligned());
///
///     // For pointers with a known address, runtime and compiletime behavior are identical.
///     let ptr1 = ptr.cast::<AlignedI64>();
///     let ptr2 = NonNull::new(ptr.as_ptr().wrapping_add(1)).unwrap().cast::<AlignedI64>();
///     assert!(ptr1.is_aligned());
///     assert!(!ptr2.is_aligned());
/// };
/// ```
///
/// [tracking issue]: https://github.com/rust-lang/rust/issues/104203
#[inline]
#[must_use]
#[stable(feature = "pointer_is_aligned", since = "1.79.0")]
#[rustc_const_unstable(feature = "const_pointer_is_aligned", issue = "104203")]
pub const fn is_aligned(self) -> bool
pub fn is_aligned(self) -> bool
where
T: Sized,
{

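What survives of `is_aligned` after this change is its runtime meaning only. A sketch of that behavior (illustrative; the `Aligned4` wrapper mirrors the doc examples, and `wrapping_byte_add` has been stable since 1.75):

    use std::ptr::NonNull;

    // Force a 4-byte alignment requirement regardless of platform.
    #[repr(align(4))]
    struct Aligned4(#[allow(dead_code)] i32);

    fn main() {
        let data = Aligned4(42);
        // A pointer derived from a reference is aligned for its type...
        let ptr = NonNull::from(&data);
        assert!(ptr.is_aligned());
        // ...while the address one byte past it cannot also be 4-aligned.
        let bumped = NonNull::new(ptr.as_ptr().wrapping_byte_add(1)).unwrap();
        assert!(!bumped.is_aligned());
    }
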
@@ -1352,85 +1263,10 @@ impl<T: ?Sized> NonNull<T> {
///
/// assert_ne!(ptr.is_aligned_to(8), ptr.wrapping_add(1).is_aligned_to(8));
/// ```
///
/// # At compiletime
/// **Note: Alignment at compiletime is experimental and subject to change. See the
/// [tracking issue] for details.**
///
/// At compiletime, the compiler may not know where a value will end up in memory.
/// Calling this function on a pointer created from a reference at compiletime will only
/// return `true` if the pointer is guaranteed to be aligned. This means that the pointer
/// cannot be aligned more strictly than the reference's underlying allocation.
///
/// ```
/// #![feature(pointer_is_aligned_to)]
/// #![feature(const_pointer_is_aligned)]
///
/// // On some platforms, the alignment of i32 is less than 4.
/// #[repr(align(4))]
/// struct AlignedI32(i32);
///
/// const _: () = {
///     let data = AlignedI32(42);
///     let ptr = &data as *const AlignedI32;
///
///     assert!(ptr.is_aligned_to(1));
///     assert!(ptr.is_aligned_to(2));
///     assert!(ptr.is_aligned_to(4));
///
///     // At compiletime, we know for sure that the pointer isn't aligned to 8.
///     assert!(!ptr.is_aligned_to(8));
///     assert!(!ptr.wrapping_add(1).is_aligned_to(8));
/// };
/// ```
///
/// Due to this behavior, it is possible that a runtime pointer derived from a compiletime
/// pointer is aligned, even if the compiletime pointer wasn't aligned.
///
/// ```
/// #![feature(pointer_is_aligned_to)]
/// #![feature(const_pointer_is_aligned)]
///
/// // On some platforms, the alignment of i32 is less than 4.
/// #[repr(align(4))]
/// struct AlignedI32(i32);
///
/// // At compiletime, neither `COMPTIME_PTR` nor `COMPTIME_PTR + 1` is aligned.
/// const COMPTIME_PTR: *const AlignedI32 = &AlignedI32(42);
/// const _: () = assert!(!COMPTIME_PTR.is_aligned_to(8));
/// const _: () = assert!(!COMPTIME_PTR.wrapping_add(1).is_aligned_to(8));
///
/// // At runtime, either `runtime_ptr` or `runtime_ptr + 1` is aligned.
/// let runtime_ptr = COMPTIME_PTR;
/// assert_ne!(
///     runtime_ptr.is_aligned_to(8),
///     runtime_ptr.wrapping_add(1).is_aligned_to(8),
/// );
/// ```
///
/// If a pointer is created from a fixed address, this function behaves the same during
/// runtime and compiletime.
///
/// ```
/// #![feature(pointer_is_aligned_to)]
/// #![feature(const_pointer_is_aligned)]
///
/// const _: () = {
///     let ptr = 40 as *const u8;
///     assert!(ptr.is_aligned_to(1));
///     assert!(ptr.is_aligned_to(2));
///     assert!(ptr.is_aligned_to(4));
///     assert!(ptr.is_aligned_to(8));
///     assert!(!ptr.is_aligned_to(16));
/// };
/// ```
///
/// [tracking issue]: https://github.com/rust-lang/rust/issues/104203
#[inline]
#[must_use]
#[unstable(feature = "pointer_is_aligned_to", issue = "96284")]
#[rustc_const_unstable(feature = "const_pointer_is_aligned", issue = "104203")]
pub const fn is_aligned_to(self, align: usize) -> bool {
pub fn is_aligned_to(self, align: usize) -> bool {
self.pointer.is_aligned_to(align)
}
}

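The new `NonNull::is_aligned_to` body is a one-line delegation to the raw-pointer method. A usage sketch on nightly (the method stays unstable behind `pointer_is_aligned_to`):

    #![feature(pointer_is_aligned_to)]
    use std::ptr::NonNull;

    fn main() {
        let x = 0u64;
        let ptr = NonNull::from(&x);
        // A reference to u64 is always aligned to align_of::<u64>().
        assert!(ptr.is_aligned_to(std::mem::align_of::<u64>()));
        // Non-power-of-two alignments panic, per the docs above:
        // ptr.is_aligned_to(3);
    }
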
@@ -3,6 +3,7 @@
use core::ascii::EscapeDefault;

use crate::fmt::{self, Write};
use crate::intrinsics::const_eval_select;
use crate::{ascii, iter, mem, ops};

#[cfg(not(test))]

@@ -346,89 +347,93 @@ pub const fn is_ascii_simple(mut bytes: &[u8]) -> bool {
/// If any of these loads produces something for which `contains_nonascii`
/// (above) returns true, then we know the answer is false.
#[inline]
#[rustc_allow_const_fn_unstable(const_raw_ptr_comparison, const_pointer_is_aligned)] // only in a debug assertion
#[rustc_allow_const_fn_unstable(const_align_offset)] // behavior does not change when `align_offset` fails
#[rustc_allow_const_fn_unstable(const_eval_select)] // fallback impl has same behavior
const fn is_ascii(s: &[u8]) -> bool {
const USIZE_SIZE: usize = mem::size_of::<usize>();
// The runtime version behaves the same as the compiletime version, it's
// just more optimized.
return const_eval_select((s,), compiletime, runtime);

let len = s.len();
let align_offset = s.as_ptr().align_offset(USIZE_SIZE);

// If we wouldn't gain anything from the word-at-a-time implementation, fall
// back to a scalar loop.
//
// We also do this for architectures where `size_of::<usize>()` isn't
// sufficient alignment for `usize`, because it's a weird edge case.
if len < USIZE_SIZE || len < align_offset || USIZE_SIZE < mem::align_of::<usize>() {
return is_ascii_simple(s);
const fn compiletime(s: &[u8]) -> bool {
is_ascii_simple(s)
}

// We always read the first word unaligned, which means `align_offset` is
// 0, we'd read the same value again for the aligned read.
let offset_to_aligned = if align_offset == 0 { USIZE_SIZE } else { align_offset };
#[inline]
fn runtime(s: &[u8]) -> bool {
const USIZE_SIZE: usize = mem::size_of::<usize>();

let start = s.as_ptr();
// SAFETY: We verify `len < USIZE_SIZE` above.
let first_word = unsafe { (start as *const usize).read_unaligned() };
let len = s.len();
let align_offset = s.as_ptr().align_offset(USIZE_SIZE);

if contains_nonascii(first_word) {
return false;
}
// We checked this above, somewhat implicitly. Note that `offset_to_aligned`
// is either `align_offset` or `USIZE_SIZE`, both of which are explicitly checked
// above.
debug_assert!(offset_to_aligned <= len);

// SAFETY: word_ptr is the (properly aligned) usize ptr we use to read the
// middle chunk of the slice.
let mut word_ptr = unsafe { start.add(offset_to_aligned) as *const usize };

// `byte_pos` is the byte index of `word_ptr`, used for loop end checks.
let mut byte_pos = offset_to_aligned;

// Paranoia check about alignment, since we're about to do a bunch of
// unaligned loads. In practice this should be impossible barring a bug in
// `align_offset` though.
// While this method is allowed to spuriously fail in CTFE, if it doesn't
// have alignment information it should have given a `usize::MAX` for
// `align_offset` earlier, sending things through the scalar path instead of
// this one, so this check should pass if it's reachable.
debug_assert!(word_ptr.is_aligned_to(mem::align_of::<usize>()));

// Read subsequent words until the last aligned word, excluding the last
// aligned word by itself to be done in tail check later, to ensure that
// the tail is always at most one `usize`, avoiding an extra `byte_pos == len` branch.
while byte_pos < len - USIZE_SIZE {
// Sanity check that the read is in bounds
debug_assert!(byte_pos + USIZE_SIZE <= len);
// And that our assumptions about `byte_pos` hold.
debug_assert!(matches!(
word_ptr.cast::<u8>().guaranteed_eq(start.wrapping_add(byte_pos)),
// These are from the same allocation, so will hopefully always be
// known to match even in CTFE, but if it refuses to compare them
// that's ok since it's just a debug check anyway.
None | Some(true),
));

// SAFETY: We know `word_ptr` is properly aligned (because of
// `align_offset`), and we know that we have enough bytes between `word_ptr` and the end
let word = unsafe { word_ptr.read() };
if contains_nonascii(word) {
return false;
// If we wouldn't gain anything from the word-at-a-time implementation, fall
// back to a scalar loop.
//
// We also do this for architectures where `size_of::<usize>()` isn't
// sufficient alignment for `usize`, because it's a weird edge case.
if len < USIZE_SIZE || len < align_offset || USIZE_SIZE < mem::align_of::<usize>() {
return is_ascii_simple(s);
}

byte_pos += USIZE_SIZE;
// SAFETY: We know that `byte_pos <= len - USIZE_SIZE`, which means that
// after this `add`, `word_ptr` will be at most one-past-the-end.
word_ptr = unsafe { word_ptr.add(1) };
// We always read the first word unaligned, which means `align_offset` is
// 0, we'd read the same value again for the aligned read.
let offset_to_aligned = if align_offset == 0 { USIZE_SIZE } else { align_offset };

let start = s.as_ptr();
// SAFETY: We verify `len < USIZE_SIZE` above.
let first_word = unsafe { (start as *const usize).read_unaligned() };

if contains_nonascii(first_word) {
return false;
}
// We checked this above, somewhat implicitly. Note that `offset_to_aligned`
// is either `align_offset` or `USIZE_SIZE`, both of which are explicitly checked
// above.
debug_assert!(offset_to_aligned <= len);

// SAFETY: word_ptr is the (properly aligned) usize ptr we use to read the
// middle chunk of the slice.
let mut word_ptr = unsafe { start.add(offset_to_aligned) as *const usize };

// `byte_pos` is the byte index of `word_ptr`, used for loop end checks.
let mut byte_pos = offset_to_aligned;

// Paranoia check about alignment, since we're about to do a bunch of
// unaligned loads. In practice this should be impossible barring a bug in
// `align_offset` though.
// While this method is allowed to spuriously fail in CTFE, if it doesn't
// have alignment information it should have given a `usize::MAX` for
// `align_offset` earlier, sending things through the scalar path instead of
// this one, so this check should pass if it's reachable.
debug_assert!(word_ptr.is_aligned_to(mem::align_of::<usize>()));

// Read subsequent words until the last aligned word, excluding the last
// aligned word by itself to be done in tail check later, to ensure that
// the tail is always at most one `usize`, avoiding an extra `byte_pos == len` branch.
while byte_pos < len - USIZE_SIZE {
// Sanity check that the read is in bounds
debug_assert!(byte_pos + USIZE_SIZE <= len);
// And that our assumptions about `byte_pos` hold.
debug_assert!(word_ptr.cast::<u8>() == start.wrapping_add(byte_pos));

// SAFETY: We know `word_ptr` is properly aligned (because of
// `align_offset`), and we know that we have enough bytes between `word_ptr` and the end
let word = unsafe { word_ptr.read() };
if contains_nonascii(word) {
return false;
}

byte_pos += USIZE_SIZE;
// SAFETY: We know that `byte_pos <= len - USIZE_SIZE`, which means that
// after this `add`, `word_ptr` will be at most one-past-the-end.
word_ptr = unsafe { word_ptr.add(1) };
}

// Sanity check to ensure there really is only one `usize` left. This should
// be guaranteed by our loop condition.
debug_assert!(byte_pos <= len && len - byte_pos <= USIZE_SIZE);

// SAFETY: This relies on `len >= USIZE_SIZE`, which we check at the start.
let last_word = unsafe { (start.add(len - USIZE_SIZE) as *const usize).read_unaligned() };

!contains_nonascii(last_word)
}

// Sanity check to ensure there really is only one `usize` left. This should
// be guaranteed by our loop condition.
debug_assert!(byte_pos <= len && len - byte_pos <= USIZE_SIZE);

// SAFETY: This relies on `len >= USIZE_SIZE`, which we check at the start.
let last_word = unsafe { (start.add(len - USIZE_SIZE) as *const usize).read_unaligned() };

!contains_nonascii(last_word)
}

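All of the word-at-a-time logic above reduces to one bit test: a word contains a non-ASCII byte iff any of its bytes has the top bit set. A self-contained sketch of `contains_nonascii` (the mask is built here with `from_ne_bytes`; core itself uses an internal `repeat_u8` helper):

    fn contains_nonascii(word: usize) -> bool {
        // 0x80 repeated across every byte of the word.
        const MASK: usize = usize::from_ne_bytes([0x80; std::mem::size_of::<usize>()]);
        word & MASK != 0
    }

    fn main() {
        const SZ: usize = std::mem::size_of::<usize>();
        let ascii = usize::from_ne_bytes([b'a'; SZ]);
        let non_ascii = usize::from_ne_bytes([0xC3; SZ]);
        assert!(!contains_nonascii(ascii));
        assert!(contains_nonascii(non_ascii));
    }
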
@@ -1,6 +1,7 @@
// Original implementation taken from rust-memchr.
// Copyright 2015 Andrew Gallant, bluss and Nicolas Koch

use crate::intrinsics::const_eval_select;
use crate::mem;

const LO_USIZE: usize = usize::repeat_u8(0x01);

@@ -50,58 +51,66 @@ const fn memchr_naive(x: u8, text: &[u8]) -> Option<usize> {
None
}

#[rustc_allow_const_fn_unstable(const_cmp)]
#[rustc_allow_const_fn_unstable(const_align_offset)]
#[rustc_allow_const_fn_unstable(const_eval_select)] // fallback impl has same behavior
#[cfg_attr(bootstrap, rustc_const_stable(feature = "const_memchr", since = "1.65.0"))]
const fn memchr_aligned(x: u8, text: &[u8]) -> Option<usize> {
// Scan for a single byte value by reading two `usize` words at a time.
//
// Split `text` in three parts
// - unaligned initial part, before the first word aligned address in text
// - body, scan by 2 words at a time
// - the last remaining part, < 2 word size
// The runtime version behaves the same as the compiletime version, it's
// just more optimized.
return const_eval_select((x, text), compiletime, runtime);

// search up to an aligned boundary
let len = text.len();
let ptr = text.as_ptr();
let mut offset = ptr.align_offset(USIZE_BYTES);

if offset > 0 {
// FIXME(const-hack, fee1-dead): replace with min
offset = if offset < len { offset } else { len };
// FIXME(const-hack, fee1-dead): replace with range slicing
// SAFETY: offset is within bounds
let slice = unsafe { super::from_raw_parts(text.as_ptr(), offset) };
if let Some(index) = memchr_naive(x, slice) {
return Some(index);
}
const fn compiletime(x: u8, text: &[u8]) -> Option<usize> {
memchr_naive(x, text)
}

// search the body of the text
let repeated_x = usize::repeat_u8(x);
while offset <= len - 2 * USIZE_BYTES {
// SAFETY: the while's predicate guarantees a distance of at least 2 * usize_bytes
// between the offset and the end of the slice.
unsafe {
let u = *(ptr.add(offset) as *const usize);
let v = *(ptr.add(offset + USIZE_BYTES) as *const usize);
#[inline]
fn runtime(x: u8, text: &[u8]) -> Option<usize> {
// Scan for a single byte value by reading two `usize` words at a time.
//
// Split `text` in three parts
// - unaligned initial part, before the first word aligned address in text
// - body, scan by 2 words at a time
// - the last remaining part, < 2 word size

// break if there is a matching byte
let zu = contains_zero_byte(u ^ repeated_x);
let zv = contains_zero_byte(v ^ repeated_x);
if zu || zv {
break;
// search up to an aligned boundary
let len = text.len();
let ptr = text.as_ptr();
let mut offset = ptr.align_offset(USIZE_BYTES);

if offset > 0 {
offset = offset.min(len);
let slice = &text[..offset];
if let Some(index) = memchr_naive(x, slice) {
return Some(index);
}
}
offset += USIZE_BYTES * 2;
}

// Find the byte after the point the body loop stopped.
// FIXME(const-hack): Use `?` instead.
// FIXME(const-hack, fee1-dead): use range slicing
// SAFETY: offset is within bounds
let slice = unsafe { super::from_raw_parts(text.as_ptr().add(offset), text.len() - offset) };
if let Some(i) = memchr_naive(x, slice) { Some(offset + i) } else { None }
// search the body of the text
let repeated_x = usize::repeat_u8(x);
while offset <= len - 2 * USIZE_BYTES {
// SAFETY: the while's predicate guarantees a distance of at least 2 * usize_bytes
// between the offset and the end of the slice.
unsafe {
let u = *(ptr.add(offset) as *const usize);
let v = *(ptr.add(offset + USIZE_BYTES) as *const usize);

// break if there is a matching byte
let zu = contains_zero_byte(u ^ repeated_x);
let zv = contains_zero_byte(v ^ repeated_x);
if zu || zv {
break;
}
}
offset += USIZE_BYTES * 2;
}

// Find the byte after the point the body loop stopped.
// FIXME(const-hack): Use `?` instead.
// FIXME(const-hack, fee1-dead): use range slicing
let slice =
// SAFETY: offset is within bounds
unsafe { super::from_raw_parts(text.as_ptr().add(offset), text.len() - offset) };
if let Some(i) = memchr_naive(x, slice) { Some(offset + i) } else { None }
}
}

/// Returns the last index matching the byte `x` in `text`.

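The body scan above hinges on `contains_zero_byte`, the classic SWAR zero-byte test: XOR the word with the repeated needle byte, then check `(x - LO) & !x & HI != 0`. A standalone sketch of that helper (constants built with `from_ne_bytes` rather than core's internal `repeat_u8`):

    /// True iff `x` contains a zero byte (the "Bit Twiddling Hacks" haszero test).
    fn contains_zero_byte(x: usize) -> bool {
        const LO: usize = usize::from_ne_bytes([0x01; std::mem::size_of::<usize>()]);
        const HI: usize = usize::from_ne_bytes([0x80; std::mem::size_of::<usize>()]);
        x.wrapping_sub(LO) & !x & HI != 0
    }

    fn main() {
        const SZ: usize = std::mem::size_of::<usize>();
        let hay = usize::from_ne_bytes([b'a'; SZ]);
        let found = usize::from_ne_bytes([b'a'; SZ]);
        let missing = usize::from_ne_bytes([b'q'; SZ]);
        // XOR with the repeated needle zeroes exactly the matching bytes.
        assert!(contains_zero_byte(hay ^ found));
        assert!(!contains_zero_byte(hay ^ missing));
    }
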
@@ -82,7 +82,6 @@ use crate::{mem, ptr};
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_str_from_utf8_shared", since = "1.63.0")]
#[rustc_allow_const_fn_unstable(str_internals)]
#[rustc_diagnostic_item = "str_from_utf8"]
pub const fn from_utf8(v: &[u8]) -> Result<&str, Utf8Error> {
// FIXME(const-hack): This should use `?` again, once it's `const`

@@ -1,6 +1,7 @@
//! Operations related to UTF-8 validation.

use super::Utf8Error;
use crate::intrinsics::const_eval_select;
use crate::mem;

/// Returns the initial codepoint accumulator for the first byte.

@@ -122,15 +123,28 @@ const fn contains_nonascii(x: usize) -> bool {
/// Walks through `v` checking that it's a valid UTF-8 sequence,
/// returning `Ok(())` in that case, or, if it is invalid, `Err(err)`.
#[inline(always)]
#[rustc_const_unstable(feature = "str_internals", issue = "none")]
#[rustc_allow_const_fn_unstable(const_eval_select)] // fallback impl has same behavior
pub(super) const fn run_utf8_validation(v: &[u8]) -> Result<(), Utf8Error> {
let mut index = 0;
let len = v.len();

let usize_bytes = mem::size_of::<usize>();
let ascii_block_size = 2 * usize_bytes;
const USIZE_BYTES: usize = mem::size_of::<usize>();

let ascii_block_size = 2 * USIZE_BYTES;
let blocks_end = if len >= ascii_block_size { len - ascii_block_size + 1 } else { 0 };
let align = v.as_ptr().align_offset(usize_bytes);
let align = {
const fn compiletime(_v: &[u8]) -> usize {
usize::MAX
}

fn runtime(v: &[u8]) -> usize {
v.as_ptr().align_offset(USIZE_BYTES)
}

// Below, we safely fall back to a slower codepath if the offset is `usize::MAX`,
// so the end-to-end behavior is the same at compiletime and runtime.
const_eval_select((v,), compiletime, runtime)
};

while index < len {
let old_offset = index;

@@ -209,11 +223,11 @@ pub(super) const fn run_utf8_validation(v: &[u8]) -> Result<(), Utf8Error> {
// Ascii case, try to skip forward quickly.
// When the pointer is aligned, read 2 words of data per iteration
// until we find a word containing a non-ascii byte.
if align != usize::MAX && align.wrapping_sub(index) % usize_bytes == 0 {
if align != usize::MAX && align.wrapping_sub(index) % USIZE_BYTES == 0 {
let ptr = v.as_ptr();
while index < blocks_end {
// SAFETY: since `align - index` and `ascii_block_size` are
// multiples of `usize_bytes`, `block = ptr.add(index)` is
// multiples of `USIZE_BYTES`, `block = ptr.add(index)` is
// always aligned with a `usize` so it's safe to dereference
// both `block` and `block.add(1)`.
unsafe {

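The retained pattern here is that `align_offset` information is simply unavailable during const evaluation, so the compile-time branch reports `usize::MAX` and every iteration takes the byte-wise path. A sketch of how a caller treats that sentinel (illustrative; `align_offset` is documented as allowed to fail this way):

    fn word_scan_start(v: &[u8]) -> Option<usize> {
        let off = v.as_ptr().align_offset(std::mem::align_of::<usize>());
        // `usize::MAX` means "no alignment information": skip the fast path.
        if off == usize::MAX { None } else { Some(off) }
    }

    fn main() {
        let buf = [0u8; 32];
        // In practice this is Some(k) at runtime, with k < align_of::<usize>().
        assert!(word_scan_start(&buf[1..]).is_some());
    }
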
@@ -120,7 +120,19 @@ pub(crate) const fn check_language_ub() -> bool {
#[inline]
#[rustc_const_unstable(feature = "const_ub_checks", issue = "none")]
pub(crate) const fn is_aligned_and_not_null(ptr: *const (), align: usize, is_zst: bool) -> bool {
ptr.is_aligned_to(align) && (is_zst || !ptr.is_null())
#[inline]
fn runtime(ptr: *const (), align: usize, is_zst: bool) -> bool {
ptr.is_aligned_to(align) && (is_zst || !ptr.is_null())
}

#[inline]
#[rustc_const_unstable(feature = "const_ub_checks", issue = "none")]
const fn comptime(ptr: *const (), _align: usize, is_zst: bool) -> bool {
is_zst || !ptr.is_null()
}

// This is just for safety checks so we can const_eval_select.
const_eval_select((ptr, align, is_zst), comptime, runtime)
}

#[inline]

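`const_eval_select` dispatches between two bodies depending on whether the call happens during const evaluation or at run time; the precondition check above needs it because pointer addresses are not observable in CTFE. A minimal nightly-only sketch of the intrinsic's shape (feature names as of this commit; exact signatures may change, so treat this purely as illustration):

    #![feature(core_intrinsics, const_eval_select)]
    #![allow(internal_features)]
    use std::intrinsics::const_eval_select;

    const fn where_am_i() -> &'static str {
        const fn comptime() -> &'static str { "const eval" }
        fn runtime() -> &'static str { "runtime" }
        // First argument is the (here empty) tuple of arguments forwarded
        // to whichever body is selected.
        const_eval_select((), comptime, runtime)
    }

    const IN_CONST: &str = where_am_i();

    fn main() {
        assert_eq!(IN_CONST, "const eval");
        assert_eq!(where_am_i(), "runtime");
    }
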
@@ -16,14 +16,12 @@
#![feature(cell_update)]
#![feature(clone_to_uninit)]
#![feature(const_align_of_val_raw)]
#![feature(const_align_offset)]
#![feature(const_black_box)]
#![feature(const_eval_select)]
#![feature(const_heap)]
#![feature(const_nonnull_new)]
#![feature(const_option_ext)]
#![feature(const_pin_2)]
#![feature(const_pointer_is_aligned)]
#![feature(const_three_way_compare)]
#![feature(const_trait_impl)]
#![feature(core_intrinsics)]

@@ -359,22 +359,6 @@ fn align_offset_zst() {
}
}

#[test]
fn align_offset_zst_const() {
const {
// For pointers of stride = 0, the pointer is already aligned or it cannot be aligned at
// all, because no amount of elements will align the pointer.
let mut p = 1;
while p < 1024 {
assert!(ptr::without_provenance::<()>(p).align_offset(p) == 0);
if p != 1 {
assert!(ptr::without_provenance::<()>(p + 1).align_offset(p) == !0);
}
p = (p + 1).next_power_of_two();
}
}
}

#[test]
fn align_offset_stride_one() {
// For pointers of stride = 1, the pointer can always be aligned. The offset is equal to

@@ -396,25 +380,6 @@ fn align_offset_stride_one() {
}
}

#[test]
fn align_offset_stride_one_const() {
const {
// For pointers of stride = 1, the pointer can always be aligned. The offset is equal to
// number of bytes.
let mut align = 1;
while align < 1024 {
let mut ptr = 1;
while ptr < 2 * align {
let expected = ptr % align;
let offset = if expected == 0 { 0 } else { align - expected };
assert!(ptr::without_provenance::<u8>(ptr).align_offset(align) == offset);
ptr += 1;
}
align = (align + 1).next_power_of_two();
}
}
}

#[test]
fn align_offset_various_strides() {
unsafe fn test_stride<T>(ptr: *const T, align: usize) -> bool {

@@ -495,192 +460,6 @@ fn align_offset_various_strides() {
assert!(!x);
}

#[test]
fn align_offset_various_strides_const() {
const unsafe fn test_stride<T>(ptr: *const T, numptr: usize, align: usize) {
let mut expected = usize::MAX;
// Naive but definitely correct way to find the *first* aligned element of stride::<T>.
let mut el = 0;
while el < align {
if (numptr + el * ::std::mem::size_of::<T>()) % align == 0 {
expected = el;
break;
}
el += 1;
}
let got = ptr.align_offset(align);
assert!(got == expected);
}

const {
// For pointers of stride != 1, we verify the algorithm against the naivest possible
// implementation
let mut align = 1;
let limit = 32;
while align < limit {
let mut ptr = 1;
while ptr < 4 * align {
unsafe {
#[repr(packed)]
struct A3(#[allow(dead_code)] u16, #[allow(dead_code)] u8);
test_stride::<A3>(ptr::without_provenance::<A3>(ptr), ptr, align);

struct A4(#[allow(dead_code)] u32);
test_stride::<A4>(ptr::without_provenance::<A4>(ptr), ptr, align);

#[repr(packed)]
struct A5(#[allow(dead_code)] u32, #[allow(dead_code)] u8);
test_stride::<A5>(ptr::without_provenance::<A5>(ptr), ptr, align);

#[repr(packed)]
struct A6(#[allow(dead_code)] u32, #[allow(dead_code)] u16);
test_stride::<A6>(ptr::without_provenance::<A6>(ptr), ptr, align);

#[repr(packed)]
struct A7(
#[allow(dead_code)] u32,
#[allow(dead_code)] u16,
#[allow(dead_code)] u8,
);
test_stride::<A7>(ptr::without_provenance::<A7>(ptr), ptr, align);

#[repr(packed)]
struct A8(#[allow(dead_code)] u32, #[allow(dead_code)] u32);
test_stride::<A8>(ptr::without_provenance::<A8>(ptr), ptr, align);

#[repr(packed)]
struct A9(
#[allow(dead_code)] u32,
#[allow(dead_code)] u32,
#[allow(dead_code)] u8,
);
test_stride::<A9>(ptr::without_provenance::<A9>(ptr), ptr, align);

#[repr(packed)]
struct A10(
#[allow(dead_code)] u32,
#[allow(dead_code)] u32,
#[allow(dead_code)] u16,
);
test_stride::<A10>(ptr::without_provenance::<A10>(ptr), ptr, align);

test_stride::<u32>(ptr::without_provenance::<u32>(ptr), ptr, align);
test_stride::<u128>(ptr::without_provenance::<u128>(ptr), ptr, align);
}
ptr += 1;
}
align = (align + 1).next_power_of_two();
}
}
}

#[test]
fn align_offset_with_provenance_const() {
const {
// On some platforms (e.g. msp430-none-elf), the alignment of `i32` is less than 4.
#[repr(align(4))]
struct AlignedI32(i32);

let data = AlignedI32(42);

// `stride % align == 0` (usual case)

let ptr: *const i32 = &data.0;
assert!(ptr.align_offset(1) == 0);
assert!(ptr.align_offset(2) == 0);
assert!(ptr.align_offset(4) == 0);
assert!(ptr.align_offset(8) == usize::MAX);
assert!(ptr.wrapping_byte_add(1).align_offset(1) == 0);
assert!(ptr.wrapping_byte_add(1).align_offset(2) == usize::MAX);
assert!(ptr.wrapping_byte_add(2).align_offset(1) == 0);
assert!(ptr.wrapping_byte_add(2).align_offset(2) == 0);
assert!(ptr.wrapping_byte_add(2).align_offset(4) == usize::MAX);
assert!(ptr.wrapping_byte_add(3).align_offset(1) == 0);
assert!(ptr.wrapping_byte_add(3).align_offset(2) == usize::MAX);

assert!(ptr.wrapping_add(42).align_offset(4) == 0);
assert!(ptr.wrapping_add(42).align_offset(8) == usize::MAX);

let ptr1: *const i8 = ptr.cast();
assert!(ptr1.align_offset(1) == 0);
assert!(ptr1.align_offset(2) == 0);
assert!(ptr1.align_offset(4) == 0);
assert!(ptr1.align_offset(8) == usize::MAX);
assert!(ptr1.wrapping_byte_add(1).align_offset(1) == 0);
assert!(ptr1.wrapping_byte_add(1).align_offset(2) == 1);
assert!(ptr1.wrapping_byte_add(1).align_offset(4) == 3);
assert!(ptr1.wrapping_byte_add(1).align_offset(8) == usize::MAX);
assert!(ptr1.wrapping_byte_add(2).align_offset(1) == 0);
assert!(ptr1.wrapping_byte_add(2).align_offset(2) == 0);
assert!(ptr1.wrapping_byte_add(2).align_offset(4) == 2);
assert!(ptr1.wrapping_byte_add(2).align_offset(8) == usize::MAX);
assert!(ptr1.wrapping_byte_add(3).align_offset(1) == 0);
assert!(ptr1.wrapping_byte_add(3).align_offset(2) == 1);
assert!(ptr1.wrapping_byte_add(3).align_offset(4) == 1);
assert!(ptr1.wrapping_byte_add(3).align_offset(8) == usize::MAX);

let ptr2: *const i16 = ptr.cast();
assert!(ptr2.align_offset(1) == 0);
assert!(ptr2.align_offset(2) == 0);
assert!(ptr2.align_offset(4) == 0);
assert!(ptr2.align_offset(8) == usize::MAX);
assert!(ptr2.wrapping_byte_add(1).align_offset(1) == 0);
assert!(ptr2.wrapping_byte_add(1).align_offset(2) == usize::MAX);
assert!(ptr2.wrapping_byte_add(2).align_offset(1) == 0);
assert!(ptr2.wrapping_byte_add(2).align_offset(2) == 0);
assert!(ptr2.wrapping_byte_add(2).align_offset(4) == 1);
assert!(ptr2.wrapping_byte_add(2).align_offset(8) == usize::MAX);
assert!(ptr2.wrapping_byte_add(3).align_offset(1) == 0);
assert!(ptr2.wrapping_byte_add(3).align_offset(2) == usize::MAX);

let ptr3: *const i64 = ptr.cast();
assert!(ptr3.align_offset(1) == 0);
assert!(ptr3.align_offset(2) == 0);
assert!(ptr3.align_offset(4) == 0);
assert!(ptr3.align_offset(8) == usize::MAX);
assert!(ptr3.wrapping_byte_add(1).align_offset(1) == 0);
assert!(ptr3.wrapping_byte_add(1).align_offset(2) == usize::MAX);

// `stride % align != 0` (edge case)

let ptr4: *const [u8; 3] = ptr.cast();
assert!(ptr4.align_offset(1) == 0);
assert!(ptr4.align_offset(2) == 0);
assert!(ptr4.align_offset(4) == 0);
assert!(ptr4.align_offset(8) == usize::MAX);
assert!(ptr4.wrapping_byte_add(1).align_offset(1) == 0);
assert!(ptr4.wrapping_byte_add(1).align_offset(2) == 1);
assert!(ptr4.wrapping_byte_add(1).align_offset(4) == 1);
assert!(ptr4.wrapping_byte_add(1).align_offset(8) == usize::MAX);
assert!(ptr4.wrapping_byte_add(2).align_offset(1) == 0);
assert!(ptr4.wrapping_byte_add(2).align_offset(2) == 0);
assert!(ptr4.wrapping_byte_add(2).align_offset(4) == 2);
assert!(ptr4.wrapping_byte_add(2).align_offset(8) == usize::MAX);
assert!(ptr4.wrapping_byte_add(3).align_offset(1) == 0);
assert!(ptr4.wrapping_byte_add(3).align_offset(2) == 1);
assert!(ptr4.wrapping_byte_add(3).align_offset(4) == 3);
assert!(ptr4.wrapping_byte_add(3).align_offset(8) == usize::MAX);

let ptr5: *const [u8; 5] = ptr.cast();
assert!(ptr5.align_offset(1) == 0);
assert!(ptr5.align_offset(2) == 0);
assert!(ptr5.align_offset(4) == 0);
assert!(ptr5.align_offset(8) == usize::MAX);
assert!(ptr5.wrapping_byte_add(1).align_offset(1) == 0);
assert!(ptr5.wrapping_byte_add(1).align_offset(2) == 1);
assert!(ptr5.wrapping_byte_add(1).align_offset(4) == 3);
assert!(ptr5.wrapping_byte_add(1).align_offset(8) == usize::MAX);
assert!(ptr5.wrapping_byte_add(2).align_offset(1) == 0);
assert!(ptr5.wrapping_byte_add(2).align_offset(2) == 0);
assert!(ptr5.wrapping_byte_add(2).align_offset(4) == 2);
assert!(ptr5.wrapping_byte_add(2).align_offset(8) == usize::MAX);
assert!(ptr5.wrapping_byte_add(3).align_offset(1) == 0);
assert!(ptr5.wrapping_byte_add(3).align_offset(2) == 1);
assert!(ptr5.wrapping_byte_add(3).align_offset(4) == 1);
assert!(ptr5.wrapping_byte_add(3).align_offset(8) == usize::MAX);
}
}

#[test]
fn align_offset_issue_103361() {
#[cfg(target_pointer_width = "64")]

@@ -693,23 +472,6 @@ fn align_offset_issue_103361() {
let _ = ptr::without_provenance::<HugeSize>(SIZE).align_offset(SIZE);
}

#[test]
fn align_offset_issue_103361_const() {
#[cfg(target_pointer_width = "64")]
const SIZE: usize = 1 << 47;
#[cfg(target_pointer_width = "32")]
const SIZE: usize = 1 << 30;
#[cfg(target_pointer_width = "16")]
const SIZE: usize = 1 << 13;
struct HugeSize(#[allow(dead_code)] [u8; SIZE - 1]);

const {
assert!(ptr::without_provenance::<HugeSize>(SIZE - 1).align_offset(SIZE) == SIZE - 1);
assert!(ptr::without_provenance::<HugeSize>(SIZE).align_offset(SIZE) == 0);
assert!(ptr::without_provenance::<HugeSize>(SIZE + 1).align_offset(SIZE) == 1);
}
}

#[test]
fn is_aligned() {
let data = 42;

@@ -726,25 +488,6 @@ fn is_aligned() {
assert_ne!(ptr.is_aligned_to(8), ptr.wrapping_add(1).is_aligned_to(8));
}

#[test]
fn is_aligned_const() {
const {
let data = 42;
let ptr: *const i32 = &data;
assert!(ptr.is_aligned());
assert!(ptr.is_aligned_to(1));
assert!(ptr.is_aligned_to(2));
assert!(ptr.is_aligned_to(4));
assert!(ptr.wrapping_byte_add(2).is_aligned_to(1));
assert!(ptr.wrapping_byte_add(2).is_aligned_to(2));
assert!(!ptr.wrapping_byte_add(2).is_aligned_to(4));

// At comptime neither `ptr` nor `ptr+1` is aligned to 8.
assert!(!ptr.is_aligned_to(8));
assert!(!ptr.wrapping_add(1).is_aligned_to(8));
}
}

#[test]
fn offset_from() {
let mut a = [0; 5];

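The deleted const blocks all have runtime counterparts immediately above them in this file. For instance, the stride-zero property from `align_offset_zst_const` can still be exercised in a plain runtime check (sketch; `ptr::without_provenance` has been stable since Rust 1.84):

    use std::ptr;

    fn main() {
        // For a ZST the stride is 0: the pointer is either already aligned
        // (offset 0) or can never be aligned (usize::MAX, i.e. !0).
        assert_eq!(ptr::without_provenance::<()>(8).align_offset(8), 0);
        assert_eq!(ptr::without_provenance::<()>(9).align_offset(8), usize::MAX);
    }
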
@@ -1,7 +0,0 @@
{
"arch": "x86_64",
"is-builtin": true,
"data-layout": "e-m:e-i64:64-f80:128-n8:16:32:64-S128",
"llvm-target": "x86_64-unknown-unknown-gnu",
"target-pointer-width": "64"
}

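The fixture above is deleted because user-supplied target specs may no longer claim `is-builtin`, as the run-make assertion below checks. A spec like it stays valid once that key is dropped (sketch, reusing only the fields shown above):

    {
        "arch": "x86_64",
        "data-layout": "e-m:e-i64:64-f80:128-n8:16:32:64-S128",
        "llvm-target": "x86_64-unknown-unknown-gnu",
        "target-pointer-width": "64"
    }
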
@@ -52,11 +52,6 @@ fn main() {
.expected_file("test-platform.json")
.actual_text("test-platform-2", test_platform_2)
.run();
rustc()
.input("foo.rs")
.target("definitely-not-builtin-target")
.run_fail()
.assert_stderr_contains("may not set is_builtin");
rustc()
.input("foo.rs")
.target("endianness-mismatch")
