Auto merge of #138155 - matthiaskrgr:rollup-xq5buio, r=matthiaskrgr

Rollup of 6 pull requests

Successful merges:

 - #137674 (Enable `f16` for LoongArch)
 - #138034 (library: Use `size_of` from the prelude instead of imported)
 - #138060 (Revert #138019 after further discussion about how hir-pretty printing should work)
 - #138073 (Break critical edges in inline asm before code generation)
 - #138107 (`librustdoc`: clippy fixes)
 - #138111 (Use `default_field_values` for `rustc_errors::Context`, `rustc_session::config::NextSolverConfig` and `rustc_session::config::ErrorOutputType`)

r? `@ghost`
`@rustbot` modify labels: rollup
bors 2025-03-07 13:47:27 +00:00
commit 03eb454523
145 changed files with 579 additions and 691 deletions

View File

@ -14,6 +14,7 @@
#![feature(associated_type_defaults)]
#![feature(box_into_inner)]
#![feature(box_patterns)]
#![feature(default_field_values)]
#![feature(error_reporter)]
#![feature(if_let_guard)]
#![feature(let_chains)]

View File

@ -40,11 +40,13 @@ type ParseResult<'a> = Option<Parsed<'a>>;
/// Parsing context
#[derive(Clone, Copy, Debug, PartialEq)]
// The default values are the most common setting for non-top-level parsing: not top block, not at
// line start (yes leading whitespace, not escaped).
struct Context {
/// If true, we are at the topmost level (not recursing into a nested tt)
top_block: bool,
top_block: bool = false,
/// Previous character
prev: Prev,
prev: Prev = Prev::Whitespace,
}
/// Character class preceding this one
@ -57,14 +59,6 @@ enum Prev {
Any,
}
impl Default for Context {
/// Most common setting for non top-level parsing: not top block, not at
/// line start (yes leading whitespace, not escaped)
fn default() -> Self {
Self { top_block: false, prev: Prev::Whitespace }
}
}
/// Flags to simple parser function
#[derive(Clone, Copy, Debug, PartialEq)]
enum ParseOpt {
@ -248,7 +242,7 @@ fn parse_heading(buf: &[u8]) -> ParseResult<'_> {
}
let (txt, rest) = parse_to_newline(&buf[1..]);
let ctx = Context { top_block: false, prev: Prev::Whitespace };
let ctx = Context { .. };
let stream = parse_recursive(txt, ctx);
Some((MdTree::Heading(level.try_into().unwrap(), stream), rest))
@ -257,7 +251,7 @@ fn parse_heading(buf: &[u8]) -> ParseResult<'_> {
/// Bulleted list
fn parse_unordered_li(buf: &[u8]) -> Parsed<'_> {
let (txt, rest) = get_indented_section(&buf[2..]);
let ctx = Context { top_block: false, prev: Prev::Whitespace };
let ctx = Context { .. };
let stream = parse_recursive(trim_ascii_start(txt), ctx);
(MdTree::UnorderedListItem(stream), rest)
}
@ -266,7 +260,7 @@ fn parse_unordered_li(buf: &[u8]) -> Parsed<'_> {
fn parse_ordered_li(buf: &[u8]) -> Parsed<'_> {
let (num, pos) = ord_list_start(buf).unwrap(); // success tested in caller
let (txt, rest) = get_indented_section(&buf[pos..]);
let ctx = Context { top_block: false, prev: Prev::Whitespace };
let ctx = Context { .. };
let stream = parse_recursive(trim_ascii_start(txt), ctx);
(MdTree::OrderedListItem(num, stream), rest)
}
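The hunks above lean on the unstable `default_field_values` feature gated in at the top of the crate. A minimal sketch of the syntax (nightly-only; `Ctx` and its fields are illustrative, not the rustc types):

```rust
#![feature(default_field_values)] // nightly feature gate, as enabled in this diff

#[derive(Debug, PartialEq)]
struct Ctx {
    top_block: bool = false, // declared default
    escaped: bool = false,
}

fn main() {
    // `Ctx { .. }` fills every field from its declared default, which is
    // what lets the hand-written `Default` impl be deleted above.
    let ctx = Ctx { .. };
    assert_eq!(ctx, Ctx { top_block: false, escaped: false });

    // Individual fields can still be overridden at the construction site.
    let top = Ctx { top_block: true, .. };
    assert!(top.top_block);
}
```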

View File

@ -117,80 +117,6 @@ impl<'a> State<'a> {
));
self.hardbreak()
}
hir::Attribute::Parsed(AttributeKind::Deprecation { deprecation, .. }) => {
self.word("#[deprecated");
// There are three possible forms here:
// 1. a form with explicit components like
// `#[deprecated(since = "1.2.3", note = "some note", suggestion = "something")]`
// where each component may be present or absent.
// 2. `#[deprecated = "message"]`
// 3. `#[deprecated]`
//
// Let's figure out which we need.
// If there's a `since` or `suggestion` value, we're definitely in form 1.
if matches!(
deprecation.since,
rustc_attr_parsing::DeprecatedSince::RustcVersion(..)
| rustc_attr_parsing::DeprecatedSince::Future
| rustc_attr_parsing::DeprecatedSince::NonStandard(..)
) || deprecation.suggestion.is_some()
{
self.word("(");
let mut use_comma = false;
match &deprecation.since {
rustc_attr_parsing::DeprecatedSince::RustcVersion(rustc_version) => {
self.word("since = \"");
self.word(format!(
"{}.{}.{}",
rustc_version.major, rustc_version.minor, rustc_version.patch
));
self.word("\"");
use_comma = true;
}
rustc_attr_parsing::DeprecatedSince::Future => {
self.word("since = \"future\"");
use_comma = true;
}
rustc_attr_parsing::DeprecatedSince::NonStandard(symbol) => {
self.word("since = \"");
self.word(symbol.to_ident_string());
self.word("\"");
use_comma = true;
}
_ => {}
}
if let Some(note) = &deprecation.note {
if use_comma {
self.word(", ");
}
self.word("note = \"");
self.word(note.to_ident_string());
self.word("\"");
use_comma = true;
}
if let Some(suggestion) = &deprecation.suggestion {
if use_comma {
self.word(", ");
}
self.word("suggestion = \"");
self.word(suggestion.to_ident_string());
self.word("\"");
}
} else if let Some(note) = &deprecation.note {
// We're in form 2: `#[deprecated = "message"]`.
self.word(" = \"");
self.word(note.to_ident_string());
self.word("\"");
} else {
// We're in form 3: `#[deprecated]`. Nothing to do here.
}
self.word("]");
}
hir::Attribute::Parsed(pa) => {
self.word("#[attr=\"");
pa.print_attribute(self);
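For reference, the three `#[deprecated]` surface forms the removed branch distinguished (a hedged illustration; the function names are made up, and `suggestion` is itself nightly-only):

```rust
#![feature(deprecated_suggestion)] // `suggestion = ...` is unstable

// Form 1: explicit components, each of which may be present or absent.
#[deprecated(since = "1.2.3", note = "some note", suggestion = "something")]
pub fn form_one() {}

// Form 2: a bare message, equivalent to giving only `note`.
#[deprecated = "some note"]
pub fn form_two() {}

// Form 3: no arguments at all.
#[deprecated]
pub fn form_three() {}
```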

View File

@ -40,6 +40,16 @@ impl<'tcx> crate::MirPass<'tcx> for AddCallGuards {
let mut new_blocks = Vec::new();
let cur_len = body.basic_blocks.len();
let mut new_block = |source_info: SourceInfo, is_cleanup: bool, target: BasicBlock| {
let block = BasicBlockData {
statements: vec![],
is_cleanup,
terminator: Some(Terminator { source_info, kind: TerminatorKind::Goto { target } }),
};
let idx = cur_len + new_blocks.len();
new_blocks.push(block);
BasicBlock::new(idx)
};
for block in body.basic_blocks_mut() {
match block.terminator {
@ -47,25 +57,34 @@ impl<'tcx> crate::MirPass<'tcx> for AddCallGuards {
kind: TerminatorKind::Call { target: Some(ref mut destination), unwind, .. },
source_info,
}) if pred_count[*destination] > 1
&& (matches!(
unwind,
UnwindAction::Cleanup(_) | UnwindAction::Terminate(_)
) || self == &AllCallEdges) =>
&& (generates_invoke(unwind) || self == &AllCallEdges) =>
{
// It's a critical edge, break it
let call_guard = BasicBlockData {
statements: vec![],
is_cleanup: block.is_cleanup,
terminator: Some(Terminator {
source_info,
kind: TerminatorKind::Goto { target: *destination },
}),
};
// Get the index it will be when inserted into the MIR
let idx = cur_len + new_blocks.len();
new_blocks.push(call_guard);
*destination = BasicBlock::new(idx);
*destination = new_block(source_info, block.is_cleanup, *destination);
}
Some(Terminator {
kind:
TerminatorKind::InlineAsm {
asm_macro: InlineAsmMacro::Asm,
ref mut targets,
ref operands,
unwind,
..
},
source_info,
}) if self == &CriticalCallEdges => {
let has_outputs = operands.iter().any(|op| {
matches!(op, InlineAsmOperand::InOut { .. } | InlineAsmOperand::Out { .. })
});
let has_labels =
operands.iter().any(|op| matches!(op, InlineAsmOperand::Label { .. }));
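// Outputs are written back only on the asm's ordinary exits, so when the
// asm has labels or lowers to an invoke, codegen needs a dedicated block
// on each multi-predecessor edge to store those outputs into.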
if has_outputs && (has_labels || generates_invoke(unwind)) {
for target in targets.iter_mut() {
if pred_count[*target] > 1 {
*target = new_block(source_info, block.is_cleanup, *target);
}
}
}
}
_ => {}
}
@ -80,3 +99,11 @@ impl<'tcx> crate::MirPass<'tcx> for AddCallGuards {
true
}
}
/// Returns true if this unwind action is code-generated as an invoke, as opposed to a call.
fn generates_invoke(unwind: UnwindAction) -> bool {
match unwind {
UnwindAction::Continue | UnwindAction::Unreachable => false,
UnwindAction::Cleanup(_) | UnwindAction::Terminate(_) => true,
}
}
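As a rough illustration of what this pass does (a toy sketch over a plain adjacency-list CFG, not rustc code): any edge into a block with more than one predecessor is rerouted through a fresh forwarding block, giving later codegen a spot that belongs to that edge alone.

```rust
/// Toy CFG: `succs[b]` holds the successors of block `b`. An edge `a -> b`
/// is split when `b` has multiple predecessors, mirroring how the MIR pass
/// inserts `Goto { target }` guard blocks.
fn split_critical_edges(succs: &mut Vec<Vec<usize>>) {
    let mut pred_count = vec![0usize; succs.len()];
    for targets in succs.iter() {
        for &t in targets {
            pred_count[t] += 1;
        }
    }
    for block in 0..pred_count.len() {
        for i in 0..succs[block].len() {
            let target = succs[block][i];
            if pred_count[target] > 1 {
                let fresh = succs.len();
                succs.push(vec![target]); // forwarding block: Goto { target }
                succs[block][i] = fresh; // redirect the edge through it
            }
        }
    }
}

fn main() {
    // Blocks 0 and 1 both jump to block 2, so both edges get split.
    let mut cfg = vec![vec![2], vec![2], vec![]];
    split_critical_edges(&mut cfg);
    assert_eq!(cfg, vec![vec![3], vec![4], vec![], vec![2], vec![2]]);
}
```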

View File

@ -681,10 +681,14 @@ impl OutputType {
}
/// The type of diagnostics output to generate.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Default)]
pub enum ErrorOutputType {
/// Output meant for the consumption of humans.
HumanReadable(HumanReadableErrorType, ColorConfig),
#[default]
HumanReadable {
kind: HumanReadableErrorType = HumanReadableErrorType::Default,
color_config: ColorConfig = ColorConfig::Auto,
},
/// Output that's consumed by other tools such as `rustfix` or the `RLS`.
Json {
/// Render the JSON in a human readable way (with indents and newlines).
@ -696,12 +700,6 @@ pub enum ErrorOutputType {
},
}
impl Default for ErrorOutputType {
fn default() -> Self {
Self::HumanReadable(HumanReadableErrorType::Default, ColorConfig::Auto)
}
}
#[derive(Clone, Hash, Debug)]
pub enum ResolveDocLinks {
/// Do not resolve doc links.
@ -898,18 +896,13 @@ pub enum PrintKind {
DeploymentTarget,
}
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, Default)]
pub struct NextSolverConfig {
/// Whether the new trait solver should be enabled in coherence.
pub coherence: bool,
pub coherence: bool = true,
/// Whether the new trait solver should be enabled everywhere.
/// This is only `true` if `coherence` is also enabled.
pub globally: bool,
}
impl Default for NextSolverConfig {
fn default() -> Self {
NextSolverConfig { coherence: true, globally: false }
}
pub globally: bool = false,
}
#[derive(Clone)]
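Combined with the `#[default]` variant marker, `default_field_values` also lets `derive(Default)` apply to an enum whose default variant carries fields, as `ErrorOutputType` now does above. A small sketch with illustrative names:

```rust
#![feature(default_field_values)]

#[derive(Debug, PartialEq, Default)]
enum Output {
    #[default]
    Human {
        short: bool = false,
        color: bool = true,
    },
    Json,
}

fn main() {
    // The derived `Default` picks the `#[default]` variant and fills its
    // fields from their declared defaults.
    assert_eq!(Output::default(), Output::Human { short: false, color: true });

    // Call sites spell out only the fields that differ, as in
    // `ErrorOutputType::HumanReadable { color_config, .. }` above.
    let o = Output::Human { short: true, .. };
    assert_eq!(o, Output::Human { short: true, color: true });
}
```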
@ -1825,7 +1818,7 @@ pub fn parse_json(early_dcx: &EarlyDiagCtxt, matches: &getopts::Matches) -> Json
pub fn parse_error_format(
early_dcx: &mut EarlyDiagCtxt,
matches: &getopts::Matches,
color: ColorConfig,
color_config: ColorConfig,
json_color: ColorConfig,
json_rendered: HumanReadableErrorType,
) -> ErrorOutputType {
@ -1835,27 +1828,26 @@ pub fn parse_error_format(
// `opt_present` because the latter will panic.
let error_format = if matches.opts_present(&["error-format".to_owned()]) {
match matches.opt_str("error-format").as_deref() {
None | Some("human") => {
ErrorOutputType::HumanReadable(HumanReadableErrorType::Default, color)
}
Some("human-annotate-rs") => {
ErrorOutputType::HumanReadable(HumanReadableErrorType::AnnotateSnippet, color)
}
None | Some("human") => ErrorOutputType::HumanReadable { color_config, .. },
Some("human-annotate-rs") => ErrorOutputType::HumanReadable {
kind: HumanReadableErrorType::AnnotateSnippet,
color_config,
},
Some("json") => {
ErrorOutputType::Json { pretty: false, json_rendered, color_config: json_color }
}
Some("pretty-json") => {
ErrorOutputType::Json { pretty: true, json_rendered, color_config: json_color }
}
Some("short") => ErrorOutputType::HumanReadable(HumanReadableErrorType::Short, color),
Some("human-unicode") => {
ErrorOutputType::HumanReadable(HumanReadableErrorType::Unicode, color)
Some("short") => {
ErrorOutputType::HumanReadable { kind: HumanReadableErrorType::Short, color_config }
}
Some("human-unicode") => ErrorOutputType::HumanReadable {
kind: HumanReadableErrorType::Unicode,
color_config,
},
Some(arg) => {
early_dcx.set_error_format(ErrorOutputType::HumanReadable(
HumanReadableErrorType::Default,
color,
));
early_dcx.set_error_format(ErrorOutputType::HumanReadable { color_config, .. });
early_dcx.early_fatal(format!(
"argument for `--error-format` must be `human`, `human-annotate-rs`, \
`human-unicode`, `json`, `pretty-json` or `short` (instead was `{arg}`)"
@ -1863,7 +1855,7 @@ pub fn parse_error_format(
}
}
} else {
ErrorOutputType::HumanReadable(HumanReadableErrorType::Default, color)
ErrorOutputType::HumanReadable { color_config, .. }
};
match error_format {
@ -1918,7 +1910,7 @@ fn check_error_format_stability(
}
let format = match format {
ErrorOutputType::Json { pretty: true, .. } => "pretty-json",
ErrorOutputType::HumanReadable(format, _) => match format {
ErrorOutputType::HumanReadable { kind, .. } => match kind {
HumanReadableErrorType::AnnotateSnippet => "human-annotate-rs",
HumanReadableErrorType::Unicode => "human-unicode",
_ => return,

View File

@ -1,5 +1,6 @@
// tidy-alphabetical-start
#![allow(internal_features)]
#![feature(default_field_values)]
#![feature(iter_intersperse)]
#![feature(let_chains)]
#![feature(rustc_attrs)]

View File

@ -913,7 +913,7 @@ fn default_emitter(
let source_map = if sopts.unstable_opts.link_only { None } else { Some(source_map) };
match sopts.error_format {
config::ErrorOutputType::HumanReadable(kind, color_config) => {
config::ErrorOutputType::HumanReadable { kind, color_config } => {
let short = kind.short();
if let HumanReadableErrorType::AnnotateSnippet = kind {
@ -1430,7 +1430,7 @@ fn mk_emitter(output: ErrorOutputType) -> Box<DynEmitter> {
let fallback_bundle =
fallback_fluent_bundle(vec![rustc_errors::DEFAULT_LOCALE_RESOURCE], false);
let emitter: Box<DynEmitter> = match output {
config::ErrorOutputType::HumanReadable(kind, color_config) => {
config::ErrorOutputType::HumanReadable { kind, color_config } => {
let short = kind.short();
Box::new(
HumanEmitter::new(stderr_destination(color_config), fallback_bundle)

View File

@ -1,4 +1,4 @@
use std::{mem, ptr};
use std::ptr;
use rand::Rng;
use rand::distr::{Alphanumeric, SampleString, StandardUniform};
@ -234,7 +234,7 @@ macro_rules! sort {
fn $name(b: &mut Bencher) {
let v = $gen($len);
b.iter(|| v.clone().$f());
b.bytes = $len * mem::size_of_val(&$gen(1)[0]) as u64;
b.bytes = $len * size_of_val(&$gen(1)[0]) as u64;
}
};
}
@ -246,7 +246,7 @@ macro_rules! sort_strings {
let v = $gen($len);
let v = v.iter().map(|s| &**s).collect::<Vec<&str>>();
b.iter(|| v.clone().$f());
b.bytes = $len * mem::size_of::<&str>() as u64;
b.bytes = $len * size_of::<&str>() as u64;
}
};
}
@ -268,7 +268,7 @@ macro_rules! sort_expensive {
});
black_box(count);
});
b.bytes = $len * mem::size_of_val(&$gen(1)[0]) as u64;
b.bytes = $len * size_of_val(&$gen(1)[0]) as u64;
}
};
}
@ -279,7 +279,7 @@ macro_rules! sort_lexicographic {
fn $name(b: &mut Bencher) {
let v = $gen($len);
b.iter(|| v.clone().$f(|x| x.to_string()));
b.bytes = $len * mem::size_of_val(&$gen(1)[0]) as u64;
b.bytes = $len * size_of_val(&$gen(1)[0]) as u64;
}
};
}
@ -322,7 +322,7 @@ macro_rules! reverse {
fn $name(b: &mut Bencher) {
// odd length and offset by 1 to be as unaligned as possible
let n = 0xFFFFF;
let mut v: Vec<_> = (0..1 + (n / mem::size_of::<$ty>() as u64)).map($f).collect();
let mut v: Vec<_> = (0..1 + (n / size_of::<$ty>() as u64)).map($f).collect();
b.iter(|| black_box(&mut v[1..]).reverse());
b.bytes = n;
}
@ -346,7 +346,7 @@ macro_rules! rotate {
($name:ident, $gen:expr, $len:expr, $mid:expr) => {
#[bench]
fn $name(b: &mut Bencher) {
let size = mem::size_of_val(&$gen(1)[0]);
let size = size_of_val(&$gen(1)[0]);
let mut v = $gen($len * 8 / size);
b.iter(|| black_box(&mut v).rotate_left(($mid * 8 + size - 1) / size));
b.bytes = (v.len() * size) as u64;
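These mechanical `mem::size_of` -> `size_of` changes work because `size_of`, `size_of_val`, `align_of`, and `align_of_val` have been re-exported from the prelude since Rust 1.80, so no import is needed:

```rust
// Compiles with no `use` items on Rust 1.80+: all four functions below are
// re-exported in the prelude.
fn main() {
    assert_eq!(size_of::<u32>(), 4);
    assert_eq!(align_of::<u32>(), 4);
    let bytes = [0u8; 13];
    assert_eq!(size_of_val(&bytes), 13);
    assert_eq!(align_of_val(&bytes), 1);
}
```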

View File

@ -669,7 +669,7 @@ fn random_sorted_fill(mut seed: u32, buf: &mut [u32]) {
// This algorithm was used for Vecs prior to Rust 1.52.
fn bench_dedup_slice_truncate(b: &mut Bencher, sz: usize) {
let mut template = vec![0u32; sz];
b.bytes = std::mem::size_of_val(template.as_slice()) as u64;
b.bytes = size_of_val(template.as_slice()) as u64;
random_sorted_fill(0x43, &mut template);
let mut vec = template.clone();
@ -691,7 +691,7 @@ fn bench_dedup_slice_truncate(b: &mut Bencher, sz: usize) {
// Measures performance of Vec::dedup on random data.
fn bench_vec_dedup_random(b: &mut Bencher, sz: usize) {
let mut template = vec![0u32; sz];
b.bytes = std::mem::size_of_val(template.as_slice()) as u64;
b.bytes = size_of_val(template.as_slice()) as u64;
random_sorted_fill(0x43, &mut template);
let mut vec = template.clone();
@ -708,7 +708,7 @@ fn bench_vec_dedup_random(b: &mut Bencher, sz: usize) {
// Measures performance of Vec::dedup when no items are removed
fn bench_vec_dedup_none(b: &mut Bencher, sz: usize) {
let mut template = vec![0u32; sz];
b.bytes = std::mem::size_of_val(template.as_slice()) as u64;
b.bytes = size_of_val(template.as_slice()) as u64;
template.chunks_exact_mut(2).for_each(|w| {
w[0] = black_box(0);
w[1] = black_box(5);
@ -729,7 +729,7 @@ fn bench_vec_dedup_none(b: &mut Bencher, sz: usize) {
// Measures performance of Vec::dedup when all items are removed
fn bench_vec_dedup_all(b: &mut Bencher, sz: usize) {
let mut template = vec![0u32; sz];
b.bytes = std::mem::size_of_val(template.as_slice()) as u64;
b.bytes = size_of_val(template.as_slice()) as u64;
template.iter_mut().for_each(|w| {
*w = black_box(0);
});

View File

@ -529,7 +529,6 @@ impl<'a, E: Error + 'a> From<E> for Box<dyn Error + 'a> {
/// ```
/// use std::error::Error;
/// use std::fmt;
/// use std::mem;
///
/// #[derive(Debug)]
/// struct AnError;
@ -543,9 +542,9 @@ impl<'a, E: Error + 'a> From<E> for Box<dyn Error + 'a> {
/// impl Error for AnError {}
///
/// let an_error = AnError;
/// assert!(0 == mem::size_of_val(&an_error));
/// assert!(0 == size_of_val(&an_error));
/// let a_boxed_error = Box::<dyn Error>::from(an_error);
/// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error))
/// assert!(size_of::<Box<dyn Error>>() == size_of_val(&a_boxed_error))
/// ```
fn from(err: E) -> Box<dyn Error + 'a> {
Box::new(err)
@ -563,7 +562,6 @@ impl<'a, E: Error + Send + Sync + 'a> From<E> for Box<dyn Error + Send + Sync +
/// ```
/// use std::error::Error;
/// use std::fmt;
/// use std::mem;
///
/// #[derive(Debug)]
/// struct AnError;
@ -581,10 +579,10 @@ impl<'a, E: Error + Send + Sync + 'a> From<E> for Box<dyn Error + Send + Sync +
/// unsafe impl Sync for AnError {}
///
/// let an_error = AnError;
/// assert!(0 == mem::size_of_val(&an_error));
/// assert!(0 == size_of_val(&an_error));
/// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(an_error);
/// assert!(
/// mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error))
/// size_of::<Box<dyn Error + Send + Sync>>() == size_of_val(&a_boxed_error))
/// ```
fn from(err: E) -> Box<dyn Error + Send + Sync + 'a> {
Box::new(err)
@ -600,12 +598,11 @@ impl<'a> From<String> for Box<dyn Error + Send + Sync + 'a> {
///
/// ```
/// use std::error::Error;
/// use std::mem;
///
/// let a_string_error = "a string error".to_string();
/// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(a_string_error);
/// assert!(
/// mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error))
/// size_of::<Box<dyn Error + Send + Sync>>() == size_of_val(&a_boxed_error))
/// ```
#[inline]
fn from(err: String) -> Box<dyn Error + Send + Sync + 'a> {
@ -644,11 +641,10 @@ impl<'a> From<String> for Box<dyn Error + 'a> {
///
/// ```
/// use std::error::Error;
/// use std::mem;
///
/// let a_string_error = "a string error".to_string();
/// let a_boxed_error = Box::<dyn Error>::from(a_string_error);
/// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error))
/// assert!(size_of::<Box<dyn Error>>() == size_of_val(&a_boxed_error))
/// ```
fn from(str_err: String) -> Box<dyn Error + 'a> {
let err1: Box<dyn Error + Send + Sync> = From::from(str_err);
@ -668,12 +664,11 @@ impl<'a> From<&str> for Box<dyn Error + Send + Sync + 'a> {
///
/// ```
/// use std::error::Error;
/// use std::mem;
///
/// let a_str_error = "a str error";
/// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(a_str_error);
/// assert!(
/// mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error))
/// size_of::<Box<dyn Error + Send + Sync>>() == size_of_val(&a_boxed_error))
/// ```
#[inline]
fn from(err: &str) -> Box<dyn Error + Send + Sync + 'a> {
@ -692,11 +687,10 @@ impl<'a> From<&str> for Box<dyn Error + 'a> {
///
/// ```
/// use std::error::Error;
/// use std::mem;
///
/// let a_str_error = "a str error";
/// let a_boxed_error = Box::<dyn Error>::from(a_str_error);
/// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error))
/// assert!(size_of::<Box<dyn Error>>() == size_of_val(&a_boxed_error))
/// ```
fn from(err: &str) -> Box<dyn Error + 'a> {
From::from(String::from(err))
@ -712,13 +706,12 @@ impl<'a, 'b> From<Cow<'b, str>> for Box<dyn Error + Send + Sync + 'a> {
///
/// ```
/// use std::error::Error;
/// use std::mem;
/// use std::borrow::Cow;
///
/// let a_cow_str_error = Cow::from("a str error");
/// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(a_cow_str_error);
/// assert!(
/// mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error))
/// size_of::<Box<dyn Error + Send + Sync>>() == size_of_val(&a_boxed_error))
/// ```
fn from(err: Cow<'b, str>) -> Box<dyn Error + Send + Sync + 'a> {
From::from(String::from(err))
@ -734,12 +727,11 @@ impl<'a, 'b> From<Cow<'b, str>> for Box<dyn Error + 'a> {
///
/// ```
/// use std::error::Error;
/// use std::mem;
/// use std::borrow::Cow;
///
/// let a_cow_str_error = Cow::from("a str error");
/// let a_boxed_error = Box::<dyn Error>::from(a_cow_str_error);
/// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error))
/// assert!(size_of::<Box<dyn Error>>() == size_of_val(&a_boxed_error))
/// ```
fn from(err: Cow<'b, str>) -> Box<dyn Error + 'a> {
From::from(String::from(err))

View File

@ -9,9 +9,8 @@ use core::intrinsics::const_allocate;
use core::marker::PhantomData;
#[cfg(not(no_global_oom_handling))]
use core::marker::Unsize;
use core::mem;
#[cfg(not(no_global_oom_handling))]
use core::mem::SizedTypeProperties;
use core::mem::{self, SizedTypeProperties};
use core::ops::{Deref, DerefMut};
use core::ptr::{self, NonNull, Pointee};
@ -30,7 +29,6 @@ use crate::alloc::{self, Layout, LayoutError};
/// let five = ThinBox::new(5);
/// let thin_slice = ThinBox::<[i32]>::new_unsize([1, 2, 3, 4]);
///
/// use std::mem::{size_of, size_of_val};
/// let size_of_ptr = size_of::<*const ()>();
/// assert_eq!(size_of_ptr, size_of_val(&five));
/// assert_eq!(size_of_ptr, size_of_val(&thin_slice));
@ -114,7 +112,7 @@ impl<Dyn: ?Sized> ThinBox<Dyn> {
where
T: Unsize<Dyn>,
{
if mem::size_of::<T>() == 0 {
if size_of::<T>() == 0 {
let ptr = WithOpaqueHeader::new_unsize_zst::<Dyn, T>(value);
ThinBox { ptr, _marker: PhantomData }
} else {
@ -283,9 +281,7 @@ impl<H> WithHeader<H> {
let ptr = if layout.size() == 0 {
// Some paranoia checking, mostly so that the ThinBox tests are
// more able to catch issues.
debug_assert!(
value_offset == 0 && mem::size_of::<T>() == 0 && mem::size_of::<H>() == 0
);
debug_assert!(value_offset == 0 && size_of::<T>() == 0 && size_of::<H>() == 0);
layout.dangling()
} else {
let ptr = alloc::alloc(layout);
@ -315,7 +311,7 @@ impl<H> WithHeader<H> {
Dyn: Pointee<Metadata = H> + ?Sized,
T: Unsize<Dyn>,
{
assert!(mem::size_of::<T>() == 0);
assert!(size_of::<T>() == 0);
const fn max(a: usize, b: usize) -> usize {
if a > b { a } else { b }
@ -329,18 +325,16 @@ impl<H> WithHeader<H> {
// FIXME: just call `WithHeader::alloc_layout` with size reset to 0.
// Currently that's blocked on `Layout::extend` not being `const fn`.
let alloc_align =
max(mem::align_of::<T>(), mem::align_of::<<Dyn as Pointee>::Metadata>());
let alloc_align = max(align_of::<T>(), align_of::<<Dyn as Pointee>::Metadata>());
let alloc_size =
max(mem::align_of::<T>(), mem::size_of::<<Dyn as Pointee>::Metadata>());
let alloc_size = max(align_of::<T>(), size_of::<<Dyn as Pointee>::Metadata>());
unsafe {
// SAFETY: align is power of two because it is the maximum of two alignments.
let alloc: *mut u8 = const_allocate(alloc_size, alloc_align);
let metadata_offset =
alloc_size.checked_sub(mem::size_of::<<Dyn as Pointee>::Metadata>()).unwrap();
alloc_size.checked_sub(size_of::<<Dyn as Pointee>::Metadata>()).unwrap();
// SAFETY: adding offset within the allocation.
let metadata_ptr: *mut <Dyn as Pointee>::Metadata =
alloc.add(metadata_offset).cast();
@ -421,7 +415,7 @@ impl<H> WithHeader<H> {
}
const fn header_size() -> usize {
mem::size_of::<H>()
size_of::<H>()
}
fn alloc_layout(value_layout: Layout) -> Result<(Layout, usize), LayoutError> {

View File

@ -92,8 +92,8 @@ fn test_partial_eq() {
#[cfg(target_arch = "x86_64")]
#[cfg_attr(any(miri, randomized_layouts), ignore)] // We'd like to run Miri with layout randomization
fn test_sizes() {
assert_eq!(core::mem::size_of::<LeafNode<(), ()>>(), 16);
assert_eq!(core::mem::size_of::<LeafNode<i64, i64>>(), 16 + CAPACITY * 2 * 8);
assert_eq!(core::mem::size_of::<InternalNode<(), ()>>(), 16 + (CAPACITY + 1) * 8);
assert_eq!(core::mem::size_of::<InternalNode<i64, i64>>(), 16 + (CAPACITY * 3 + 1) * 8);
assert_eq!(size_of::<LeafNode<(), ()>>(), 16);
assert_eq!(size_of::<LeafNode<i64, i64>>(), 16 + CAPACITY * 2 * 8);
assert_eq!(size_of::<InternalNode<(), ()>>(), 16 + (CAPACITY + 1) * 8);
assert_eq!(size_of::<InternalNode<i64, i64>>(), 16 + (CAPACITY * 3 + 1) * 8);
}

View File

@ -480,7 +480,7 @@ impl<A: Allocator> RawVecInner<A> {
// Allocators currently return a `NonNull<[u8]>` whose length
// matches the size requested. If that ever changes, the capacity
// here should change to `ptr.len() / mem::size_of::<T>()`.
// here should change to `ptr.len() / size_of::<T>()`.
Ok(Self {
ptr: Unique::from(ptr.cast()),
cap: unsafe { Cap::new_unchecked(capacity) },
@ -627,7 +627,7 @@ impl<A: Allocator> RawVecInner<A> {
unsafe fn set_ptr_and_cap(&mut self, ptr: NonNull<[u8]>, cap: usize) {
// Allocators currently return a `NonNull<[u8]>` whose length matches
// the size requested. If that ever changes, the capacity here should
// change to `ptr.len() / mem::size_of::<T>()`.
// change to `ptr.len() / size_of::<T>()`.
self.ptr = Unique::from(ptr.cast());
self.cap = unsafe { Cap::new_unchecked(cap) };
}

View File

@ -1,4 +1,3 @@
use core::mem::size_of;
use std::cell::Cell;
use super::*;
@ -93,7 +92,7 @@ fn zst_sanity<T>(v: &RawVec<T>) {
fn zst() {
let cap_err = Err(crate::collections::TryReserveErrorKind::CapacityOverflow.into());
assert_eq!(std::mem::size_of::<ZST>(), 0);
assert_eq!(size_of::<ZST>(), 0);
// All these different ways of creating the RawVec produce the same thing.

View File

@ -16,7 +16,7 @@ use core::borrow::{Borrow, BorrowMut};
#[cfg(not(no_global_oom_handling))]
use core::cmp::Ordering::{self, Less};
#[cfg(not(no_global_oom_handling))]
use core::mem::{self, MaybeUninit};
use core::mem::MaybeUninit;
#[cfg(not(no_global_oom_handling))]
use core::ptr;
#[unstable(feature = "array_chunks", issue = "74985")]
@ -446,7 +446,7 @@ impl<T> [T] {
// Avoids binary-size usage in cases where the alignment doesn't work out to make this
// beneficial or on 32-bit platforms.
let is_using_u32_as_idx_type_helpful =
const { mem::size_of::<(K, u32)>() < mem::size_of::<(K, usize)>() };
const { size_of::<(K, u32)>() < size_of::<(K, usize)>() };
// It's possible to instantiate this for u8 and u16, but doing so is very wasteful in terms
// of compile-times and binary-size; the peak saved heap memory for u16 is (u8 + u16) -> 4

View File

@ -119,8 +119,6 @@ use crate::vec::{self, Vec};
/// the same `char`s:
///
/// ```
/// use std::mem;
///
/// // `s` is ASCII which represents each `char` as one byte
/// let s = "hello";
/// assert_eq!(s.len(), 5);
@ -128,7 +126,7 @@ use crate::vec::{self, Vec};
/// // A `char` array with the same contents would be longer because
/// // every `char` is four bytes
/// let s = ['h', 'e', 'l', 'l', 'o'];
/// let size: usize = s.into_iter().map(|c| mem::size_of_val(&c)).sum();
/// let size: usize = s.into_iter().map(|c| size_of_val(&c)).sum();
/// assert_eq!(size, 20);
///
/// // However, for non-ASCII strings, the difference will be smaller
@ -137,7 +135,7 @@ use crate::vec::{self, Vec};
/// assert_eq!(s.len(), 20);
///
/// let s = ['💖', '💖', '💖', '💖', '💖'];
/// let size: usize = s.into_iter().map(|c| mem::size_of_val(&c)).sum();
/// let size: usize = s.into_iter().map(|c| size_of_val(&c)).sum();
/// assert_eq!(size, 20);
/// ```
///

View File

@ -2274,7 +2274,7 @@ impl<T: ?Sized + CloneToUninit, A: Allocator + Clone> Arc<T, A> {
#[inline]
#[stable(feature = "arc_unique", since = "1.4.0")]
pub fn make_mut(this: &mut Self) -> &mut T {
let size_of_val = mem::size_of_val::<T>(&**this);
let size_of_val = size_of_val::<T>(&**this);
// Note that we hold both a strong reference and a weak reference.
// Thus, releasing our strong reference only will not, by itself, cause
@ -3544,7 +3544,7 @@ impl<T> Default for Arc<[T]> {
/// This may or may not share an allocation with other Arcs.
#[inline]
fn default() -> Self {
if mem::align_of::<T>() <= MAX_STATIC_INNER_SLICE_ALIGNMENT {
if align_of::<T>() <= MAX_STATIC_INNER_SLICE_ALIGNMENT {
// We take a reference to the whole struct instead of the ArcInner<[u8; 1]> inside it so
// we don't shrink the range of bytes the ptr is allowed to access under Stacked Borrows.
// (Miri complains on 32-bit targets with Arc<[Align16]> otherwise.)

View File

@ -171,7 +171,7 @@ const fn in_place_collectible<DEST, SRC>(
) -> bool {
// Require matching alignments because an alignment-changing realloc is inefficient on many
// system allocators and better implementations would require the unstable Allocator trait.
if const { SRC::IS_ZST || DEST::IS_ZST || mem::align_of::<SRC>() != mem::align_of::<DEST>() } {
if const { SRC::IS_ZST || DEST::IS_ZST || align_of::<SRC>() != align_of::<DEST>() } {
return false;
}
@ -181,7 +181,7 @@ const fn in_place_collectible<DEST, SRC>(
// e.g.
// - 1 x [u8; 4] -> 4x u8, via flatten
// - 4 x u8 -> 1x [u8; 4], via array_chunks
mem::size_of::<SRC>() * step_merge.get() >= mem::size_of::<DEST>() * step_expand.get()
size_of::<SRC>() * step_merge.get() >= size_of::<DEST>() * step_expand.get()
}
// Fall back to other from_iter impls if an overflow occurred in the step merge/expansion
// tracking.
@ -190,7 +190,7 @@ const fn in_place_collectible<DEST, SRC>(
}
const fn needs_realloc<SRC, DEST>(src_cap: usize, dst_cap: usize) -> bool {
if const { mem::align_of::<SRC>() != mem::align_of::<DEST>() } {
if const { align_of::<SRC>() != align_of::<DEST>() } {
// FIXME(const-hack): use unreachable! once that works in const
panic!("in_place_collectible() prevents this");
}
@ -199,8 +199,8 @@ const fn needs_realloc<SRC, DEST>(src_cap: usize, dst_cap: usize) -> bool {
// the caller will have calculated a `dst_cap` that is an integer multiple of
// `src_cap` without remainder.
if const {
let src_sz = mem::size_of::<SRC>();
let dest_sz = mem::size_of::<DEST>();
let src_sz = size_of::<SRC>();
let dest_sz = size_of::<DEST>();
dest_sz != 0 && src_sz % dest_sz == 0
} {
return false;
@ -208,7 +208,7 @@ const fn needs_realloc<SRC, DEST>(src_cap: usize, dst_cap: usize) -> bool {
// type layouts don't guarantee a fit, so do a runtime check to see if
// the allocations happen to match
src_cap > 0 && src_cap * mem::size_of::<SRC>() != dst_cap * mem::size_of::<DEST>()
src_cap > 0 && src_cap * size_of::<SRC>() != dst_cap * size_of::<DEST>()
}
/// This provides a shorthand for the source type since local type aliases aren't a thing.
@ -262,7 +262,7 @@ where
inner.buf.cast::<T>(),
inner.end as *const T,
// SAFETY: the multiplication can not overflow, since `inner.cap * size_of::<I::SRC>()` is the size of the allocation.
inner.cap.unchecked_mul(mem::size_of::<I::Src>()) / mem::size_of::<T>(),
inner.cap.unchecked_mul(size_of::<I::Src>()) / size_of::<T>(),
)
};
@ -310,14 +310,14 @@ where
debug_assert_ne!(dst_cap, 0);
unsafe {
// The old allocation exists, therefore it must have a valid layout.
let src_align = mem::align_of::<I::Src>();
let src_size = mem::size_of::<I::Src>().unchecked_mul(src_cap);
let src_align = align_of::<I::Src>();
let src_size = size_of::<I::Src>().unchecked_mul(src_cap);
let old_layout = Layout::from_size_align_unchecked(src_size, src_align);
// The allocation must be equal or smaller for in-place iteration to be possible
// therefore the new layout must be ≤ the old one and therefore valid.
let dst_align = mem::align_of::<T>();
let dst_size = mem::size_of::<T>().unchecked_mul(dst_cap);
let dst_align = align_of::<T>();
let dst_size = size_of::<T>().unchecked_mul(dst_cap);
let new_layout = Layout::from_size_align_unchecked(dst_size, dst_align);
let result = alloc.shrink(dst_buf.cast(), old_layout, new_layout);
@ -325,7 +325,7 @@ where
dst_buf = reallocated.cast::<T>();
}
} else {
debug_assert_eq!(src_cap * mem::size_of::<I::Src>(), dst_cap * mem::size_of::<T>());
debug_assert_eq!(src_cap * size_of::<I::Src>(), dst_cap * size_of::<T>());
}
mem::forget(dst_guard);

View File

@ -293,7 +293,7 @@ mod spec_extend;
/// on an empty Vec, it will not allocate memory. Similarly, if you store zero-sized
/// types inside a `Vec`, it will not allocate space for them. *Note that in this case
/// the `Vec` might not report a [`capacity`] of 0*. `Vec` will allocate if and only
/// if <code>[mem::size_of::\<T>]\() * [capacity]\() > 0</code>. In general, `Vec`'s allocation
/// if <code>[size_of::\<T>]\() * [capacity]\() > 0</code>. In general, `Vec`'s allocation
/// details are very subtle --- if you intend to allocate memory using a `Vec`
/// and use it for something else (either to pass to unsafe code, or to build your
/// own memory-backed collection), be sure to deallocate this memory by using
@ -393,7 +393,7 @@ mod spec_extend;
/// [capacity]: Vec::capacity
/// [`capacity`]: Vec::capacity
/// [`Vec::capacity`]: Vec::capacity
/// [mem::size_of::\<T>]: core::mem::size_of
/// [size_of::\<T>]: size_of
/// [len]: Vec::len
/// [`len`]: Vec::len
/// [`push`]: Vec::push
@ -1573,7 +1573,7 @@ impl<T, A: Allocator> Vec<T, A> {
pub const fn as_slice(&self) -> &[T] {
// SAFETY: `slice::from_raw_parts` requires pointee is a contiguous, aligned buffer of size
// `len` containing properly-initialized `T`s. Data must not be mutated for the returned
// lifetime. Further, `len * mem::size_of::<T>` <= `ISIZE::MAX`, and allocation does not
// lifetime. Further, `len * size_of::<T>` <= `isize::MAX`, and allocation does not
// "wrap" through overflowing memory addresses.
//
// * Vec API guarantees that self.buf:
@ -1605,7 +1605,7 @@ impl<T, A: Allocator> Vec<T, A> {
pub const fn as_mut_slice(&mut self) -> &mut [T] {
// SAFETY: `slice::from_raw_parts_mut` requires pointee is a contiguous, aligned buffer of
// size `len` containing properly-initialized `T`s. Data must not be accessed through any
// other pointer for the returned lifetime. Further, `len * mem::size_of::<T>` <=
// other pointer for the returned lifetime. Further, `len * size_of::<T>` <=
// `isize::MAX` and allocation does not "wrap" through overflowing memory addresses.
//
// * Vec API guarantees that self.buf:
@ -2693,7 +2693,7 @@ impl<T, A: Allocator> Vec<T, A> {
let len = self.len;
// SAFETY: The maximum capacity of `Vec<T>` is `isize::MAX` bytes, so the maximum value that can
// be returned is `usize::checked_div(mem::size_of::<T>()).unwrap_or(usize::MAX)`, which
// be returned is `usize::checked_div(size_of::<T>()).unwrap_or(usize::MAX)`, which
// matches the definition of `T::MAX_SLICE_LEN`.
unsafe { intrinsics::assume(len <= T::MAX_SLICE_LEN) };

View File

@ -1,7 +1,6 @@
use std::any::Any;
use std::cell::{Cell, RefCell};
use std::iter::TrustedLen;
use std::mem;
use std::sync::{Arc, Weak};
#[test]
@ -129,7 +128,7 @@ fn shared_from_iter_trustedlen_normal() {
let vec = iter.clone().collect::<Vec<_>>();
let rc = iter.collect::<Rc<[_]>>();
assert_eq!(&*vec, &*rc);
assert_eq!(mem::size_of::<Box<u16>>() * SHARED_ITER_MAX as usize, mem::size_of_val(&*rc));
assert_eq!(size_of::<Box<u16>>() * SHARED_ITER_MAX as usize, size_of_val(&*rc));
// Clone a bit and let these get dropped.
{
@ -145,7 +144,7 @@ fn shared_from_iter_trustedlen_normal() {
let vec = iter.clone().collect::<Vec<_>>();
let rc = iter.collect::<Rc<[_]>>();
assert_eq!(&*vec, &*rc);
assert_eq!(0, mem::size_of_val(&*rc));
assert_eq!(0, size_of_val(&*rc));
{
let _rc_2 = rc.clone();
let _rc_3 = rc.clone();

View File

@ -1,7 +1,6 @@
use std::any::Any;
use std::cell::{Cell, RefCell};
use std::iter::TrustedLen;
use std::mem;
use std::rc::{Rc, Weak};
#[test]
@ -125,7 +124,7 @@ fn shared_from_iter_trustedlen_normal() {
let vec = iter.clone().collect::<Vec<_>>();
let rc = iter.collect::<Rc<[_]>>();
assert_eq!(&*vec, &*rc);
assert_eq!(mem::size_of::<Box<u16>>() * SHARED_ITER_MAX as usize, mem::size_of_val(&*rc));
assert_eq!(size_of::<Box<u16>>() * SHARED_ITER_MAX as usize, size_of_val(&*rc));
// Clone a bit and let these get dropped.
{
@ -141,7 +140,7 @@ fn shared_from_iter_trustedlen_normal() {
let vec = iter.clone().collect::<Vec<_>>();
let rc = iter.collect::<Rc<[_]>>();
assert_eq!(&*vec, &*rc);
assert_eq!(0, mem::size_of_val(&*rc));
assert_eq!(0, size_of_val(&*rc));
{
let _rc_2 = rc.clone();
let _rc_3 = rc.clone();

View File

@ -1,7 +1,7 @@
use std::cmp::Ordering::{Equal, Greater, Less};
use std::convert::identity;
use std::rc::Rc;
use std::{fmt, mem, panic};
use std::{fmt, panic};
fn square(n: usize) -> usize {
n * n
@ -73,7 +73,7 @@ fn test_len_divzero() {
let v0: &[Z] = &[];
let v1: &[Z] = &[[]];
let v2: &[Z] = &[[], []];
assert_eq!(mem::size_of::<Z>(), 0);
assert_eq!(size_of::<Z>(), 0);
assert_eq!(v0.len(), 0);
assert_eq!(v1.len(), 1);
assert_eq!(v2.len(), 2);

View File

@ -5,7 +5,7 @@
// Based on https://github.com/voultapher/tiny-sort-rs.
use alloc::alloc::{Layout, alloc, dealloc};
use std::{mem, ptr};
use std::ptr;
/// Sort `v` preserving initial order of equal elements.
///
@ -26,7 +26,7 @@ pub fn sort<T: Ord>(v: &mut [T]) {
#[inline(always)]
fn stable_sort<T, F: FnMut(&T, &T) -> bool>(v: &mut [T], mut is_less: F) {
if mem::size_of::<T>() == 0 {
if size_of::<T>() == 0 {
return;
}
@ -166,7 +166,7 @@ struct BufGuard<T> {
impl<T> BufGuard<T> {
// SAFETY: The caller has to ensure that len is not 0 and that T is not a ZST.
unsafe fn new(len: usize) -> Self {
debug_assert!(len > 0 && mem::size_of::<T>() > 0);
debug_assert!(len > 0 && size_of::<T>() > 0);
// SAFETY: See function safety description.
let layout = unsafe { unwrap_unchecked(Layout::array::<T>(len).ok()) };

View File

@ -1,5 +1,4 @@
use core::fmt::Debug;
use core::mem::size_of;
use std::boxed::ThinBox;
#[test]
@ -52,7 +51,7 @@ fn verify_aligned<T>(ptr: *const T) {
ptr.is_aligned() && !ptr.is_null(),
"misaligned ThinBox data; valid pointers to `{ty}` should be aligned to {align}: {ptr:p}",
ty = core::any::type_name::<T>(),
align = core::mem::align_of::<T>(),
align = align_of::<T>(),
);
}

View File

@ -11,14 +11,14 @@ use std::borrow::Cow;
use std::cell::Cell;
use std::collections::TryReserveErrorKind::*;
use std::fmt::Debug;
use std::hint;
use std::iter::InPlaceIterable;
use std::mem::{size_of, swap};
use std::mem::swap;
use std::ops::Bound::*;
use std::panic::{AssertUnwindSafe, catch_unwind};
use std::rc::Rc;
use std::sync::atomic::{AtomicU32, Ordering};
use std::vec::{Drain, IntoIter};
use std::{hint, mem};
struct DropCounter<'a> {
count: &'a mut u32,
@ -1134,7 +1134,7 @@ fn test_into_iter_zst() {
impl Drop for AlignedZstWithDrop {
fn drop(&mut self) {
let addr = self as *mut _ as usize;
assert!(hint::black_box(addr) % mem::align_of::<u64>() == 0);
assert!(hint::black_box(addr) % align_of::<u64>() == 0);
}
}

View File

@ -17,7 +17,7 @@ use crate::{assert_unsafe_precondition, fmt, mem};
// * https://github.com/rust-lang/rust/pull/72189
// * https://github.com/rust-lang/rust/pull/79827
const fn size_align<T>() -> (usize, usize) {
(mem::size_of::<T>(), mem::align_of::<T>())
(size_of::<T>(), align_of::<T>())
}
/// Layout of a block of memory.
@ -182,7 +182,7 @@ impl Layout {
#[must_use]
#[inline]
pub const fn for_value<T: ?Sized>(t: &T) -> Self {
let (size, align) = (mem::size_of_val(t), mem::align_of_val(t));
let (size, align) = (size_of_val(t), align_of_val(t));
// SAFETY: see rationale in `new` for why this is using the unsafe variant
unsafe { Layout::from_size_align_unchecked(size, align) }
}

View File

@ -40,11 +40,9 @@ impl From<char> for u32 {
/// # Examples
///
/// ```
/// use std::mem;
///
/// let c = 'c';
/// let u = u32::from(c);
/// assert!(4 == mem::size_of_val(&u))
/// assert!(4 == size_of_val(&u))
/// ```
#[inline]
fn from(c: char) -> Self {
@ -59,11 +57,9 @@ impl From<char> for u64 {
/// # Examples
///
/// ```
/// use std::mem;
///
/// let c = '👤';
/// let u = u64::from(c);
/// assert!(8 == mem::size_of_val(&u))
/// assert!(8 == size_of_val(&u))
/// ```
#[inline]
fn from(c: char) -> Self {
@ -80,11 +76,9 @@ impl From<char> for u128 {
/// # Examples
///
/// ```
/// use std::mem;
///
/// let c = '⚙';
/// let u = u128::from(c);
/// assert!(16 == mem::size_of_val(&u))
/// assert!(16 == size_of_val(&u))
/// ```
#[inline]
fn from(c: char) -> Self {
@ -167,11 +161,9 @@ impl From<u8> for char {
/// # Examples
///
/// ```
/// use std::mem;
///
/// let u = 32 as u8;
/// let c = char::from(u);
/// assert!(4 == mem::size_of_val(&c))
/// assert!(4 == size_of_val(&c))
/// ```
#[inline]
fn from(i: u8) -> Self {

View File

@ -244,8 +244,8 @@ pub unsafe trait CloneToUninit {
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `dst` must be [valid] for writes for `std::mem::size_of_val(self)` bytes.
/// * `dst` must be properly aligned to `std::mem::align_of_val(self)`.
/// * `dst` must be [valid] for writes for `size_of_val(self)` bytes.
/// * `dst` must be properly aligned to `align_of_val(self)`.
///
/// [valid]: crate::ptr#safety
/// [pointer metadata]: crate::ptr::metadata()

View File

@ -801,7 +801,7 @@ impl<H> Eq for BuildHasherDefault<H> {}
mod impls {
use super::*;
use crate::{mem, slice};
use crate::slice;
macro_rules! impl_write {
($(($ty:ident, $meth:ident),)*) => {$(
@ -814,7 +814,7 @@ mod impls {
#[inline]
fn hash_slice<H: Hasher>(data: &[$ty], state: &mut H) {
let newlen = mem::size_of_val(data);
let newlen = size_of_val(data);
let ptr = data.as_ptr() as *const u8;
// SAFETY: `ptr` is valid and aligned, as this macro is only used
// for numeric primitives which have no padding. The new slice only

View File

@ -3,7 +3,7 @@
#![allow(deprecated)] // the types in this module are deprecated
use crate::marker::PhantomData;
use crate::{cmp, mem, ptr};
use crate::{cmp, ptr};
/// An implementation of SipHash 1-3.
///
@ -99,12 +99,12 @@ macro_rules! compress {
/// `$i..$i+size_of::<$int_ty>()`, so that must be in-bounds.
macro_rules! load_int_le {
($buf:expr, $i:expr, $int_ty:ident) => {{
debug_assert!($i + mem::size_of::<$int_ty>() <= $buf.len());
debug_assert!($i + size_of::<$int_ty>() <= $buf.len());
let mut data = 0 as $int_ty;
ptr::copy_nonoverlapping(
$buf.as_ptr().add($i),
&mut data as *mut _ as *mut u8,
mem::size_of::<$int_ty>(),
size_of::<$int_ty>(),
);
data.to_le()
}};

View File

@ -3340,7 +3340,7 @@ pub unsafe fn vtable_align(_ptr: *const ()) -> usize;
/// More specifically, this is the offset in bytes between successive
/// items of the same type, including alignment padding.
///
/// The stabilized version of this intrinsic is [`core::mem::size_of`].
/// The stabilized version of this intrinsic is [`size_of`].
#[rustc_nounwind]
#[unstable(feature = "core_intrinsics", issue = "none")]
#[rustc_intrinsic_const_stable_indirect]
@ -3354,7 +3354,7 @@ pub const fn size_of<T>() -> usize;
/// Therefore, implementations must not require the user to uphold
/// any safety invariants.
///
/// The stabilized version of this intrinsic is [`core::mem::align_of`].
/// The stabilized version of this intrinsic is [`align_of`].
#[rustc_nounwind]
#[unstable(feature = "core_intrinsics", issue = "none")]
#[rustc_intrinsic_const_stable_indirect]
@ -3386,7 +3386,7 @@ pub const fn variant_count<T>() -> usize;
/// The size of the referenced value in bytes.
///
/// The stabilized version of this intrinsic is [`crate::mem::size_of_val`].
/// The stabilized version of this intrinsic is [`size_of_val`].
///
/// # Safety
///
@ -3399,7 +3399,7 @@ pub const unsafe fn size_of_val<T: ?Sized>(_ptr: *const T) -> usize;
/// The required alignment of the referenced value.
///
/// The stabilized version of this intrinsic is [`core::mem::align_of_val`].
/// The stabilized version of this intrinsic is [`align_of_val`].
///
/// # Safety
///

View File

@ -1,5 +1,5 @@
use crate::iter::FusedIterator;
use crate::mem::{self, MaybeUninit};
use crate::mem::MaybeUninit;
use crate::{fmt, ptr};
/// An iterator over the mapped windows of another iterator.
@ -50,7 +50,7 @@ impl<I: Iterator, F, const N: usize> MapWindows<I, F, N> {
assert!(N != 0, "array in `Iterator::map_windows` must contain more than 0 elements");
// Only ZST arrays' length can be so large.
if mem::size_of::<I::Item>() == 0 {
if size_of::<I::Item>() == 0 {
assert!(
N.checked_mul(2).is_some(),
"array size of `Iterator::map_windows` is too large"

View File

@ -405,7 +405,7 @@ marker_impls! {
///
/// [`Vec<T>`]: ../../std/vec/struct.Vec.html
/// [`String`]: ../../std/string/struct.String.html
/// [`size_of::<T>`]: crate::mem::size_of
/// [`size_of::<T>`]: size_of
/// [impls]: #implementors
#[stable(feature = "rust1", since = "1.0.0")]
#[lang = "copy"]
@ -731,7 +731,6 @@ impl<T: ?Sized> !Sync for *mut T {}
/// # }
/// # fn convert_params(_: ParamType) -> usize { 42 }
/// use std::marker::PhantomData;
/// use std::mem;
///
/// struct ExternalResource<R> {
/// resource_handle: *mut (),
@ -740,7 +739,7 @@ impl<T: ?Sized> !Sync for *mut T {}
///
/// impl<R: ResType> ExternalResource<R> {
/// fn new() -> Self {
/// let size_of_res = mem::size_of::<R>();
/// let size_of_res = size_of::<R>();
/// Self {
/// resource_handle: foreign_lib::new(size_of_res),
/// resource_type: PhantomData,

View File

@ -203,7 +203,7 @@ use crate::{fmt, intrinsics, ptr, slice};
/// `MaybeUninit<T>` is guaranteed to have the same size, alignment, and ABI as `T`:
///
/// ```rust
/// use std::mem::{MaybeUninit, size_of, align_of};
/// use std::mem::MaybeUninit;
/// assert_eq!(size_of::<MaybeUninit<u64>>(), size_of::<u64>());
/// assert_eq!(align_of::<MaybeUninit<u64>>(), align_of::<u64>());
/// ```
@ -215,7 +215,7 @@ use crate::{fmt, intrinsics, ptr, slice};
/// optimizations, potentially resulting in a larger size:
///
/// ```rust
/// # use std::mem::{MaybeUninit, size_of};
/// # use std::mem::MaybeUninit;
/// assert_eq!(size_of::<Option<bool>>(), 1);
/// assert_eq!(size_of::<Option<MaybeUninit<bool>>>(), 2);
/// ```

View File

@ -226,31 +226,27 @@ pub fn forget_unsized<T: ?Sized>(t: T) {
/// # Examples
///
/// ```
/// use std::mem;
///
/// // Some primitives
/// assert_eq!(4, mem::size_of::<i32>());
/// assert_eq!(8, mem::size_of::<f64>());
/// assert_eq!(0, mem::size_of::<()>());
/// assert_eq!(4, size_of::<i32>());
/// assert_eq!(8, size_of::<f64>());
/// assert_eq!(0, size_of::<()>());
///
/// // Some arrays
/// assert_eq!(8, mem::size_of::<[i32; 2]>());
/// assert_eq!(12, mem::size_of::<[i32; 3]>());
/// assert_eq!(0, mem::size_of::<[i32; 0]>());
/// assert_eq!(8, size_of::<[i32; 2]>());
/// assert_eq!(12, size_of::<[i32; 3]>());
/// assert_eq!(0, size_of::<[i32; 0]>());
///
///
/// // Pointer size equality
/// assert_eq!(mem::size_of::<&i32>(), mem::size_of::<*const i32>());
/// assert_eq!(mem::size_of::<&i32>(), mem::size_of::<Box<i32>>());
/// assert_eq!(mem::size_of::<&i32>(), mem::size_of::<Option<&i32>>());
/// assert_eq!(mem::size_of::<Box<i32>>(), mem::size_of::<Option<Box<i32>>>());
/// assert_eq!(size_of::<&i32>(), size_of::<*const i32>());
/// assert_eq!(size_of::<&i32>(), size_of::<Box<i32>>());
/// assert_eq!(size_of::<&i32>(), size_of::<Option<&i32>>());
/// assert_eq!(size_of::<Box<i32>>(), size_of::<Option<Box<i32>>>());
/// ```
///
/// Using `#[repr(C)]`.
///
/// ```
/// use std::mem;
///
/// #[repr(C)]
/// struct FieldStruct {
/// first: u8,
@ -265,13 +261,13 @@ pub fn forget_unsized<T: ?Sized>(t: T) {
/// // The size of the third field is 1, so add 1 to the size. Size is 5.
/// // Finally, the alignment of the struct is 2 (because the largest alignment amongst its
/// // fields is 2), so add 1 to the size for padding. Size is 6.
/// assert_eq!(6, mem::size_of::<FieldStruct>());
/// assert_eq!(6, size_of::<FieldStruct>());
///
/// #[repr(C)]
/// struct TupleStruct(u8, u16, u8);
///
/// // Tuple structs follow the same rules.
/// assert_eq!(6, mem::size_of::<TupleStruct>());
/// assert_eq!(6, size_of::<TupleStruct>());
///
/// // Note that reordering the fields can lower the size. We can remove both padding bytes
/// // by putting `third` before `second`.
@ -282,7 +278,7 @@ pub fn forget_unsized<T: ?Sized>(t: T) {
/// second: u16
/// }
///
/// assert_eq!(4, mem::size_of::<FieldStructOptimized>());
/// assert_eq!(4, size_of::<FieldStructOptimized>());
///
/// // Union size is the size of the largest field.
/// #[repr(C)]
@ -291,7 +287,7 @@ pub fn forget_unsized<T: ?Sized>(t: T) {
/// larger: u16
/// }
///
/// assert_eq!(2, mem::size_of::<ExampleUnion>());
/// assert_eq!(2, size_of::<ExampleUnion>());
/// ```
///
/// [alignment]: align_of
@ -320,13 +316,11 @@ pub const fn size_of<T>() -> usize {
/// # Examples
///
/// ```
/// use std::mem;
///
/// assert_eq!(4, mem::size_of_val(&5i32));
/// assert_eq!(4, size_of_val(&5i32));
///
/// let x: [u8; 13] = [0; 13];
/// let y: &[u8] = &x;
/// assert_eq!(13, mem::size_of_val(y));
/// assert_eq!(13, size_of_val(y));
/// ```
///
/// [`size_of::<T>()`]: size_of
@ -381,7 +375,7 @@ pub const fn size_of_val<T: ?Sized>(val: &T) -> usize {
/// #![feature(layout_for_ptr)]
/// use std::mem;
///
/// assert_eq!(4, mem::size_of_val(&5i32));
/// assert_eq!(4, size_of_val(&5i32));
///
/// let x: [u8; 13] = [0; 13];
/// let y: &[u8] = &x;
@ -454,9 +448,7 @@ pub fn min_align_of_val<T: ?Sized>(val: &T) -> usize {
/// # Examples
///
/// ```
/// use std::mem;
///
/// assert_eq!(4, mem::align_of::<i32>());
/// assert_eq!(4, align_of::<i32>());
/// ```
#[inline(always)]
#[must_use]
@ -477,9 +469,7 @@ pub const fn align_of<T>() -> usize {
/// # Examples
///
/// ```
/// use std::mem;
///
/// assert_eq!(4, mem::align_of_val(&5i32));
/// assert_eq!(4, align_of_val(&5i32));
/// ```
#[inline]
#[must_use]

View File

@ -153,7 +153,7 @@ pub struct Assume {
///
/// ```compile_fail,E0277
/// #![feature(transmutability)]
/// use core::mem::{align_of, TransmuteFrom};
/// use core::mem::TransmuteFrom;
///
/// assert_eq!(align_of::<[u8; 2]>(), 1);
/// assert_eq!(align_of::<u16>(), 2);
@ -172,7 +172,7 @@ pub struct Assume {
///
/// ```rust
/// #![feature(pointer_is_aligned_to, transmutability)]
/// use core::mem::{align_of, Assume, TransmuteFrom};
/// use core::mem::{Assume, TransmuteFrom};
///
/// let src: &[u8; 2] = &[0xFF, 0xFF];
///
@ -337,7 +337,7 @@ impl Assume {
/// transmutability,
/// )]
/// #![allow(incomplete_features)]
/// use core::mem::{align_of, Assume, TransmuteFrom};
/// use core::mem::{Assume, TransmuteFrom};
///
/// /// Attempts to transmute `src` to `&Dst`.
/// ///

View File

@ -253,12 +253,11 @@ macro_rules! define_bignum {
/// Multiplies itself by `5^e` and returns its own mutable reference.
pub fn mul_pow5(&mut self, mut e: usize) -> &mut $name {
use crate::mem;
use crate::num::bignum::SMALL_POW5;
// There are exactly n trailing zeros on 2^n, and the only relevant digit sizes
// are consecutive powers of two, so this is a well-suited index for the table.
let table_index = mem::size_of::<$ty>().trailing_zeros() as usize;
let table_index = size_of::<$ty>().trailing_zeros() as usize;
let (small_power, small_e) = SMALL_POW5[table_index];
let small_power = small_power as $ty;

View File

@ -22,7 +22,6 @@ pub(super) use fpu_precision::set_precision;
#[cfg(all(target_arch = "x86", not(target_feature = "sse2")))]
mod fpu_precision {
use core::arch::asm;
use core::mem::size_of;
/// A structure used to preserve the original value of the FPU control word, so that it can be
/// restored when the structure is dropped.

View File

@ -3627,7 +3627,7 @@ macro_rules! int_impl {
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
pub const fn to_be_bytes(self) -> [u8; mem::size_of::<Self>()] {
pub const fn to_be_bytes(self) -> [u8; size_of::<Self>()] {
self.to_be().to_ne_bytes()
}
@ -3647,7 +3647,7 @@ macro_rules! int_impl {
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
pub const fn to_le_bytes(self) -> [u8; mem::size_of::<Self>()] {
pub const fn to_le_bytes(self) -> [u8; size_of::<Self>()] {
self.to_le().to_ne_bytes()
}
@ -3683,7 +3683,7 @@ macro_rules! int_impl {
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
pub const fn to_ne_bytes(self) -> [u8; mem::size_of::<Self>()] {
pub const fn to_ne_bytes(self) -> [u8; size_of::<Self>()] {
// SAFETY: integers are plain old datatypes so we can always transmute them to
// arrays of bytes
unsafe { mem::transmute(self) }
@ -3705,7 +3705,7 @@ macro_rules! int_impl {
///
/// ```
#[doc = concat!("fn read_be_", stringify!($SelfT), "(input: &mut &[u8]) -> ", stringify!($SelfT), " {")]
#[doc = concat!(" let (int_bytes, rest) = input.split_at(std::mem::size_of::<", stringify!($SelfT), ">());")]
#[doc = concat!(" let (int_bytes, rest) = input.split_at(size_of::<", stringify!($SelfT), ">());")]
/// *input = rest;
#[doc = concat!(" ", stringify!($SelfT), "::from_be_bytes(int_bytes.try_into().unwrap())")]
/// }
@ -3714,7 +3714,7 @@ macro_rules! int_impl {
#[rustc_const_stable(feature = "const_int_conversion", since = "1.44.0")]
#[must_use]
#[inline]
pub const fn from_be_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
pub const fn from_be_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_be(Self::from_ne_bytes(bytes))
}
@ -3734,7 +3734,7 @@ macro_rules! int_impl {
///
/// ```
#[doc = concat!("fn read_le_", stringify!($SelfT), "(input: &mut &[u8]) -> ", stringify!($SelfT), " {")]
#[doc = concat!(" let (int_bytes, rest) = input.split_at(std::mem::size_of::<", stringify!($SelfT), ">());")]
#[doc = concat!(" let (int_bytes, rest) = input.split_at(size_of::<", stringify!($SelfT), ">());")]
/// *input = rest;
#[doc = concat!(" ", stringify!($SelfT), "::from_le_bytes(int_bytes.try_into().unwrap())")]
/// }
@ -3743,7 +3743,7 @@ macro_rules! int_impl {
#[rustc_const_stable(feature = "const_int_conversion", since = "1.44.0")]
#[must_use]
#[inline]
pub const fn from_le_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
pub const fn from_le_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_le(Self::from_ne_bytes(bytes))
}
@ -3774,7 +3774,7 @@ macro_rules! int_impl {
///
/// ```
#[doc = concat!("fn read_ne_", stringify!($SelfT), "(input: &mut &[u8]) -> ", stringify!($SelfT), " {")]
#[doc = concat!(" let (int_bytes, rest) = input.split_at(std::mem::size_of::<", stringify!($SelfT), ">());")]
#[doc = concat!(" let (int_bytes, rest) = input.split_at(size_of::<", stringify!($SelfT), ">());")]
/// *input = rest;
#[doc = concat!(" ", stringify!($SelfT), "::from_ne_bytes(int_bytes.try_into().unwrap())")]
/// }
@ -3785,7 +3785,7 @@ macro_rules! int_impl {
// SAFETY: const sound because integers are plain old datatypes so we can always
// transmute to them
#[inline]
pub const fn from_ne_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
pub const fn from_ne_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
// SAFETY: integers are plain old datatypes so we can always transmute to them
unsafe { mem::transmute(bytes) }
}

View File

@ -1241,7 +1241,7 @@ impl usize {
/// Returns a `usize` where every byte is equal to `x`.
#[inline]
pub(crate) const fn repeat_u8(x: u8) -> usize {
usize::from_ne_bytes([x; mem::size_of::<usize>()])
usize::from_ne_bytes([x; size_of::<usize>()])
}
/// Returns a `usize` where every byte pair is equal to `x`.
@ -1249,7 +1249,7 @@ impl usize {
pub(crate) const fn repeat_u16(x: u16) -> usize {
let mut r = 0usize;
let mut i = 0;
while i < mem::size_of::<usize>() {
while i < size_of::<usize>() {
// Use `wrapping_shl` to make it work on targets with 16-bit `usize`
r = r.wrapping_shl(16) | (x as usize);
i += 2;
@ -1330,7 +1330,7 @@ pub enum FpCategory {
#[inline(always)]
#[unstable(issue = "none", feature = "std_internals")]
pub const fn can_not_overflow<T>(radix: u32, is_signed_ty: bool, digits: &[u8]) -> bool {
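// Rationale: with radix <= 16 each digit carries at most 4 bits, so
// `2 * size_of::<T>()` digits never exceed `T`'s bit width; signed types
// allow one digit fewer, which keeps the value clear of the sign bit.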
radix <= 16 && digits.len() <= mem::size_of::<T>() * 2 - is_signed_ty as usize
radix <= 16 && digits.len() <= size_of::<T>() * 2 - is_signed_ty as usize
}
#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]

View File

@ -86,7 +86,7 @@ impl_zeroable_primitive!(
/// For example, `Option<NonZero<u32>>` is the same size as `u32`:
///
/// ```
/// use core::{mem::size_of, num::NonZero};
/// use core::{num::NonZero};
///
/// assert_eq!(size_of::<Option<NonZero<u32>>>(), size_of::<u32>());
/// ```
@ -102,7 +102,6 @@ impl_zeroable_primitive!(
/// `Option<NonZero<T>>` are guaranteed to have the same size and alignment:
///
/// ```
/// # use std::mem::{size_of, align_of};
/// use std::num::NonZero;
///
/// assert_eq!(size_of::<NonZero<u32>>(), size_of::<Option<NonZero<u32>>>());
@ -500,7 +499,6 @@ macro_rules! nonzero_integer {
#[doc = concat!("For example, `Option<", stringify!($Ty), ">` is the same size as `", stringify!($Int), "`:")]
///
/// ```rust
/// use std::mem::size_of;
#[doc = concat!("assert_eq!(size_of::<Option<core::num::", stringify!($Ty), ">>(), size_of::<", stringify!($Int), ">());")]
/// ```
///
@ -516,7 +514,6 @@ macro_rules! nonzero_integer {
/// are guaranteed to have the same size and alignment:
///
/// ```
/// # use std::mem::{size_of, align_of};
#[doc = concat!("use std::num::", stringify!($Ty), ";")]
///
#[doc = concat!("assert_eq!(size_of::<", stringify!($Ty), ">(), size_of::<Option<", stringify!($Ty), ">>());")]

View File

@ -2586,7 +2586,7 @@ macro_rules! uint_impl {
without modifying the original"]
#[inline]
pub const fn abs_diff(self, other: Self) -> Self {
if mem::size_of::<Self>() == 1 {
if size_of::<Self>() == 1 {
// Trick LLVM into generating the psadbw instruction when SSE2
// is available and this function is autovectorized for u8's.
(self as i32).wrapping_sub(other as i32).abs() as Self
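
A quick standalone check of the widening trick in that branch: the subtraction is done in `i32`, where it cannot wrap for `u8` inputs, so the absolute value always fits back into `u8`.

fn main() {
    let (a, b) = (7u8, 250u8);
    // 7 - 250 = -243 in i32; |-243| = 243 fits in u8.
    let widened = (a as i32).wrapping_sub(b as i32).abs() as u8;
    assert_eq!(widened, 243);
    assert_eq!(widened, a.abs_diff(b));
}
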
@ -3465,7 +3465,7 @@ macro_rules! uint_impl {
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
pub const fn to_be_bytes(self) -> [u8; mem::size_of::<Self>()] {
pub const fn to_be_bytes(self) -> [u8; size_of::<Self>()] {
self.to_be().to_ne_bytes()
}
@ -3485,7 +3485,7 @@ macro_rules! uint_impl {
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
pub const fn to_le_bytes(self) -> [u8; mem::size_of::<Self>()] {
pub const fn to_le_bytes(self) -> [u8; size_of::<Self>()] {
self.to_le().to_ne_bytes()
}
@ -3521,7 +3521,7 @@ macro_rules! uint_impl {
// SAFETY: const sound because integers are plain old datatypes so we can always
// transmute them to arrays of bytes
#[inline]
pub const fn to_ne_bytes(self) -> [u8; mem::size_of::<Self>()] {
pub const fn to_ne_bytes(self) -> [u8; size_of::<Self>()] {
// SAFETY: integers are plain old datatypes so we can always transmute them to
// arrays of bytes
unsafe { mem::transmute(self) }
@ -3543,7 +3543,7 @@ macro_rules! uint_impl {
///
/// ```
#[doc = concat!("fn read_be_", stringify!($SelfT), "(input: &mut &[u8]) -> ", stringify!($SelfT), " {")]
#[doc = concat!(" let (int_bytes, rest) = input.split_at(std::mem::size_of::<", stringify!($SelfT), ">());")]
#[doc = concat!(" let (int_bytes, rest) = input.split_at(size_of::<", stringify!($SelfT), ">());")]
/// *input = rest;
#[doc = concat!(" ", stringify!($SelfT), "::from_be_bytes(int_bytes.try_into().unwrap())")]
/// }
@ -3552,7 +3552,7 @@ macro_rules! uint_impl {
#[rustc_const_stable(feature = "const_int_conversion", since = "1.44.0")]
#[must_use]
#[inline]
pub const fn from_be_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
pub const fn from_be_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_be(Self::from_ne_bytes(bytes))
}
@ -3572,7 +3572,7 @@ macro_rules! uint_impl {
///
/// ```
#[doc = concat!("fn read_le_", stringify!($SelfT), "(input: &mut &[u8]) -> ", stringify!($SelfT), " {")]
#[doc = concat!(" let (int_bytes, rest) = input.split_at(std::mem::size_of::<", stringify!($SelfT), ">());")]
#[doc = concat!(" let (int_bytes, rest) = input.split_at(size_of::<", stringify!($SelfT), ">());")]
/// *input = rest;
#[doc = concat!(" ", stringify!($SelfT), "::from_le_bytes(int_bytes.try_into().unwrap())")]
/// }
@ -3581,7 +3581,7 @@ macro_rules! uint_impl {
#[rustc_const_stable(feature = "const_int_conversion", since = "1.44.0")]
#[must_use]
#[inline]
pub const fn from_le_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
pub const fn from_le_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_le(Self::from_ne_bytes(bytes))
}
@ -3612,7 +3612,7 @@ macro_rules! uint_impl {
///
/// ```
#[doc = concat!("fn read_ne_", stringify!($SelfT), "(input: &mut &[u8]) -> ", stringify!($SelfT), " {")]
#[doc = concat!(" let (int_bytes, rest) = input.split_at(std::mem::size_of::<", stringify!($SelfT), ">());")]
#[doc = concat!(" let (int_bytes, rest) = input.split_at(size_of::<", stringify!($SelfT), ">());")]
/// *input = rest;
#[doc = concat!(" ", stringify!($SelfT), "::from_ne_bytes(int_bytes.try_into().unwrap())")]
/// }
@ -3623,7 +3623,7 @@ macro_rules! uint_impl {
// SAFETY: const sound because integers are plain old datatypes so we can always
// transmute to them
#[inline]
pub const fn from_ne_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
pub const fn from_ne_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
// SAFETY: integers are plain old datatypes so we can always transmute to them
unsafe { mem::transmute(bytes) }
}
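
As a usage sketch of these conversions: `to_be_bytes`/`to_le_bytes` compose `to_ne_bytes` with a byte swap on the appropriate targets, so round trips always hold and the array length is exactly `size_of::<Self>()`.

fn main() {
    let n: u32 = 0x1234_5678;
    assert_eq!(n.to_be_bytes(), [0x12, 0x34, 0x56, 0x78]);
    assert_eq!(n.to_le_bytes(), [0x78, 0x56, 0x34, 0x12]);
    assert_eq!(u32::from_be_bytes(n.to_be_bytes()), n);
    assert_eq!(u32::from_le_bytes(n.to_le_bytes()), n);
    // The byte-array length is size_of::<u32>(), i.e. 4.
    assert_eq!(n.to_ne_bytes().len(), size_of::<u32>());
}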

View File

@ -398,12 +398,12 @@ mod prim_never {}
/// let v = vec!['h', 'e', 'l', 'l', 'o'];
///
/// // five elements times four bytes for each element
/// assert_eq!(20, v.len() * std::mem::size_of::<char>());
/// assert_eq!(20, v.len() * size_of::<char>());
///
/// let s = String::from("hello");
///
/// // five elements times one byte per element
/// assert_eq!(5, s.len() * std::mem::size_of::<u8>());
/// assert_eq!(5, s.len() * size_of::<u8>());
/// ```
///
/// [`String`]: ../std/string/struct.String.html
@ -443,8 +443,8 @@ mod prim_never {}
/// let s = String::from("love: ❤️");
/// let v: Vec<char> = s.chars().collect();
///
/// assert_eq!(12, std::mem::size_of_val(&s[..]));
/// assert_eq!(32, std::mem::size_of_val(&v[..]));
/// assert_eq!(12, size_of_val(&s[..]));
/// assert_eq!(32, size_of_val(&v[..]));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
mod prim_char {}
@ -594,10 +594,8 @@ impl () {}
/// #[allow(unused_extern_crates)]
/// extern crate libc;
///
/// use std::mem;
///
/// unsafe {
/// let my_num: *mut i32 = libc::malloc(mem::size_of::<i32>()) as *mut i32;
/// let my_num: *mut i32 = libc::malloc(size_of::<i32>()) as *mut i32;
/// if my_num.is_null() {
/// panic!("failed to allocate memory");
/// }
@ -893,11 +891,11 @@ mod prim_array {}
///
/// ```
/// # use std::rc::Rc;
/// let pointer_size = std::mem::size_of::<&u8>();
/// assert_eq!(2 * pointer_size, std::mem::size_of::<&[u8]>());
/// assert_eq!(2 * pointer_size, std::mem::size_of::<*const [u8]>());
/// assert_eq!(2 * pointer_size, std::mem::size_of::<Box<[u8]>>());
/// assert_eq!(2 * pointer_size, std::mem::size_of::<Rc<[u8]>>());
/// let pointer_size = size_of::<&u8>();
/// assert_eq!(2 * pointer_size, size_of::<&[u8]>());
/// assert_eq!(2 * pointer_size, size_of::<*const [u8]>());
/// assert_eq!(2 * pointer_size, size_of::<Box<[u8]>>());
/// assert_eq!(2 * pointer_size, size_of::<Rc<[u8]>>());
/// ```
///
/// ## Trait Implementations
@ -1692,15 +1690,13 @@ mod prim_ref {}
/// This zero-sized type *coerces* to a regular function pointer. For example:
///
/// ```rust
/// use std::mem;
///
/// fn bar(x: i32) {}
///
/// let not_bar_ptr = bar; // `not_bar_ptr` is zero-sized, uniquely identifying `bar`
/// assert_eq!(mem::size_of_val(&not_bar_ptr), 0);
/// assert_eq!(size_of_val(&not_bar_ptr), 0);
///
/// let bar_ptr: fn(i32) = not_bar_ptr; // force coercion to function pointer
/// assert_eq!(mem::size_of_val(&bar_ptr), mem::size_of::<usize>());
/// assert_eq!(size_of_val(&bar_ptr), size_of::<usize>());
///
/// let footgun = &bar; // this is a shared reference to the zero-sized type identifying `bar`
/// ```

View File

@ -13,8 +13,8 @@ use crate::{cmp, fmt, hash, mem, num};
pub struct Alignment(AlignmentEnum);
// Alignment is `repr(usize)`, but via extra steps.
const _: () = assert!(mem::size_of::<Alignment>() == mem::size_of::<usize>());
const _: () = assert!(mem::align_of::<Alignment>() == mem::align_of::<usize>());
const _: () = assert!(size_of::<Alignment>() == size_of::<usize>());
const _: () = assert!(align_of::<Alignment>() == align_of::<usize>());
fn _alignment_can_be_structurally_matched(a: Alignment) -> bool {
matches!(a, Alignment::MIN)
@ -38,14 +38,14 @@ impl Alignment {
/// Returns the alignment for a type.
///
/// This provides the same numerical value as [`mem::align_of`],
/// This provides the same numerical value as [`align_of`],
/// but in an `Alignment` instead of a `usize`.
#[unstable(feature = "ptr_alignment_type", issue = "102070")]
#[inline]
#[must_use]
pub const fn of<T>() -> Self {
// This can't actually panic since type alignment is always a power of two.
const { Alignment::new(mem::align_of::<T>()).unwrap() }
const { Alignment::new(align_of::<T>()).unwrap() }
}
/// Creates an `Alignment` from a `usize`, or returns `None` if it's

View File

@ -1,7 +1,7 @@
use super::*;
use crate::cmp::Ordering::{Equal, Greater, Less};
use crate::intrinsics::const_eval_select;
use crate::mem::SizedTypeProperties;
use crate::mem::{self, SizedTypeProperties};
use crate::slice::{self, SliceIndex};
impl<T: ?Sized> *const T {
@ -595,9 +595,9 @@ impl<T: ?Sized> *const T {
}
/// Calculates the distance between two pointers within the same allocation. The returned value is in
/// units of T: the distance in bytes divided by `mem::size_of::<T>()`.
/// units of T: the distance in bytes divided by `size_of::<T>()`.
///
/// This is equivalent to `(self as isize - origin as isize) / (mem::size_of::<T>() as isize)`,
/// This is equivalent to `(self as isize - origin as isize) / (size_of::<T>() as isize)`,
/// except that it has a lot more opportunities for UB, in exchange for the compiler
/// better understanding what you are doing.
///
@ -633,7 +633,7 @@ impl<T: ?Sized> *const T {
/// objects is not known at compile-time. However, the requirement also exists at
/// runtime and may be exploited by optimizations. If you wish to compute the difference between
/// pointers that are not guaranteed to be from the same allocation, use `(self as isize -
/// origin as isize) / mem::size_of::<T>()`.
/// origin as isize) / size_of::<T>()`.
// FIXME: recommend `addr()` instead of `as usize` once that is stable.
///
/// [`add`]: #method.add
@ -683,7 +683,7 @@ impl<T: ?Sized> *const T {
where
T: Sized,
{
let pointee_size = mem::size_of::<T>();
let pointee_size = size_of::<T>();
assert!(0 < pointee_size && pointee_size <= isize::MAX as usize);
// SAFETY: the caller must uphold the safety contract for `ptr_offset_from`.
unsafe { intrinsics::ptr_offset_from(self, origin) }
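
A minimal sketch of the unit convention documented above: the result counts elements, and multiplying by `size_of::<T>()` recovers the byte distance.

fn main() {
    let a = [0i32; 5];
    let base = a.as_ptr();
    // SAFETY: both pointers lie within the same array.
    let diff = unsafe { base.add(3).offset_from(base) };
    assert_eq!(diff, 3); // three elements...
    assert_eq!(diff as usize * size_of::<i32>(), 12); // ...twelve bytes
}
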
@ -709,7 +709,7 @@ impl<T: ?Sized> *const T {
/// Calculates the distance between two pointers within the same allocation, *where it's known that
/// `self` is equal to or greater than `origin`*. The returned value is in
/// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
/// units of T: the distance in bytes is divided by `size_of::<T>()`.
///
/// This computes the same value that [`offset_from`](#method.offset_from)
/// would compute, but with the added precondition that the offset is
@ -793,7 +793,7 @@ impl<T: ?Sized> *const T {
) => runtime_ptr_ge(this, origin)
);
let pointee_size = mem::size_of::<T>();
let pointee_size = size_of::<T>();
assert!(0 < pointee_size && pointee_size <= isize::MAX as usize);
// SAFETY: the caller must uphold the safety contract for `ptr_offset_from_unsigned`.
unsafe { intrinsics::ptr_offset_from_unsigned(self, origin) }
@ -1313,7 +1313,7 @@ impl<T: ?Sized> *const T {
unsafe { read_unaligned(self) }
}
/// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
/// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
/// and destination may overlap.
///
/// NOTE: this has the *same* argument order as [`ptr::copy`].
@ -1333,7 +1333,7 @@ impl<T: ?Sized> *const T {
unsafe { copy(self, dest, count) }
}
/// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
/// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
/// and destination may *not* overlap.
///
/// NOTE: this has the *same* argument order as [`ptr::copy_nonoverlapping`].
@ -1375,8 +1375,6 @@ impl<T: ?Sized> *const T {
/// Accessing adjacent `u8` as `u16`
///
/// ```
/// use std::mem::align_of;
///
/// # unsafe {
/// let x = [5_u8, 6, 7, 8, 9];
/// let ptr = x.as_ptr();
@ -1436,7 +1434,7 @@ impl<T: ?Sized> *const T {
where
T: Sized,
{
self.is_aligned_to(mem::align_of::<T>())
self.is_aligned_to(align_of::<T>())
}
/// Returns whether the pointer is aligned to `align`.
@ -1595,7 +1593,7 @@ impl<T> *const [T] {
/// When calling this method, you have to ensure that *either* the pointer is null *or*
/// all of the following is true:
///
/// * The pointer must be [valid] for reads for `ptr.len() * mem::size_of::<T>()` many bytes,
/// * The pointer must be [valid] for reads for `ptr.len() * size_of::<T>()` many bytes,
/// and it must be properly aligned. This means in particular:
///
/// * The entire memory range of this slice must be contained within a single [allocated object]!
@ -1607,7 +1605,7 @@ impl<T> *const [T] {
/// them from other data. You can obtain a pointer that is usable as `data`
/// for zero-length slices using [`NonNull::dangling()`].
///
/// * The total size `ptr.len() * mem::size_of::<T>()` of the slice must be no larger than `isize::MAX`.
/// * The total size `ptr.len() * size_of::<T>()` of the slice must be no larger than `isize::MAX`.
/// See the safety documentation of [`pointer::offset`].
///
/// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is

View File

@ -74,7 +74,7 @@ pub trait Pointee {
/// #![feature(ptr_metadata)]
///
/// fn this_never_panics<T: std::ptr::Thin>() {
/// assert_eq!(std::mem::size_of::<&T>(), std::mem::size_of::<usize>())
/// assert_eq!(size_of::<&T>(), size_of::<usize>())
/// }
/// ```
#[unstable(feature = "ptr_metadata", issue = "81513")]

View File

@ -48,7 +48,7 @@
//!
//! Valid raw pointers as defined above are not necessarily properly aligned (where
//! "proper" alignment is defined by the pointee type, i.e., `*const T` must be
//! aligned to `mem::align_of::<T>()`). However, most functions require their
//! aligned to `align_of::<T>()`). However, most functions require their
//! arguments to be properly aligned, and will explicitly state
//! this requirement in their documentation. Notable exceptions to this are
//! [`read_unaligned`] and [`write_unaligned`].
@ -297,7 +297,7 @@
//!
//! // Our value, which must have enough alignment to have spare least-significant-bits.
//! let my_precious_data: u32 = 17;
//! assert!(core::mem::align_of::<u32>() > 1);
//! assert!(align_of::<u32>() > 1);
//!
//! // Create a tagged pointer
//! let ptr = &my_precious_data as *const u32;
@ -1098,12 +1098,12 @@ pub const unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
} else {
macro_rules! attempt_swap_as_chunks {
($ChunkTy:ty) => {
if mem::align_of::<T>() >= mem::align_of::<$ChunkTy>()
&& mem::size_of::<T>() % mem::size_of::<$ChunkTy>() == 0
if align_of::<T>() >= align_of::<$ChunkTy>()
&& size_of::<T>() % size_of::<$ChunkTy>() == 0
{
let x: *mut $ChunkTy = x.cast();
let y: *mut $ChunkTy = y.cast();
let count = count * (mem::size_of::<T>() / mem::size_of::<$ChunkTy>());
let count = count * (size_of::<T>() / size_of::<$ChunkTy>());
// SAFETY: these are the same bytes that the caller promised were
// ok, just typed as `MaybeUninit<ChunkTy>`s instead of as `T`s.
// The `if` condition above ensures that we're not violating
@ -1117,9 +1117,9 @@ pub const unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
// Split up the slice into small power-of-two-sized chunks that LLVM is able
// to vectorize (unless it's a special type with more-than-pointer alignment,
// because we don't want to pessimize things like slices of SIMD vectors.)
if mem::align_of::<T>() <= mem::size_of::<usize>()
&& (!mem::size_of::<T>().is_power_of_two()
|| mem::size_of::<T>() > mem::size_of::<usize>() * 2)
if align_of::<T>() <= size_of::<usize>()
&& (!size_of::<T>().is_power_of_two()
|| size_of::<T>() > size_of::<usize>() * 2)
{
attempt_swap_as_chunks!(usize);
attempt_swap_as_chunks!(u8);
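
For context, a usage sketch of the public entry point whose chunking strategy is shown above:

use std::ptr;

fn main() {
    let mut a = [1u8, 2, 3, 4];
    let mut b = [5u8, 6, 7, 8];
    // SAFETY: distinct locals cannot overlap, and both arrays are valid
    // for reads and writes of four elements.
    unsafe { ptr::swap_nonoverlapping(a.as_mut_ptr(), b.as_mut_ptr(), 4) };
    assert_eq!(a, [5, 6, 7, 8]);
    assert_eq!(b, [1, 2, 3, 4]);
}
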
@ -1443,10 +1443,8 @@ pub const unsafe fn read<T>(src: *const T) -> T {
/// Read a `usize` value from a byte buffer:
///
/// ```
/// use std::mem;
///
/// fn read_usize(x: &[u8]) -> usize {
/// assert!(x.len() >= mem::size_of::<usize>());
/// assert!(x.len() >= size_of::<usize>());
///
/// let ptr = x.as_ptr() as *const usize;
///
@ -1467,7 +1465,7 @@ pub const unsafe fn read_unaligned<T>(src: *const T) -> T {
// Also, since we just wrote a valid value into `tmp`, it is guaranteed
// to be properly initialized.
unsafe {
copy_nonoverlapping(src as *const u8, tmp.as_mut_ptr() as *mut u8, mem::size_of::<T>());
copy_nonoverlapping(src as *const u8, tmp.as_mut_ptr() as *mut u8, size_of::<T>());
tmp.assume_init()
}
}
@ -1647,10 +1645,8 @@ pub const unsafe fn write<T>(dst: *mut T, src: T) {
/// Write a `usize` value to a byte buffer:
///
/// ```
/// use std::mem;
///
/// fn write_usize(x: &mut [u8], val: usize) {
/// assert!(x.len() >= mem::size_of::<usize>());
/// assert!(x.len() >= size_of::<usize>());
///
/// let ptr = x.as_mut_ptr() as *mut usize;
///
@ -1667,7 +1663,7 @@ pub const unsafe fn write_unaligned<T>(dst: *mut T, src: T) {
// `dst` cannot overlap `src` because the caller has mutable access
// to `dst` while `src` is owned by this function.
unsafe {
copy_nonoverlapping((&raw const src) as *const u8, dst as *mut u8, mem::size_of::<T>());
copy_nonoverlapping((&raw const src) as *const u8, dst as *mut u8, size_of::<T>());
// We are calling the intrinsic directly to avoid function calls in the generated code.
intrinsics::forget(src);
}
@ -1911,7 +1907,7 @@ pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
inverse & m_minus_one
}
let stride = mem::size_of::<T>();
let stride = size_of::<T>();
let addr: usize = p.addr();
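
A usage sketch of the public `pointer::align_offset` built on this helper: it returns how many elements (here, bytes) to advance so the pointer meets the requested alignment.

fn main() {
    let buf = [0u8; 16];
    let p = buf.as_ptr();
    let off = p.align_offset(align_of::<u32>());
    assert!(off < align_of::<u32>()); // for byte pointers, at most align - 1 steps
    assert_eq!((p as usize + off) % align_of::<u32>(), 0);
}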

View File

@ -1,7 +1,7 @@
use super::*;
use crate::cmp::Ordering::{Equal, Greater, Less};
use crate::intrinsics::const_eval_select;
use crate::mem::SizedTypeProperties;
use crate::mem::{self, SizedTypeProperties};
use crate::slice::{self, SliceIndex};
impl<T: ?Sized> *mut T {
@ -769,9 +769,9 @@ impl<T: ?Sized> *mut T {
}
/// Calculates the distance between two pointers within the same allocation. The returned value is in
/// units of T: the distance in bytes divided by `mem::size_of::<T>()`.
/// units of T: the distance in bytes divided by `size_of::<T>()`.
///
/// This is equivalent to `(self as isize - origin as isize) / (mem::size_of::<T>() as isize)`,
/// This is equivalent to `(self as isize - origin as isize) / (size_of::<T>() as isize)`,
/// except that it has a lot more opportunities for UB, in exchange for the compiler
/// better understanding what you are doing.
///
@ -807,7 +807,7 @@ impl<T: ?Sized> *mut T {
/// objects is not known at compile-time. However, the requirement also exists at
/// runtime and may be exploited by optimizations. If you wish to compute the difference between
/// pointers that are not guaranteed to be from the same allocation, use `(self as isize -
/// origin as isize) / mem::size_of::<T>()`.
/// origin as isize) / size_of::<T>()`.
// FIXME: recommend `addr()` instead of `as usize` once that is stable.
///
/// [`add`]: #method.add
@ -881,7 +881,7 @@ impl<T: ?Sized> *mut T {
/// Calculates the distance between two pointers within the same allocation, *where it's known that
/// `self` is equal to or greater than `origin`*. The returned value is in
/// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
/// units of T: the distance in bytes is divided by `size_of::<T>()`.
///
/// This computes the same value that [`offset_from`](#method.offset_from)
/// would compute, but with the added precondition that the offset is
@ -1397,7 +1397,7 @@ impl<T: ?Sized> *mut T {
unsafe { read_unaligned(self) }
}
/// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
/// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
/// and destination may overlap.
///
/// NOTE: this has the *same* argument order as [`ptr::copy`].
@ -1417,7 +1417,7 @@ impl<T: ?Sized> *mut T {
unsafe { copy(self, dest, count) }
}
/// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
/// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
/// and destination may *not* overlap.
///
/// NOTE: this has the *same* argument order as [`ptr::copy_nonoverlapping`].
@ -1437,7 +1437,7 @@ impl<T: ?Sized> *mut T {
unsafe { copy_nonoverlapping(self, dest, count) }
}
/// Copies `count * size_of<T>` bytes from `src` to `self`. The source
/// Copies `count * size_of::<T>()` bytes from `src` to `self`. The source
/// and destination may overlap.
///
/// NOTE: this has the *opposite* argument order of [`ptr::copy`].
@ -1457,7 +1457,7 @@ impl<T: ?Sized> *mut T {
unsafe { copy(src, self, count) }
}
/// Copies `count * size_of<T>` bytes from `src` to `self`. The source
/// Copies `count * size_of::<T>()` bytes from `src` to `self`. The source
/// and destination may *not* overlap.
///
/// NOTE: this has the *opposite* argument order of [`ptr::copy_nonoverlapping`].
@ -1623,8 +1623,6 @@ impl<T: ?Sized> *mut T {
/// Accessing adjacent `u8` as `u16`
///
/// ```
/// use std::mem::align_of;
///
/// # unsafe {
/// let mut x = [5_u8, 6, 7, 8, 9];
/// let ptr = x.as_mut_ptr();
@ -1689,7 +1687,7 @@ impl<T: ?Sized> *mut T {
where
T: Sized,
{
self.is_aligned_to(mem::align_of::<T>())
self.is_aligned_to(align_of::<T>())
}
/// Returns whether the pointer is aligned to `align`.
@ -1950,7 +1948,7 @@ impl<T> *mut [T] {
/// When calling this method, you have to ensure that *either* the pointer is null *or*
/// all of the following is true:
///
/// * The pointer must be [valid] for reads for `ptr.len() * mem::size_of::<T>()` many bytes,
/// * The pointer must be [valid] for reads for `ptr.len() * size_of::<T>()` many bytes,
/// and it must be properly aligned. This means in particular:
///
/// * The entire memory range of this slice must be contained within a single [allocated object]!
@ -1962,7 +1960,7 @@ impl<T> *mut [T] {
/// them from other data. You can obtain a pointer that is usable as `data`
/// for zero-length slices using [`NonNull::dangling()`].
///
/// * The total size `ptr.len() * mem::size_of::<T>()` of the slice must be no larger than `isize::MAX`.
/// * The total size `ptr.len() * size_of::<T>()` of the slice must be no larger than `isize::MAX`.
/// See the safety documentation of [`pointer::offset`].
///
/// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
@ -2008,7 +2006,7 @@ impl<T> *mut [T] {
/// When calling this method, you have to ensure that *either* the pointer is null *or*
/// all of the following is true:
///
/// * The pointer must be [valid] for reads and writes for `ptr.len() * mem::size_of::<T>()`
/// * The pointer must be [valid] for reads and writes for `ptr.len() * size_of::<T>()`
/// many bytes, and it must be properly aligned. This means in particular:
///
/// * The entire memory range of this slice must be contained within a single [allocated object]!
@ -2020,7 +2018,7 @@ impl<T> *mut [T] {
/// them from other data. You can obtain a pointer that is usable as `data`
/// for zero-length slices using [`NonNull::dangling()`].
///
/// * The total size `ptr.len() * mem::size_of::<T>()` of the slice must be no larger than `isize::MAX`.
/// * The total size `ptr.len() * size_of::<T>()` of the slice must be no larger than `isize::MAX`.
/// See the safety documentation of [`pointer::offset`].
///
/// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is

View File

@ -49,7 +49,6 @@ use crate::{fmt, hash, intrinsics, mem, ptr};
/// are guaranteed to have the same size and alignment:
///
/// ```
/// # use std::mem::{size_of, align_of};
/// use std::ptr::NonNull;
///
/// assert_eq!(size_of::<NonNull<i16>>(), size_of::<Option<NonNull<i16>>>());
@ -724,9 +723,9 @@ impl<T: ?Sized> NonNull<T> {
}
/// Calculates the distance between two pointers within the same allocation. The returned value is in
/// units of T: the distance in bytes divided by `mem::size_of::<T>()`.
/// units of T: the distance in bytes divided by `size_of::<T>()`.
///
/// This is equivalent to `(self as isize - origin as isize) / (mem::size_of::<T>() as isize)`,
/// This is equivalent to `(self as isize - origin as isize) / (size_of::<T>() as isize)`,
/// except that it has a lot more opportunities for UB, in exchange for the compiler
/// better understanding what you are doing.
///
@ -762,7 +761,7 @@ impl<T: ?Sized> NonNull<T> {
/// objects is not known at compile-time. However, the requirement also exists at
/// runtime and may be exploited by optimizations. If you wish to compute the difference between
/// pointers that are not guaranteed to be from the same allocation, use `(self as isize -
/// origin as isize) / mem::size_of::<T>()`.
/// origin as isize) / size_of::<T>()`.
// FIXME: recommend `addr()` instead of `as usize` once that is stable.
///
/// [`add`]: #method.add
@ -842,7 +841,7 @@ impl<T: ?Sized> NonNull<T> {
/// Calculates the distance between two pointers within the same allocation, *where it's known that
/// `self` is equal to or greater than `origin`*. The returned value is in
/// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
/// units of T: the distance in bytes is divided by `size_of::<T>()`.
///
/// This computes the same value that [`offset_from`](#method.offset_from)
/// would compute, but with the added precondition that the offset is
@ -989,7 +988,7 @@ impl<T: ?Sized> NonNull<T> {
unsafe { ptr::read_unaligned(self.as_ptr()) }
}
/// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
/// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
/// and destination may overlap.
///
/// NOTE: this has the *same* argument order as [`ptr::copy`].
@ -1009,7 +1008,7 @@ impl<T: ?Sized> NonNull<T> {
unsafe { ptr::copy(self.as_ptr(), dest.as_ptr(), count) }
}
/// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
/// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
/// and destination may *not* overlap.
///
/// NOTE: this has the *same* argument order as [`ptr::copy_nonoverlapping`].
@ -1029,7 +1028,7 @@ impl<T: ?Sized> NonNull<T> {
unsafe { ptr::copy_nonoverlapping(self.as_ptr(), dest.as_ptr(), count) }
}
/// Copies `count * size_of<T>` bytes from `src` to `self`. The source
/// Copies `count * size_of::<T>()` bytes from `src` to `self`. The source
/// and destination may overlap.
///
/// NOTE: this has the *opposite* argument order of [`ptr::copy`].
@ -1049,7 +1048,7 @@ impl<T: ?Sized> NonNull<T> {
unsafe { ptr::copy(src.as_ptr(), self.as_ptr(), count) }
}
/// Copies `count * size_of<T>` bytes from `src` to `self`. The source
/// Copies `count * size_of::<T>()` bytes from `src` to `self`. The source
/// and destination may *not* overlap.
///
/// NOTE: this has the *opposite* argument order of [`ptr::copy_nonoverlapping`].
@ -1223,7 +1222,6 @@ impl<T: ?Sized> NonNull<T> {
/// Accessing adjacent `u8` as `u16`
///
/// ```
/// use std::mem::align_of;
/// use std::ptr::NonNull;
///
/// # unsafe {
@ -1443,7 +1441,7 @@ impl<T> NonNull<[T]> {
///
/// When calling this method, you have to ensure that all of the following is true:
///
/// * The pointer must be [valid] for reads for `ptr.len() * mem::size_of::<T>()` many bytes,
/// * The pointer must be [valid] for reads for `ptr.len() * size_of::<T>()` many bytes,
/// and it must be properly aligned. This means in particular:
///
/// * The entire memory range of this slice must be contained within a single allocated object!
@ -1455,7 +1453,7 @@ impl<T> NonNull<[T]> {
/// them from other data. You can obtain a pointer that is usable as `data`
/// for zero-length slices using [`NonNull::dangling()`].
///
/// * The total size `ptr.len() * mem::size_of::<T>()` of the slice must be no larger than `isize::MAX`.
/// * The total size `ptr.len() * size_of::<T>()` of the slice must be no larger than `isize::MAX`.
/// See the safety documentation of [`pointer::offset`].
///
/// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
@ -1488,7 +1486,7 @@ impl<T> NonNull<[T]> {
///
/// When calling this method, you have to ensure that all of the following is true:
///
/// * The pointer must be [valid] for reads and writes for `ptr.len() * mem::size_of::<T>()`
/// * The pointer must be [valid] for reads and writes for `ptr.len() * size_of::<T>()`
/// many bytes, and it must be properly aligned. This means in particular:
///
/// * The entire memory range of this slice must be contained within a single allocated object!
@ -1500,7 +1498,7 @@ impl<T> NonNull<[T]> {
/// them from other data. You can obtain a pointer that is usable as `data`
/// for zero-length slices using [`NonNull::dangling()`].
///
/// * The total size `ptr.len() * mem::size_of::<T>()` of the slice must be no larger than `isize::MAX`.
/// * The total size `ptr.len() * size_of::<T>()` of the slice must be no larger than `isize::MAX`.
/// See the safety documentation of [`pointer::offset`].
///
/// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is

View File

@ -1,10 +1,10 @@
//! Comparison traits for `[T]`.
use super::{from_raw_parts, memchr};
use crate::ascii;
use crate::cmp::{self, BytewiseEq, Ordering};
use crate::intrinsics::compare_bytes;
use crate::num::NonZero;
use crate::{ascii, mem};
#[stable(feature = "rust1", since = "1.0.0")]
impl<T, U> PartialEq<[U]> for [T]
@ -87,7 +87,7 @@ where
// SAFETY: `self` and `other` are references and are thus guaranteed to be valid.
// The two slices have been checked to have the same size above.
unsafe {
let size = mem::size_of_val(self);
let size = size_of_val(self);
compare_bytes(self.as_ptr() as *const u8, other.as_ptr() as *const u8, size) == 0
}
}
@ -266,7 +266,7 @@ macro_rules! impl_slice_contains {
fn slice_contains(&self, arr: &[$t]) -> bool {
// Make our LANE_COUNT 4x the normal lane count (aiming for 128-bit vectors).
// The compiler will nicely unroll it.
const LANE_COUNT: usize = 4 * (128 / (mem::size_of::<$t>() * 8));
const LANE_COUNT: usize = 4 * (128 / (size_of::<$t>() * 8));
// SIMD
let mut chunks = arr.chunks_exact(LANE_COUNT);
for chunk in &mut chunks {

View File

@ -2,11 +2,10 @@
// Copyright 2015 Andrew Gallant, bluss and Nicolas Koch
use crate::intrinsics::const_eval_select;
use crate::mem;
const LO_USIZE: usize = usize::repeat_u8(0x01);
const HI_USIZE: usize = usize::repeat_u8(0x80);
const USIZE_BYTES: usize = mem::size_of::<usize>();
const USIZE_BYTES: usize = size_of::<usize>();
/// Returns `true` if `x` contains any zero byte.
///
@ -138,7 +137,7 @@ pub fn memrchr(x: u8, text: &[u8]) -> Option<usize> {
// offset is always aligned, so just testing `>` is sufficient and avoids possible
// overflow.
let repeated_x = usize::repeat_u8(x);
let chunk_bytes = mem::size_of::<Chunk>();
let chunk_bytes = size_of::<Chunk>();
while offset > min_aligned_offset {
// SAFETY: offset starts at len - suffix.len(), as long as it is greater than
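
For reference, the classic word-at-a-time test these constants support, as a standalone restatement rather than the module's exact code: a word contains a zero byte iff `(x - LO) & !x & HI` is nonzero.

fn contains_zero_byte(x: usize) -> bool {
    const LO: usize = usize::from_ne_bytes([0x01; size_of::<usize>()]);
    const HI: usize = usize::from_ne_bytes([0x80; size_of::<usize>()]);
    // A byte borrows in `x - LO` only if it was 0x00, and `!x & HI`
    // filters out bytes whose high bit was already set.
    x.wrapping_sub(LO) & !x & HI != 0
}

fn main() {
    assert!(contains_zero_byte(0x00ff)); // upper bytes are all zero
    assert!(!contains_zero_byte(usize::MAX));
}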

View File

@ -3893,9 +3893,9 @@ impl<T> [T] {
// Explicitly wrap the function call in a const block so it gets
// constant-evaluated even in debug mode.
let gcd: usize = const { gcd(mem::size_of::<T>(), mem::size_of::<U>()) };
let ts: usize = mem::size_of::<U>() / gcd;
let us: usize = mem::size_of::<T>() / gcd;
let gcd: usize = const { gcd(size_of::<T>(), size_of::<U>()) };
let ts: usize = size_of::<U>() / gcd;
let us: usize = size_of::<T>() / gcd;
// Armed with this knowledge, we can find how many `U`s we can fit!
let us_len = self.len() / ts * us;
@ -3945,7 +3945,7 @@ impl<T> [T] {
// ptr.align_offset.
let ptr = self.as_ptr();
// SAFETY: See the `align_to_mut` method for the detailed safety comment.
let offset = unsafe { crate::ptr::align_offset(ptr, mem::align_of::<U>()) };
let offset = unsafe { crate::ptr::align_offset(ptr, align_of::<U>()) };
if offset > self.len() {
(self, &[], &[])
} else {
@ -3955,7 +3955,7 @@ impl<T> [T] {
#[cfg(miri)]
crate::intrinsics::miri_promise_symbolic_alignment(
rest.as_ptr().cast(),
mem::align_of::<U>(),
align_of::<U>(),
);
// SAFETY: now `rest` is definitely aligned, so `from_raw_parts` below is okay,
// since the caller guarantees that we can transmute `T` to `U` safely.
@ -4016,7 +4016,7 @@ impl<T> [T] {
// valid pointer `ptr` (it comes from a reference to `self`) and with
// a size that is a power of two (since it comes from the alignment for U),
// satisfying its safety constraints.
let offset = unsafe { crate::ptr::align_offset(ptr, mem::align_of::<U>()) };
let offset = unsafe { crate::ptr::align_offset(ptr, align_of::<U>()) };
if offset > self.len() {
(self, &mut [], &mut [])
} else {
@ -4028,7 +4028,7 @@ impl<T> [T] {
#[cfg(miri)]
crate::intrinsics::miri_promise_symbolic_alignment(
mut_ptr.cast() as *const (),
mem::align_of::<U>(),
align_of::<U>(),
);
// We can't use `rest` again after this, that would invalidate its alias `mut_ptr`!
// SAFETY: see comments for `align_to`.
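
A usage sketch of the invariants this splitting maintains: prefix and suffix stay as `T`, the middle slice is aligned for `U`, and no bytes are lost.

fn main() {
    let bytes = [0u8; 13];
    // SAFETY: any byte pattern is a valid u32.
    let (prefix, mid, suffix) = unsafe { bytes.align_to::<u32>() };
    assert_eq!(prefix.len() + mid.len() * size_of::<u32>() + suffix.len(), 13);
    assert_eq!(mid.as_ptr() as usize % align_of::<u32>(), 0);
}
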
@ -4099,7 +4099,7 @@ impl<T> [T] {
// These are expected to always match, as vector types are laid out like
// arrays per <https://llvm.org/docs/LangRef.html#vector-type>, but we
// might as well double-check since it'll optimize away anyhow.
assert_eq!(mem::size_of::<Simd<T, LANES>>(), mem::size_of::<[T; LANES]>());
assert_eq!(size_of::<Simd<T, LANES>>(), size_of::<[T; LANES]>());
// SAFETY: The simd types have the same layout as arrays, just with
// potentially-higher alignment, so the de-facto transmutes are sound.
@ -4135,7 +4135,7 @@ impl<T> [T] {
// These are expected to always match, as vector types are laid out like
// arrays per <https://llvm.org/docs/LangRef.html#vector-type>, but we
// might as well double-check since it'll optimize away anyhow.
assert_eq!(mem::size_of::<Simd<T, LANES>>(), mem::size_of::<[T; LANES]>());
assert_eq!(size_of::<Simd<T, LANES>>(), size_of::<[T; LANES]>());
// SAFETY: The simd types have the same layout as arrays, just with
// potentially-higher alignment, so the de-facto transmutes are sound.
@ -4700,11 +4700,11 @@ impl<T> [T] {
let byte_offset = elem_start.wrapping_sub(self_start);
if byte_offset % mem::size_of::<T>() != 0 {
if byte_offset % size_of::<T>() != 0 {
return None;
}
let offset = byte_offset / mem::size_of::<T>();
let offset = byte_offset / size_of::<T>();
if offset < self.len() { Some(offset) } else { None }
}
@ -4754,11 +4754,11 @@ impl<T> [T] {
let byte_start = subslice_start.wrapping_sub(self_start);
if byte_start % core::mem::size_of::<T>() != 0 {
if byte_start % size_of::<T>() != 0 {
return None;
}
let start = byte_start / core::mem::size_of::<T>();
let start = byte_start / size_of::<T>();
let end = start.wrapping_add(subslice.len());
if start <= self.len() && end <= self.len() { Some(start..end) } else { None }

View File

@ -11,7 +11,7 @@ use crate::{array, ptr, ub_checks};
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `data` must be non-null, [valid] for reads for `len * mem::size_of::<T>()` many bytes,
/// * `data` must be non-null, [valid] for reads for `len * size_of::<T>()` many bytes,
/// and it must be properly aligned. This means in particular:
///
/// * The entire memory range of this slice must be contained within a single allocated object!
@ -28,7 +28,7 @@ use crate::{array, ptr, ub_checks};
/// * The memory referenced by the returned slice must not be mutated for the duration
/// of lifetime `'a`, except inside an `UnsafeCell`.
///
/// * The total size `len * mem::size_of::<T>()` of the slice must be no larger than `isize::MAX`,
/// * The total size `len * size_of::<T>()` of the slice must be no larger than `isize::MAX`,
/// and adding that size to `data` must not "wrap around" the address space.
/// See the safety documentation of [`pointer::offset`].
///
@ -146,7 +146,7 @@ pub const unsafe fn from_raw_parts<'a, T>(data: *const T, len: usize) -> &'a [T]
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `data` must be non-null, [valid] for both reads and writes for `len * mem::size_of::<T>()` many bytes,
/// * `data` must be non-null, [valid] for both reads and writes for `len * size_of::<T>()` many bytes,
/// and it must be properly aligned. This means in particular:
///
/// * The entire memory range of this slice must be contained within a single allocated object!
@ -163,7 +163,7 @@ pub const unsafe fn from_raw_parts<'a, T>(data: *const T, len: usize) -> &'a [T]
/// (not derived from the return value) for the duration of lifetime `'a`.
/// Both read and write accesses are forbidden.
///
/// * The total size `len * mem::size_of::<T>()` of the slice must be no larger than `isize::MAX`,
/// * The total size `len * size_of::<T>()` of the slice must be no larger than `isize::MAX`,
/// and adding that size to `data` must not "wrap around" the address space.
/// See the safety documentation of [`pointer::offset`].
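
A minimal sketch of a call that satisfies every bullet above, using a `Vec`'s buffer as the single allocated object:

use std::slice;

fn main() {
    let v = vec![1u16, 2, 3];
    // SAFETY: the buffer is non-null, properly aligned, lives in one
    // allocation, is not mutated while `s` exists, and
    // 3 * size_of::<u16>() is far below isize::MAX.
    let s = unsafe { slice::from_raw_parts(v.as_ptr(), v.len()) };
    assert_eq!(s, &[1, 2, 3]);
}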
///

View File

@ -1,4 +1,4 @@
use crate::mem::{self, MaybeUninit, SizedTypeProperties};
use crate::mem::{MaybeUninit, SizedTypeProperties};
use crate::{cmp, ptr};
type BufType = [usize; 32];
@ -21,12 +21,12 @@ pub(super) unsafe fn ptr_rotate<T>(left: usize, mid: *mut T, right: usize) {
}
// `T` is not a zero-sized type, so it's okay to divide by its size.
if !cfg!(feature = "optimize_for_size")
&& cmp::min(left, right) <= mem::size_of::<BufType>() / mem::size_of::<T>()
&& cmp::min(left, right) <= size_of::<BufType>() / size_of::<T>()
{
// SAFETY: guaranteed by the caller
unsafe { ptr_rotate_memmove(left, mid, right) };
} else if !cfg!(feature = "optimize_for_size")
&& ((left + right < 24) || (mem::size_of::<T>() > mem::size_of::<[usize; 4]>()))
&& ((left + right < 24) || (size_of::<T>() > size_of::<[usize; 4]>()))
{
// SAFETY: guaranteed by the caller
unsafe { ptr_rotate_gcd(left, mid, right) }
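
The stable entry points for this helper are the slice rotations; a usage sketch where `left`/`right` are the element counts on either side of `mid`:

fn main() {
    let mut v = [1, 2, 3, 4, 5];
    v.rotate_left(2); // left = 2, right = 3
    assert_eq!(v, [3, 4, 5, 1, 2]);
    v.rotate_right(2);
    assert_eq!(v, [1, 2, 3, 4, 5]);
}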

View File

@ -113,7 +113,7 @@ pub(crate) trait UnstableSmallSortFreezeTypeImpl: Sized + FreezeMarker {
impl<T: FreezeMarker> UnstableSmallSortFreezeTypeImpl for T {
#[inline(always)]
default fn small_sort_threshold() -> usize {
if (mem::size_of::<T>() * SMALL_SORT_GENERAL_SCRATCH_LEN) <= MAX_STACK_ARRAY_SIZE {
if (size_of::<T>() * SMALL_SORT_GENERAL_SCRATCH_LEN) <= MAX_STACK_ARRAY_SIZE {
SMALL_SORT_GENERAL_THRESHOLD
} else {
SMALL_SORT_FALLBACK_THRESHOLD
@ -125,7 +125,7 @@ impl<T: FreezeMarker> UnstableSmallSortFreezeTypeImpl for T {
where
F: FnMut(&T, &T) -> bool,
{
if (mem::size_of::<T>() * SMALL_SORT_GENERAL_SCRATCH_LEN) <= MAX_STACK_ARRAY_SIZE {
if (size_of::<T>() * SMALL_SORT_GENERAL_SCRATCH_LEN) <= MAX_STACK_ARRAY_SIZE {
small_sort_general(v, is_less);
} else {
small_sort_fallback(v, is_less);
@ -143,10 +143,10 @@ impl<T: FreezeMarker + CopyMarker> UnstableSmallSortFreezeTypeImpl for T {
#[inline(always)]
fn small_sort_threshold() -> usize {
if has_efficient_in_place_swap::<T>()
&& (mem::size_of::<T>() * SMALL_SORT_NETWORK_SCRATCH_LEN) <= MAX_STACK_ARRAY_SIZE
&& (size_of::<T>() * SMALL_SORT_NETWORK_SCRATCH_LEN) <= MAX_STACK_ARRAY_SIZE
{
SMALL_SORT_NETWORK_THRESHOLD
} else if (mem::size_of::<T>() * SMALL_SORT_GENERAL_SCRATCH_LEN) <= MAX_STACK_ARRAY_SIZE {
} else if (size_of::<T>() * SMALL_SORT_GENERAL_SCRATCH_LEN) <= MAX_STACK_ARRAY_SIZE {
SMALL_SORT_GENERAL_THRESHOLD
} else {
SMALL_SORT_FALLBACK_THRESHOLD
@ -159,10 +159,10 @@ impl<T: FreezeMarker + CopyMarker> UnstableSmallSortFreezeTypeImpl for T {
F: FnMut(&T, &T) -> bool,
{
if has_efficient_in_place_swap::<T>()
&& (mem::size_of::<T>() * SMALL_SORT_NETWORK_SCRATCH_LEN) <= MAX_STACK_ARRAY_SIZE
&& (size_of::<T>() * SMALL_SORT_NETWORK_SCRATCH_LEN) <= MAX_STACK_ARRAY_SIZE
{
small_sort_network(v, is_less);
} else if (mem::size_of::<T>() * SMALL_SORT_GENERAL_SCRATCH_LEN) <= MAX_STACK_ARRAY_SIZE {
} else if (size_of::<T>() * SMALL_SORT_GENERAL_SCRATCH_LEN) <= MAX_STACK_ARRAY_SIZE {
small_sort_general(v, is_less);
} else {
small_sort_fallback(v, is_less);
@ -238,7 +238,7 @@ fn small_sort_general_with_scratch<T: FreezeMarker, F: FnMut(&T, &T) -> bool>(
unsafe {
let scratch_base = scratch.as_mut_ptr() as *mut T;
let presorted_len = if const { mem::size_of::<T>() <= 16 } && len >= 16 {
let presorted_len = if const { size_of::<T>() <= 16 } && len >= 16 {
// SAFETY: scratch_base is valid and has enough space.
sort8_stable(v_base, scratch_base, scratch_base.add(len), is_less);
sort8_stable(
@ -863,5 +863,5 @@ fn panic_on_ord_violation() -> ! {
#[must_use]
pub(crate) const fn has_efficient_in_place_swap<T>() -> bool {
// Heuristic that holds true on all tested 64-bit capable architectures.
mem::size_of::<T>() <= 8 // mem::size_of::<u64>()
size_of::<T>() <= 8 // size_of::<u64>()
}
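
The dispatch above reduces to one compile-time size comparison; a sketch with illustrative constants (not the library's actual values):

const MAX_STACK_ARRAY_SIZE: usize = 4096; // illustrative only
const SCRATCH_LEN: usize = 32; // illustrative only

fn fits_on_stack<T>() -> bool {
    size_of::<T>() * SCRATCH_LEN <= MAX_STACK_ARRAY_SIZE
}

fn main() {
    assert!(fits_on_stack::<u64>()); // 8 * 32 = 256 bytes of scratch
    assert!(!fits_on_stack::<[u8; 1024]>()); // 32 KiB would not fit
}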

View File

@ -3,7 +3,7 @@
#[cfg(not(any(feature = "optimize_for_size", target_pointer_width = "16")))]
use crate::cmp;
use crate::intrinsics;
use crate::mem::{self, MaybeUninit, SizedTypeProperties};
use crate::mem::{MaybeUninit, SizedTypeProperties};
#[cfg(not(any(feature = "optimize_for_size", target_pointer_width = "16")))]
use crate::slice::sort::shared::smallsort::{
SMALL_SORT_GENERAL_SCRATCH_LEN, StableSmallSortTypeImpl, insertion_sort_shift_left,
@ -107,7 +107,7 @@ fn driftsort_main<T, F: FnMut(&T, &T) -> bool, BufT: BufGuard<T>>(v: &mut [T], i
// If min_good_run_len is ever modified, this code must be updated to allocate
// the correct scratch size for it.
const MAX_FULL_ALLOC_BYTES: usize = 8_000_000; // 8MB
let max_full_alloc = MAX_FULL_ALLOC_BYTES / mem::size_of::<T>();
let max_full_alloc = MAX_FULL_ALLOC_BYTES / size_of::<T>();
let len = v.len();
let alloc_len = cmp::max(
cmp::max(len - len / 2, cmp::min(len, max_full_alloc)),
@ -155,7 +155,7 @@ impl<T, const N: usize> AlignedStorage<T, N> {
}
fn as_uninit_slice_mut(&mut self) -> &mut [MaybeUninit<T>] {
let len = N / mem::size_of::<T>();
let len = N / size_of::<T>();
// SAFETY: `_align` ensures we are correctly aligned.
unsafe { core::slice::from_raw_parts_mut(self.storage.as_mut_ptr().cast(), len) }

View File

@ -1,6 +1,6 @@
//! This module contains a stable quicksort and partition implementation.
use crate::mem::{self, ManuallyDrop, MaybeUninit};
use crate::mem::{ManuallyDrop, MaybeUninit};
use crate::slice::sort::shared::FreezeMarker;
use crate::slice::sort::shared::pivot::choose_pivot;
use crate::slice::sort::shared::smallsort::StableSmallSortTypeImpl;
@ -126,7 +126,7 @@ fn stable_partition<T, F: FnMut(&T, &T) -> bool>(
// this gave significant performance boosts in benchmarks. Unrolling
// through `for _ in 0..UNROLL_LEN { .. }` instead of manually unrolling improves
// compile times but has a ~10-20% performance penalty on opt-level=s.
if const { mem::size_of::<T>() <= 16 } {
if const { size_of::<T>() <= 16 } {
const UNROLL_LEN: usize = 4;
let unroll_end = v_base.add(loop_end_pos.saturating_sub(UNROLL_LEN - 1));
while state.scan < unroll_end {

View File

@ -1,6 +1,8 @@
//! This module contains an unstable quicksort and two partition implementations.
use crate::mem::{self, ManuallyDrop};
#[cfg(not(feature = "optimize_for_size"))]
use crate::mem;
use crate::mem::ManuallyDrop;
#[cfg(not(feature = "optimize_for_size"))]
use crate::slice::sort::shared::pivot::choose_pivot;
#[cfg(not(feature = "optimize_for_size"))]
@ -137,7 +139,7 @@ where
const fn inst_partition<T, F: FnMut(&T, &T) -> bool>() -> fn(&mut [T], &T, &mut F) -> usize {
const MAX_BRANCHLESS_PARTITION_SIZE: usize = 96;
if mem::size_of::<T>() <= MAX_BRANCHLESS_PARTITION_SIZE {
if size_of::<T>() <= MAX_BRANCHLESS_PARTITION_SIZE {
// Specialize for types that are relatively cheap to copy, where branchless optimizations
// have large leverage e.g. `u64` and `String`.
cfg_if! {
@ -304,7 +306,7 @@ where
// Manual unrolling that works well on x86, Arm and with opt-level=s without murdering
// compile-times. Leaving this to the compiler yields ok to bad results.
let unroll_len = const { if mem::size_of::<T>() <= 16 { 2 } else { 1 } };
let unroll_len = const { if size_of::<T>() <= 16 { 2 } else { 1 } };
let unroll_end = v_base.add(len - (unroll_len - 1));
while state.right < unroll_end {
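
The `const { .. }` block makes the unroll factor a compile-time constant per monomorphization; a standalone sketch of that specialization:

fn unroll_len<T>() -> usize {
    // Evaluated at compile time for each concrete T.
    const { if size_of::<T>() <= 16 { 2 } else { 1 } }
}

fn main() {
    assert_eq!(unroll_len::<u64>(), 2);
    assert_eq!(unroll_len::<[u8; 64]>(), 1);
}
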

View File

@ -20,7 +20,7 @@
use core::intrinsics::unlikely;
const USIZE_SIZE: usize = core::mem::size_of::<usize>();
const USIZE_SIZE: usize = size_of::<usize>();
const UNROLL_INNER: usize = 4;
#[inline]

View File

@ -2,7 +2,6 @@
use super::Utf8Error;
use crate::intrinsics::const_eval_select;
use crate::mem;
/// Returns the initial codepoint accumulator for the first byte.
/// The first byte is special, only want bottom 5 bits for width 2, 4 bits
@ -128,7 +127,7 @@ pub(super) const fn run_utf8_validation(v: &[u8]) -> Result<(), Utf8Error> {
let mut index = 0;
let len = v.len();
const USIZE_BYTES: usize = mem::size_of::<usize>();
const USIZE_BYTES: usize = size_of::<usize>();
let ascii_block_size = 2 * USIZE_BYTES;
let blocks_end = if len >= ascii_block_size { len - ascii_block_size + 1 } else { 0 };

View File

@ -2033,7 +2033,7 @@ impl<T> AtomicPtr<T> {
#[unstable(feature = "strict_provenance_atomic_ptr", issue = "99108")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub fn fetch_ptr_add(&self, val: usize, order: Ordering) -> *mut T {
self.fetch_byte_add(val.wrapping_mul(core::mem::size_of::<T>()), order)
self.fetch_byte_add(val.wrapping_mul(size_of::<T>()), order)
}
/// Offsets the pointer's address by subtracting `val` (in units of `T`),
@ -2078,7 +2078,7 @@ impl<T> AtomicPtr<T> {
#[unstable(feature = "strict_provenance_atomic_ptr", issue = "99108")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub fn fetch_ptr_sub(&self, val: usize, order: Ordering) -> *mut T {
self.fetch_byte_sub(val.wrapping_mul(core::mem::size_of::<T>()), order)
self.fetch_byte_sub(val.wrapping_mul(size_of::<T>()), order)
}
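
The scaling above turns an element count into a byte count; a sketch using stable APIs only (the `fetch_ptr_*` methods themselves are nightly-only):

use std::sync::atomic::{AtomicPtr, Ordering};

fn main() {
    let mut data = [0u64; 4];
    let atom = AtomicPtr::new(data.as_mut_ptr());
    let p = atom.load(Ordering::Relaxed);
    let bumped = p.wrapping_add(3); // advance by three elements
    assert_eq!(bumped as usize - p as usize, 3 * size_of::<u64>()); // 24 bytes
}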
/// Offsets the pointer's address by adding `val` *bytes*, returning the

View File

@ -95,7 +95,7 @@ benches! {
// These are separate since it's easier to debug errors if they don't go through
// macro expansion first.
fn is_ascii_align_to(bytes: &[u8]) -> bool {
if bytes.len() < core::mem::size_of::<usize>() {
if bytes.len() < size_of::<usize>() {
return bytes.iter().all(|b| b.is_ascii());
}
// SAFETY: transmuting a sequence of `u8` to `usize` is always fine
@ -106,7 +106,7 @@ fn is_ascii_align_to(bytes: &[u8]) -> bool {
}
fn is_ascii_align_to_unrolled(bytes: &[u8]) -> bool {
if bytes.len() < core::mem::size_of::<usize>() {
if bytes.len() < size_of::<usize>() {
return bytes.iter().all(|b| b.is_ascii());
}
// SAFETY: transmuting a sequence of `u8` to `[usize; 2]` is always fine
@ -118,6 +118,6 @@ fn is_ascii_align_to_unrolled(bytes: &[u8]) -> bool {
#[inline]
fn contains_nonascii(v: usize) -> bool {
const NONASCII_MASK: usize = usize::from_ne_bytes([0x80; core::mem::size_of::<usize>()]);
const NONASCII_MASK: usize = usize::from_ne_bytes([0x80; size_of::<usize>()]);
(NONASCII_MASK & v) != 0
}
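
What the mask checks, as a sketch: ASCII bytes never have the high bit set, so a word of packed bytes is all-ASCII iff masking with repeated `0x80` yields zero.

fn main() {
    const NONASCII_MASK: usize = usize::from_ne_bytes([0x80; size_of::<usize>()]);
    let ascii = usize::from_ne_bytes([b'a'; size_of::<usize>()]);
    assert_eq!(ascii & NONASCII_MASK, 0);
    assert_ne!((ascii | 0x80) & NONASCII_MASK, 0); // one high bit set
}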

View File

@ -1,6 +1,5 @@
use core::borrow::Borrow;
use core::iter::*;
use core::mem;
use core::num::Wrapping;
use core::ops::Range;
@ -477,7 +476,7 @@ fn bench_next_chunk_copied(b: &mut Bencher) {
let mut iter = black_box(&v).iter().copied();
let mut acc = Wrapping(0);
// This uses a while-let loop to side-step the TRA specialization in ArrayChunks
while let Ok(chunk) = iter.next_chunk::<{ mem::size_of::<u64>() }>() {
while let Ok(chunk) = iter.next_chunk::<{ size_of::<u64>() }>() {
let d = u64::from_ne_bytes(chunk);
acc += Wrapping(d.rotate_left(7).wrapping_add(1));
}
@ -496,7 +495,7 @@ fn bench_next_chunk_trusted_random_access(b: &mut Bencher) {
.iter()
// this shows that we're not relying on the slice::Iter specialization in Copied
.map(|b| *b.borrow())
.array_chunks::<{ mem::size_of::<u64>() }>()
.array_chunks::<{ size_of::<u64>() }>()
.map(|ary| {
let d = u64::from_ne_bytes(ary);
Wrapping(d.rotate_left(7).wrapping_add(1))

View File

@ -1,5 +1,4 @@
use core::alloc::Layout;
use core::mem::size_of;
use core::ptr::{self, NonNull};
#[test]

View File

@ -250,8 +250,6 @@ fn atomic_access_bool() {
#[test]
fn atomic_alignment() {
use std::mem::{align_of, size_of};
#[cfg(target_has_atomic = "8")]
assert_eq!(align_of::<AtomicBool>(), size_of::<AtomicBool>());
#[cfg(target_has_atomic = "ptr")]

View File

@ -1,7 +1,7 @@
#![allow(deprecated)]
use core::hash::{Hash, Hasher, SipHasher, SipHasher13};
use core::{mem, slice};
use core::slice;
// Hash just the bytes of the slice, without length prefix
struct Bytes<'a>(&'a [u8]);
@ -314,7 +314,7 @@ fn test_write_short_works() {
h1.write_u8(0x01u8);
let mut h2 = SipHasher::new();
h2.write(unsafe {
slice::from_raw_parts(&test_usize as *const _ as *const u8, mem::size_of::<usize>())
slice::from_raw_parts(&test_usize as *const _ as *const u8, size_of::<usize>())
});
h2.write(b"bytes");
h2.write(b"string");

View File

@ -1,6 +1,5 @@
use core::num::{IntErrorKind, NonZero};
use core::option::Option::None;
use std::mem::size_of;
#[test]
fn test_create_nonzero_instance() {

View File

@ -1,6 +1,6 @@
use core::cell::RefCell;
use core::marker::Freeze;
use core::mem::{self, MaybeUninit};
use core::mem::MaybeUninit;
use core::num::NonZero;
use core::ptr;
use core::ptr::*;
@ -388,7 +388,7 @@ fn align_offset_various_strides() {
let mut expected = usize::MAX;
// Naive but definitely correct way to find the *first* aligned element of stride::<T>.
for el in 0..align {
if (numptr + el * ::std::mem::size_of::<T>()) % align == 0 {
if (numptr + el * size_of::<T>()) % align == 0 {
expected = el;
break;
}
@ -398,7 +398,7 @@ fn align_offset_various_strides() {
eprintln!(
"aligning {:p} (with stride of {}) to {}, expected {}, got {}",
ptr,
::std::mem::size_of::<T>(),
size_of::<T>(),
align,
expected,
got
@ -605,9 +605,9 @@ fn dyn_metadata() {
let meta = metadata(trait_object);
assert_eq!(meta.size_of(), 64);
assert_eq!(meta.size_of(), std::mem::size_of::<Something>());
assert_eq!(meta.size_of(), size_of::<Something>());
assert_eq!(meta.align_of(), 32);
assert_eq!(meta.align_of(), std::mem::align_of::<Something>());
assert_eq!(meta.align_of(), align_of::<Something>());
assert_eq!(meta.layout(), std::alloc::Layout::new::<Something>());
assert!(format!("{meta:?}").starts_with("DynMetadata(0x"));
@ -781,7 +781,7 @@ fn nonnull_tagged_pointer_with_provenance() {
impl<T> TaggedPointer<T> {
/// The ABI-required minimum alignment of the `P` type.
pub const ALIGNMENT: usize = core::mem::align_of::<T>();
pub const ALIGNMENT: usize = align_of::<T>();
/// A mask for data-carrying bits of the address.
pub const DATA_MASK: usize = !Self::ADDRESS_MASK;
/// Number of available bits of storage in the address.
@ -865,7 +865,7 @@ fn test_const_copy_ptr() {
ptr::copy(
&ptr1 as *const _ as *const MaybeUninit<u8>,
&mut ptr2 as *mut _ as *mut MaybeUninit<u8>,
mem::size_of::<&i32>(),
size_of::<&i32>(),
);
}
@ -883,7 +883,7 @@ fn test_const_copy_ptr() {
ptr::copy_nonoverlapping(
&ptr1 as *const _ as *const MaybeUninit<u8>,
&mut ptr2 as *mut _ as *mut MaybeUninit<u8>,
mem::size_of::<&i32>(),
size_of::<&i32>(),
);
}
@ -928,7 +928,7 @@ fn test_const_swap_ptr() {
let mut s2 = A(S { ptr: &666, f1: 0, f2: [0; 3] });
// Swap ptr1 and ptr2, as an array.
type T = [u8; mem::size_of::<A>()];
type T = [u8; size_of::<A>()];
unsafe {
ptr::swap(ptr::from_mut(&mut s1).cast::<T>(), ptr::from_mut(&mut s2).cast::<T>());
}

View File

@ -2057,15 +2057,13 @@ fn test_align_to_non_trivial() {
#[test]
fn test_align_to_empty_mid() {
use core::mem;
// Make sure that we do not create empty unaligned slices for the mid part, even when the
// overall slice is too short to contain an aligned address.
let bytes = [1, 2, 3, 4, 5, 6, 7];
type Chunk = u32;
for offset in 0..4 {
let (_, mid, _) = unsafe { bytes[offset..offset + 1].align_to::<Chunk>() };
assert_eq!(mid.as_ptr() as usize % mem::align_of::<Chunk>(), 0);
assert_eq!(mid.as_ptr() as usize % align_of::<Chunk>(), 0);
}
}

View File

@ -9,7 +9,7 @@
use alloc::boxed::Box;
use core::any::Any;
use core::sync::atomic::{AtomicBool, Ordering};
use core::{intrinsics, mem, ptr};
use core::{intrinsics, ptr};
use unwind as uw;
@ -97,7 +97,7 @@ pub(crate) unsafe fn cleanup(ptr: *mut u8) -> Box<dyn Any + Send> {
pub(crate) unsafe fn panic(data: Box<dyn Any + Send>) -> u32 {
unsafe {
let exception = __cxa_allocate_exception(mem::size_of::<Exception>()) as *mut Exception;
let exception = __cxa_allocate_exception(size_of::<Exception>()) as *mut Exception;
if exception.is_null() {
return uw::_URC_FATAL_PHASE1_ERROR as u32;
}

View File

@ -49,7 +49,7 @@
use alloc::boxed::Box;
use core::any::Any;
use core::ffi::{c_int, c_uint, c_void};
use core::mem::{self, ManuallyDrop};
use core::mem::ManuallyDrop;
// NOTE(nbdd0121): The `canary` field is part of stable ABI.
#[repr(C)]
@ -225,7 +225,7 @@ static mut CATCHABLE_TYPE: _CatchableType = _CatchableType {
properties: 0,
pType: ptr_t::null(),
thisDisplacement: _PMD { mdisp: 0, pdisp: -1, vdisp: 0 },
sizeOrOffset: mem::size_of::<Exception>() as c_int,
sizeOrOffset: size_of::<Exception>() as c_int,
copyFunction: ptr_t::null(),
};

View File

@ -50,7 +50,7 @@ macro_rules! define_reify_functions {
>(f: F) -> $(extern $abi)? fn($($arg_ty),*) -> $ret_ty {
// FIXME(eddyb) describe the `F` type (e.g. via `type_name::<F>`) once panic
// formatting becomes possible in `const fn`.
assert!(mem::size_of::<F>() == 0, "selfless_reify: closure must be zero-sized");
assert!(size_of::<F>() == 0, "selfless_reify: closure must be zero-sized");
$(extern $abi)? fn wrapper<
$($($param,)*)?
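
The assertion above relies on non-capturing closures being zero-sized; a quick sketch of that property:

fn main() {
    let x = 5i32;
    let capturing = move |y: i32| x + y;
    let non_capturing = |y: i32| y + 1;
    assert_eq!(size_of_val(&non_capturing), 0); // reifiable
    assert_eq!(size_of_val(&capturing), size_of::<i32>()); // not reifiable
}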

View File

@ -113,7 +113,6 @@ fn main() {
// Infinite recursion <https://github.com/llvm/llvm-project/issues/97981>
("csky", _) => false,
("hexagon", _) => false,
("loongarch64", _) => false,
("powerpc" | "powerpc64", _) => false,
("sparc" | "sparc64", _) => false,
("wasm32" | "wasm64", _) => false,

View File

@ -1878,7 +1878,7 @@ fn windows_unix_socket_exists() {
let bytes = socket_path.as_os_str().as_encoded_bytes();
let bytes = core::slice::from_raw_parts(bytes.as_ptr().cast::<i8>(), bytes.len());
addr.sun_path[..bytes.len()].copy_from_slice(bytes);
let len = mem::size_of_val(&addr) as i32;
let len = size_of_val(&addr) as i32;
let result = c::bind(socket, (&raw const addr).cast::<c::SOCKADDR>(), len);
c::closesocket(socket);
assert_eq!(result, 0);

View File

@ -1,6 +1,5 @@
use super::{Custom, Error, ErrorData, ErrorKind, Repr, SimpleMessage, const_error};
use crate::assert_matches::assert_matches;
use crate::mem::size_of;
use crate::sys::decode_error_kind;
use crate::sys::os::error_string;
use crate::{error, fmt};

View File

@ -36,7 +36,6 @@ fn test_fd() {
#[cfg(any(unix, target_os = "wasi"))]
#[test]
fn test_niche_optimizations() {
use crate::mem::size_of;
#[cfg(unix)]
use crate::os::unix::io::{BorrowedFd, FromRawFd, IntoRawFd, OwnedFd, RawFd};
#[cfg(target_os = "wasi")]

View File

@ -1,4 +1,3 @@
use crate::mem::size_of;
use crate::os::unix::io::RawFd;
#[test]

View File

@ -94,7 +94,7 @@ impl SocketAddr {
{
unsafe {
let mut addr: libc::sockaddr_un = mem::zeroed();
let mut len = mem::size_of::<libc::sockaddr_un>() as libc::socklen_t;
let mut len = size_of::<libc::sockaddr_un>() as libc::socklen_t;
cvt(f((&raw mut addr) as *mut _, &mut len))?;
SocketAddr::from_parts(addr, len)
}

View File

@ -177,7 +177,7 @@ impl UnixListener {
#[stable(feature = "unix_socket", since = "1.10.0")]
pub fn accept(&self) -> io::Result<(UnixStream, SocketAddr)> {
let mut storage: libc::sockaddr_un = unsafe { mem::zeroed() };
let mut len = mem::size_of_val(&storage) as libc::socklen_t;
let mut len = size_of_val(&storage) as libc::socklen_t;
let sock = self.0.accept((&raw mut storage) as *mut _, &mut len)?;
let addr = SocketAddr::from_parts(storage, len)?;
Ok((UnixStream(sock), addr))

View File

@ -41,15 +41,15 @@ mod impl_linux {
use libc::{SO_PEERCRED, SOL_SOCKET, c_void, getsockopt, socklen_t, ucred};
use super::UCred;
use crate::io;
use crate::os::unix::io::AsRawFd;
use crate::os::unix::net::UnixStream;
use crate::{io, mem};
pub fn peer_cred(socket: &UnixStream) -> io::Result<UCred> {
let ucred_size = mem::size_of::<ucred>();
let ucred_size = size_of::<ucred>();
// Trivial sanity checks.
assert!(mem::size_of::<u32>() <= mem::size_of::<usize>());
assert!(size_of::<u32>() <= size_of::<usize>());
assert!(ucred_size <= u32::MAX as usize);
let mut ucred_size = ucred_size as socklen_t;
@ -64,7 +64,7 @@ mod impl_linux {
&mut ucred_size,
);
if ret == 0 && ucred_size as usize == mem::size_of::<ucred>() {
if ret == 0 && ucred_size as usize == size_of::<ucred>() {
Ok(UCred { uid: ucred.uid, gid: ucred.gid, pid: Some(ucred.pid) })
} else {
Err(io::Error::last_os_error())
@ -101,9 +101,9 @@ mod impl_apple {
use libc::{LOCAL_PEERPID, SOL_LOCAL, c_void, getpeereid, getsockopt, pid_t, socklen_t};
use super::UCred;
use crate::io;
use crate::os::unix::io::AsRawFd;
use crate::os::unix::net::UnixStream;
use crate::{io, mem};
pub fn peer_cred(socket: &UnixStream) -> io::Result<UCred> {
let mut cred = UCred { uid: 1, gid: 1, pid: None };
@ -115,7 +115,7 @@ mod impl_apple {
}
let mut pid: pid_t = 1;
let mut pid_size = mem::size_of::<pid_t>() as socklen_t;
let mut pid_size = size_of::<pid_t>() as socklen_t;
let ret = getsockopt(
socket.as_raw_fd(),
@ -125,7 +125,7 @@ mod impl_apple {
&mut pid_size,
);
if ret == 0 && pid_size as usize == mem::size_of::<pid_t>() {
if ret == 0 && pid_size as usize == size_of::<pid_t>() {
cred.pid = Some(pid);
Ok(cred)
} else {

View File

@ -1,4 +1,3 @@
use crate::mem::size_of;
use crate::os::wasi::io::RawFd;
#[test]

View File

@ -1,6 +1,5 @@
#[test]
fn test_niche_optimizations_socket() {
use crate::mem::size_of;
use crate::os::windows::io::{
BorrowedSocket, FromRawSocket, IntoRawSocket, OwnedSocket, RawSocket,
};

View File

@ -500,11 +500,7 @@ impl<'a> ProcThreadAttributeListBuilder<'a> {
/// [1]: <https://learn.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-updateprocthreadattribute#parameters>
pub fn attribute<T>(self, attribute: usize, value: &'a T) -> Self {
unsafe {
self.raw_attribute(
attribute,
ptr::addr_of!(*value).cast::<c_void>(),
crate::mem::size_of::<T>(),
)
self.raw_attribute(attribute, ptr::addr_of!(*value).cast::<c_void>(), size_of::<T>())
}
}
@@ -574,7 +570,7 @@ impl<'a> ProcThreadAttributeListBuilder<'a> {
/// .raw_attribute(
/// PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE,
/// h_pc as *const c_void,
-/// std::mem::size_of::<isize>(),
+/// size_of::<isize>(),
/// )
/// .finish()?
/// };


@@ -368,7 +368,7 @@ pub(crate) unsafe fn map_memory<T>(
let mut a0 = Syscall::MapMemory as usize;
let mut a1 = phys.map(|p| p.as_ptr() as usize).unwrap_or_default();
let mut a2 = virt.map(|p| p.as_ptr() as usize).unwrap_or_default();
-let a3 = count * core::mem::size_of::<T>();
+let a3 = count * size_of::<T>();
let a4 = flags.bits();
let a5 = 0;
let a6 = 0;
@@ -392,7 +392,7 @@ pub(crate) unsafe fn map_memory<T>(
if result == SyscallResult::MemoryRange as usize {
let start = core::ptr::with_exposed_provenance_mut::<T>(a1);
-let len = a2 / core::mem::size_of::<T>();
+let len = a2 / size_of::<T>();
let end = unsafe { start.add(len) };
Ok(unsafe { core::slice::from_raw_parts_mut(start, len) })
} else if result == SyscallResult::Error as usize {
@@ -409,7 +409,7 @@ pub(crate) unsafe fn map_memory<T>(
pub(crate) unsafe fn unmap_memory<T>(range: *mut [T]) -> Result<(), Error> {
let mut a0 = Syscall::UnmapMemory as usize;
let mut a1 = range.as_mut_ptr() as usize;
-let a2 = range.len() * core::mem::size_of::<T>();
+let a2 = range.len() * size_of::<T>();
let a3 = 0;
let a4 = 0;
let a5 = 0;
@@ -455,7 +455,7 @@ pub(crate) unsafe fn update_memory_flags<T>(
) -> Result<(), Error> {
let mut a0 = Syscall::UpdateMemoryFlags as usize;
let mut a1 = range.as_mut_ptr() as usize;
-let a2 = range.len() * core::mem::size_of::<T>();
+let a2 = range.len() * size_of::<T>();
let a3 = new_flags.bits();
let a4 = 0; // Process ID is currently None
let a5 = 0;


@@ -7,8 +7,8 @@ use crate::os::xous::ffi::Connection;
/// `group_or_null([1,2,3,4,5,6,7,8], 1)` on a 32-bit system will return a
/// `usize` with 5678 packed into it.
fn group_or_null(data: &[u8], offset: usize) -> usize {
-let start = offset * core::mem::size_of::<usize>();
-let mut out_array = [0u8; core::mem::size_of::<usize>()];
+let start = offset * size_of::<usize>();
+let mut out_array = [0u8; size_of::<usize>()];
if start < data.len() {
for (dest, src) in out_array.iter_mut().zip(&data[start..]) {
*dest = *src;
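
To make the doc comment above concrete: the helper copies up to one word's worth of bytes, starting `offset` words into the buffer, zero-padding past the end. A hedged sketch fixed to a 4-byte word, so it behaves like the 32-bit case on any host; the final bytes-to-word conversion is an assumption, since the hunk above is truncated before that step:

```rust
// Hedged sketch: `from_le_bytes` is an assumption about the elided tail of
// the real function, which returns `usize` rather than `u32`.
fn group_or_null_32(data: &[u8], offset: usize) -> u32 {
    let start = offset * size_of::<u32>();
    let mut out_array = [0u8; size_of::<u32>()];
    if start < data.len() {
        for (dest, src) in out_array.iter_mut().zip(&data[start..]) {
            *dest = *src;
        }
    }
    u32::from_le_bytes(out_array)
}

fn main() {
    // Second word of the buffer: bytes 5, 6, 7, 8 packed into one value.
    let packed = group_or_null_32(&[1, 2, 3, 4, 5, 6, 7, 8], 1);
    assert_eq!(packed.to_le_bytes(), [5, 6, 7, 8]);
    // Past the end: all zeroes ("null").
    assert_eq!(group_or_null_32(&[1, 2], 5), 0);
}
```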


@@ -81,7 +81,7 @@ cfg_if::cfg_if! {
// while others require the alignment to be at least the pointer size (Illumos, macOS).
// posix_memalign only has one, clear requirement: that the alignment be a multiple of
// `sizeof(void*)`. Since these are all powers of 2, we can just use max.
-let align = layout.align().max(crate::mem::size_of::<usize>());
+let align = layout.align().max(size_of::<usize>());
let ret = unsafe { libc::posix_memalign(&mut out, align, layout.size()) };
if ret != 0 { ptr::null_mut() } else { out as *mut u8 }
}
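
The comment in this hunk compresses a subtle point: `posix_memalign` itself only requires the alignment to be a multiple of `sizeof(void*)`, but some platforms reject smaller values outright, so the allocator bumps the request. Because both quantities are powers of two, `max` agrees with "round up to a multiple". A small standalone sketch:

```rust
use std::alloc::Layout;

// Round the requested alignment up to something posix_memalign accepts on
// every platform listed in the comment above.
fn posix_memalign_align(layout: Layout) -> usize {
    layout.align().max(size_of::<usize>())
}

fn main() {
    let small = Layout::from_size_align(24, 2).unwrap();
    let big = Layout::from_size_align(64, 32).unwrap();
    // 2 gets bumped to the pointer size; 32 passes through unchanged.
    assert_eq!(posix_memalign_align(small), size_of::<usize>());
    assert_eq!(posix_memalign_align(big), 32);
}
```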


@@ -1,9 +1,8 @@
use super::{Header, MIN_ALIGN};
-use crate::mem;
#[test]
fn alloc_header() {
// Header must fit in the padding before an aligned pointer
-assert!(mem::size_of::<Header>() <= MIN_ALIGN);
-assert!(mem::align_of::<Header>() <= MIN_ALIGN);
+assert!(size_of::<Header>() <= MIN_ALIGN);
+assert!(align_of::<Header>() <= MIN_ALIGN);
}


@@ -1,5 +1,4 @@
use crate::ffi::c_void;
-use crate::mem::size_of;
use crate::os::windows::io::{AsHandle, AsRawHandle, BorrowedHandle};
use crate::sys::c;


@@ -154,11 +154,11 @@ fn socket_addr_to_c(addr: &SocketAddr) -> (SocketAddrCRepr, c::socklen_t) {
match addr {
SocketAddr::V4(a) => {
let sockaddr = SocketAddrCRepr { v4: socket_addr_v4_to_c(a) };
-(sockaddr, mem::size_of::<c::sockaddr_in>() as c::socklen_t)
+(sockaddr, size_of::<c::sockaddr_in>() as c::socklen_t)
}
SocketAddr::V6(a) => {
let sockaddr = SocketAddrCRepr { v6: socket_addr_v6_to_c(a) };
-(sockaddr, mem::size_of::<c::sockaddr_in6>() as c::socklen_t)
+(sockaddr, size_of::<c::sockaddr_in6>() as c::socklen_t)
}
}
}
@@ -169,13 +169,13 @@ unsafe fn socket_addr_from_c(
) -> io::Result<SocketAddr> {
match (*storage).ss_family as c_int {
c::AF_INET => {
-assert!(len >= mem::size_of::<c::sockaddr_in>());
+assert!(len >= size_of::<c::sockaddr_in>());
Ok(SocketAddr::V4(socket_addr_v4_from_c(unsafe {
*(storage as *const _ as *const c::sockaddr_in)
})))
}
c::AF_INET6 => {
-assert!(len >= mem::size_of::<c::sockaddr_in6>());
+assert!(len >= size_of::<c::sockaddr_in6>());
Ok(SocketAddr::V6(socket_addr_v6_from_c(unsafe {
*(storage as *const _ as *const c::sockaddr_in6)
})))
@@ -200,7 +200,7 @@ pub fn setsockopt<T>(
level,
option_name,
(&raw const option_value) as *const _,
-mem::size_of::<T>() as c::socklen_t,
+size_of::<T>() as c::socklen_t,
))?;
Ok(())
}
@@ -209,7 +209,7 @@ pub fn setsockopt<T>(
pub fn getsockopt<T: Copy>(sock: &Socket, level: c_int, option_name: c_int) -> io::Result<T> {
unsafe {
let mut option_value: T = mem::zeroed();
-let mut option_len = mem::size_of::<T>() as c::socklen_t;
+let mut option_len = size_of::<T>() as c::socklen_t;
cvt(c::getsockopt(
sock.as_raw(),
level,
@@ -227,7 +227,7 @@ where
{
unsafe {
let mut storage: c::sockaddr_storage = mem::zeroed();
-let mut len = mem::size_of_val(&storage) as c::socklen_t;
+let mut len = size_of_val(&storage) as c::socklen_t;
cvt(f((&raw mut storage) as *mut _, &mut len))?;
socket_addr_from_c(&storage, len as usize)
}
@@ -561,7 +561,7 @@ impl TcpListener {
// so we don't need to zero it here.
// reference: https://linux.die.net/man/2/accept4
let mut storage: mem::MaybeUninit<c::sockaddr_storage> = mem::MaybeUninit::uninit();
-let mut len = mem::size_of_val(&storage) as c::socklen_t;
+let mut len = size_of_val(&storage) as c::socklen_t;
let sock = self.inner.accept(storage.as_mut_ptr() as *mut _, &mut len)?;
let addr = unsafe { socket_addr_from_c(storage.as_ptr(), len as usize)? };
Ok((TcpStream { inner: sock }, addr))


@@ -183,7 +183,7 @@ impl Socket {
fn recv_from_with_flags(&self, buf: &mut [u8], flags: i32) -> io::Result<(usize, SocketAddr)> {
let mut storage: netc::sockaddr_storage = unsafe { mem::zeroed() };
-let mut addrlen = mem::size_of_val(&storage) as netc::socklen_t;
+let mut addrlen = size_of_val(&storage) as netc::socklen_t;
let n = cvt(unsafe {
netc::recvfrom(


@@ -244,7 +244,7 @@ impl Socket {
flags: c_int,
) -> io::Result<(usize, SocketAddr)> {
let mut storage: netc::sockaddr_storage = unsafe { mem::zeroed() };
-let mut addrlen = mem::size_of_val(&storage) as netc::socklen_t;
+let mut addrlen = size_of_val(&storage) as netc::socklen_t;
let n = cvt(unsafe {
netc::recvfrom(


@@ -326,7 +326,7 @@
// so we don't need to zero it here.
// reference: https://linux.die.net/man/2/recvfrom
let mut storage: mem::MaybeUninit<libc::sockaddr_storage> = mem::MaybeUninit::uninit();
-let mut addrlen = mem::size_of_val(&storage) as libc::socklen_t;
+let mut addrlen = size_of_val(&storage) as libc::socklen_t;
let n = cvt(unsafe {
libc::recvfrom(


@@ -211,7 +211,7 @@
flags: c_int,
) -> io::Result<(usize, SocketAddr)> {
let mut storage: netc::sockaddr_storage = unsafe { mem::zeroed() };
-let mut addrlen = mem::size_of_val(&storage) as netc::socklen_t;
+let mut addrlen = size_of_val(&storage) as netc::socklen_t;
let n = cvt(unsafe {
netc::recvfrom(


@@ -381,7 +381,7 @@
flags: c_int,
) -> io::Result<(usize, SocketAddr)> {
let mut storage = unsafe { mem::zeroed::<c::SOCKADDR_STORAGE>() };
-let mut addrlen = mem::size_of_val(&storage) as netc::socklen_t;
+let mut addrlen = size_of_val(&storage) as netc::socklen_t;
let length = cmp::min(buf.len(), <wrlen_t>::MAX as usize) as wrlen_t;
// On unix when a socket is shut down all further reads return 0, so we
@@ -514,13 +514,13 @@
// This is used by sys_common code to abstract over Windows and Unix.
pub fn as_raw(&self) -> c::SOCKET {
-debug_assert_eq!(mem::size_of::<c::SOCKET>(), mem::size_of::<RawSocket>());
-debug_assert_eq!(mem::align_of::<c::SOCKET>(), mem::align_of::<RawSocket>());
+debug_assert_eq!(size_of::<c::SOCKET>(), size_of::<RawSocket>());
+debug_assert_eq!(align_of::<c::SOCKET>(), align_of::<RawSocket>());
self.as_inner().as_raw_socket() as c::SOCKET
}
pub unsafe fn from_raw(raw: c::SOCKET) -> Self {
-debug_assert_eq!(mem::size_of::<c::SOCKET>(), mem::size_of::<RawSocket>());
-debug_assert_eq!(mem::align_of::<c::SOCKET>(), mem::align_of::<RawSocket>());
+debug_assert_eq!(size_of::<c::SOCKET>(), size_of::<RawSocket>());
+debug_assert_eq!(align_of::<c::SOCKET>(), align_of::<RawSocket>());
unsafe { Self::from_raw_socket(raw as RawSocket) }
}
}


@@ -244,7 +244,7 @@ impl UdpSocket {
// let buf = unsafe {
// xous::MemoryRange::new(
// &mut tx_req as *mut SendData as usize,
-// core::mem::size_of::<SendData>(),
+// size_of::<SendData>(),
// )
// .unwrap()
// };


@@ -80,7 +80,7 @@ const LIFECYCLE_EXITED_OR_FINISHED_OR_JOIN_FINALIZE: usize = usize::MAX;
// there's no single value for `JOINING`
// 64KiB for 32-bit ISAs, 128KiB for 64-bit ISAs.
-pub const DEFAULT_MIN_STACK_SIZE: usize = 0x4000 * crate::mem::size_of::<usize>();
+pub const DEFAULT_MIN_STACK_SIZE: usize = 0x4000 * size_of::<usize>();
impl Thread {
/// # Safety

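The arithmetic behind the `DEFAULT_MIN_STACK_SIZE` comment, worked out: `0x4000` is 16384 words, so the byte count scales with the pointer width.

```rust
fn main() {
    // 0x4000 words * 4 bytes/word = 64 KiB on a 32-bit ISA.
    assert_eq!(0x4000 * 4, 64 * 1024);
    // 0x4000 words * 8 bytes/word = 128 KiB on a 64-bit ISA.
    assert_eq!(0x4000 * 8, 128 * 1024);
    println!("this host: {} bytes", 0x4000 * size_of::<usize>());
}
```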

@@ -63,7 +63,7 @@ unsafe impl<T: UserSafeSized> UserSafeSized for [T; 2] {}
/// A type that can be represented in memory as one or more `UserSafeSized`s.
#[unstable(feature = "sgx_platform", issue = "56975")]
pub unsafe trait UserSafe {
-/// Equivalent to `mem::align_of::<Self>`.
+/// Equivalent to `align_of::<Self>`.
fn align_of() -> usize;
/// Constructs a pointer to `Self` given a memory range in user space.
@@ -120,7 +120,7 @@ pub unsafe trait UserSafe {
let is_aligned = |p: *const u8| -> bool { p.is_aligned_to(Self::align_of()) };
assert!(is_aligned(ptr as *const u8));
-assert!(is_user_range(ptr as _, mem::size_of_val(unsafe { &*ptr })));
+assert!(is_user_range(ptr as _, size_of_val(unsafe { &*ptr })));
assert!(!ptr.is_null());
}
}
@@ -128,11 +128,11 @@
#[unstable(feature = "sgx_platform", issue = "56975")]
unsafe impl<T: UserSafeSized> UserSafe for T {
fn align_of() -> usize {
-mem::align_of::<T>()
+align_of::<T>()
}
unsafe fn from_raw_sized_unchecked(ptr: *mut u8, size: usize) -> *mut Self {
-assert_eq!(size, mem::size_of::<T>());
+assert_eq!(size, size_of::<T>());
ptr as _
}
}
@@ -140,7 +140,7 @@ unsafe impl<T: UserSafeSized> UserSafe for T {
#[unstable(feature = "sgx_platform", issue = "56975")]
unsafe impl<T: UserSafeSized> UserSafe for [T] {
fn align_of() -> usize {
-mem::align_of::<T>()
+align_of::<T>()
}
/// # Safety
@@ -155,7 +155,7 @@ unsafe impl<T: UserSafeSized> UserSafe for [T] {
///
/// * the element size is not a factor of the size
unsafe fn from_raw_sized_unchecked(ptr: *mut u8, size: usize) -> *mut Self {
-let elem_size = mem::size_of::<T>();
+let elem_size = size_of::<T>();
assert_eq!(size % elem_size, 0);
let len = size / elem_size;
// SAFETY: The caller must uphold the safety contract for `from_raw_sized_unchecked`
@@ -239,7 +239,7 @@ where
/// Copies `val` into freshly allocated space in user memory.
pub fn new_from_enclave(val: &T) -> Self {
unsafe {
-let mut user = Self::new_uninit_bytes(mem::size_of_val(val));
+let mut user = Self::new_uninit_bytes(size_of_val(val));
user.copy_from_enclave(val);
user
}
@@ -277,7 +277,7 @@
{
/// Allocates space for `T` in user memory.
pub fn uninitialized() -> Self {
-Self::new_uninit_bytes(mem::size_of::<T>())
+Self::new_uninit_bytes(size_of::<T>())
}
}
@@ -288,7 +288,7 @@
{
/// Allocates space for a `[T]` of `n` elements in user memory.
pub fn uninitialized(n: usize) -> Self {
-Self::new_uninit_bytes(n * mem::size_of::<T>())
+Self::new_uninit_bytes(n * size_of::<T>())
}
/// Creates an owned `User<[T]>` from a raw thin pointer and a slice length.
@@ -306,9 +306,7 @@
/// * The pointed-to range does not fit in the address space
/// * The pointed-to range is not in user memory
pub unsafe fn from_raw_parts(ptr: *mut T, len: usize) -> Self {
-User(unsafe {
-NonNull::new_userref(<[T]>::from_raw_sized(ptr as _, len * mem::size_of::<T>()))
-})
+User(unsafe { NonNull::new_userref(<[T]>::from_raw_sized(ptr as _, len * size_of::<T>())) })
}
}
@@ -326,7 +324,7 @@
// `<*const u8>::align_offset` aren't _guaranteed_ to compute the largest
// possible middle region, and as such can't be used.
fn u64_align_to_guaranteed(ptr: *const u8, mut len: usize) -> (usize, usize, usize) {
-const QWORD_SIZE: usize = mem::size_of::<u64>();
+const QWORD_SIZE: usize = size_of::<u64>();
let offset = ptr as usize % QWORD_SIZE;
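
Why the comment above rules out `align_offset`: it is allowed to return a conservative answer, while this code needs a *guaranteed* largest 8-byte-aligned middle region, so plain modular arithmetic is used instead. A hedged sketch of the head/middle/tail split the truncated function appears to compute (the full body is not shown above):

```rust
// Split a byte range into an unaligned head, the largest u64-aligned middle,
// and an unaligned tail, using only guaranteed integer arithmetic.
fn split_for_u64(ptr: *const u8, len: usize) -> (usize, usize, usize) {
    const QWORD_SIZE: usize = size_of::<u64>();
    let offset = ptr as usize % QWORD_SIZE;
    let head = if offset == 0 { 0 } else { (QWORD_SIZE - offset).min(len) };
    let middle = (len - head) / QWORD_SIZE * QWORD_SIZE;
    (head, middle, len - head - middle)
}

fn main() {
    let buf = [0u8; 32];
    let (h, m, t) = split_for_u64(buf.as_ptr(), buf.len());
    assert_eq!(h + m + t, buf.len());
    assert_eq!(m % 8, 0); // the middle region is always u64-aligned in size
}
```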
@@ -532,11 +530,11 @@
/// the source. This can happen for dynamically-sized types such as slices.
pub fn copy_from_enclave(&mut self, val: &T) {
unsafe {
-assert_eq!(mem::size_of_val(val), mem::size_of_val(&*self.0.get()));
+assert_eq!(size_of_val(val), size_of_val(&*self.0.get()));
copy_to_userspace(
val as *const T as *const u8,
self.0.get() as *mut T as *mut u8,
-mem::size_of_val(val),
+size_of_val(val),
);
}
}
@@ -548,11 +546,11 @@
/// the source. This can happen for dynamically-sized types such as slices.
pub fn copy_to_enclave(&self, dest: &mut T) {
unsafe {
-assert_eq!(mem::size_of_val(dest), mem::size_of_val(&*self.0.get()));
+assert_eq!(size_of_val(dest), size_of_val(&*self.0.get()));
copy_from_userspace(
self.0.get() as *const T as *const u8,
dest as *mut T as *mut u8,
-mem::size_of_val(dest),
+size_of_val(dest),
);
}
}
@@ -577,7 +575,7 @@
pub fn to_enclave(&self) -> T {
unsafe {
let mut data = mem::MaybeUninit::uninit();
-copy_from_userspace(self.0.get() as _, data.as_mut_ptr() as _, mem::size_of::<T>());
+copy_from_userspace(self.0.get() as _, data.as_mut_ptr() as _, size_of::<T>());
data.assume_init()
}
}
@@ -602,9 +600,7 @@
/// * The pointed-to range is not in user memory
pub unsafe fn from_raw_parts<'a>(ptr: *const T, len: usize) -> &'a Self {
// SAFETY: The caller must uphold the safety contract for `from_raw_parts`.
-unsafe {
-&*(<[T]>::from_raw_sized(ptr as _, len * mem::size_of::<T>()).as_ptr() as *const Self)
-}
+unsafe { &*(<[T]>::from_raw_sized(ptr as _, len * size_of::<T>()).as_ptr() as *const Self) }
}
/// Creates a `&mut UserRef<[T]>` from a raw thin pointer and a slice length.
@@ -624,7 +620,7 @@
pub unsafe fn from_raw_parts_mut<'a>(ptr: *mut T, len: usize) -> &'a mut Self {
// SAFETY: The caller must uphold the safety contract for `from_raw_parts_mut`.
unsafe {
-&mut *(<[T]>::from_raw_sized(ptr as _, len * mem::size_of::<T>()).as_ptr() as *mut Self)
+&mut *(<[T]>::from_raw_sized(ptr as _, len * size_of::<T>()).as_ptr() as *mut Self)
}
}
@@ -744,7 +740,7 @@
fn drop(&mut self) {
unsafe {
let ptr = (*self.0.as_ptr()).0.get();
-super::free(ptr as _, mem::size_of_val(&mut *ptr), T::align_of());
+super::free(ptr as _, size_of_val(&mut *ptr), T::align_of());
}
}
}


@@ -4,7 +4,6 @@ use super::helpers;
use crate::env::current_exe;
use crate::ffi::OsString;
use crate::iter::Iterator;
-use crate::mem::size_of;
use crate::{fmt, vec};
pub struct Args {


@@ -15,7 +15,7 @@ use r_efi::protocols::{device_path, device_path_to_text, service_binding, shell}
use crate::ffi::{OsStr, OsString};
use crate::io::{self, const_error};
use crate::marker::PhantomData;
-use crate::mem::{MaybeUninit, size_of};
+use crate::mem::MaybeUninit;
use crate::os::uefi::env::boot_services;
use crate::os::uefi::ffi::{OsStrExt, OsStringExt};
use crate::os::uefi::{self};


@@ -490,7 +490,7 @@ mod uefi_command_internal {
helpers::open_protocol(self.handle, loaded_image::PROTOCOL_GUID).unwrap();
let len = args.len();
-let args_size: u32 = (len * crate::mem::size_of::<u16>()).try_into().unwrap();
+let args_size: u32 = (len * size_of::<u16>()).try_into().unwrap();
let ptr = Box::into_raw(args).as_mut_ptr();
unsafe {

Some files were not shown because too many files have changed in this diff.