diff --git a/compiler/rustc_errors/src/lib.rs b/compiler/rustc_errors/src/lib.rs
index f69e756a3e1..86959b28e53 100644
--- a/compiler/rustc_errors/src/lib.rs
+++ b/compiler/rustc_errors/src/lib.rs
@@ -14,6 +14,7 @@
 #![feature(associated_type_defaults)]
 #![feature(box_into_inner)]
 #![feature(box_patterns)]
+#![feature(default_field_values)]
 #![feature(error_reporter)]
 #![feature(if_let_guard)]
 #![feature(let_chains)]
diff --git a/compiler/rustc_errors/src/markdown/parse.rs b/compiler/rustc_errors/src/markdown/parse.rs
index 7a991a2ace7..f02387d8335 100644
--- a/compiler/rustc_errors/src/markdown/parse.rs
+++ b/compiler/rustc_errors/src/markdown/parse.rs
@@ -40,11 +40,13 @@ type ParseResult<'a> = Option<Parsed<'a>>;
 
 /// Parsing context
 #[derive(Clone, Copy, Debug, PartialEq)]
+// The default values are the most common settings for non-top-level parsing: not a top
+// block, not at line start (i.e. the previous character is whitespace, not an escape).
 struct Context {
     /// If true, we are at the topmost level (not recursing a nested tt)
-    top_block: bool,
+    top_block: bool = false,
     /// Previous character
-    prev: Prev,
+    prev: Prev = Prev::Whitespace,
 }
 
 /// Character class preceding this one
@@ -57,14 +59,6 @@ enum Prev {
     Any,
 }
 
-impl Default for Context {
-    /// Most common setting for non top-level parsing: not top block, not at
-    /// line start (yes leading whitespace, not escaped)
-    fn default() -> Self {
-        Self { top_block: false, prev: Prev::Whitespace }
-    }
-}
-
 /// Flags to simple parser function
 #[derive(Clone, Copy, Debug, PartialEq)]
 enum ParseOpt {
@@ -248,7 +242,7 @@ fn parse_heading(buf: &[u8]) -> ParseResult<'_> {
     }
 
     let (txt, rest) = parse_to_newline(&buf[1..]);
-    let ctx = Context { top_block: false, prev: Prev::Whitespace };
+    let ctx = Context { .. };
     let stream = parse_recursive(txt, ctx);
 
     Some((MdTree::Heading(level.try_into().unwrap(), stream), rest))
@@ -257,7 +251,7 @@ fn parse_heading(buf: &[u8]) -> ParseResult<'_> {
 /// Bulleted list
 fn parse_unordered_li(buf: &[u8]) -> Parsed<'_> {
     let (txt, rest) = get_indented_section(&buf[2..]);
-    let ctx = Context { top_block: false, prev: Prev::Whitespace };
+    let ctx = Context { .. };
     let stream = parse_recursive(trim_ascii_start(txt), ctx);
     (MdTree::UnorderedListItem(stream), rest)
 }
@@ -266,7 +260,7 @@ fn parse_unordered_li(buf: &[u8]) -> Parsed<'_> {
 fn parse_ordered_li(buf: &[u8]) -> Parsed<'_> {
     let (num, pos) = ord_list_start(buf).unwrap(); // success tested in caller
     let (txt, rest) = get_indented_section(&buf[pos..]);
-    let ctx = Context { top_block: false, prev: Prev::Whitespace };
+    let ctx = Context { .. };
     let stream = parse_recursive(trim_ascii_start(txt), ctx);
     (MdTree::OrderedListItem(num, stream), rest)
 }
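
The `Context { .. }` constructions above, and the `NextSolverConfig` change further down, rely on the unstable `default_field_values` feature. A minimal standalone sketch of the semantics being relied on (nightly only; the field names are illustrative stand-ins, not the real `prev: Prev`):

```rust
// Minimal sketch (nightly only) of the `default_field_values` feature:
// `S { .. }` fills every unmentioned field from the default written in the
// struct definition, and `#[derive(Default)]` uses those same defaults.
#![feature(default_field_values)]

#[derive(Clone, Copy, Debug, PartialEq, Default)]
struct Ctx {
    top_block: bool = false,
    at_line_start: bool = true, // illustrative stand-in for the real `prev: Prev`
}

fn main() {
    // Construct with all defaults; no hand-written `impl Default` is required.
    let ctx = Ctx { .. };
    assert_eq!(ctx, Ctx { top_block: false, at_line_start: true });
    assert_eq!(ctx, Ctx::default());

    // Individual fields can still be overridden.
    let top = Ctx { top_block: true, .. };
    assert!(top.top_block && top.at_line_start);
}
```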
diff --git a/compiler/rustc_hir_pretty/src/lib.rs b/compiler/rustc_hir_pretty/src/lib.rs
index 147b42c0490..1f6fb3a329a 100644
--- a/compiler/rustc_hir_pretty/src/lib.rs
+++ b/compiler/rustc_hir_pretty/src/lib.rs
@@ -117,80 +117,6 @@ impl<'a> State<'a> {
                 ));
                 self.hardbreak()
             }
-            hir::Attribute::Parsed(AttributeKind::Deprecation { deprecation, .. }) => {
-                self.word("#[deprecated");
-
-                // There are three possible forms here:
-                // 1. a form with explicit components like
-                //    `#[deprecated(since = "1.2.3", note = "some note", suggestion = "something")]`
-                //    where each component may be present or absent.
-                // 2. `#[deprecated = "message"]`
-                // 3. `#[deprecated]`
-                //
-                // Let's figure out which we need.
-                // If there's a `since` or `suggestion` value, we're definitely in form 1.
-                if matches!(
-                    deprecation.since,
-                    rustc_attr_parsing::DeprecatedSince::RustcVersion(..)
-                        | rustc_attr_parsing::DeprecatedSince::Future
-                        | rustc_attr_parsing::DeprecatedSince::NonStandard(..)
-                ) || deprecation.suggestion.is_some()
-                {
-                    self.word("(");
-                    let mut use_comma = false;
-
-                    match &deprecation.since {
-                        rustc_attr_parsing::DeprecatedSince::RustcVersion(rustc_version) => {
-                            self.word("since = \"");
-                            self.word(format!(
-                                "{}.{}.{}",
-                                rustc_version.major, rustc_version.minor, rustc_version.patch
-                            ));
-                            self.word("\"");
-                            use_comma = true;
-                        }
-                        rustc_attr_parsing::DeprecatedSince::Future => {
-                            self.word("since = \"future\"");
-                            use_comma = true;
-                        }
-                        rustc_attr_parsing::DeprecatedSince::NonStandard(symbol) => {
-                            self.word("since = \"");
-                            self.word(symbol.to_ident_string());
-                            self.word("\"");
-                            use_comma = true;
-                        }
-                        _ => {}
-                    }
-
-                    if let Some(note) = &deprecation.note {
-                        if use_comma {
-                            self.word(", ");
-                        }
-                        self.word("note = \"");
-                        self.word(note.to_ident_string());
-                        self.word("\"");
-                        use_comma = true;
-                    }
-
-                    if let Some(suggestion) = &deprecation.suggestion {
-                        if use_comma {
-                            self.word(", ");
-                        }
-                        self.word("suggestion = \"");
-                        self.word(suggestion.to_ident_string());
-                        self.word("\"");
-                    }
-                } else if let Some(note) = &deprecation.note {
-                    // We're in form 2: `#[deprecated = "message"]`.
-                    self.word(" = \"");
-                    self.word(note.to_ident_string());
-                    self.word("\"");
-                } else {
-                    // We're in form 3: `#[deprecated]`. Nothing to do here.
-                }
-
-                self.word("]");
-            }
             hir::Attribute::Parsed(pa) => {
                 self.word("#[attr=\"");
                 pa.print_attribute(self);
diff --git a/compiler/rustc_mir_transform/src/add_call_guards.rs b/compiler/rustc_mir_transform/src/add_call_guards.rs
index 55694cacd92..bc335cee147 100644
--- a/compiler/rustc_mir_transform/src/add_call_guards.rs
+++ b/compiler/rustc_mir_transform/src/add_call_guards.rs
@@ -40,6 +40,16 @@ impl<'tcx> crate::MirPass<'tcx> for AddCallGuards {
         let mut new_blocks = Vec::new();
 
         let cur_len = body.basic_blocks.len();
+        let mut new_block = |source_info: SourceInfo, is_cleanup: bool, target: BasicBlock| {
+            let block = BasicBlockData {
+                statements: vec![],
+                is_cleanup,
+                terminator: Some(Terminator { source_info, kind: TerminatorKind::Goto { target } }),
+            };
+            let idx = cur_len + new_blocks.len();
+            new_blocks.push(block);
+            BasicBlock::new(idx)
+        };
 
         for block in body.basic_blocks_mut() {
             match block.terminator {
@@ -47,25 +57,34 @@ impl<'tcx> crate::MirPass<'tcx> for AddCallGuards {
                     kind: TerminatorKind::Call { target: Some(ref mut destination), unwind, .. },
                     source_info,
                 }) if pred_count[*destination] > 1
-                    && (matches!(
-                        unwind,
-                        UnwindAction::Cleanup(_) | UnwindAction::Terminate(_)
-                    ) || self == &AllCallEdges) =>
+                    && (generates_invoke(unwind) || self == &AllCallEdges) =>
                 {
                     // It's a critical edge, break it
-                    let call_guard = BasicBlockData {
-                        statements: vec![],
-                        is_cleanup: block.is_cleanup,
-                        terminator: Some(Terminator {
-                            source_info,
-                            kind: TerminatorKind::Goto { target: *destination },
-                        }),
-                    };
-
-                    // Get the index it will be when inserted into the MIR
-                    let idx = cur_len + new_blocks.len();
-                    new_blocks.push(call_guard);
-                    *destination = BasicBlock::new(idx);
+                    *destination = new_block(source_info, block.is_cleanup, *destination);
+                }
+                Some(Terminator {
+                    kind:
+                        TerminatorKind::InlineAsm {
+                            asm_macro: InlineAsmMacro::Asm,
+                            ref mut targets,
+                            ref operands,
+                            unwind,
+                            ..
+                        },
+                    source_info,
+                }) if self == &CriticalCallEdges => {
+                    let has_outputs = operands.iter().any(|op| {
+                        matches!(op, InlineAsmOperand::InOut { .. } | InlineAsmOperand::Out { .. })
+                    });
+                    let has_labels =
+                        operands.iter().any(|op| matches!(op, InlineAsmOperand::Label { .. }));
+                    if has_outputs && (has_labels || generates_invoke(unwind)) {
+                        for target in targets.iter_mut() {
+                            if pred_count[*target] > 1 {
+                                *target = new_block(source_info, block.is_cleanup, *target);
+                            }
+                        }
+                    }
                 }
                 _ => {}
             }
@@ -80,3 +99,11 @@ impl<'tcx> crate::MirPass<'tcx> for AddCallGuards {
         true
     }
 }
+
+/// Returns true if this unwind action is codegenned as an LLVM `invoke` rather than a `call`.
+fn generates_invoke(unwind: UnwindAction) -> bool {
+    match unwind {
+        UnwindAction::Continue | UnwindAction::Unreachable => false,
+        UnwindAction::Cleanup(_) | UnwindAction::Terminate(_) => true,
+    }
+}
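
For context on `generates_invoke`: a call needs an unwind edge (and hence the critical-edge splitting above) only when unwinding out of it has local work to do, i.e. `UnwindAction::Cleanup` or `UnwindAction::Terminate`. A hedged source-level illustration, assuming `panic = "unwind"`:

```rust
// Hedged illustration: a panic propagating out of `callee` must run `Drop`
// for `_guard`, so the call site gets an UnwindAction::Cleanup edge in MIR
// and is code generated as an LLVM `invoke`. A call with nothing to clean up
// (UnwindAction::Continue / Unreachable) can be emitted as a plain `call`.
struct Guard;

impl Drop for Guard {
    fn drop(&mut self) {
        println!("cleanup ran while unwinding");
    }
}

fn callee() {
    panic!("boom");
}

fn caller() {
    // `_guard` is live across the call, so the call needs a cleanup edge.
    let _guard = Guard;
    callee();
}

fn main() {
    let _ = std::panic::catch_unwind(caller);
}
```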
diff --git a/compiler/rustc_session/src/config.rs b/compiler/rustc_session/src/config.rs
index e62a8fc0fc3..7af221c9607 100644
--- a/compiler/rustc_session/src/config.rs
+++ b/compiler/rustc_session/src/config.rs
@@ -681,10 +681,14 @@ impl OutputType {
 }
 
 /// The type of diagnostics output to generate.
-#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Default)]
 pub enum ErrorOutputType {
     /// Output meant for the consumption of humans.
-    HumanReadable(HumanReadableErrorType, ColorConfig),
+    #[default]
+    HumanReadable {
+        kind: HumanReadableErrorType = HumanReadableErrorType::Default,
+        color_config: ColorConfig = ColorConfig::Auto,
+    },
     /// Output that's consumed by other tools such as `rustfix` or the `RLS`.
     Json {
         /// Render the JSON in a human readable way (with indents and newlines).
@@ -696,12 +700,6 @@ pub enum ErrorOutputType {
     },
 }
 
-impl Default for ErrorOutputType {
-    fn default() -> Self {
-        Self::HumanReadable(HumanReadableErrorType::Default, ColorConfig::Auto)
-    }
-}
-
 #[derive(Clone, Hash, Debug)]
 pub enum ResolveDocLinks {
     /// Do not resolve doc links.
@@ -898,18 +896,13 @@ pub enum PrintKind {
     DeploymentTarget,
 }
 
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, Default)]
 pub struct NextSolverConfig {
     /// Whether the new trait solver should be enabled in coherence.
-    pub coherence: bool,
+    pub coherence: bool = true,
     /// Whether the new trait solver should be enabled everywhere.
     /// This is only `true` if `coherence` is also enabled.
-    pub globally: bool,
-}
-impl Default for NextSolverConfig {
-    fn default() -> Self {
-        NextSolverConfig { coherence: true, globally: false }
-    }
+    pub globally: bool = false,
 }
 
 #[derive(Clone)]
@@ -1825,7 +1818,7 @@ pub fn parse_json(early_dcx: &EarlyDiagCtxt, matches: &getopts::Matches) -> Json
 pub fn parse_error_format(
     early_dcx: &mut EarlyDiagCtxt,
     matches: &getopts::Matches,
-    color: ColorConfig,
+    color_config: ColorConfig,
     json_color: ColorConfig,
     json_rendered: HumanReadableErrorType,
 ) -> ErrorOutputType {
@@ -1835,27 +1828,26 @@ pub fn parse_error_format(
     // `opt_present` because the latter will panic.
     let error_format = if matches.opts_present(&["error-format".to_owned()]) {
         match matches.opt_str("error-format").as_deref() {
-            None | Some("human") => {
-                ErrorOutputType::HumanReadable(HumanReadableErrorType::Default, color)
-            }
-            Some("human-annotate-rs") => {
-                ErrorOutputType::HumanReadable(HumanReadableErrorType::AnnotateSnippet, color)
-            }
+            None | Some("human") => ErrorOutputType::HumanReadable { color_config, .. },
+            Some("human-annotate-rs") => ErrorOutputType::HumanReadable {
+                kind: HumanReadableErrorType::AnnotateSnippet,
+                color_config,
+            },
             Some("json") => {
                 ErrorOutputType::Json { pretty: false, json_rendered, color_config: json_color }
             }
             Some("pretty-json") => {
                 ErrorOutputType::Json { pretty: true, json_rendered, color_config: json_color }
             }
-            Some("short") => ErrorOutputType::HumanReadable(HumanReadableErrorType::Short, color),
-            Some("human-unicode") => {
-                ErrorOutputType::HumanReadable(HumanReadableErrorType::Unicode, color)
+            Some("short") => {
+                ErrorOutputType::HumanReadable { kind: HumanReadableErrorType::Short, color_config }
             }
+            Some("human-unicode") => ErrorOutputType::HumanReadable {
+                kind: HumanReadableErrorType::Unicode,
+                color_config,
+            },
             Some(arg) => {
-                early_dcx.set_error_format(ErrorOutputType::HumanReadable(
-                    HumanReadableErrorType::Default,
-                    color,
-                ));
+                early_dcx.set_error_format(ErrorOutputType::HumanReadable { color_config, .. });
                 early_dcx.early_fatal(format!(
                     "argument for `--error-format` must be `human`, `human-annotate-rs`, \
                     `human-unicode`, `json`, `pretty-json` or `short` (instead was `{arg}`)"
@@ -1863,7 +1855,7 @@ pub fn parse_error_format(
             }
         }
     } else {
-        ErrorOutputType::HumanReadable(HumanReadableErrorType::Default, color)
+        ErrorOutputType::HumanReadable { color_config, .. }
     };
 
     match error_format {
@@ -1918,7 +1910,7 @@ fn check_error_format_stability(
     }
     let format = match format {
         ErrorOutputType::Json { pretty: true, .. } => "pretty-json",
-        ErrorOutputType::HumanReadable(format, _) => match format {
+        ErrorOutputType::HumanReadable { kind, .. } => match kind {
             HumanReadableErrorType::AnnotateSnippet => "human-annotate-rs",
             HumanReadableErrorType::Unicode => "human-unicode",
             _ => return,
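
A hedged sketch of how `#[derive(Default)]`, `#[default]`, and default field values interact for an enum shaped like `ErrorOutputType` (nightly only; `Output` and its fields are illustrative names):

```rust
// Hedged sketch (nightly only) of `#[derive(Default)]` + `#[default]` on an
// enum whose default variant uses default field values, mirroring the
// `ErrorOutputType` change above.
#![feature(default_field_values)]

#[allow(dead_code)]
#[derive(Debug, PartialEq, Default)]
enum Output {
    #[default]
    Human {
        short: bool = false,
        color: bool = true,
    },
    Json {
        pretty: bool,
    },
}

fn main() {
    // The derived impl picks the `#[default]` variant and fills its fields
    // from their declared defaults.
    assert_eq!(Output::default(), Output::Human { short: false, color: true });

    // Variant literals can also rely on the defaults, as `parse_error_format`
    // does with `ErrorOutputType::HumanReadable { color_config, .. }`.
    let o = Output::Human { short: true, .. };
    assert_eq!(o, Output::Human { short: true, color: true });
}
```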
diff --git a/compiler/rustc_session/src/lib.rs b/compiler/rustc_session/src/lib.rs
index 112adde3740..d432e84fdb2 100644
--- a/compiler/rustc_session/src/lib.rs
+++ b/compiler/rustc_session/src/lib.rs
@@ -1,5 +1,6 @@
 // tidy-alphabetical-start
 #![allow(internal_features)]
+#![feature(default_field_values)]
 #![feature(iter_intersperse)]
 #![feature(let_chains)]
 #![feature(rustc_attrs)]
diff --git a/compiler/rustc_session/src/session.rs b/compiler/rustc_session/src/session.rs
index ecdf76d22fb..aa1e9762f39 100644
--- a/compiler/rustc_session/src/session.rs
+++ b/compiler/rustc_session/src/session.rs
@@ -913,7 +913,7 @@ fn default_emitter(
     let source_map = if sopts.unstable_opts.link_only { None } else { Some(source_map) };
 
     match sopts.error_format {
-        config::ErrorOutputType::HumanReadable(kind, color_config) => {
+        config::ErrorOutputType::HumanReadable { kind, color_config } => {
             let short = kind.short();
 
             if let HumanReadableErrorType::AnnotateSnippet = kind {
@@ -1430,7 +1430,7 @@ fn mk_emitter(output: ErrorOutputType) -> Box<DynEmitter> {
     let fallback_bundle =
         fallback_fluent_bundle(vec![rustc_errors::DEFAULT_LOCALE_RESOURCE], false);
     let emitter: Box<DynEmitter> = match output {
-        config::ErrorOutputType::HumanReadable(kind, color_config) => {
+        config::ErrorOutputType::HumanReadable { kind, color_config } => {
             let short = kind.short();
             Box::new(
                 HumanEmitter::new(stderr_destination(color_config), fallback_bundle)
diff --git a/library/alloc/benches/slice.rs b/library/alloc/benches/slice.rs
index c6b46e6a2a1..27b0e6fac0a 100644
--- a/library/alloc/benches/slice.rs
+++ b/library/alloc/benches/slice.rs
@@ -1,4 +1,4 @@
-use std::{mem, ptr};
+use std::ptr;
 
 use rand::Rng;
 use rand::distr::{Alphanumeric, SampleString, StandardUniform};
@@ -234,7 +234,7 @@ macro_rules! sort {
         fn $name(b: &mut Bencher) {
             let v = $gen($len);
             b.iter(|| v.clone().$f());
-            b.bytes = $len * mem::size_of_val(&$gen(1)[0]) as u64;
+            b.bytes = $len * size_of_val(&$gen(1)[0]) as u64;
         }
     };
 }
@@ -246,7 +246,7 @@ macro_rules! sort_strings {
             let v = $gen($len);
             let v = v.iter().map(|s| &**s).collect::<Vec<&str>>();
             b.iter(|| v.clone().$f());
-            b.bytes = $len * mem::size_of::<&str>() as u64;
+            b.bytes = $len * size_of::<&str>() as u64;
         }
     };
 }
@@ -268,7 +268,7 @@ macro_rules! sort_expensive {
                 });
                 black_box(count);
             });
-            b.bytes = $len * mem::size_of_val(&$gen(1)[0]) as u64;
+            b.bytes = $len * size_of_val(&$gen(1)[0]) as u64;
         }
     };
 }
@@ -279,7 +279,7 @@ macro_rules! sort_lexicographic {
         fn $name(b: &mut Bencher) {
             let v = $gen($len);
             b.iter(|| v.clone().$f(|x| x.to_string()));
-            b.bytes = $len * mem::size_of_val(&$gen(1)[0]) as u64;
+            b.bytes = $len * size_of_val(&$gen(1)[0]) as u64;
         }
     };
 }
@@ -322,7 +322,7 @@ macro_rules! reverse {
         fn $name(b: &mut Bencher) {
             // odd length and offset by 1 to be as unaligned as possible
             let n = 0xFFFFF;
-            let mut v: Vec<_> = (0..1 + (n / mem::size_of::<$ty>() as u64)).map($f).collect();
+            let mut v: Vec<_> = (0..1 + (n / size_of::<$ty>() as u64)).map($f).collect();
             b.iter(|| black_box(&mut v[1..]).reverse());
             b.bytes = n;
         }
@@ -346,7 +346,7 @@ macro_rules! rotate {
     ($name:ident, $gen:expr, $len:expr, $mid:expr) => {
         #[bench]
         fn $name(b: &mut Bencher) {
-            let size = mem::size_of_val(&$gen(1)[0]);
+            let size = size_of_val(&$gen(1)[0]);
             let mut v = $gen($len * 8 / size);
             b.iter(|| black_box(&mut v).rotate_left(($mid * 8 + size - 1) / size));
             b.bytes = (v.len() * size) as u64;
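
The `mem::size_of*`/`mem::align_of*` to bare-name changes in this and the following library files work because these free functions were added to the std/core preludes in Rust 1.80, so the `use std::mem`/`use core::mem` imports become unnecessary. A minimal sketch:

```rust
// Minimal sketch: since Rust 1.80, `size_of`, `size_of_val`, `align_of`, and
// `align_of_val` are re-exported in the std/core preludes for every edition,
// so the bare names resolve without any `use std::mem::...` import.
fn main() {
    assert_eq!(size_of::<u32>(), 4);
    assert_eq!(align_of::<u8>(), 1);

    let bytes = [0u8; 3];
    assert_eq!(size_of_val(&bytes), 3);
    assert_eq!(align_of_val(&bytes), 1);
}
```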
diff --git a/library/alloc/benches/vec.rs b/library/alloc/benches/vec.rs
index a725ad6894b..1dab71fa1f4 100644
--- a/library/alloc/benches/vec.rs
+++ b/library/alloc/benches/vec.rs
@@ -669,7 +669,7 @@ fn random_sorted_fill(mut seed: u32, buf: &mut [u32]) {
 // This algorithm was used for Vecs prior to Rust 1.52.
 fn bench_dedup_slice_truncate(b: &mut Bencher, sz: usize) {
     let mut template = vec![0u32; sz];
-    b.bytes = std::mem::size_of_val(template.as_slice()) as u64;
+    b.bytes = size_of_val(template.as_slice()) as u64;
     random_sorted_fill(0x43, &mut template);
 
     let mut vec = template.clone();
@@ -691,7 +691,7 @@ fn bench_dedup_slice_truncate(b: &mut Bencher, sz: usize) {
 // Measures performance of Vec::dedup on random data.
 fn bench_vec_dedup_random(b: &mut Bencher, sz: usize) {
     let mut template = vec![0u32; sz];
-    b.bytes = std::mem::size_of_val(template.as_slice()) as u64;
+    b.bytes = size_of_val(template.as_slice()) as u64;
     random_sorted_fill(0x43, &mut template);
 
     let mut vec = template.clone();
@@ -708,7 +708,7 @@ fn bench_vec_dedup_random(b: &mut Bencher, sz: usize) {
 // Measures performance of Vec::dedup when no items are removed
 fn bench_vec_dedup_none(b: &mut Bencher, sz: usize) {
     let mut template = vec![0u32; sz];
-    b.bytes = std::mem::size_of_val(template.as_slice()) as u64;
+    b.bytes = size_of_val(template.as_slice()) as u64;
     template.chunks_exact_mut(2).for_each(|w| {
         w[0] = black_box(0);
         w[1] = black_box(5);
@@ -729,7 +729,7 @@ fn bench_vec_dedup_none(b: &mut Bencher, sz: usize) {
 // Measures performance of Vec::dedup when all items are removed
 fn bench_vec_dedup_all(b: &mut Bencher, sz: usize) {
     let mut template = vec![0u32; sz];
-    b.bytes = std::mem::size_of_val(template.as_slice()) as u64;
+    b.bytes = size_of_val(template.as_slice()) as u64;
     template.iter_mut().for_each(|w| {
         *w = black_box(0);
     });
diff --git a/library/alloc/src/boxed/convert.rs b/library/alloc/src/boxed/convert.rs
index 255cefb1e78..80626580202 100644
--- a/library/alloc/src/boxed/convert.rs
+++ b/library/alloc/src/boxed/convert.rs
@@ -529,7 +529,6 @@ impl<'a, E: Error + 'a> From<E> for Box<dyn Error + 'a> {
     /// ```
     /// use std::error::Error;
     /// use std::fmt;
-    /// use std::mem;
     ///
     /// #[derive(Debug)]
     /// struct AnError;
@@ -543,9 +542,9 @@ impl<'a, E: Error + 'a> From<E> for Box<dyn Error + 'a> {
     /// impl Error for AnError {}
     ///
     /// let an_error = AnError;
-    /// assert!(0 == mem::size_of_val(&an_error));
+    /// assert!(0 == size_of_val(&an_error));
     /// let a_boxed_error = Box::<dyn Error>::from(an_error);
-    /// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error))
+    /// assert!(size_of::<Box<dyn Error>>() == size_of_val(&a_boxed_error))
     /// ```
     fn from(err: E) -> Box<dyn Error + 'a> {
         Box::new(err)
@@ -563,7 +562,6 @@ impl<'a, E: Error + Send + Sync + 'a> From<E> for Box<dyn Error + Send + Sync +
     /// ```
     /// use std::error::Error;
     /// use std::fmt;
-    /// use std::mem;
     ///
     /// #[derive(Debug)]
     /// struct AnError;
@@ -581,10 +579,10 @@ impl<'a, E: Error + Send + Sync + 'a> From<E> for Box<dyn Error + Send + Sync +
     /// unsafe impl Sync for AnError {}
     ///
     /// let an_error = AnError;
-    /// assert!(0 == mem::size_of_val(&an_error));
+    /// assert!(0 == size_of_val(&an_error));
     /// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(an_error);
     /// assert!(
-    ///     mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error))
+    ///     size_of::<Box<dyn Error + Send + Sync>>() == size_of_val(&a_boxed_error))
     /// ```
     fn from(err: E) -> Box<dyn Error + Send + Sync + 'a> {
         Box::new(err)
@@ -600,12 +598,11 @@ impl<'a> From<String> for Box<dyn Error + Send + Sync + 'a> {
     ///
     /// ```
     /// use std::error::Error;
-    /// use std::mem;
     ///
     /// let a_string_error = "a string error".to_string();
     /// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(a_string_error);
     /// assert!(
-    ///     mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error))
+    ///     size_of::<Box<dyn Error + Send + Sync>>() == size_of_val(&a_boxed_error))
     /// ```
     #[inline]
     fn from(err: String) -> Box<dyn Error + Send + Sync + 'a> {
@@ -644,11 +641,10 @@ impl<'a> From<String> for Box<dyn Error + 'a> {
     ///
     /// ```
     /// use std::error::Error;
-    /// use std::mem;
     ///
     /// let a_string_error = "a string error".to_string();
     /// let a_boxed_error = Box::<dyn Error>::from(a_string_error);
-    /// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error))
+    /// assert!(size_of::<Box<dyn Error>>() == size_of_val(&a_boxed_error))
     /// ```
     fn from(str_err: String) -> Box<dyn Error + 'a> {
         let err1: Box<dyn Error + Send + Sync> = From::from(str_err);
@@ -668,12 +664,11 @@ impl<'a> From<&str> for Box<dyn Error + Send + Sync + 'a> {
     ///
     /// ```
     /// use std::error::Error;
-    /// use std::mem;
     ///
     /// let a_str_error = "a str error";
     /// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(a_str_error);
     /// assert!(
-    ///     mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error))
+    ///     size_of::<Box<dyn Error + Send + Sync>>() == size_of_val(&a_boxed_error))
     /// ```
     #[inline]
     fn from(err: &str) -> Box<dyn Error + Send + Sync + 'a> {
@@ -692,11 +687,10 @@ impl<'a> From<&str> for Box<dyn Error + 'a> {
     ///
     /// ```
     /// use std::error::Error;
-    /// use std::mem;
     ///
     /// let a_str_error = "a str error";
     /// let a_boxed_error = Box::<dyn Error>::from(a_str_error);
-    /// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error))
+    /// assert!(size_of::<Box<dyn Error>>() == size_of_val(&a_boxed_error))
     /// ```
     fn from(err: &str) -> Box<dyn Error + 'a> {
         From::from(String::from(err))
@@ -712,13 +706,12 @@ impl<'a, 'b> From<Cow<'b, str>> for Box<dyn Error + Send + Sync + 'a> {
     ///
     /// ```
     /// use std::error::Error;
-    /// use std::mem;
     /// use std::borrow::Cow;
     ///
     /// let a_cow_str_error = Cow::from("a str error");
     /// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(a_cow_str_error);
     /// assert!(
-    ///     mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error))
+    ///     size_of::<Box<dyn Error + Send + Sync>>() == size_of_val(&a_boxed_error))
     /// ```
     fn from(err: Cow<'b, str>) -> Box<dyn Error + Send + Sync + 'a> {
         From::from(String::from(err))
@@ -734,12 +727,11 @@ impl<'a, 'b> From<Cow<'b, str>> for Box<dyn Error + 'a> {
     ///
     /// ```
     /// use std::error::Error;
-    /// use std::mem;
     /// use std::borrow::Cow;
     ///
     /// let a_cow_str_error = Cow::from("a str error");
     /// let a_boxed_error = Box::<dyn Error>::from(a_cow_str_error);
-    /// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error))
+    /// assert!(size_of::<Box<dyn Error>>() == size_of_val(&a_boxed_error))
     /// ```
     fn from(err: Cow<'b, str>) -> Box<dyn Error + 'a> {
         From::from(String::from(err))
diff --git a/library/alloc/src/boxed/thin.rs b/library/alloc/src/boxed/thin.rs
index 78e5aec09b1..21425b9846e 100644
--- a/library/alloc/src/boxed/thin.rs
+++ b/library/alloc/src/boxed/thin.rs
@@ -9,9 +9,8 @@ use core::intrinsics::const_allocate;
 use core::marker::PhantomData;
 #[cfg(not(no_global_oom_handling))]
 use core::marker::Unsize;
-use core::mem;
 #[cfg(not(no_global_oom_handling))]
-use core::mem::SizedTypeProperties;
+use core::mem::{self, SizedTypeProperties};
 use core::ops::{Deref, DerefMut};
 use core::ptr::{self, NonNull, Pointee};
 
@@ -30,7 +29,6 @@ use crate::alloc::{self, Layout, LayoutError};
 /// let five = ThinBox::new(5);
 /// let thin_slice = ThinBox::<[i32]>::new_unsize([1, 2, 3, 4]);
 ///
-/// use std::mem::{size_of, size_of_val};
 /// let size_of_ptr = size_of::<*const ()>();
 /// assert_eq!(size_of_ptr, size_of_val(&five));
 /// assert_eq!(size_of_ptr, size_of_val(&thin_slice));
@@ -114,7 +112,7 @@ impl<Dyn: ?Sized> ThinBox<Dyn> {
     where
         T: Unsize<Dyn>,
     {
-        if mem::size_of::<T>() == 0 {
+        if size_of::<T>() == 0 {
             let ptr = WithOpaqueHeader::new_unsize_zst::<Dyn, T>(value);
             ThinBox { ptr, _marker: PhantomData }
         } else {
@@ -283,9 +281,7 @@ impl<H> WithHeader<H> {
             let ptr = if layout.size() == 0 {
                 // Some paranoia checking, mostly so that the ThinBox tests are
                 // more able to catch issues.
-                debug_assert!(
-                    value_offset == 0 && mem::size_of::<T>() == 0 && mem::size_of::<H>() == 0
-                );
+                debug_assert!(value_offset == 0 && size_of::<T>() == 0 && size_of::<H>() == 0);
                 layout.dangling()
             } else {
                 let ptr = alloc::alloc(layout);
@@ -315,7 +311,7 @@ impl<H> WithHeader<H> {
         Dyn: Pointee<Metadata = H> + ?Sized,
         T: Unsize<Dyn>,
     {
-        assert!(mem::size_of::<T>() == 0);
+        assert!(size_of::<T>() == 0);
 
         const fn max(a: usize, b: usize) -> usize {
             if a > b { a } else { b }
@@ -329,18 +325,16 @@ impl<H> WithHeader<H> {
             // FIXME: just call `WithHeader::alloc_layout` with size reset to 0.
             // Currently that's blocked on `Layout::extend` not being `const fn`.
 
-            let alloc_align =
-                max(mem::align_of::<T>(), mem::align_of::<<Dyn as Pointee>::Metadata>());
+            let alloc_align = max(align_of::<T>(), align_of::<<Dyn as Pointee>::Metadata>());
 
-            let alloc_size =
-                max(mem::align_of::<T>(), mem::size_of::<<Dyn as Pointee>::Metadata>());
+            let alloc_size = max(align_of::<T>(), size_of::<<Dyn as Pointee>::Metadata>());
 
             unsafe {
                 // SAFETY: align is power of two because it is the maximum of two alignments.
                 let alloc: *mut u8 = const_allocate(alloc_size, alloc_align);
 
                 let metadata_offset =
-                    alloc_size.checked_sub(mem::size_of::<<Dyn as Pointee>::Metadata>()).unwrap();
+                    alloc_size.checked_sub(size_of::<<Dyn as Pointee>::Metadata>()).unwrap();
                 // SAFETY: adding offset within the allocation.
                 let metadata_ptr: *mut <Dyn as Pointee>::Metadata =
                     alloc.add(metadata_offset).cast();
@@ -421,7 +415,7 @@ impl<H> WithHeader<H> {
     }
 
     const fn header_size() -> usize {
-        mem::size_of::<H>()
+        size_of::<H>()
     }
 
     fn alloc_layout(value_layout: Layout) -> Result<(Layout, usize), LayoutError> {
diff --git a/library/alloc/src/collections/btree/node/tests.rs b/library/alloc/src/collections/btree/node/tests.rs
index ecd009f11c7..7d1a2ea4809 100644
--- a/library/alloc/src/collections/btree/node/tests.rs
+++ b/library/alloc/src/collections/btree/node/tests.rs
@@ -92,8 +92,8 @@ fn test_partial_eq() {
 #[cfg(target_arch = "x86_64")]
 #[cfg_attr(any(miri, randomized_layouts), ignore)] // We'd like to run Miri with layout randomization
 fn test_sizes() {
-    assert_eq!(core::mem::size_of::<LeafNode<(), ()>>(), 16);
-    assert_eq!(core::mem::size_of::<LeafNode<i64, i64>>(), 16 + CAPACITY * 2 * 8);
-    assert_eq!(core::mem::size_of::<InternalNode<(), ()>>(), 16 + (CAPACITY + 1) * 8);
-    assert_eq!(core::mem::size_of::<InternalNode<i64, i64>>(), 16 + (CAPACITY * 3 + 1) * 8);
+    assert_eq!(size_of::<LeafNode<(), ()>>(), 16);
+    assert_eq!(size_of::<LeafNode<i64, i64>>(), 16 + CAPACITY * 2 * 8);
+    assert_eq!(size_of::<InternalNode<(), ()>>(), 16 + (CAPACITY + 1) * 8);
+    assert_eq!(size_of::<InternalNode<i64, i64>>(), 16 + (CAPACITY * 3 + 1) * 8);
 }
diff --git a/library/alloc/src/raw_vec.rs b/library/alloc/src/raw_vec.rs
index b80d1fc7889..70f32fbaab4 100644
--- a/library/alloc/src/raw_vec.rs
+++ b/library/alloc/src/raw_vec.rs
@@ -480,7 +480,7 @@ impl<A: Allocator> RawVecInner<A> {
 
         // Allocators currently return a `NonNull<[u8]>` whose length
         // matches the size requested. If that ever changes, the capacity
-        // here should change to `ptr.len() / mem::size_of::<T>()`.
+        // here should change to `ptr.len() / size_of::<T>()`.
         Ok(Self {
             ptr: Unique::from(ptr.cast()),
             cap: unsafe { Cap::new_unchecked(capacity) },
@@ -627,7 +627,7 @@ impl<A: Allocator> RawVecInner<A> {
     unsafe fn set_ptr_and_cap(&mut self, ptr: NonNull<[u8]>, cap: usize) {
         // Allocators currently return a `NonNull<[u8]>` whose length matches
         // the size requested. If that ever changes, the capacity here should
-        // change to `ptr.len() / mem::size_of::<T>()`.
+        // change to `ptr.len() / size_of::<T>()`.
         self.ptr = Unique::from(ptr.cast());
         self.cap = unsafe { Cap::new_unchecked(cap) };
     }
diff --git a/library/alloc/src/raw_vec/tests.rs b/library/alloc/src/raw_vec/tests.rs
index d78ded104fb..700fa922739 100644
--- a/library/alloc/src/raw_vec/tests.rs
+++ b/library/alloc/src/raw_vec/tests.rs
@@ -1,4 +1,3 @@
-use core::mem::size_of;
 use std::cell::Cell;
 
 use super::*;
@@ -93,7 +92,7 @@ fn zst_sanity<T>(v: &RawVec<T>) {
 fn zst() {
     let cap_err = Err(crate::collections::TryReserveErrorKind::CapacityOverflow.into());
 
-    assert_eq!(std::mem::size_of::<ZST>(), 0);
+    assert_eq!(size_of::<ZST>(), 0);
 
     // All these different ways of creating the RawVec produce the same thing.
 
diff --git a/library/alloc/src/slice.rs b/library/alloc/src/slice.rs
index dcd95ddf00f..8baf9685062 100644
--- a/library/alloc/src/slice.rs
+++ b/library/alloc/src/slice.rs
@@ -16,7 +16,7 @@ use core::borrow::{Borrow, BorrowMut};
 #[cfg(not(no_global_oom_handling))]
 use core::cmp::Ordering::{self, Less};
 #[cfg(not(no_global_oom_handling))]
-use core::mem::{self, MaybeUninit};
+use core::mem::MaybeUninit;
 #[cfg(not(no_global_oom_handling))]
 use core::ptr;
 #[unstable(feature = "array_chunks", issue = "74985")]
@@ -446,7 +446,7 @@ impl<T> [T] {
         // Avoids binary-size usage in cases where the alignment doesn't work out to make this
         // beneficial or on 32-bit platforms.
         let is_using_u32_as_idx_type_helpful =
-            const { mem::size_of::<(K, u32)>() < mem::size_of::<(K, usize)>() };
+            const { size_of::<(K, u32)>() < size_of::<(K, usize)>() };
 
         // It's possible to instantiate this for u8 and u16 but, doing so is very wasteful in terms
         // of compile-times and binary-size, the peak saved heap memory for u16 is (u8 + u16) -> 4
diff --git a/library/alloc/src/string.rs b/library/alloc/src/string.rs
index fc4b93ccf8c..2c034786549 100644
--- a/library/alloc/src/string.rs
+++ b/library/alloc/src/string.rs
@@ -119,8 +119,6 @@ use crate::vec::{self, Vec};
 /// the same `char`s:
 ///
 /// ```
-/// use std::mem;
-///
 /// // `s` is ASCII which represents each `char` as one byte
 /// let s = "hello";
 /// assert_eq!(s.len(), 5);
@@ -128,7 +126,7 @@ use crate::vec::{self, Vec};
 /// // A `char` array with the same contents would be longer because
 /// // every `char` is four bytes
 /// let s = ['h', 'e', 'l', 'l', 'o'];
-/// let size: usize = s.into_iter().map(|c| mem::size_of_val(&c)).sum();
+/// let size: usize = s.into_iter().map(|c| size_of_val(&c)).sum();
 /// assert_eq!(size, 20);
 ///
 /// // However, for non-ASCII strings, the difference will be smaller
@@ -137,7 +135,7 @@ use crate::vec::{self, Vec};
 /// assert_eq!(s.len(), 20);
 ///
 /// let s = ['💖', '💖', '💖', '💖', '💖'];
-/// let size: usize = s.into_iter().map(|c| mem::size_of_val(&c)).sum();
+/// let size: usize = s.into_iter().map(|c| size_of_val(&c)).sum();
 /// assert_eq!(size, 20);
 /// ```
 ///
diff --git a/library/alloc/src/sync.rs b/library/alloc/src/sync.rs
index dba1449347a..1956dda5388 100644
--- a/library/alloc/src/sync.rs
+++ b/library/alloc/src/sync.rs
@@ -2274,7 +2274,7 @@ impl<T: ?Sized + CloneToUninit, A: Allocator + Clone> Arc<T, A> {
     #[inline]
     #[stable(feature = "arc_unique", since = "1.4.0")]
     pub fn make_mut(this: &mut Self) -> &mut T {
-        let size_of_val = mem::size_of_val::<T>(&**this);
+        let size_of_val = size_of_val::<T>(&**this);
 
         // Note that we hold both a strong reference and a weak reference.
         // Thus, releasing our strong reference only will not, by itself, cause
@@ -3544,7 +3544,7 @@ impl<T> Default for Arc<[T]> {
     /// This may or may not share an allocation with other Arcs.
     #[inline]
     fn default() -> Self {
-        if mem::align_of::<T>() <= MAX_STATIC_INNER_SLICE_ALIGNMENT {
+        if align_of::<T>() <= MAX_STATIC_INNER_SLICE_ALIGNMENT {
             // We take a reference to the whole struct instead of the ArcInner<[u8; 1]> inside it so
             // we don't shrink the range of bytes the ptr is allowed to access under Stacked Borrows.
             // (Miri complains on 32-bit targets with Arc<[Align16]> otherwise.)
diff --git a/library/alloc/src/vec/in_place_collect.rs b/library/alloc/src/vec/in_place_collect.rs
index dffd85f13aa..b98a118048f 100644
--- a/library/alloc/src/vec/in_place_collect.rs
+++ b/library/alloc/src/vec/in_place_collect.rs
@@ -171,7 +171,7 @@ const fn in_place_collectible<DEST, SRC>(
 ) -> bool {
     // Require matching alignments because an alignment-changing realloc is inefficient on many
     // system allocators and better implementations would require the unstable Allocator trait.
-    if const { SRC::IS_ZST || DEST::IS_ZST || mem::align_of::<SRC>() != mem::align_of::<DEST>() } {
+    if const { SRC::IS_ZST || DEST::IS_ZST || align_of::<SRC>() != align_of::<DEST>() } {
         return false;
     }
 
@@ -181,7 +181,7 @@ const fn in_place_collectible<DEST, SRC>(
             // e.g.
             // - 1 x [u8; 4] -> 4x u8, via flatten
             // - 4 x u8 -> 1x [u8; 4], via array_chunks
-            mem::size_of::<SRC>() * step_merge.get() >= mem::size_of::<DEST>() * step_expand.get()
+            size_of::<SRC>() * step_merge.get() >= size_of::<DEST>() * step_expand.get()
         }
         // Fall back to other from_iter impls if an overflow occurred in the step merge/expansion
         // tracking.
@@ -190,7 +190,7 @@ const fn in_place_collectible<DEST, SRC>(
 }
 
 const fn needs_realloc<SRC, DEST>(src_cap: usize, dst_cap: usize) -> bool {
-    if const { mem::align_of::<SRC>() != mem::align_of::<DEST>() } {
+    if const { align_of::<SRC>() != align_of::<DEST>() } {
         // FIXME(const-hack): use unreachable! once that works in const
         panic!("in_place_collectible() prevents this");
     }
@@ -199,8 +199,8 @@ const fn needs_realloc<SRC, DEST>(src_cap: usize, dst_cap: usize) -> bool {
     // the caller will have calculated a `dst_cap` that is an integer multiple of
     // `src_cap` without remainder.
     if const {
-        let src_sz = mem::size_of::<SRC>();
-        let dest_sz = mem::size_of::<DEST>();
+        let src_sz = size_of::<SRC>();
+        let dest_sz = size_of::<DEST>();
         dest_sz != 0 && src_sz % dest_sz == 0
     } {
         return false;
@@ -208,7 +208,7 @@ const fn needs_realloc<SRC, DEST>(src_cap: usize, dst_cap: usize) -> bool {
 
     // type layouts don't guarantee a fit, so do a runtime check to see if
     // the allocations happen to match
-    src_cap > 0 && src_cap * mem::size_of::<SRC>() != dst_cap * mem::size_of::<DEST>()
+    src_cap > 0 && src_cap * size_of::<SRC>() != dst_cap * size_of::<DEST>()
 }
 
 /// This provides a shorthand for the source type since local type aliases aren't a thing.
@@ -262,7 +262,7 @@ where
             inner.buf.cast::<T>(),
             inner.end as *const T,
             // SAFETY: the multiplication can not overflow, since `inner.cap * size_of::<I::SRC>()` is the size of the allocation.
-            inner.cap.unchecked_mul(mem::size_of::<I::Src>()) / mem::size_of::<T>(),
+            inner.cap.unchecked_mul(size_of::<I::Src>()) / size_of::<T>(),
         )
     };
 
@@ -310,14 +310,14 @@ where
         debug_assert_ne!(dst_cap, 0);
         unsafe {
             // The old allocation exists, therefore it must have a valid layout.
-            let src_align = mem::align_of::<I::Src>();
-            let src_size = mem::size_of::<I::Src>().unchecked_mul(src_cap);
+            let src_align = align_of::<I::Src>();
+            let src_size = size_of::<I::Src>().unchecked_mul(src_cap);
             let old_layout = Layout::from_size_align_unchecked(src_size, src_align);
 
             // The allocation must be equal or smaller for in-place iteration to be possible
             // therefore the new layout must be ≤ the old one and therefore valid.
-            let dst_align = mem::align_of::<T>();
-            let dst_size = mem::size_of::<T>().unchecked_mul(dst_cap);
+            let dst_align = align_of::<T>();
+            let dst_size = size_of::<T>().unchecked_mul(dst_cap);
             let new_layout = Layout::from_size_align_unchecked(dst_size, dst_align);
 
             let result = alloc.shrink(dst_buf.cast(), old_layout, new_layout);
@@ -325,7 +325,7 @@ where
             dst_buf = reallocated.cast::<T>();
         }
     } else {
-        debug_assert_eq!(src_cap * mem::size_of::<I::Src>(), dst_cap * mem::size_of::<T>());
+        debug_assert_eq!(src_cap * size_of::<I::Src>(), dst_cap * size_of::<T>());
     }
 
     mem::forget(dst_guard);
diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs
index 701144cc3af..49878f2b6fa 100644
--- a/library/alloc/src/vec/mod.rs
+++ b/library/alloc/src/vec/mod.rs
@@ -293,7 +293,7 @@ mod spec_extend;
 /// on an empty Vec, it will not allocate memory. Similarly, if you store zero-sized
 /// types inside a `Vec`, it will not allocate space for them. *Note that in this case
 /// the `Vec` might not report a [`capacity`] of 0*. `Vec` will allocate if and only
-/// if <code>[mem::size_of::\<T>]\() * [capacity]\() > 0</code>. In general, `Vec`'s allocation
+/// if <code>[size_of::\<T>]\() * [capacity]\() > 0</code>. In general, `Vec`'s allocation
 /// details are very subtle --- if you intend to allocate memory using a `Vec`
 /// and use it for something else (either to pass to unsafe code, or to build your
 /// own memory-backed collection), be sure to deallocate this memory by using
@@ -393,7 +393,7 @@ mod spec_extend;
 /// [capacity]: Vec::capacity
 /// [`capacity`]: Vec::capacity
 /// [`Vec::capacity`]: Vec::capacity
-/// [mem::size_of::\<T>]: core::mem::size_of
+/// [size_of::\<T>]: size_of
 /// [len]: Vec::len
 /// [`len`]: Vec::len
 /// [`push`]: Vec::push
@@ -1573,7 +1573,7 @@ impl<T, A: Allocator> Vec<T, A> {
     pub const fn as_slice(&self) -> &[T] {
         // SAFETY: `slice::from_raw_parts` requires pointee is a contiguous, aligned buffer of size
         // `len` containing properly-initialized `T`s. Data must not be mutated for the returned
-        // lifetime. Further, `len * mem::size_of::<T>` <= `ISIZE::MAX`, and allocation does not
+        // lifetime. Further, `len * size_of::<T>` <= `isize::MAX`, and allocation does not
         // "wrap" through overflowing memory addresses.
         //
         // * Vec API guarantees that self.buf:
@@ -1605,7 +1605,7 @@ impl<T, A: Allocator> Vec<T, A> {
     pub const fn as_mut_slice(&mut self) -> &mut [T] {
         // SAFETY: `slice::from_raw_parts_mut` requires pointee is a contiguous, aligned buffer of
         // size `len` containing properly-initialized `T`s. Data must not be accessed through any
-        // other pointer for the returned lifetime. Further, `len * mem::size_of::<T>` <=
+        // other pointer for the returned lifetime. Further, `len * size_of::<T>` <=
         // `isize::MAX` and allocation does not "wrap" through overflowing memory addresses.
         //
         // * Vec API guarantees that self.buf:
@@ -2693,7 +2693,7 @@ impl<T, A: Allocator> Vec<T, A> {
         let len = self.len;
 
         // SAFETY: The maximum capacity of `Vec<T>` is `isize::MAX` bytes, so the maximum value can
-        // be returned is `usize::checked_div(mem::size_of::<T>()).unwrap_or(usize::MAX)`, which
+        // be returned is `usize::checked_div(size_of::<T>()).unwrap_or(usize::MAX)`, which
         // matches the definition of `T::MAX_SLICE_LEN`.
         unsafe { intrinsics::assume(len <= T::MAX_SLICE_LEN) };
 
diff --git a/library/alloc/tests/arc.rs b/library/alloc/tests/arc.rs
index a259c0131ec..0baa50f439b 100644
--- a/library/alloc/tests/arc.rs
+++ b/library/alloc/tests/arc.rs
@@ -1,7 +1,6 @@
 use std::any::Any;
 use std::cell::{Cell, RefCell};
 use std::iter::TrustedLen;
-use std::mem;
 use std::sync::{Arc, Weak};
 
 #[test]
@@ -129,7 +128,7 @@ fn shared_from_iter_trustedlen_normal() {
         let vec = iter.clone().collect::<Vec<_>>();
         let rc = iter.collect::<Rc<[_]>>();
         assert_eq!(&*vec, &*rc);
-        assert_eq!(mem::size_of::<Box<u16>>() * SHARED_ITER_MAX as usize, mem::size_of_val(&*rc));
+        assert_eq!(size_of::<Box<u16>>() * SHARED_ITER_MAX as usize, size_of_val(&*rc));
 
         // Clone a bit and let these get dropped.
         {
@@ -145,7 +144,7 @@ fn shared_from_iter_trustedlen_normal() {
         let vec = iter.clone().collect::<Vec<_>>();
         let rc = iter.collect::<Rc<[_]>>();
         assert_eq!(&*vec, &*rc);
-        assert_eq!(0, mem::size_of_val(&*rc));
+        assert_eq!(0, size_of_val(&*rc));
         {
             let _rc_2 = rc.clone();
             let _rc_3 = rc.clone();
diff --git a/library/alloc/tests/rc.rs b/library/alloc/tests/rc.rs
index 451765d7242..9d82a7621a2 100644
--- a/library/alloc/tests/rc.rs
+++ b/library/alloc/tests/rc.rs
@@ -1,7 +1,6 @@
 use std::any::Any;
 use std::cell::{Cell, RefCell};
 use std::iter::TrustedLen;
-use std::mem;
 use std::rc::{Rc, Weak};
 
 #[test]
@@ -125,7 +124,7 @@ fn shared_from_iter_trustedlen_normal() {
         let vec = iter.clone().collect::<Vec<_>>();
         let rc = iter.collect::<Rc<[_]>>();
         assert_eq!(&*vec, &*rc);
-        assert_eq!(mem::size_of::<Box<u16>>() * SHARED_ITER_MAX as usize, mem::size_of_val(&*rc));
+        assert_eq!(size_of::<Box<u16>>() * SHARED_ITER_MAX as usize, size_of_val(&*rc));
 
         // Clone a bit and let these get dropped.
         {
@@ -141,7 +140,7 @@ fn shared_from_iter_trustedlen_normal() {
         let vec = iter.clone().collect::<Vec<_>>();
         let rc = iter.collect::<Rc<[_]>>();
         assert_eq!(&*vec, &*rc);
-        assert_eq!(0, mem::size_of_val(&*rc));
+        assert_eq!(0, size_of_val(&*rc));
         {
             let _rc_2 = rc.clone();
             let _rc_3 = rc.clone();
diff --git a/library/alloc/tests/slice.rs b/library/alloc/tests/slice.rs
index f990a41b679..2516563187f 100644
--- a/library/alloc/tests/slice.rs
+++ b/library/alloc/tests/slice.rs
@@ -1,7 +1,7 @@
 use std::cmp::Ordering::{Equal, Greater, Less};
 use std::convert::identity;
 use std::rc::Rc;
-use std::{fmt, mem, panic};
+use std::{fmt, panic};
 
 fn square(n: usize) -> usize {
     n * n
@@ -73,7 +73,7 @@ fn test_len_divzero() {
     let v0: &[Z] = &[];
     let v1: &[Z] = &[[]];
     let v2: &[Z] = &[[], []];
-    assert_eq!(mem::size_of::<Z>(), 0);
+    assert_eq!(size_of::<Z>(), 0);
     assert_eq!(v0.len(), 0);
     assert_eq!(v1.len(), 1);
     assert_eq!(v2.len(), 2);
diff --git a/library/alloc/tests/sort/known_good_stable_sort.rs b/library/alloc/tests/sort/known_good_stable_sort.rs
index f8615435fc2..2df89146253 100644
--- a/library/alloc/tests/sort/known_good_stable_sort.rs
+++ b/library/alloc/tests/sort/known_good_stable_sort.rs
@@ -5,7 +5,7 @@
 // Based on https://github.com/voultapher/tiny-sort-rs.
 
 use alloc::alloc::{Layout, alloc, dealloc};
-use std::{mem, ptr};
+use std::ptr;
 
 /// Sort `v` preserving initial order of equal elements.
 ///
@@ -26,7 +26,7 @@ pub fn sort<T: Ord>(v: &mut [T]) {
 
 #[inline(always)]
 fn stable_sort<T, F: FnMut(&T, &T) -> bool>(v: &mut [T], mut is_less: F) {
-    if mem::size_of::<T>() == 0 {
+    if size_of::<T>() == 0 {
         return;
     }
 
@@ -166,7 +166,7 @@ struct BufGuard<T> {
 impl<T> BufGuard<T> {
     // SAFETY: The caller has to ensure that len is not 0 and that T is not a ZST.
     unsafe fn new(len: usize) -> Self {
-        debug_assert!(len > 0 && mem::size_of::<T>() > 0);
+        debug_assert!(len > 0 && size_of::<T>() > 0);
 
         // SAFETY: See function safety description.
         let layout = unsafe { unwrap_unchecked(Layout::array::<T>(len).ok()) };
diff --git a/library/alloc/tests/thin_box.rs b/library/alloc/tests/thin_box.rs
index e008b0cc357..4c46b614127 100644
--- a/library/alloc/tests/thin_box.rs
+++ b/library/alloc/tests/thin_box.rs
@@ -1,5 +1,4 @@
 use core::fmt::Debug;
-use core::mem::size_of;
 use std::boxed::ThinBox;
 
 #[test]
@@ -52,7 +51,7 @@ fn verify_aligned<T>(ptr: *const T) {
         ptr.is_aligned() && !ptr.is_null(),
         "misaligned ThinBox data; valid pointers to `{ty}` should be aligned to {align}: {ptr:p}",
         ty = core::any::type_name::<T>(),
-        align = core::mem::align_of::<T>(),
+        align = align_of::<T>(),
     );
 }
 
diff --git a/library/alloc/tests/vec.rs b/library/alloc/tests/vec.rs
index fe1db56414e..f430d979fa8 100644
--- a/library/alloc/tests/vec.rs
+++ b/library/alloc/tests/vec.rs
@@ -11,14 +11,14 @@ use std::borrow::Cow;
 use std::cell::Cell;
 use std::collections::TryReserveErrorKind::*;
 use std::fmt::Debug;
+use std::hint;
 use std::iter::InPlaceIterable;
-use std::mem::{size_of, swap};
+use std::mem::swap;
 use std::ops::Bound::*;
 use std::panic::{AssertUnwindSafe, catch_unwind};
 use std::rc::Rc;
 use std::sync::atomic::{AtomicU32, Ordering};
 use std::vec::{Drain, IntoIter};
-use std::{hint, mem};
 
 struct DropCounter<'a> {
     count: &'a mut u32,
@@ -1134,7 +1134,7 @@ fn test_into_iter_zst() {
     impl Drop for AlignedZstWithDrop {
         fn drop(&mut self) {
             let addr = self as *mut _ as usize;
-            assert!(hint::black_box(addr) % mem::align_of::<u64>() == 0);
+            assert!(hint::black_box(addr) % align_of::<u64>() == 0);
         }
     }
 
diff --git a/library/core/src/alloc/layout.rs b/library/core/src/alloc/layout.rs
index 17f4d68867e..1595a3af883 100644
--- a/library/core/src/alloc/layout.rs
+++ b/library/core/src/alloc/layout.rs
@@ -17,7 +17,7 @@ use crate::{assert_unsafe_precondition, fmt, mem};
 // * https://github.com/rust-lang/rust/pull/72189
 // * https://github.com/rust-lang/rust/pull/79827
 const fn size_align<T>() -> (usize, usize) {
-    (mem::size_of::<T>(), mem::align_of::<T>())
+    (size_of::<T>(), align_of::<T>())
 }
 
 /// Layout of a block of memory.
@@ -182,7 +182,7 @@ impl Layout {
     #[must_use]
     #[inline]
     pub const fn for_value<T: ?Sized>(t: &T) -> Self {
-        let (size, align) = (mem::size_of_val(t), mem::align_of_val(t));
+        let (size, align) = (size_of_val(t), align_of_val(t));
         // SAFETY: see rationale in `new` for why this is using the unsafe variant
         unsafe { Layout::from_size_align_unchecked(size, align) }
     }
diff --git a/library/core/src/char/convert.rs b/library/core/src/char/convert.rs
index 73ab4f1e52a..ac808038f89 100644
--- a/library/core/src/char/convert.rs
+++ b/library/core/src/char/convert.rs
@@ -40,11 +40,9 @@ impl From<char> for u32 {
     /// # Examples
     ///
     /// ```
-    /// use std::mem;
-    ///
     /// let c = 'c';
     /// let u = u32::from(c);
-    /// assert!(4 == mem::size_of_val(&u))
+    /// assert!(4 == size_of_val(&u))
     /// ```
     #[inline]
     fn from(c: char) -> Self {
@@ -59,11 +57,9 @@ impl From<char> for u64 {
     /// # Examples
     ///
     /// ```
-    /// use std::mem;
-    ///
     /// let c = '👤';
     /// let u = u64::from(c);
-    /// assert!(8 == mem::size_of_val(&u))
+    /// assert!(8 == size_of_val(&u))
     /// ```
     #[inline]
     fn from(c: char) -> Self {
@@ -80,11 +76,9 @@ impl From<char> for u128 {
     /// # Examples
     ///
     /// ```
-    /// use std::mem;
-    ///
     /// let c = '⚙';
     /// let u = u128::from(c);
-    /// assert!(16 == mem::size_of_val(&u))
+    /// assert!(16 == size_of_val(&u))
     /// ```
     #[inline]
     fn from(c: char) -> Self {
@@ -167,11 +161,9 @@ impl From<u8> for char {
     /// # Examples
     ///
     /// ```
-    /// use std::mem;
-    ///
     /// let u = 32 as u8;
     /// let c = char::from(u);
-    /// assert!(4 == mem::size_of_val(&c))
+    /// assert!(4 == size_of_val(&c))
     /// ```
     #[inline]
     fn from(i: u8) -> Self {
diff --git a/library/core/src/clone.rs b/library/core/src/clone.rs
index 00300328b64..9d64348289c 100644
--- a/library/core/src/clone.rs
+++ b/library/core/src/clone.rs
@@ -244,8 +244,8 @@ pub unsafe trait CloneToUninit {
     ///
     /// Behavior is undefined if any of the following conditions are violated:
     ///
-    /// * `dst` must be [valid] for writes for `std::mem::size_of_val(self)` bytes.
-    /// * `dst` must be properly aligned to `std::mem::align_of_val(self)`.
+    /// * `dst` must be [valid] for writes for `size_of_val(self)` bytes.
+    /// * `dst` must be properly aligned to `align_of_val(self)`.
     ///
     /// [valid]: crate::ptr#safety
     /// [pointer metadata]: crate::ptr::metadata()
diff --git a/library/core/src/hash/mod.rs b/library/core/src/hash/mod.rs
index 7a6630c82d0..f7b874b26bb 100644
--- a/library/core/src/hash/mod.rs
+++ b/library/core/src/hash/mod.rs
@@ -801,7 +801,7 @@ impl<H> Eq for BuildHasherDefault<H> {}
 
 mod impls {
     use super::*;
-    use crate::{mem, slice};
+    use crate::slice;
 
     macro_rules! impl_write {
         ($(($ty:ident, $meth:ident),)*) => {$(
@@ -814,7 +814,7 @@ mod impls {
 
                 #[inline]
                 fn hash_slice<H: Hasher>(data: &[$ty], state: &mut H) {
-                    let newlen = mem::size_of_val(data);
+                    let newlen = size_of_val(data);
                     let ptr = data.as_ptr() as *const u8;
                     // SAFETY: `ptr` is valid and aligned, as this macro is only used
                     // for numeric primitives which have no padding. The new slice only
diff --git a/library/core/src/hash/sip.rs b/library/core/src/hash/sip.rs
index 6ea3241c593..780e522c48e 100644
--- a/library/core/src/hash/sip.rs
+++ b/library/core/src/hash/sip.rs
@@ -3,7 +3,7 @@
 #![allow(deprecated)] // the types in this module are deprecated
 
 use crate::marker::PhantomData;
-use crate::{cmp, mem, ptr};
+use crate::{cmp, ptr};
 
 /// An implementation of SipHash 1-3.
 ///
@@ -99,12 +99,12 @@ macro_rules! compress {
 /// `$i..$i+size_of::<$int_ty>()`, so that must be in-bounds.
 macro_rules! load_int_le {
     ($buf:expr, $i:expr, $int_ty:ident) => {{
-        debug_assert!($i + mem::size_of::<$int_ty>() <= $buf.len());
+        debug_assert!($i + size_of::<$int_ty>() <= $buf.len());
         let mut data = 0 as $int_ty;
         ptr::copy_nonoverlapping(
             $buf.as_ptr().add($i),
             &mut data as *mut _ as *mut u8,
-            mem::size_of::<$int_ty>(),
+            size_of::<$int_ty>(),
         );
         data.to_le()
     }};
diff --git a/library/core/src/intrinsics/mod.rs b/library/core/src/intrinsics/mod.rs
index c5603298a70..6af647b137d 100644
--- a/library/core/src/intrinsics/mod.rs
+++ b/library/core/src/intrinsics/mod.rs
@@ -3340,7 +3340,7 @@ pub unsafe fn vtable_align(_ptr: *const ()) -> usize;
 /// More specifically, this is the offset in bytes between successive
 /// items of the same type, including alignment padding.
 ///
-/// The stabilized version of this intrinsic is [`core::mem::size_of`].
+/// The stabilized version of this intrinsic is [`size_of`].
 #[rustc_nounwind]
 #[unstable(feature = "core_intrinsics", issue = "none")]
 #[rustc_intrinsic_const_stable_indirect]
@@ -3354,7 +3354,7 @@ pub const fn size_of<T>() -> usize;
 /// Therefore, implementations must not require the user to uphold
 /// any safety invariants.
 ///
-/// The stabilized version of this intrinsic is [`core::mem::align_of`].
+/// The stabilized version of this intrinsic is [`align_of`].
 #[rustc_nounwind]
 #[unstable(feature = "core_intrinsics", issue = "none")]
 #[rustc_intrinsic_const_stable_indirect]
@@ -3386,7 +3386,7 @@ pub const fn variant_count<T>() -> usize;
 
 /// The size of the referenced value in bytes.
 ///
-/// The stabilized version of this intrinsic is [`crate::mem::size_of_val`].
+/// The stabilized version of this intrinsic is [`size_of_val`].
 ///
 /// # Safety
 ///
@@ -3399,7 +3399,7 @@ pub const unsafe fn size_of_val<T: ?Sized>(_ptr: *const T) -> usize;
 
 /// The required alignment of the referenced value.
 ///
-/// The stabilized version of this intrinsic is [`core::mem::align_of_val`].
+/// The stabilized version of this intrinsic is [`align_of_val`].
 ///
 /// # Safety
 ///
diff --git a/library/core/src/iter/adapters/map_windows.rs b/library/core/src/iter/adapters/map_windows.rs
index cb13023c85c..a9c07fee2a9 100644
--- a/library/core/src/iter/adapters/map_windows.rs
+++ b/library/core/src/iter/adapters/map_windows.rs
@@ -1,5 +1,5 @@
 use crate::iter::FusedIterator;
-use crate::mem::{self, MaybeUninit};
+use crate::mem::MaybeUninit;
 use crate::{fmt, ptr};
 
 /// An iterator over the mapped windows of another iterator.
@@ -50,7 +50,7 @@ impl<I: Iterator, F, const N: usize> MapWindows<I, F, N> {
         assert!(N != 0, "array in `Iterator::map_windows` must contain more than 0 elements");
 
         // Only ZST arrays' length can be so large.
-        if mem::size_of::<I::Item>() == 0 {
+        if size_of::<I::Item>() == 0 {
             assert!(
                 N.checked_mul(2).is_some(),
                 "array size of `Iterator::map_windows` is too large"
diff --git a/library/core/src/marker.rs b/library/core/src/marker.rs
index e2dd813981d..e234f105b0b 100644
--- a/library/core/src/marker.rs
+++ b/library/core/src/marker.rs
@@ -405,7 +405,7 @@ marker_impls! {
 ///
 /// [`Vec<T>`]: ../../std/vec/struct.Vec.html
 /// [`String`]: ../../std/string/struct.String.html
-/// [`size_of::<T>`]: crate::mem::size_of
+/// [`size_of::<T>`]: size_of
 /// [impls]: #implementors
 #[stable(feature = "rust1", since = "1.0.0")]
 #[lang = "copy"]
@@ -731,7 +731,6 @@ impl<T: ?Sized> !Sync for *mut T {}
 /// # }
 /// # fn convert_params(_: ParamType) -> usize { 42 }
 /// use std::marker::PhantomData;
-/// use std::mem;
 ///
 /// struct ExternalResource<R> {
 ///    resource_handle: *mut (),
@@ -740,7 +739,7 @@ impl<T: ?Sized> !Sync for *mut T {}
 ///
 /// impl<R: ResType> ExternalResource<R> {
 ///     fn new() -> Self {
-///         let size_of_res = mem::size_of::<R>();
+///         let size_of_res = size_of::<R>();
 ///         Self {
 ///             resource_handle: foreign_lib::new(size_of_res),
 ///             resource_type: PhantomData,
diff --git a/library/core/src/mem/maybe_uninit.rs b/library/core/src/mem/maybe_uninit.rs
index 067371c1b58..ce84f105e5c 100644
--- a/library/core/src/mem/maybe_uninit.rs
+++ b/library/core/src/mem/maybe_uninit.rs
@@ -203,7 +203,7 @@ use crate::{fmt, intrinsics, ptr, slice};
 /// `MaybeUninit<T>` is guaranteed to have the same size, alignment, and ABI as `T`:
 ///
 /// ```rust
-/// use std::mem::{MaybeUninit, size_of, align_of};
+/// use std::mem::MaybeUninit;
 /// assert_eq!(size_of::<MaybeUninit<u64>>(), size_of::<u64>());
 /// assert_eq!(align_of::<MaybeUninit<u64>>(), align_of::<u64>());
 /// ```
@@ -215,7 +215,7 @@ use crate::{fmt, intrinsics, ptr, slice};
 /// optimizations, potentially resulting in a larger size:
 ///
 /// ```rust
-/// # use std::mem::{MaybeUninit, size_of};
+/// # use std::mem::MaybeUninit;
 /// assert_eq!(size_of::<Option<bool>>(), 1);
 /// assert_eq!(size_of::<Option<MaybeUninit<bool>>>(), 2);
 /// ```
diff --git a/library/core/src/mem/mod.rs b/library/core/src/mem/mod.rs
index b9bb6d6a13f..caab7a6ddb5 100644
--- a/library/core/src/mem/mod.rs
+++ b/library/core/src/mem/mod.rs
@@ -226,31 +226,27 @@ pub fn forget_unsized<T: ?Sized>(t: T) {
 /// # Examples
 ///
 /// ```
-/// use std::mem;
-///
 /// // Some primitives
-/// assert_eq!(4, mem::size_of::<i32>());
-/// assert_eq!(8, mem::size_of::<f64>());
-/// assert_eq!(0, mem::size_of::<()>());
+/// assert_eq!(4, size_of::<i32>());
+/// assert_eq!(8, size_of::<f64>());
+/// assert_eq!(0, size_of::<()>());
 ///
 /// // Some arrays
-/// assert_eq!(8, mem::size_of::<[i32; 2]>());
-/// assert_eq!(12, mem::size_of::<[i32; 3]>());
-/// assert_eq!(0, mem::size_of::<[i32; 0]>());
+/// assert_eq!(8, size_of::<[i32; 2]>());
+/// assert_eq!(12, size_of::<[i32; 3]>());
+/// assert_eq!(0, size_of::<[i32; 0]>());
 ///
 ///
 /// // Pointer size equality
-/// assert_eq!(mem::size_of::<&i32>(), mem::size_of::<*const i32>());
-/// assert_eq!(mem::size_of::<&i32>(), mem::size_of::<Box<i32>>());
-/// assert_eq!(mem::size_of::<&i32>(), mem::size_of::<Option<&i32>>());
-/// assert_eq!(mem::size_of::<Box<i32>>(), mem::size_of::<Option<Box<i32>>>());
+/// assert_eq!(size_of::<&i32>(), size_of::<*const i32>());
+/// assert_eq!(size_of::<&i32>(), size_of::<Box<i32>>());
+/// assert_eq!(size_of::<&i32>(), size_of::<Option<&i32>>());
+/// assert_eq!(size_of::<Box<i32>>(), size_of::<Option<Box<i32>>>());
 /// ```
 ///
 /// Using `#[repr(C)]`.
 ///
 /// ```
-/// use std::mem;
-///
 /// #[repr(C)]
 /// struct FieldStruct {
 ///     first: u8,
@@ -265,13 +261,13 @@ pub fn forget_unsized<T: ?Sized>(t: T) {
 /// // The size of the third field is 1, so add 1 to the size. Size is 5.
 /// // Finally, the alignment of the struct is 2 (because the largest alignment amongst its
 /// // fields is 2), so add 1 to the size for padding. Size is 6.
-/// assert_eq!(6, mem::size_of::<FieldStruct>());
+/// assert_eq!(6, size_of::<FieldStruct>());
 ///
 /// #[repr(C)]
 /// struct TupleStruct(u8, u16, u8);
 ///
 /// // Tuple structs follow the same rules.
-/// assert_eq!(6, mem::size_of::<TupleStruct>());
+/// assert_eq!(6, size_of::<TupleStruct>());
 ///
 /// // Note that reordering the fields can lower the size. We can remove both padding bytes
 /// // by putting `third` before `second`.
@@ -282,7 +278,7 @@ pub fn forget_unsized<T: ?Sized>(t: T) {
 ///     second: u16
 /// }
 ///
-/// assert_eq!(4, mem::size_of::<FieldStructOptimized>());
+/// assert_eq!(4, size_of::<FieldStructOptimized>());
 ///
 /// // Union size is the size of the largest field.
 /// #[repr(C)]
@@ -291,7 +287,7 @@ pub fn forget_unsized<T: ?Sized>(t: T) {
 ///     larger: u16
 /// }
 ///
-/// assert_eq!(2, mem::size_of::<ExampleUnion>());
+/// assert_eq!(2, size_of::<ExampleUnion>());
 /// ```
 ///
 /// [alignment]: align_of
@@ -320,13 +316,11 @@ pub const fn size_of<T>() -> usize {
 /// # Examples
 ///
 /// ```
-/// use std::mem;
-///
-/// assert_eq!(4, mem::size_of_val(&5i32));
+/// assert_eq!(4, size_of_val(&5i32));
 ///
 /// let x: [u8; 13] = [0; 13];
 /// let y: &[u8] = &x;
-/// assert_eq!(13, mem::size_of_val(y));
+/// assert_eq!(13, size_of_val(y));
 /// ```
 ///
 /// [`size_of::<T>()`]: size_of
@@ -381,7 +375,7 @@ pub const fn size_of_val<T: ?Sized>(val: &T) -> usize {
 /// #![feature(layout_for_ptr)]
 /// use std::mem;
 ///
-/// assert_eq!(4, mem::size_of_val(&5i32));
+/// assert_eq!(4, size_of_val(&5i32));
 ///
 /// let x: [u8; 13] = [0; 13];
 /// let y: &[u8] = &x;
@@ -454,9 +448,7 @@ pub fn min_align_of_val<T: ?Sized>(val: &T) -> usize {
 /// # Examples
 ///
 /// ```
-/// use std::mem;
-///
-/// assert_eq!(4, mem::align_of::<i32>());
+/// assert_eq!(4, align_of::<i32>());
 /// ```
 #[inline(always)]
 #[must_use]
@@ -477,9 +469,7 @@ pub const fn align_of<T>() -> usize {
 /// # Examples
 ///
 /// ```
-/// use std::mem;
-///
-/// assert_eq!(4, mem::align_of_val(&5i32));
+/// assert_eq!(4, align_of_val(&5i32));
 /// ```
 #[inline]
 #[must_use]
diff --git a/library/core/src/mem/transmutability.rs b/library/core/src/mem/transmutability.rs
index 7b920d7a777..782b826448a 100644
--- a/library/core/src/mem/transmutability.rs
+++ b/library/core/src/mem/transmutability.rs
@@ -153,7 +153,7 @@ pub struct Assume {
     ///
     /// ```compile_fail,E0277
     /// #![feature(transmutability)]
-    /// use core::mem::{align_of, TransmuteFrom};
+    /// use core::mem::TransmuteFrom;
     ///
     /// assert_eq!(align_of::<[u8; 2]>(), 1);
     /// assert_eq!(align_of::<u16>(), 2);
@@ -172,7 +172,7 @@ pub struct Assume {
     ///
     /// ```rust
     /// #![feature(pointer_is_aligned_to, transmutability)]
-    /// use core::mem::{align_of, Assume, TransmuteFrom};
+    /// use core::mem::{Assume, TransmuteFrom};
     ///
     /// let src: &[u8; 2] = &[0xFF, 0xFF];
     ///
@@ -337,7 +337,7 @@ impl Assume {
     ///     transmutability,
     /// )]
     /// #![allow(incomplete_features)]
-    /// use core::mem::{align_of, Assume, TransmuteFrom};
+    /// use core::mem::{Assume, TransmuteFrom};
     ///
     /// /// Attempts to transmute `src` to `&Dst`.
     /// ///
diff --git a/library/core/src/num/bignum.rs b/library/core/src/num/bignum.rs
index 2a47c89e2ae..40e6eaf075e 100644
--- a/library/core/src/num/bignum.rs
+++ b/library/core/src/num/bignum.rs
@@ -253,12 +253,11 @@ macro_rules! define_bignum {
 
             /// Multiplies itself by `5^e` and returns its own mutable reference.
             pub fn mul_pow5(&mut self, mut e: usize) -> &mut $name {
-                use crate::mem;
                 use crate::num::bignum::SMALL_POW5;
 
                 // There are exactly n trailing zeros on 2^n, and the only relevant digit sizes
                 // are consecutive powers of two, so this is well suited index for the table.
-                let table_index = mem::size_of::<$ty>().trailing_zeros() as usize;
+                let table_index = size_of::<$ty>().trailing_zeros() as usize;
                 let (small_power, small_e) = SMALL_POW5[table_index];
                 let small_power = small_power as $ty;
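The table lookup works because each digit type's byte width is a power of two, so `trailing_zeros()` is simply its log2; a quick check of that correspondence (the `SMALL_POW5` contents themselves are not reproduced here):

```rust
fn main() {
    // log2 of the byte width: 0 for u8, 1 for u16, 2 for u32, 3 for u64.
    assert_eq!(size_of::<u8>().trailing_zeros(), 0);
    assert_eq!(size_of::<u16>().trailing_zeros(), 1);
    assert_eq!(size_of::<u32>().trailing_zeros(), 2);
    assert_eq!(size_of::<u64>().trailing_zeros(), 3);
}
```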
 
diff --git a/library/core/src/num/dec2flt/fpu.rs b/library/core/src/num/dec2flt/fpu.rs
index daeee1755b0..8aad087ec1b 100644
--- a/library/core/src/num/dec2flt/fpu.rs
+++ b/library/core/src/num/dec2flt/fpu.rs
@@ -22,7 +22,6 @@ pub(super) use fpu_precision::set_precision;
 #[cfg(all(target_arch = "x86", not(target_feature = "sse2")))]
 mod fpu_precision {
     use core::arch::asm;
-    use core::mem::size_of;
 
     /// A structure used to preserve the original value of the FPU control word, so that it can be
     /// restored when the structure is dropped.
diff --git a/library/core/src/num/int_macros.rs b/library/core/src/num/int_macros.rs
index 7d99aaa1731..a72ca4bcb05 100644
--- a/library/core/src/num/int_macros.rs
+++ b/library/core/src/num/int_macros.rs
@@ -3627,7 +3627,7 @@ macro_rules! int_impl {
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline]
-        pub const fn to_be_bytes(self) -> [u8; mem::size_of::<Self>()] {
+        pub const fn to_be_bytes(self) -> [u8; size_of::<Self>()] {
             self.to_be().to_ne_bytes()
         }
 
@@ -3647,7 +3647,7 @@ macro_rules! int_impl {
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline]
-        pub const fn to_le_bytes(self) -> [u8; mem::size_of::<Self>()] {
+        pub const fn to_le_bytes(self) -> [u8; size_of::<Self>()] {
             self.to_le().to_ne_bytes()
         }
 
@@ -3683,7 +3683,7 @@ macro_rules! int_impl {
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline]
-        pub const fn to_ne_bytes(self) -> [u8; mem::size_of::<Self>()] {
+        pub const fn to_ne_bytes(self) -> [u8; size_of::<Self>()] {
             // SAFETY: integers are plain old datatypes so we can always transmute them to
             // arrays of bytes
             unsafe { mem::transmute(self) }
@@ -3705,7 +3705,7 @@ macro_rules! int_impl {
         ///
         /// ```
         #[doc = concat!("fn read_be_", stringify!($SelfT), "(input: &mut &[u8]) -> ", stringify!($SelfT), " {")]
-        #[doc = concat!("    let (int_bytes, rest) = input.split_at(std::mem::size_of::<", stringify!($SelfT), ">());")]
+        #[doc = concat!("    let (int_bytes, rest) = input.split_at(size_of::<", stringify!($SelfT), ">());")]
         ///     *input = rest;
         #[doc = concat!("    ", stringify!($SelfT), "::from_be_bytes(int_bytes.try_into().unwrap())")]
         /// }
@@ -3714,7 +3714,7 @@ macro_rules! int_impl {
         #[rustc_const_stable(feature = "const_int_conversion", since = "1.44.0")]
         #[must_use]
         #[inline]
-        pub const fn from_be_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
+        pub const fn from_be_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
             Self::from_be(Self::from_ne_bytes(bytes))
         }
 
@@ -3734,7 +3734,7 @@ macro_rules! int_impl {
         ///
         /// ```
         #[doc = concat!("fn read_le_", stringify!($SelfT), "(input: &mut &[u8]) -> ", stringify!($SelfT), " {")]
-        #[doc = concat!("    let (int_bytes, rest) = input.split_at(std::mem::size_of::<", stringify!($SelfT), ">());")]
+        #[doc = concat!("    let (int_bytes, rest) = input.split_at(size_of::<", stringify!($SelfT), ">());")]
         ///     *input = rest;
         #[doc = concat!("    ", stringify!($SelfT), "::from_le_bytes(int_bytes.try_into().unwrap())")]
         /// }
@@ -3743,7 +3743,7 @@ macro_rules! int_impl {
         #[rustc_const_stable(feature = "const_int_conversion", since = "1.44.0")]
         #[must_use]
         #[inline]
-        pub const fn from_le_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
+        pub const fn from_le_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
             Self::from_le(Self::from_ne_bytes(bytes))
         }
 
@@ -3774,7 +3774,7 @@ macro_rules! int_impl {
         ///
         /// ```
         #[doc = concat!("fn read_ne_", stringify!($SelfT), "(input: &mut &[u8]) -> ", stringify!($SelfT), " {")]
-        #[doc = concat!("    let (int_bytes, rest) = input.split_at(std::mem::size_of::<", stringify!($SelfT), ">());")]
+        #[doc = concat!("    let (int_bytes, rest) = input.split_at(size_of::<", stringify!($SelfT), ">());")]
         ///     *input = rest;
         #[doc = concat!("    ", stringify!($SelfT), "::from_ne_bytes(int_bytes.try_into().unwrap())")]
         /// }
@@ -3785,7 +3785,7 @@ macro_rules! int_impl {
         // SAFETY: const sound because integers are plain old datatypes so we can always
         // transmute to them
         #[inline]
-        pub const fn from_ne_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
+        pub const fn from_ne_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
             // SAFETY: integers are plain old datatypes so we can always transmute to them
             unsafe { mem::transmute(bytes) }
         }
diff --git a/library/core/src/num/mod.rs b/library/core/src/num/mod.rs
index 80a38a6013d..151e128cd78 100644
--- a/library/core/src/num/mod.rs
+++ b/library/core/src/num/mod.rs
@@ -1241,7 +1241,7 @@ impl usize {
     /// Returns an `usize` where every byte is equal to `x`.
     #[inline]
     pub(crate) const fn repeat_u8(x: u8) -> usize {
-        usize::from_ne_bytes([x; mem::size_of::<usize>()])
+        usize::from_ne_bytes([x; size_of::<usize>()])
     }
 
     /// Returns an `usize` where every byte pair is equal to `x`.
@@ -1249,7 +1249,7 @@ impl usize {
     pub(crate) const fn repeat_u16(x: u16) -> usize {
         let mut r = 0usize;
         let mut i = 0;
-        while i < mem::size_of::<usize>() {
+        while i < size_of::<usize>() {
             // Use `wrapping_shl` to make it work on targets with 16-bit `usize`
             r = r.wrapping_shl(16) | (x as usize);
             i += 2;
@@ -1330,7 +1330,7 @@ pub enum FpCategory {
 #[inline(always)]
 #[unstable(issue = "none", feature = "std_internals")]
 pub const fn can_not_overflow<T>(radix: u32, is_signed_ty: bool, digits: &[u8]) -> bool {
-    radix <= 16 && digits.len() <= mem::size_of::<T>() * 2 - is_signed_ty as usize
+    radix <= 16 && digits.len() <= size_of::<T>() * 2 - is_signed_ty as usize
 }
 
 #[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
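The bound in `can_not_overflow` is easiest to see spelled out: in any radix up to 16 a digit carries at most 4 bits, so `size_of::<T>() * 2` digits never exceed `T`'s bit width, and signed types give up one digit's worth of headroom for the sign. A small sanity check of that arithmetic (not library code):

```rust
fn main() {
    // i32: 4 bytes = 32 bits, 4 bits per hex digit, minus one digit for the sign.
    let max_digits = size_of::<i32>() * 2 - 1;
    assert_eq!(max_digits, 7);
    // Seven hex digits encode at most 0x0FFF_FFFF, which always fits in an i32.
    assert!(0x0FFF_FFFF <= i32::MAX);
}
```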
diff --git a/library/core/src/num/nonzero.rs b/library/core/src/num/nonzero.rs
index a967b72c4fa..6c9b366d903 100644
--- a/library/core/src/num/nonzero.rs
+++ b/library/core/src/num/nonzero.rs
@@ -86,7 +86,7 @@ impl_zeroable_primitive!(
 /// For example, `Option<NonZero<u32>>` is the same size as `u32`:
 ///
 /// ```
-/// use core::{mem::size_of, num::NonZero};
+/// use core::{num::NonZero};
+/// use core::num::NonZero;
 ///
 /// assert_eq!(size_of::<Option<NonZero<u32>>>(), size_of::<u32>());
 /// ```
@@ -102,7 +102,6 @@ impl_zeroable_primitive!(
 /// `Option<NonZero<T>>` are guaranteed to have the same size and alignment:
 ///
 /// ```
-/// # use std::mem::{size_of, align_of};
 /// use std::num::NonZero;
 ///
 /// assert_eq!(size_of::<NonZero<u32>>(), size_of::<Option<NonZero<u32>>>());
@@ -500,7 +499,6 @@ macro_rules! nonzero_integer {
         #[doc = concat!("For example, `Option<", stringify!($Ty), ">` is the same size as `", stringify!($Int), "`:")]
         ///
         /// ```rust
-        /// use std::mem::size_of;
         #[doc = concat!("assert_eq!(size_of::<Option<core::num::", stringify!($Ty), ">>(), size_of::<", stringify!($Int), ">());")]
         /// ```
         ///
@@ -516,7 +514,6 @@ macro_rules! nonzero_integer {
         /// are guaranteed to have the same size and alignment:
         ///
         /// ```
-        /// # use std::mem::{size_of, align_of};
         #[doc = concat!("use std::num::", stringify!($Ty), ";")]
         ///
         #[doc = concat!("assert_eq!(size_of::<", stringify!($Ty), ">(), size_of::<Option<", stringify!($Ty), ">>());")]
diff --git a/library/core/src/num/uint_macros.rs b/library/core/src/num/uint_macros.rs
index 405c71121ca..d8709d51ccc 100644
--- a/library/core/src/num/uint_macros.rs
+++ b/library/core/src/num/uint_macros.rs
@@ -2586,7 +2586,7 @@ macro_rules! uint_impl {
                       without modifying the original"]
         #[inline]
         pub const fn abs_diff(self, other: Self) -> Self {
-            if mem::size_of::<Self>() == 1 {
+            if size_of::<Self>() == 1 {
                 // Trick LLVM into generating the psadbw instruction when SSE2
                 // is available and this function is autovectorized for u8's.
                 (self as i32).wrapping_sub(other as i32).abs() as Self
@@ -3465,7 +3465,7 @@ macro_rules! uint_impl {
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline]
-        pub const fn to_be_bytes(self) -> [u8; mem::size_of::<Self>()] {
+        pub const fn to_be_bytes(self) -> [u8; size_of::<Self>()] {
             self.to_be().to_ne_bytes()
         }
 
@@ -3485,7 +3485,7 @@ macro_rules! uint_impl {
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline]
-        pub const fn to_le_bytes(self) -> [u8; mem::size_of::<Self>()] {
+        pub const fn to_le_bytes(self) -> [u8; size_of::<Self>()] {
             self.to_le().to_ne_bytes()
         }
 
@@ -3521,7 +3521,7 @@ macro_rules! uint_impl {
         // SAFETY: const sound because integers are plain old datatypes so we can always
         // transmute them to arrays of bytes
         #[inline]
-        pub const fn to_ne_bytes(self) -> [u8; mem::size_of::<Self>()] {
+        pub const fn to_ne_bytes(self) -> [u8; size_of::<Self>()] {
             // SAFETY: integers are plain old datatypes so we can always transmute them to
             // arrays of bytes
             unsafe { mem::transmute(self) }
@@ -3543,7 +3543,7 @@ macro_rules! uint_impl {
         ///
         /// ```
         #[doc = concat!("fn read_be_", stringify!($SelfT), "(input: &mut &[u8]) -> ", stringify!($SelfT), " {")]
-        #[doc = concat!("    let (int_bytes, rest) = input.split_at(std::mem::size_of::<", stringify!($SelfT), ">());")]
+        #[doc = concat!("    let (int_bytes, rest) = input.split_at(size_of::<", stringify!($SelfT), ">());")]
         ///     *input = rest;
         #[doc = concat!("    ", stringify!($SelfT), "::from_be_bytes(int_bytes.try_into().unwrap())")]
         /// }
@@ -3552,7 +3552,7 @@ macro_rules! uint_impl {
         #[rustc_const_stable(feature = "const_int_conversion", since = "1.44.0")]
         #[must_use]
         #[inline]
-        pub const fn from_be_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
+        pub const fn from_be_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
             Self::from_be(Self::from_ne_bytes(bytes))
         }
 
@@ -3572,7 +3572,7 @@ macro_rules! uint_impl {
         ///
         /// ```
         #[doc = concat!("fn read_le_", stringify!($SelfT), "(input: &mut &[u8]) -> ", stringify!($SelfT), " {")]
-        #[doc = concat!("    let (int_bytes, rest) = input.split_at(std::mem::size_of::<", stringify!($SelfT), ">());")]
+        #[doc = concat!("    let (int_bytes, rest) = input.split_at(size_of::<", stringify!($SelfT), ">());")]
         ///     *input = rest;
         #[doc = concat!("    ", stringify!($SelfT), "::from_le_bytes(int_bytes.try_into().unwrap())")]
         /// }
@@ -3581,7 +3581,7 @@ macro_rules! uint_impl {
         #[rustc_const_stable(feature = "const_int_conversion", since = "1.44.0")]
         #[must_use]
         #[inline]
-        pub const fn from_le_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
+        pub const fn from_le_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
             Self::from_le(Self::from_ne_bytes(bytes))
         }
 
@@ -3612,7 +3612,7 @@ macro_rules! uint_impl {
         ///
         /// ```
         #[doc = concat!("fn read_ne_", stringify!($SelfT), "(input: &mut &[u8]) -> ", stringify!($SelfT), " {")]
-        #[doc = concat!("    let (int_bytes, rest) = input.split_at(std::mem::size_of::<", stringify!($SelfT), ">());")]
+        #[doc = concat!("    let (int_bytes, rest) = input.split_at(size_of::<", stringify!($SelfT), ">());")]
         ///     *input = rest;
         #[doc = concat!("    ", stringify!($SelfT), "::from_ne_bytes(int_bytes.try_into().unwrap())")]
         /// }
@@ -3623,7 +3623,7 @@ macro_rules! uint_impl {
         // SAFETY: const sound because integers are plain old datatypes so we can always
         // transmute to them
         #[inline]
-        pub const fn from_ne_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
+        pub const fn from_ne_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
             // SAFETY: integers are plain old datatypes so we can always transmute to them
             unsafe { mem::transmute(bytes) }
         }
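For the `abs_diff` hunk above, the widening trick is easiest to verify in isolation: for `u8`, the difference of two values widened to `i32` always fits back into a `u8` after taking the absolute value. A brute-force sketch (whether LLVM actually selects `psadbw` is not checked here):

```rust
fn main() {
    fn abs_diff_u8(a: u8, b: u8) -> u8 {
        // Same shape as the single-byte branch above: widen, subtract, take |.|.
        (a as i32).wrapping_sub(b as i32).abs() as u8
    }
    for a in 0..=u8::MAX {
        for b in 0..=u8::MAX {
            assert_eq!(abs_diff_u8(a, b), a.abs_diff(b));
        }
    }
}
```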
diff --git a/library/core/src/primitive_docs.rs b/library/core/src/primitive_docs.rs
index bbf5939fe1b..89c856fe107 100644
--- a/library/core/src/primitive_docs.rs
+++ b/library/core/src/primitive_docs.rs
@@ -398,12 +398,12 @@ mod prim_never {}
 /// let v = vec!['h', 'e', 'l', 'l', 'o'];
 ///
 /// // five elements times four bytes for each element
-/// assert_eq!(20, v.len() * std::mem::size_of::<char>());
+/// assert_eq!(20, v.len() * size_of::<char>());
 ///
 /// let s = String::from("hello");
 ///
 /// // five elements times one byte per element
-/// assert_eq!(5, s.len() * std::mem::size_of::<u8>());
+/// assert_eq!(5, s.len() * size_of::<u8>());
 /// ```
 ///
 /// [`String`]: ../std/string/struct.String.html
@@ -443,8 +443,8 @@ mod prim_never {}
 /// let s = String::from("love: ❤️");
 /// let v: Vec<char> = s.chars().collect();
 ///
-/// assert_eq!(12, std::mem::size_of_val(&s[..]));
-/// assert_eq!(32, std::mem::size_of_val(&v[..]));
+/// assert_eq!(12, size_of_val(&s[..]));
+/// assert_eq!(32, size_of_val(&v[..]));
 /// ```
 #[stable(feature = "rust1", since = "1.0.0")]
 mod prim_char {}
@@ -594,10 +594,8 @@ impl () {}
 /// #[allow(unused_extern_crates)]
 /// extern crate libc;
 ///
-/// use std::mem;
-///
 /// unsafe {
-///     let my_num: *mut i32 = libc::malloc(mem::size_of::<i32>()) as *mut i32;
+///     let my_num: *mut i32 = libc::malloc(size_of::<i32>()) as *mut i32;
 ///     if my_num.is_null() {
 ///         panic!("failed to allocate memory");
 ///     }
@@ -893,11 +891,11 @@ mod prim_array {}
 ///
 /// ```
 /// # use std::rc::Rc;
-/// let pointer_size = std::mem::size_of::<&u8>();
-/// assert_eq!(2 * pointer_size, std::mem::size_of::<&[u8]>());
-/// assert_eq!(2 * pointer_size, std::mem::size_of::<*const [u8]>());
-/// assert_eq!(2 * pointer_size, std::mem::size_of::<Box<[u8]>>());
-/// assert_eq!(2 * pointer_size, std::mem::size_of::<Rc<[u8]>>());
+/// let pointer_size = size_of::<&u8>();
+/// assert_eq!(2 * pointer_size, size_of::<&[u8]>());
+/// assert_eq!(2 * pointer_size, size_of::<*const [u8]>());
+/// assert_eq!(2 * pointer_size, size_of::<Box<[u8]>>());
+/// assert_eq!(2 * pointer_size, size_of::<Rc<[u8]>>());
 /// ```
 ///
 /// ## Trait Implementations
@@ -1692,15 +1690,13 @@ mod prim_ref {}
 /// This zero-sized type *coerces* to a regular function pointer. For example:
 ///
 /// ```rust
-/// use std::mem;
-///
 /// fn bar(x: i32) {}
 ///
 /// let not_bar_ptr = bar; // `not_bar_ptr` is zero-sized, uniquely identifying `bar`
-/// assert_eq!(mem::size_of_val(&not_bar_ptr), 0);
+/// assert_eq!(size_of_val(&not_bar_ptr), 0);
 ///
 /// let bar_ptr: fn(i32) = not_bar_ptr; // force coercion to function pointer
-/// assert_eq!(mem::size_of_val(&bar_ptr), mem::size_of::<usize>());
+/// assert_eq!(size_of_val(&bar_ptr), size_of::<usize>());
 ///
 /// let footgun = &bar; // this is a shared reference to the zero-sized type identifying `bar`
 /// ```
diff --git a/library/core/src/ptr/alignment.rs b/library/core/src/ptr/alignment.rs
index 2da94e72566..19311e39b45 100644
--- a/library/core/src/ptr/alignment.rs
+++ b/library/core/src/ptr/alignment.rs
@@ -13,8 +13,8 @@ use crate::{cmp, fmt, hash, mem, num};
 pub struct Alignment(AlignmentEnum);
 
 // Alignment is `repr(usize)`, but via extra steps.
-const _: () = assert!(mem::size_of::<Alignment>() == mem::size_of::<usize>());
-const _: () = assert!(mem::align_of::<Alignment>() == mem::align_of::<usize>());
+const _: () = assert!(size_of::<Alignment>() == size_of::<usize>());
+const _: () = assert!(align_of::<Alignment>() == align_of::<usize>());
 
 fn _alignment_can_be_structurally_matched(a: Alignment) -> bool {
     matches!(a, Alignment::MIN)
@@ -38,14 +38,14 @@ impl Alignment {
 
     /// Returns the alignment for a type.
     ///
-    /// This provides the same numerical value as [`mem::align_of`],
+    /// This provides the same numerical value as [`align_of`],
     /// but in an `Alignment` instead of a `usize`.
     #[unstable(feature = "ptr_alignment_type", issue = "102070")]
     #[inline]
     #[must_use]
     pub const fn of<T>() -> Self {
         // This can't actually panic since type alignment is always a power of two.
-        const { Alignment::new(mem::align_of::<T>()).unwrap() }
+        const { Alignment::new(align_of::<T>()).unwrap() }
     }
 
     /// Creates an `Alignment` from a `usize`, or returns `None` if it's
diff --git a/library/core/src/ptr/const_ptr.rs b/library/core/src/ptr/const_ptr.rs
index 8db620596dd..9a4f916803e 100644
--- a/library/core/src/ptr/const_ptr.rs
+++ b/library/core/src/ptr/const_ptr.rs
@@ -1,7 +1,7 @@
 use super::*;
 use crate::cmp::Ordering::{Equal, Greater, Less};
 use crate::intrinsics::const_eval_select;
-use crate::mem::SizedTypeProperties;
+use crate::mem::{self, SizedTypeProperties};
 use crate::slice::{self, SliceIndex};
 
 impl<T: ?Sized> *const T {
@@ -595,9 +595,9 @@ impl<T: ?Sized> *const T {
     }
 
     /// Calculates the distance between two pointers within the same allocation. The returned value is in
-    /// units of T: the distance in bytes divided by `mem::size_of::<T>()`.
+    /// units of T: the distance in bytes divided by `size_of::<T>()`.
     ///
-    /// This is equivalent to `(self as isize - origin as isize) / (mem::size_of::<T>() as isize)`,
+    /// This is equivalent to `(self as isize - origin as isize) / (size_of::<T>() as isize)`,
     /// except that it has a lot more opportunities for UB, in exchange for the compiler
     /// better understanding what you are doing.
     ///
@@ -633,7 +633,7 @@ impl<T: ?Sized> *const T {
     /// objects is not known at compile-time. However, the requirement also exists at
     /// runtime and may be exploited by optimizations. If you wish to compute the difference between
     /// pointers that are not guaranteed to be from the same allocation, use `(self as isize -
-    /// origin as isize) / mem::size_of::<T>()`.
+    /// origin as isize) / size_of::<T>()`.
     // FIXME: recommend `addr()` instead of `as usize` once that is stable.
     ///
     /// [`add`]: #method.add
@@ -683,7 +683,7 @@ impl<T: ?Sized> *const T {
     where
         T: Sized,
     {
-        let pointee_size = mem::size_of::<T>();
+        let pointee_size = size_of::<T>();
         assert!(0 < pointee_size && pointee_size <= isize::MAX as usize);
         // SAFETY: the caller must uphold the safety contract for `ptr_offset_from`.
         unsafe { intrinsics::ptr_offset_from(self, origin) }
@@ -709,7 +709,7 @@ impl<T: ?Sized> *const T {
 
     /// Calculates the distance between two pointers within the same allocation, *where it's known that
     /// `self` is equal to or greater than `origin`*. The returned value is in
-    /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
+    /// units of T: the distance in bytes is divided by `size_of::<T>()`.
     ///
     /// This computes the same value that [`offset_from`](#method.offset_from)
     /// would compute, but with the added precondition that the offset is
@@ -793,7 +793,7 @@ impl<T: ?Sized> *const T {
             ) => runtime_ptr_ge(this, origin)
         );
 
-        let pointee_size = mem::size_of::<T>();
+        let pointee_size = size_of::<T>();
         assert!(0 < pointee_size && pointee_size <= isize::MAX as usize);
         // SAFETY: the caller must uphold the safety contract for `ptr_offset_from_unsigned`.
         unsafe { intrinsics::ptr_offset_from_unsigned(self, origin) }
@@ -1313,7 +1313,7 @@ impl<T: ?Sized> *const T {
         unsafe { read_unaligned(self) }
     }
 
-    /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
+    /// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
     /// and destination may overlap.
     ///
     /// NOTE: this has the *same* argument order as [`ptr::copy`].
@@ -1333,7 +1333,7 @@ impl<T: ?Sized> *const T {
         unsafe { copy(self, dest, count) }
     }
 
-    /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
+    /// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
     /// and destination may *not* overlap.
     ///
     /// NOTE: this has the *same* argument order as [`ptr::copy_nonoverlapping`].
@@ -1375,8 +1375,6 @@ impl<T: ?Sized> *const T {
     /// Accessing adjacent `u8` as `u16`
     ///
     /// ```
-    /// use std::mem::align_of;
-    ///
     /// # unsafe {
     /// let x = [5_u8, 6, 7, 8, 9];
     /// let ptr = x.as_ptr();
@@ -1436,7 +1434,7 @@ impl<T: ?Sized> *const T {
     where
         T: Sized,
     {
-        self.is_aligned_to(mem::align_of::<T>())
+        self.is_aligned_to(align_of::<T>())
     }
 
     /// Returns whether the pointer is aligned to `align`.
@@ -1595,7 +1593,7 @@ impl<T> *const [T] {
     /// When calling this method, you have to ensure that *either* the pointer is null *or*
     /// all of the following is true:
     ///
-    /// * The pointer must be [valid] for reads for `ptr.len() * mem::size_of::<T>()` many bytes,
+    /// * The pointer must be [valid] for reads for `ptr.len() * size_of::<T>()` many bytes,
     ///   and it must be properly aligned. This means in particular:
     ///
     ///     * The entire memory range of this slice must be contained within a single [allocated object]!
@@ -1607,7 +1605,7 @@ impl<T> *const [T] {
     ///       them from other data. You can obtain a pointer that is usable as `data`
     ///       for zero-length slices using [`NonNull::dangling()`].
     ///
-    /// * The total size `ptr.len() * mem::size_of::<T>()` of the slice must be no larger than `isize::MAX`.
+    /// * The total size `ptr.len() * size_of::<T>()` of the slice must be no larger than `isize::MAX`.
     ///   See the safety documentation of [`pointer::offset`].
     ///
     /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
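The reworded `offset_from` docs above can be exercised directly: the result is in elements, i.e. the byte distance divided by `size_of::<T>()`. A minimal check with two pointers into the same array:

```rust
fn main() {
    let a = [0u32; 8];
    let base = a.as_ptr();
    // SAFETY: both pointers stay within the same array, so `add` and
    // `offset_from` meet their in-bounds and same-allocation requirements.
    let third = unsafe { base.add(3) };
    let elems = unsafe { third.offset_from(base) };
    assert_eq!(elems, 3);
    assert_eq!(third as usize - base as usize, 3 * size_of::<u32>());
}
```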
diff --git a/library/core/src/ptr/metadata.rs b/library/core/src/ptr/metadata.rs
index 9eee29d485f..48707506389 100644
--- a/library/core/src/ptr/metadata.rs
+++ b/library/core/src/ptr/metadata.rs
@@ -74,7 +74,7 @@ pub trait Pointee {
 /// #![feature(ptr_metadata)]
 ///
 /// fn this_never_panics<T: std::ptr::Thin>() {
-///     assert_eq!(std::mem::size_of::<&T>(), std::mem::size_of::<usize>())
+///     assert_eq!(size_of::<&T>(), size_of::<usize>())
 /// }
 /// ```
 #[unstable(feature = "ptr_metadata", issue = "81513")]
diff --git a/library/core/src/ptr/mod.rs b/library/core/src/ptr/mod.rs
index eb99be817a2..ea53da78d3b 100644
--- a/library/core/src/ptr/mod.rs
+++ b/library/core/src/ptr/mod.rs
@@ -48,7 +48,7 @@
 //!
 //! Valid raw pointers as defined above are not necessarily properly aligned (where
 //! "proper" alignment is defined by the pointee type, i.e., `*const T` must be
-//! aligned to `mem::align_of::<T>()`). However, most functions require their
+//! aligned to `align_of::<T>()`). However, most functions require their
 //! arguments to be properly aligned, and will explicitly state
 //! this requirement in their documentation. Notable exceptions to this are
 //! [`read_unaligned`] and [`write_unaligned`].
@@ -297,7 +297,7 @@
 //!
 //!     // Our value, which must have enough alignment to have spare least-significant-bits.
 //!     let my_precious_data: u32 = 17;
-//!     assert!(core::mem::align_of::<u32>() > 1);
+//!     assert!(align_of::<u32>() > 1);
 //!
 //!     // Create a tagged pointer
 //!     let ptr = &my_precious_data as *const u32;
@@ -1098,12 +1098,12 @@ pub const unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
         } else {
             macro_rules! attempt_swap_as_chunks {
                 ($ChunkTy:ty) => {
-                    if mem::align_of::<T>() >= mem::align_of::<$ChunkTy>()
-                        && mem::size_of::<T>() % mem::size_of::<$ChunkTy>() == 0
+                    if align_of::<T>() >= align_of::<$ChunkTy>()
+                        && size_of::<T>() % size_of::<$ChunkTy>() == 0
                     {
                         let x: *mut $ChunkTy = x.cast();
                         let y: *mut $ChunkTy = y.cast();
-                        let count = count * (mem::size_of::<T>() / mem::size_of::<$ChunkTy>());
+                        let count = count * (size_of::<T>() / size_of::<$ChunkTy>());
                         // SAFETY: these are the same bytes that the caller promised were
                         // ok, just typed as `MaybeUninit<ChunkTy>`s instead of as `T`s.
                         // The `if` condition above ensures that we're not violating
@@ -1117,9 +1117,9 @@ pub const unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
             // Split up the slice into small power-of-two-sized chunks that LLVM is able
             // to vectorize (unless it's a special type with more-than-pointer alignment,
             // because we don't want to pessimize things like slices of SIMD vectors.)
-            if mem::align_of::<T>() <= mem::size_of::<usize>()
-            && (!mem::size_of::<T>().is_power_of_two()
-                || mem::size_of::<T>() > mem::size_of::<usize>() * 2)
+            if align_of::<T>() <= size_of::<usize>()
+            && (!size_of::<T>().is_power_of_two()
+                || size_of::<T>() > size_of::<usize>() * 2)
             {
                 attempt_swap_as_chunks!(usize);
                 attempt_swap_as_chunks!(u8);
@@ -1443,10 +1443,8 @@ pub const unsafe fn read<T>(src: *const T) -> T {
 /// Read a `usize` value from a byte buffer:
 ///
 /// ```
-/// use std::mem;
-///
 /// fn read_usize(x: &[u8]) -> usize {
-///     assert!(x.len() >= mem::size_of::<usize>());
+///     assert!(x.len() >= size_of::<usize>());
 ///
 ///     let ptr = x.as_ptr() as *const usize;
 ///
@@ -1467,7 +1465,7 @@ pub const unsafe fn read_unaligned<T>(src: *const T) -> T {
     // Also, since we just wrote a valid value into `tmp`, it is guaranteed
     // to be properly initialized.
     unsafe {
-        copy_nonoverlapping(src as *const u8, tmp.as_mut_ptr() as *mut u8, mem::size_of::<T>());
+        copy_nonoverlapping(src as *const u8, tmp.as_mut_ptr() as *mut u8, size_of::<T>());
         tmp.assume_init()
     }
 }
@@ -1647,10 +1645,8 @@ pub const unsafe fn write<T>(dst: *mut T, src: T) {
 /// Write a `usize` value to a byte buffer:
 ///
 /// ```
-/// use std::mem;
-///
 /// fn write_usize(x: &mut [u8], val: usize) {
-///     assert!(x.len() >= mem::size_of::<usize>());
+///     assert!(x.len() >= size_of::<usize>());
 ///
 ///     let ptr = x.as_mut_ptr() as *mut usize;
 ///
@@ -1667,7 +1663,7 @@ pub const unsafe fn write_unaligned<T>(dst: *mut T, src: T) {
     // `dst` cannot overlap `src` because the caller has mutable access
     // to `dst` while `src` is owned by this function.
     unsafe {
-        copy_nonoverlapping((&raw const src) as *const u8, dst as *mut u8, mem::size_of::<T>());
+        copy_nonoverlapping((&raw const src) as *const u8, dst as *mut u8, size_of::<T>());
         // We are calling the intrinsic directly to avoid function calls in the generated code.
         intrinsics::forget(src);
     }
@@ -1911,7 +1907,7 @@ pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
         inverse & m_minus_one
     }
 
-    let stride = mem::size_of::<T>();
+    let stride = size_of::<T>();
 
     let addr: usize = p.addr();
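The chunking condition in `swap_nonoverlapping` above is a pure layout question, so it can be evaluated for a concrete element type; a sketch of the same check for `[usize; 4]`, which passes it on every target:

```rust
fn main() {
    type T = [usize; 4]; // same alignment as usize, size an exact multiple of it
    let swappable_as_usize_chunks =
        align_of::<T>() >= align_of::<usize>() && size_of::<T>() % size_of::<usize>() == 0;
    assert!(swappable_as_usize_chunks);
    // Each element contributes this many usize-sized chunks to the swap loop.
    assert_eq!(size_of::<T>() / size_of::<usize>(), 4);
}
```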
 
diff --git a/library/core/src/ptr/mut_ptr.rs b/library/core/src/ptr/mut_ptr.rs
index 5a64f12ca99..b960a3d86be 100644
--- a/library/core/src/ptr/mut_ptr.rs
+++ b/library/core/src/ptr/mut_ptr.rs
@@ -1,7 +1,7 @@
 use super::*;
 use crate::cmp::Ordering::{Equal, Greater, Less};
 use crate::intrinsics::const_eval_select;
-use crate::mem::SizedTypeProperties;
+use crate::mem::{self, SizedTypeProperties};
 use crate::slice::{self, SliceIndex};
 
 impl<T: ?Sized> *mut T {
@@ -769,9 +769,9 @@ impl<T: ?Sized> *mut T {
     }
 
     /// Calculates the distance between two pointers within the same allocation. The returned value is in
-    /// units of T: the distance in bytes divided by `mem::size_of::<T>()`.
+    /// units of T: the distance in bytes divided by `size_of::<T>()`.
     ///
-    /// This is equivalent to `(self as isize - origin as isize) / (mem::size_of::<T>() as isize)`,
+    /// This is equivalent to `(self as isize - origin as isize) / (size_of::<T>() as isize)`,
     /// except that it has a lot more opportunities for UB, in exchange for the compiler
     /// better understanding what you are doing.
     ///
@@ -807,7 +807,7 @@ impl<T: ?Sized> *mut T {
     /// objects is not known at compile-time. However, the requirement also exists at
     /// runtime and may be exploited by optimizations. If you wish to compute the difference between
     /// pointers that are not guaranteed to be from the same allocation, use `(self as isize -
-    /// origin as isize) / mem::size_of::<T>()`.
+    /// origin as isize) / size_of::<T>()`.
     // FIXME: recommend `addr()` instead of `as usize` once that is stable.
     ///
     /// [`add`]: #method.add
@@ -881,7 +881,7 @@ impl<T: ?Sized> *mut T {
 
     /// Calculates the distance between two pointers within the same allocation, *where it's known that
     /// `self` is equal to or greater than `origin`*. The returned value is in
-    /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
+    /// units of T: the distance in bytes is divided by `size_of::<T>()`.
     ///
     /// This computes the same value that [`offset_from`](#method.offset_from)
     /// would compute, but with the added precondition that the offset is
@@ -1397,7 +1397,7 @@ impl<T: ?Sized> *mut T {
         unsafe { read_unaligned(self) }
     }
 
-    /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
+    /// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
     /// and destination may overlap.
     ///
     /// NOTE: this has the *same* argument order as [`ptr::copy`].
@@ -1417,7 +1417,7 @@ impl<T: ?Sized> *mut T {
         unsafe { copy(self, dest, count) }
     }
 
-    /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
+    /// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
     /// and destination may *not* overlap.
     ///
     /// NOTE: this has the *same* argument order as [`ptr::copy_nonoverlapping`].
@@ -1437,7 +1437,7 @@ impl<T: ?Sized> *mut T {
         unsafe { copy_nonoverlapping(self, dest, count) }
     }
 
-    /// Copies `count * size_of<T>` bytes from `src` to `self`. The source
+    /// Copies `count * size_of::<T>()` bytes from `src` to `self`. The source
     /// and destination may overlap.
     ///
     /// NOTE: this has the *opposite* argument order of [`ptr::copy`].
@@ -1457,7 +1457,7 @@ impl<T: ?Sized> *mut T {
         unsafe { copy(src, self, count) }
     }
 
-    /// Copies `count * size_of<T>` bytes from `src` to `self`. The source
+    /// Copies `count * size_of::<T>()` bytes from `src` to `self`. The source
     /// and destination may *not* overlap.
     ///
     /// NOTE: this has the *opposite* argument order of [`ptr::copy_nonoverlapping`].
@@ -1623,8 +1623,6 @@ impl<T: ?Sized> *mut T {
     /// Accessing adjacent `u8` as `u16`
     ///
     /// ```
-    /// use std::mem::align_of;
-    ///
     /// # unsafe {
     /// let mut x = [5_u8, 6, 7, 8, 9];
     /// let ptr = x.as_mut_ptr();
@@ -1689,7 +1687,7 @@ impl<T: ?Sized> *mut T {
     where
         T: Sized,
     {
-        self.is_aligned_to(mem::align_of::<T>())
+        self.is_aligned_to(align_of::<T>())
     }
 
     /// Returns whether the pointer is aligned to `align`.
@@ -1950,7 +1948,7 @@ impl<T> *mut [T] {
     /// When calling this method, you have to ensure that *either* the pointer is null *or*
     /// all of the following is true:
     ///
-    /// * The pointer must be [valid] for reads for `ptr.len() * mem::size_of::<T>()` many bytes,
+    /// * The pointer must be [valid] for reads for `ptr.len() * size_of::<T>()` many bytes,
     ///   and it must be properly aligned. This means in particular:
     ///
     ///     * The entire memory range of this slice must be contained within a single [allocated object]!
@@ -1962,7 +1960,7 @@ impl<T> *mut [T] {
     ///       them from other data. You can obtain a pointer that is usable as `data`
     ///       for zero-length slices using [`NonNull::dangling()`].
     ///
-    /// * The total size `ptr.len() * mem::size_of::<T>()` of the slice must be no larger than `isize::MAX`.
+    /// * The total size `ptr.len() * size_of::<T>()` of the slice must be no larger than `isize::MAX`.
     ///   See the safety documentation of [`pointer::offset`].
     ///
     /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
@@ -2008,7 +2006,7 @@ impl<T> *mut [T] {
     /// When calling this method, you have to ensure that *either* the pointer is null *or*
     /// all of the following is true:
     ///
-    /// * The pointer must be [valid] for reads and writes for `ptr.len() * mem::size_of::<T>()`
+    /// * The pointer must be [valid] for reads and writes for `ptr.len() * size_of::<T>()`
     ///   many bytes, and it must be properly aligned. This means in particular:
     ///
     ///     * The entire memory range of this slice must be contained within a single [allocated object]!
@@ -2020,7 +2018,7 @@ impl<T> *mut [T] {
     ///       them from other data. You can obtain a pointer that is usable as `data`
     ///       for zero-length slices using [`NonNull::dangling()`].
     ///
-    /// * The total size `ptr.len() * mem::size_of::<T>()` of the slice must be no larger than `isize::MAX`.
+    /// * The total size `ptr.len() * size_of::<T>()` of the slice must be no larger than `isize::MAX`.
     ///   See the safety documentation of [`pointer::offset`].
     ///
     /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
diff --git a/library/core/src/ptr/non_null.rs b/library/core/src/ptr/non_null.rs
index 7abd3ddaa9e..c769ba673c6 100644
--- a/library/core/src/ptr/non_null.rs
+++ b/library/core/src/ptr/non_null.rs
@@ -49,7 +49,6 @@ use crate::{fmt, hash, intrinsics, mem, ptr};
 /// are guaranteed to have the same size and alignment:
 ///
 /// ```
-/// # use std::mem::{size_of, align_of};
 /// use std::ptr::NonNull;
 ///
 /// assert_eq!(size_of::<NonNull<i16>>(), size_of::<Option<NonNull<i16>>>());
@@ -724,9 +723,9 @@ impl<T: ?Sized> NonNull<T> {
     }
 
     /// Calculates the distance between two pointers within the same allocation. The returned value is in
-    /// units of T: the distance in bytes divided by `mem::size_of::<T>()`.
+    /// units of T: the distance in bytes divided by `size_of::<T>()`.
     ///
-    /// This is equivalent to `(self as isize - origin as isize) / (mem::size_of::<T>() as isize)`,
+    /// This is equivalent to `(self as isize - origin as isize) / (size_of::<T>() as isize)`,
     /// except that it has a lot more opportunities for UB, in exchange for the compiler
     /// better understanding what you are doing.
     ///
@@ -762,7 +761,7 @@ impl<T: ?Sized> NonNull<T> {
     /// objects is not known at compile-time. However, the requirement also exists at
     /// runtime and may be exploited by optimizations. If you wish to compute the difference between
     /// pointers that are not guaranteed to be from the same allocation, use `(self as isize -
-    /// origin as isize) / mem::size_of::<T>()`.
+    /// origin as isize) / size_of::<T>()`.
     // FIXME: recommend `addr()` instead of `as usize` once that is stable.
     ///
     /// [`add`]: #method.add
@@ -842,7 +841,7 @@ impl<T: ?Sized> NonNull<T> {
 
     /// Calculates the distance between two pointers within the same allocation, *where it's known that
     /// `self` is equal to or greater than `origin`*. The returned value is in
-    /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
+    /// units of T: the distance in bytes is divided by `size_of::<T>()`.
     ///
     /// This computes the same value that [`offset_from`](#method.offset_from)
     /// would compute, but with the added precondition that the offset is
@@ -989,7 +988,7 @@ impl<T: ?Sized> NonNull<T> {
         unsafe { ptr::read_unaligned(self.as_ptr()) }
     }
 
-    /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
+    /// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
     /// and destination may overlap.
     ///
     /// NOTE: this has the *same* argument order as [`ptr::copy`].
@@ -1009,7 +1008,7 @@ impl<T: ?Sized> NonNull<T> {
         unsafe { ptr::copy(self.as_ptr(), dest.as_ptr(), count) }
     }
 
-    /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
+    /// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
     /// and destination may *not* overlap.
     ///
     /// NOTE: this has the *same* argument order as [`ptr::copy_nonoverlapping`].
@@ -1029,7 +1028,7 @@ impl<T: ?Sized> NonNull<T> {
         unsafe { ptr::copy_nonoverlapping(self.as_ptr(), dest.as_ptr(), count) }
     }
 
-    /// Copies `count * size_of<T>` bytes from `src` to `self`. The source
+    /// Copies `count * size_of::<T>()` bytes from `src` to `self`. The source
     /// and destination may overlap.
     ///
     /// NOTE: this has the *opposite* argument order of [`ptr::copy`].
@@ -1049,7 +1048,7 @@ impl<T: ?Sized> NonNull<T> {
         unsafe { ptr::copy(src.as_ptr(), self.as_ptr(), count) }
     }
 
-    /// Copies `count * size_of<T>` bytes from `src` to `self`. The source
+    /// Copies `count * size_of::<T>()` bytes from `src` to `self`. The source
     /// and destination may *not* overlap.
     ///
     /// NOTE: this has the *opposite* argument order of [`ptr::copy_nonoverlapping`].
@@ -1223,7 +1222,6 @@ impl<T: ?Sized> NonNull<T> {
     /// Accessing adjacent `u8` as `u16`
     ///
     /// ```
-    /// use std::mem::align_of;
     /// use std::ptr::NonNull;
     ///
     /// # unsafe {
@@ -1443,7 +1441,7 @@ impl<T> NonNull<[T]> {
     ///
     /// When calling this method, you have to ensure that all of the following is true:
     ///
-    /// * The pointer must be [valid] for reads for `ptr.len() * mem::size_of::<T>()` many bytes,
+    /// * The pointer must be [valid] for reads for `ptr.len() * size_of::<T>()` many bytes,
     ///   and it must be properly aligned. This means in particular:
     ///
     ///     * The entire memory range of this slice must be contained within a single allocated object!
@@ -1455,7 +1453,7 @@ impl<T> NonNull<[T]> {
     ///       them from other data. You can obtain a pointer that is usable as `data`
     ///       for zero-length slices using [`NonNull::dangling()`].
     ///
-    /// * The total size `ptr.len() * mem::size_of::<T>()` of the slice must be no larger than `isize::MAX`.
+    /// * The total size `ptr.len() * size_of::<T>()` of the slice must be no larger than `isize::MAX`.
     ///   See the safety documentation of [`pointer::offset`].
     ///
     /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
@@ -1488,7 +1486,7 @@ impl<T> NonNull<[T]> {
     ///
     /// When calling this method, you have to ensure that all of the following is true:
     ///
-    /// * The pointer must be [valid] for reads and writes for `ptr.len() * mem::size_of::<T>()`
+    /// * The pointer must be [valid] for reads and writes for `ptr.len() * size_of::<T>()`
     ///   many bytes, and it must be properly aligned. This means in particular:
     ///
     ///     * The entire memory range of this slice must be contained within a single allocated object!
@@ -1500,7 +1498,7 @@ impl<T> NonNull<[T]> {
     ///       them from other data. You can obtain a pointer that is usable as `data`
     ///       for zero-length slices using [`NonNull::dangling()`].
     ///
-    /// * The total size `ptr.len() * mem::size_of::<T>()` of the slice must be no larger than `isize::MAX`.
+    /// * The total size `ptr.len() * size_of::<T>()` of the slice must be no larger than `isize::MAX`.
     ///   See the safety documentation of [`pointer::offset`].
     ///
     /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
diff --git a/library/core/src/slice/cmp.rs b/library/core/src/slice/cmp.rs
index 9cb00644e64..804bdfcbb4f 100644
--- a/library/core/src/slice/cmp.rs
+++ b/library/core/src/slice/cmp.rs
@@ -1,10 +1,10 @@
 //! Comparison traits for `[T]`.
 
 use super::{from_raw_parts, memchr};
+use crate::ascii;
 use crate::cmp::{self, BytewiseEq, Ordering};
 use crate::intrinsics::compare_bytes;
 use crate::num::NonZero;
-use crate::{ascii, mem};
 
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T, U> PartialEq<[U]> for [T]
@@ -87,7 +87,7 @@ where
         // SAFETY: `self` and `other` are references and are thus guaranteed to be valid.
         // The two slices have been checked to have the same size above.
         unsafe {
-            let size = mem::size_of_val(self);
+            let size = size_of_val(self);
             compare_bytes(self.as_ptr() as *const u8, other.as_ptr() as *const u8, size) == 0
         }
     }
@@ -266,7 +266,7 @@ macro_rules! impl_slice_contains {
                 fn slice_contains(&self, arr: &[$t]) -> bool {
                     // Make our LANE_COUNT 4x the normal lane count (aiming for 128 bit vectors).
                     // The compiler will nicely unroll it.
-                    const LANE_COUNT: usize = 4 * (128 / (mem::size_of::<$t>() * 8));
+                    const LANE_COUNT: usize = 4 * (128 / (size_of::<$t>() * 8));
                     // SIMD
                     let mut chunks = arr.chunks_exact(LANE_COUNT);
                     for chunk in &mut chunks {
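The hunk above ends just as the chunk loop begins, so the loop body below is an assumption about its shape rather than a copy of it; the point is only that `LANE_COUNT` is derived from `size_of` so each chunk spans roughly four 128-bit vectors and the search within a chunk stays branch-free:

```rust
fn contains_chunked(needle: u32, haystack: &[u32]) -> bool {
    // For u32: 4 * (128 / 32) = 16 elements per chunk.
    const LANE_COUNT: usize = 4 * (128 / (size_of::<u32>() * 8));
    let mut chunks = haystack.chunks_exact(LANE_COUNT);
    for chunk in &mut chunks {
        // OR-fold instead of an early return per element, so the compiler can unroll it.
        if chunk.iter().fold(false, |acc, &x| acc | (x == needle)) {
            return true;
        }
    }
    chunks.remainder().iter().any(|&x| x == needle)
}
```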
diff --git a/library/core/src/slice/memchr.rs b/library/core/src/slice/memchr.rs
index 98db7aaf533..1e1053583a6 100644
--- a/library/core/src/slice/memchr.rs
+++ b/library/core/src/slice/memchr.rs
@@ -2,11 +2,10 @@
 // Copyright 2015 Andrew Gallant, bluss and Nicolas Koch
 
 use crate::intrinsics::const_eval_select;
-use crate::mem;
 
 const LO_USIZE: usize = usize::repeat_u8(0x01);
 const HI_USIZE: usize = usize::repeat_u8(0x80);
-const USIZE_BYTES: usize = mem::size_of::<usize>();
+const USIZE_BYTES: usize = size_of::<usize>();
 
 /// Returns `true` if `x` contains any zero byte.
 ///
@@ -138,7 +137,7 @@ pub fn memrchr(x: u8, text: &[u8]) -> Option<usize> {
     // offset is always aligned, so just testing `>` is sufficient and avoids possible
     // overflow.
     let repeated_x = usize::repeat_u8(x);
-    let chunk_bytes = mem::size_of::<Chunk>();
+    let chunk_bytes = size_of::<Chunk>();
 
     while offset > min_aligned_offset {
         // SAFETY: offset starts at len - suffix.len(), as long as it is greater than
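The two constants above feed the classic word-at-a-time zero-byte test; a standalone sketch of that test, building the same bit patterns with `from_ne_bytes` since `repeat_u8` is crate-private (the helper name is illustrative):

```rust
fn main() {
    const LO: usize = usize::from_ne_bytes([0x01; size_of::<usize>()]);
    const HI: usize = usize::from_ne_bytes([0x80; size_of::<usize>()]);

    fn contains_zero_byte(x: usize) -> bool {
        // A zero byte underflows past 0x80 when 0x01 is subtracted, and `!x`
        // masks out bytes whose high bit was already set before the subtraction.
        x.wrapping_sub(LO) & !x & HI != 0
    }

    assert!(contains_zero_byte(usize::MAX ^ 0xFF)); // lowest byte is zero
    assert!(!contains_zero_byte(usize::MAX)); // every byte is 0xFF
    assert!(!contains_zero_byte(LO)); // every byte is 0x01
}
```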
diff --git a/library/core/src/slice/mod.rs b/library/core/src/slice/mod.rs
index 4055038b958..3570d8d0876 100644
--- a/library/core/src/slice/mod.rs
+++ b/library/core/src/slice/mod.rs
@@ -3893,9 +3893,9 @@ impl<T> [T] {
 
         // Explicitly wrap the function call in a const block so it gets
         // constant-evaluated even in debug mode.
-        let gcd: usize = const { gcd(mem::size_of::<T>(), mem::size_of::<U>()) };
-        let ts: usize = mem::size_of::<U>() / gcd;
-        let us: usize = mem::size_of::<T>() / gcd;
+        let gcd: usize = const { gcd(size_of::<T>(), size_of::<U>()) };
+        let ts: usize = size_of::<U>() / gcd;
+        let us: usize = size_of::<T>() / gcd;
 
         // Armed with this knowledge, we can find how many `U`s we can fit!
         let us_len = self.len() / ts * us;
@@ -3945,7 +3945,7 @@ impl<T> [T] {
         // ptr.align_offset.
         let ptr = self.as_ptr();
         // SAFETY: See the `align_to_mut` method for the detailed safety comment.
-        let offset = unsafe { crate::ptr::align_offset(ptr, mem::align_of::<U>()) };
+        let offset = unsafe { crate::ptr::align_offset(ptr, align_of::<U>()) };
         if offset > self.len() {
             (self, &[], &[])
         } else {
@@ -3955,7 +3955,7 @@ impl<T> [T] {
             #[cfg(miri)]
             crate::intrinsics::miri_promise_symbolic_alignment(
                 rest.as_ptr().cast(),
-                mem::align_of::<U>(),
+                align_of::<U>(),
             );
             // SAFETY: now `rest` is definitely aligned, so `from_raw_parts` below is okay,
             // since the caller guarantees that we can transmute `T` to `U` safely.
@@ -4016,7 +4016,7 @@ impl<T> [T] {
         // valid pointer `ptr` (it comes from a reference to `self`) and with
         // a size that is a power of two (since it comes from the alignment for U),
         // satisfying its safety constraints.
-        let offset = unsafe { crate::ptr::align_offset(ptr, mem::align_of::<U>()) };
+        let offset = unsafe { crate::ptr::align_offset(ptr, align_of::<U>()) };
         if offset > self.len() {
             (self, &mut [], &mut [])
         } else {
@@ -4028,7 +4028,7 @@ impl<T> [T] {
             #[cfg(miri)]
             crate::intrinsics::miri_promise_symbolic_alignment(
                 mut_ptr.cast() as *const (),
-                mem::align_of::<U>(),
+                align_of::<U>(),
             );
             // We can't use `rest` again after this, that would invalidate its alias `mut_ptr`!
             // SAFETY: see comments for `align_to`.
@@ -4099,7 +4099,7 @@ impl<T> [T] {
         // These are expected to always match, as vector types are laid out like
         // arrays per <https://llvm.org/docs/LangRef.html#vector-type>, but we
         // might as well double-check since it'll optimize away anyhow.
-        assert_eq!(mem::size_of::<Simd<T, LANES>>(), mem::size_of::<[T; LANES]>());
+        assert_eq!(size_of::<Simd<T, LANES>>(), size_of::<[T; LANES]>());
 
         // SAFETY: The simd types have the same layout as arrays, just with
         // potentially-higher alignment, so the de-facto transmutes are sound.
@@ -4135,7 +4135,7 @@ impl<T> [T] {
         // These are expected to always match, as vector types are laid out like
         // arrays per <https://llvm.org/docs/LangRef.html#vector-type>, but we
         // might as well double-check since it'll optimize away anyhow.
-        assert_eq!(mem::size_of::<Simd<T, LANES>>(), mem::size_of::<[T; LANES]>());
+        assert_eq!(size_of::<Simd<T, LANES>>(), size_of::<[T; LANES]>());
 
         // SAFETY: The simd types have the same layout as arrays, just with
         // potentially-higher alignment, so the de-facto transmutes are sound.
@@ -4700,11 +4700,11 @@ impl<T> [T] {
 
         let byte_offset = elem_start.wrapping_sub(self_start);
 
-        if byte_offset % mem::size_of::<T>() != 0 {
+        if byte_offset % size_of::<T>() != 0 {
             return None;
         }
 
-        let offset = byte_offset / mem::size_of::<T>();
+        let offset = byte_offset / size_of::<T>();
 
         if offset < self.len() { Some(offset) } else { None }
     }
@@ -4754,11 +4754,11 @@ impl<T> [T] {
 
         let byte_start = subslice_start.wrapping_sub(self_start);
 
-        if byte_start % core::mem::size_of::<T>() != 0 {
+        if byte_start % size_of::<T>() != 0 {
             return None;
         }
 
-        let start = byte_start / core::mem::size_of::<T>();
+        let start = byte_start / size_of::<T>();
         let end = start.wrapping_add(subslice.len());
 
         if start <= self.len() && end <= self.len() { Some(start..end) } else { None }
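
Aside (illustrative, not from the patch): the two hunks above map a byte offset back to an element index. A minimal standalone sketch, using a hypothetical `element_index` helper and the prelude `size_of`; like the surrounding code, it assumes `T` is not zero-sized.

    // Hypothetical helper, for illustration only.
    fn element_index<T>(byte_offset: usize) -> Option<usize> {
        // A byte offset corresponds to an element only if it lands on an
        // element boundary (assumes size_of::<T>() != 0).
        if byte_offset % size_of::<T>() != 0 {
            return None;
        }
        Some(byte_offset / size_of::<T>())
    }

    fn main() {
        // 8 bytes into a `[u32]` is element 2; 6 bytes is not on a `u32` boundary.
        assert_eq!(element_index::<u32>(8), Some(2));
        assert_eq!(element_index::<u32>(6), None);
    }
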
diff --git a/library/core/src/slice/raw.rs b/library/core/src/slice/raw.rs
index e24b52cff82..3582c7e8b3f 100644
--- a/library/core/src/slice/raw.rs
+++ b/library/core/src/slice/raw.rs
@@ -11,7 +11,7 @@ use crate::{array, ptr, ub_checks};
 ///
 /// Behavior is undefined if any of the following conditions are violated:
 ///
-/// * `data` must be non-null, [valid] for reads for `len * mem::size_of::<T>()` many bytes,
+/// * `data` must be non-null, [valid] for reads for `len * size_of::<T>()` many bytes,
 ///   and it must be properly aligned. This means in particular:
 ///
 ///     * The entire memory range of this slice must be contained within a single allocated object!
@@ -28,7 +28,7 @@ use crate::{array, ptr, ub_checks};
 /// * The memory referenced by the returned slice must not be mutated for the duration
 ///   of lifetime `'a`, except inside an `UnsafeCell`.
 ///
-/// * The total size `len * mem::size_of::<T>()` of the slice must be no larger than `isize::MAX`,
+/// * The total size `len * size_of::<T>()` of the slice must be no larger than `isize::MAX`,
 ///   and adding that size to `data` must not "wrap around" the address space.
 ///   See the safety documentation of [`pointer::offset`].
 ///
@@ -146,7 +146,7 @@ pub const unsafe fn from_raw_parts<'a, T>(data: *const T, len: usize) -> &'a [T]
 ///
 /// Behavior is undefined if any of the following conditions are violated:
 ///
-/// * `data` must be non-null, [valid] for both reads and writes for `len * mem::size_of::<T>()` many bytes,
+/// * `data` must be non-null, [valid] for both reads and writes for `len * size_of::<T>()` many bytes,
 ///   and it must be properly aligned. This means in particular:
 ///
 ///     * The entire memory range of this slice must be contained within a single allocated object!
@@ -163,7 +163,7 @@ pub const unsafe fn from_raw_parts<'a, T>(data: *const T, len: usize) -> &'a [T]
 ///   (not derived from the return value) for the duration of lifetime `'a`.
 ///   Both read and write accesses are forbidden.
 ///
-/// * The total size `len * mem::size_of::<T>()` of the slice must be no larger than `isize::MAX`,
+/// * The total size `len * size_of::<T>()` of the slice must be no larger than `isize::MAX`,
 ///   and adding that size to `data` must not "wrap around" the address space.
 ///   See the safety documentation of [`pointer::offset`].
 ///
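
Aside (illustrative, not from the patch): the `len * size_of::<T>()` bound above caps how long a slice of a given element type can be. A rough sketch with a hypothetical `max_slice_len` helper.

    // Hypothetical helper, for illustration only.
    fn max_slice_len<T>() -> usize {
        if size_of::<T>() == 0 {
            // Zero-sized elements contribute no bytes, so this bound never binds.
            usize::MAX
        } else {
            isize::MAX as usize / size_of::<T>()
        }
    }

    fn main() {
        assert_eq!(max_slice_len::<u8>(), isize::MAX as usize);
        assert_eq!(max_slice_len::<u32>(), isize::MAX as usize / 4);
    }
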
diff --git a/library/core/src/slice/rotate.rs b/library/core/src/slice/rotate.rs
index 5d5ee4c7b62..80178f297ea 100644
--- a/library/core/src/slice/rotate.rs
+++ b/library/core/src/slice/rotate.rs
@@ -1,4 +1,4 @@
-use crate::mem::{self, MaybeUninit, SizedTypeProperties};
+use crate::mem::{MaybeUninit, SizedTypeProperties};
 use crate::{cmp, ptr};
 
 type BufType = [usize; 32];
@@ -21,12 +21,12 @@ pub(super) unsafe fn ptr_rotate<T>(left: usize, mid: *mut T, right: usize) {
     }
     // `T` is not a zero-sized type, so it's okay to divide by its size.
     if !cfg!(feature = "optimize_for_size")
-        && cmp::min(left, right) <= mem::size_of::<BufType>() / mem::size_of::<T>()
+        && cmp::min(left, right) <= size_of::<BufType>() / size_of::<T>()
     {
         // SAFETY: guaranteed by the caller
         unsafe { ptr_rotate_memmove(left, mid, right) };
     } else if !cfg!(feature = "optimize_for_size")
-        && ((left + right < 24) || (mem::size_of::<T>() > mem::size_of::<[usize; 4]>()))
+        && ((left + right < 24) || (size_of::<T>() > size_of::<[usize; 4]>()))
     {
         // SAFETY: guaranteed by the caller
         unsafe { ptr_rotate_gcd(left, mid, right) }
diff --git a/library/core/src/slice/sort/shared/smallsort.rs b/library/core/src/slice/sort/shared/smallsort.rs
index f6dcf42ba60..95f196a40d0 100644
--- a/library/core/src/slice/sort/shared/smallsort.rs
+++ b/library/core/src/slice/sort/shared/smallsort.rs
@@ -113,7 +113,7 @@ pub(crate) trait UnstableSmallSortFreezeTypeImpl: Sized + FreezeMarker {
 impl<T: FreezeMarker> UnstableSmallSortFreezeTypeImpl for T {
     #[inline(always)]
     default fn small_sort_threshold() -> usize {
-        if (mem::size_of::<T>() * SMALL_SORT_GENERAL_SCRATCH_LEN) <= MAX_STACK_ARRAY_SIZE {
+        if (size_of::<T>() * SMALL_SORT_GENERAL_SCRATCH_LEN) <= MAX_STACK_ARRAY_SIZE {
             SMALL_SORT_GENERAL_THRESHOLD
         } else {
             SMALL_SORT_FALLBACK_THRESHOLD
@@ -125,7 +125,7 @@ impl<T: FreezeMarker> UnstableSmallSortFreezeTypeImpl for T {
     where
         F: FnMut(&T, &T) -> bool,
     {
-        if (mem::size_of::<T>() * SMALL_SORT_GENERAL_SCRATCH_LEN) <= MAX_STACK_ARRAY_SIZE {
+        if (size_of::<T>() * SMALL_SORT_GENERAL_SCRATCH_LEN) <= MAX_STACK_ARRAY_SIZE {
             small_sort_general(v, is_less);
         } else {
             small_sort_fallback(v, is_less);
@@ -143,10 +143,10 @@ impl<T: FreezeMarker + CopyMarker> UnstableSmallSortFreezeTypeImpl for T {
     #[inline(always)]
     fn small_sort_threshold() -> usize {
         if has_efficient_in_place_swap::<T>()
-            && (mem::size_of::<T>() * SMALL_SORT_NETWORK_SCRATCH_LEN) <= MAX_STACK_ARRAY_SIZE
+            && (size_of::<T>() * SMALL_SORT_NETWORK_SCRATCH_LEN) <= MAX_STACK_ARRAY_SIZE
         {
             SMALL_SORT_NETWORK_THRESHOLD
-        } else if (mem::size_of::<T>() * SMALL_SORT_GENERAL_SCRATCH_LEN) <= MAX_STACK_ARRAY_SIZE {
+        } else if (size_of::<T>() * SMALL_SORT_GENERAL_SCRATCH_LEN) <= MAX_STACK_ARRAY_SIZE {
             SMALL_SORT_GENERAL_THRESHOLD
         } else {
             SMALL_SORT_FALLBACK_THRESHOLD
@@ -159,10 +159,10 @@ impl<T: FreezeMarker + CopyMarker> UnstableSmallSortFreezeTypeImpl for T {
         F: FnMut(&T, &T) -> bool,
     {
         if has_efficient_in_place_swap::<T>()
-            && (mem::size_of::<T>() * SMALL_SORT_NETWORK_SCRATCH_LEN) <= MAX_STACK_ARRAY_SIZE
+            && (size_of::<T>() * SMALL_SORT_NETWORK_SCRATCH_LEN) <= MAX_STACK_ARRAY_SIZE
         {
             small_sort_network(v, is_less);
-        } else if (mem::size_of::<T>() * SMALL_SORT_GENERAL_SCRATCH_LEN) <= MAX_STACK_ARRAY_SIZE {
+        } else if (size_of::<T>() * SMALL_SORT_GENERAL_SCRATCH_LEN) <= MAX_STACK_ARRAY_SIZE {
             small_sort_general(v, is_less);
         } else {
             small_sort_fallback(v, is_less);
@@ -238,7 +238,7 @@ fn small_sort_general_with_scratch<T: FreezeMarker, F: FnMut(&T, &T) -> bool>(
     unsafe {
         let scratch_base = scratch.as_mut_ptr() as *mut T;
 
-        let presorted_len = if const { mem::size_of::<T>() <= 16 } && len >= 16 {
+        let presorted_len = if const { size_of::<T>() <= 16 } && len >= 16 {
             // SAFETY: scratch_base is valid and has enough space.
             sort8_stable(v_base, scratch_base, scratch_base.add(len), is_less);
             sort8_stable(
@@ -863,5 +863,5 @@ fn panic_on_ord_violation() -> ! {
 #[must_use]
 pub(crate) const fn has_efficient_in_place_swap<T>() -> bool {
     // Heuristic that holds true on all tested 64-bit capable architectures.
-    mem::size_of::<T>() <= 8 // mem::size_of::<u64>()
+    size_of::<T>() <= 8 // size_of::<u64>()
 }
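
Aside (illustrative, not from the patch): `has_efficient_in_place_swap` above is a `const fn`, and the prelude `size_of` is usable there just like the old `mem::size_of` path. A standalone sketch of the same kind of heuristic.

    // Not the library's code; a free-standing copy of the pattern.
    const fn fits_in_one_register<T>() -> bool {
        size_of::<T>() <= 8 // size_of::<u64>()
    }

    fn main() {
        assert!(fits_in_one_register::<u32>());
        assert!(!fits_in_one_register::<[u64; 4]>());
    }
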
diff --git a/library/core/src/slice/sort/stable/mod.rs b/library/core/src/slice/sort/stable/mod.rs
index 3ff2e71fd05..090367cdaba 100644
--- a/library/core/src/slice/sort/stable/mod.rs
+++ b/library/core/src/slice/sort/stable/mod.rs
@@ -3,7 +3,7 @@
 #[cfg(not(any(feature = "optimize_for_size", target_pointer_width = "16")))]
 use crate::cmp;
 use crate::intrinsics;
-use crate::mem::{self, MaybeUninit, SizedTypeProperties};
+use crate::mem::{MaybeUninit, SizedTypeProperties};
 #[cfg(not(any(feature = "optimize_for_size", target_pointer_width = "16")))]
 use crate::slice::sort::shared::smallsort::{
     SMALL_SORT_GENERAL_SCRATCH_LEN, StableSmallSortTypeImpl, insertion_sort_shift_left,
@@ -107,7 +107,7 @@ fn driftsort_main<T, F: FnMut(&T, &T) -> bool, BufT: BufGuard<T>>(v: &mut [T], i
     // If min_good_run_len is ever modified, this code must be updated to allocate
     // the correct scratch size for it.
     const MAX_FULL_ALLOC_BYTES: usize = 8_000_000; // 8MB
-    let max_full_alloc = MAX_FULL_ALLOC_BYTES / mem::size_of::<T>();
+    let max_full_alloc = MAX_FULL_ALLOC_BYTES / size_of::<T>();
     let len = v.len();
     let alloc_len = cmp::max(
         cmp::max(len - len / 2, cmp::min(len, max_full_alloc)),
@@ -155,7 +155,7 @@ impl<T, const N: usize> AlignedStorage<T, N> {
     }
 
     fn as_uninit_slice_mut(&mut self) -> &mut [MaybeUninit<T>] {
-        let len = N / mem::size_of::<T>();
+        let len = N / size_of::<T>();
 
         // SAFETY: `_align` ensures we are correctly aligned.
         unsafe { core::slice::from_raw_parts_mut(self.storage.as_mut_ptr().cast(), len) }
diff --git a/library/core/src/slice/sort/stable/quicksort.rs b/library/core/src/slice/sort/stable/quicksort.rs
index 630c6ff9077..3c9688790c4 100644
--- a/library/core/src/slice/sort/stable/quicksort.rs
+++ b/library/core/src/slice/sort/stable/quicksort.rs
@@ -1,6 +1,6 @@
 //! This module contains a stable quicksort and partition implementation.
 
-use crate::mem::{self, ManuallyDrop, MaybeUninit};
+use crate::mem::{ManuallyDrop, MaybeUninit};
 use crate::slice::sort::shared::FreezeMarker;
 use crate::slice::sort::shared::pivot::choose_pivot;
 use crate::slice::sort::shared::smallsort::StableSmallSortTypeImpl;
@@ -126,7 +126,7 @@ fn stable_partition<T, F: FnMut(&T, &T) -> bool>(
             // this gave significant performance boosts in benchmarks. Unrolling
             // through for _ in 0..UNROLL_LEN { .. } instead of manually improves
             // compile times but has a ~10-20% performance penalty on opt-level=s.
-            if const { mem::size_of::<T>() <= 16 } {
+            if const { size_of::<T>() <= 16 } {
                 const UNROLL_LEN: usize = 4;
                 let unroll_end = v_base.add(loop_end_pos.saturating_sub(UNROLL_LEN - 1));
                 while state.scan < unroll_end {
diff --git a/library/core/src/slice/sort/unstable/quicksort.rs b/library/core/src/slice/sort/unstable/quicksort.rs
index bb9f90fc881..68a16118716 100644
--- a/library/core/src/slice/sort/unstable/quicksort.rs
+++ b/library/core/src/slice/sort/unstable/quicksort.rs
@@ -1,6 +1,8 @@
 //! This module contains an unstable quicksort and two partition implementations.
 
-use crate::mem::{self, ManuallyDrop};
+#[cfg(not(feature = "optimize_for_size"))]
+use crate::mem;
+use crate::mem::ManuallyDrop;
 #[cfg(not(feature = "optimize_for_size"))]
 use crate::slice::sort::shared::pivot::choose_pivot;
 #[cfg(not(feature = "optimize_for_size"))]
@@ -137,7 +139,7 @@ where
 
 const fn inst_partition<T, F: FnMut(&T, &T) -> bool>() -> fn(&mut [T], &T, &mut F) -> usize {
     const MAX_BRANCHLESS_PARTITION_SIZE: usize = 96;
-    if mem::size_of::<T>() <= MAX_BRANCHLESS_PARTITION_SIZE {
+    if size_of::<T>() <= MAX_BRANCHLESS_PARTITION_SIZE {
         // Specialize for types that are relatively cheap to copy, where branchless optimizations
         // have large leverage e.g. `u64` and `String`.
         cfg_if! {
@@ -304,7 +306,7 @@ where
 
         // Manual unrolling that works well on x86, Arm and with opt-level=s without murdering
         // compile-times. Leaving this to the compiler yields ok to bad results.
-        let unroll_len = const { if mem::size_of::<T>() <= 16 { 2 } else { 1 } };
+        let unroll_len = const { if size_of::<T>() <= 16 { 2 } else { 1 } };
 
         let unroll_end = v_base.add(len - (unroll_len - 1));
         while state.right < unroll_end {
diff --git a/library/core/src/str/count.rs b/library/core/src/str/count.rs
index b5d7aaf05d4..452403b23de 100644
--- a/library/core/src/str/count.rs
+++ b/library/core/src/str/count.rs
@@ -20,7 +20,7 @@
 
 use core::intrinsics::unlikely;
 
-const USIZE_SIZE: usize = core::mem::size_of::<usize>();
+const USIZE_SIZE: usize = size_of::<usize>();
 const UNROLL_INNER: usize = 4;
 
 #[inline]
diff --git a/library/core/src/str/validations.rs b/library/core/src/str/validations.rs
index 0f724dd9613..8174e4ff97d 100644
--- a/library/core/src/str/validations.rs
+++ b/library/core/src/str/validations.rs
@@ -2,7 +2,6 @@
 
 use super::Utf8Error;
 use crate::intrinsics::const_eval_select;
-use crate::mem;
 
 /// Returns the initial codepoint accumulator for the first byte.
 /// The first byte is special, only want bottom 5 bits for width 2, 4 bits
@@ -128,7 +127,7 @@ pub(super) const fn run_utf8_validation(v: &[u8]) -> Result<(), Utf8Error> {
     let mut index = 0;
     let len = v.len();
 
-    const USIZE_BYTES: usize = mem::size_of::<usize>();
+    const USIZE_BYTES: usize = size_of::<usize>();
 
     let ascii_block_size = 2 * USIZE_BYTES;
     let blocks_end = if len >= ascii_block_size { len - ascii_block_size + 1 } else { 0 };
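
Aside (illustrative, not from the patch): `blocks_end` above is the first index from which a full two-word ASCII block can no longer be read. The concrete numbers below assume a 64-bit `usize`.

    fn main() {
        const USIZE_BYTES: usize = size_of::<usize>();
        let ascii_block_size = 2 * USIZE_BYTES; // 16 on 64-bit
        let len = 20usize;
        let blocks_end = if len >= ascii_block_size { len - ascii_block_size + 1 } else { 0 };
        assert_eq!(blocks_end, 5);
        // Any block start strictly below `blocks_end` leaves a full block readable.
        assert!(blocks_end - 1 + ascii_block_size <= len);
    }
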
diff --git a/library/core/src/sync/atomic.rs b/library/core/src/sync/atomic.rs
index 73180bde54a..bac92ef94e7 100644
--- a/library/core/src/sync/atomic.rs
+++ b/library/core/src/sync/atomic.rs
@@ -2033,7 +2033,7 @@ impl<T> AtomicPtr<T> {
     #[unstable(feature = "strict_provenance_atomic_ptr", issue = "99108")]
     #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
     pub fn fetch_ptr_add(&self, val: usize, order: Ordering) -> *mut T {
-        self.fetch_byte_add(val.wrapping_mul(core::mem::size_of::<T>()), order)
+        self.fetch_byte_add(val.wrapping_mul(size_of::<T>()), order)
     }
 
     /// Offsets the pointer's address by subtracting `val` (in units of `T`),
@@ -2078,7 +2078,7 @@ impl<T> AtomicPtr<T> {
     #[unstable(feature = "strict_provenance_atomic_ptr", issue = "99108")]
     #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
     pub fn fetch_ptr_sub(&self, val: usize, order: Ordering) -> *mut T {
-        self.fetch_byte_sub(val.wrapping_mul(core::mem::size_of::<T>()), order)
+        self.fetch_byte_sub(val.wrapping_mul(size_of::<T>()), order)
     }
 
     /// Offsets the pointer's address by adding `val` *bytes*, returning the
diff --git a/library/coretests/benches/ascii/is_ascii.rs b/library/coretests/benches/ascii/is_ascii.rs
index ced7084fb0e..a6c718409ee 100644
--- a/library/coretests/benches/ascii/is_ascii.rs
+++ b/library/coretests/benches/ascii/is_ascii.rs
@@ -95,7 +95,7 @@ benches! {
 // These are separate since it's easier to debug errors if they don't go through
 // macro expansion first.
 fn is_ascii_align_to(bytes: &[u8]) -> bool {
-    if bytes.len() < core::mem::size_of::<usize>() {
+    if bytes.len() < size_of::<usize>() {
         return bytes.iter().all(|b| b.is_ascii());
     }
     // SAFETY: transmuting a sequence of `u8` to `usize` is always fine
@@ -106,7 +106,7 @@ fn is_ascii_align_to(bytes: &[u8]) -> bool {
 }
 
 fn is_ascii_align_to_unrolled(bytes: &[u8]) -> bool {
-    if bytes.len() < core::mem::size_of::<usize>() {
+    if bytes.len() < size_of::<usize>() {
         return bytes.iter().all(|b| b.is_ascii());
     }
     // SAFETY: transmuting a sequence of `u8` to `[usize; 2]` is always fine
@@ -118,6 +118,6 @@ fn is_ascii_align_to_unrolled(bytes: &[u8]) -> bool {
 
 #[inline]
 fn contains_nonascii(v: usize) -> bool {
-    const NONASCII_MASK: usize = usize::from_ne_bytes([0x80; core::mem::size_of::<usize>()]);
+    const NONASCII_MASK: usize = usize::from_ne_bytes([0x80; size_of::<usize>()]);
     (NONASCII_MASK & v) != 0
 }
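
Aside (illustrative, not from the patch): how a mask of `0x80` repeated `size_of::<usize>()` times flags any non-ASCII byte in a word. The concrete byte values below assume a 64-bit `usize`.

    fn main() {
        const NONASCII_MASK: usize = usize::from_ne_bytes([0x80; size_of::<usize>()]);

        // All-ASCII bytes keep the high bit of every byte clear...
        let ascii = usize::from_ne_bytes(*b"ABCDEFGH");
        assert_eq!(NONASCII_MASK & ascii, 0);

        // ...while any byte >= 0x80 trips the mask.
        let mixed = usize::from_ne_bytes([b'A', b'B', 0xC3, b'D', b'E', b'F', b'G', b'H']);
        assert_ne!(NONASCII_MASK & mixed, 0);
    }
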
diff --git a/library/coretests/benches/iter.rs b/library/coretests/benches/iter.rs
index e14f26b7290..e49d152eb53 100644
--- a/library/coretests/benches/iter.rs
+++ b/library/coretests/benches/iter.rs
@@ -1,6 +1,5 @@
 use core::borrow::Borrow;
 use core::iter::*;
-use core::mem;
 use core::num::Wrapping;
 use core::ops::Range;
 
@@ -477,7 +476,7 @@ fn bench_next_chunk_copied(b: &mut Bencher) {
         let mut iter = black_box(&v).iter().copied();
         let mut acc = Wrapping(0);
         // This uses a while-let loop to side-step the TRA specialization in ArrayChunks
-        while let Ok(chunk) = iter.next_chunk::<{ mem::size_of::<u64>() }>() {
+        while let Ok(chunk) = iter.next_chunk::<{ size_of::<u64>() }>() {
             let d = u64::from_ne_bytes(chunk);
             acc += Wrapping(d.rotate_left(7).wrapping_add(1));
         }
@@ -496,7 +495,7 @@ fn bench_next_chunk_trusted_random_access(b: &mut Bencher) {
             .iter()
             // this shows that we're not relying on the slice::Iter specialization in Copied
             .map(|b| *b.borrow())
-            .array_chunks::<{ mem::size_of::<u64>() }>()
+            .array_chunks::<{ size_of::<u64>() }>()
             .map(|ary| {
                 let d = u64::from_ne_bytes(ary);
                 Wrapping(d.rotate_left(7).wrapping_add(1))
diff --git a/library/coretests/tests/alloc.rs b/library/coretests/tests/alloc.rs
index b88f1821cd7..72fdf82c1f8 100644
--- a/library/coretests/tests/alloc.rs
+++ b/library/coretests/tests/alloc.rs
@@ -1,5 +1,4 @@
 use core::alloc::Layout;
-use core::mem::size_of;
 use core::ptr::{self, NonNull};
 
 #[test]
diff --git a/library/coretests/tests/atomic.rs b/library/coretests/tests/atomic.rs
index 0ffba538b20..e0c0fe4790c 100644
--- a/library/coretests/tests/atomic.rs
+++ b/library/coretests/tests/atomic.rs
@@ -250,8 +250,6 @@ fn atomic_access_bool() {
 
 #[test]
 fn atomic_alignment() {
-    use std::mem::{align_of, size_of};
-
     #[cfg(target_has_atomic = "8")]
     assert_eq!(align_of::<AtomicBool>(), size_of::<AtomicBool>());
     #[cfg(target_has_atomic = "ptr")]
diff --git a/library/coretests/tests/hash/sip.rs b/library/coretests/tests/hash/sip.rs
index f79954f916b..6add1a33cb9 100644
--- a/library/coretests/tests/hash/sip.rs
+++ b/library/coretests/tests/hash/sip.rs
@@ -1,7 +1,7 @@
 #![allow(deprecated)]
 
 use core::hash::{Hash, Hasher, SipHasher, SipHasher13};
-use core::{mem, slice};
+use core::slice;
 
 // Hash just the bytes of the slice, without length prefix
 struct Bytes<'a>(&'a [u8]);
@@ -314,7 +314,7 @@ fn test_write_short_works() {
     h1.write_u8(0x01u8);
     let mut h2 = SipHasher::new();
     h2.write(unsafe {
-        slice::from_raw_parts(&test_usize as *const _ as *const u8, mem::size_of::<usize>())
+        slice::from_raw_parts(&test_usize as *const _ as *const u8, size_of::<usize>())
     });
     h2.write(b"bytes");
     h2.write(b"string");
diff --git a/library/coretests/tests/nonzero.rs b/library/coretests/tests/nonzero.rs
index bdc5701d9fd..00232c9b706 100644
--- a/library/coretests/tests/nonzero.rs
+++ b/library/coretests/tests/nonzero.rs
@@ -1,6 +1,5 @@
 use core::num::{IntErrorKind, NonZero};
 use core::option::Option::None;
-use std::mem::size_of;
 
 #[test]
 fn test_create_nonzero_instance() {
diff --git a/library/coretests/tests/ptr.rs b/library/coretests/tests/ptr.rs
index c5fd7f01410..6091926084a 100644
--- a/library/coretests/tests/ptr.rs
+++ b/library/coretests/tests/ptr.rs
@@ -1,6 +1,6 @@
 use core::cell::RefCell;
 use core::marker::Freeze;
-use core::mem::{self, MaybeUninit};
+use core::mem::MaybeUninit;
 use core::num::NonZero;
 use core::ptr;
 use core::ptr::*;
@@ -388,7 +388,7 @@ fn align_offset_various_strides() {
         let mut expected = usize::MAX;
         // Naive but definitely correct way to find the *first* aligned element of stride::<T>.
         for el in 0..align {
-            if (numptr + el * ::std::mem::size_of::<T>()) % align == 0 {
+            if (numptr + el * size_of::<T>()) % align == 0 {
                 expected = el;
                 break;
             }
@@ -398,7 +398,7 @@ fn align_offset_various_strides() {
             eprintln!(
                 "aligning {:p} (with stride of {}) to {}, expected {}, got {}",
                 ptr,
-                ::std::mem::size_of::<T>(),
+                size_of::<T>(),
                 align,
                 expected,
                 got
@@ -605,9 +605,9 @@ fn dyn_metadata() {
     let meta = metadata(trait_object);
 
     assert_eq!(meta.size_of(), 64);
-    assert_eq!(meta.size_of(), std::mem::size_of::<Something>());
+    assert_eq!(meta.size_of(), size_of::<Something>());
     assert_eq!(meta.align_of(), 32);
-    assert_eq!(meta.align_of(), std::mem::align_of::<Something>());
+    assert_eq!(meta.align_of(), align_of::<Something>());
     assert_eq!(meta.layout(), std::alloc::Layout::new::<Something>());
 
     assert!(format!("{meta:?}").starts_with("DynMetadata(0x"));
@@ -781,7 +781,7 @@ fn nonnull_tagged_pointer_with_provenance() {
 
     impl<T> TaggedPointer<T> {
         /// The ABI-required minimum alignment of the `P` type.
-        pub const ALIGNMENT: usize = core::mem::align_of::<T>();
+        pub const ALIGNMENT: usize = align_of::<T>();
         /// A mask for data-carrying bits of the address.
         pub const DATA_MASK: usize = !Self::ADDRESS_MASK;
         /// Number of available bits of storage in the address.
@@ -865,7 +865,7 @@ fn test_const_copy_ptr() {
             ptr::copy(
                 &ptr1 as *const _ as *const MaybeUninit<u8>,
                 &mut ptr2 as *mut _ as *mut MaybeUninit<u8>,
-                mem::size_of::<&i32>(),
+                size_of::<&i32>(),
             );
         }
 
@@ -883,7 +883,7 @@ fn test_const_copy_ptr() {
             ptr::copy_nonoverlapping(
                 &ptr1 as *const _ as *const MaybeUninit<u8>,
                 &mut ptr2 as *mut _ as *mut MaybeUninit<u8>,
-                mem::size_of::<&i32>(),
+                size_of::<&i32>(),
             );
         }
 
@@ -928,7 +928,7 @@ fn test_const_swap_ptr() {
         let mut s2 = A(S { ptr: &666, f1: 0, f2: [0; 3] });
 
         // Swap ptr1 and ptr2, as an array.
-        type T = [u8; mem::size_of::<A>()];
+        type T = [u8; size_of::<A>()];
         unsafe {
             ptr::swap(ptr::from_mut(&mut s1).cast::<T>(), ptr::from_mut(&mut s2).cast::<T>());
         }
diff --git a/library/coretests/tests/slice.rs b/library/coretests/tests/slice.rs
index fe356dcc43c..d17e681480c 100644
--- a/library/coretests/tests/slice.rs
+++ b/library/coretests/tests/slice.rs
@@ -2057,15 +2057,13 @@ fn test_align_to_non_trivial() {
 
 #[test]
 fn test_align_to_empty_mid() {
-    use core::mem;
-
     // Make sure that we do not create empty unaligned slices for the mid part, even when the
     // overall slice is too short to contain an aligned address.
     let bytes = [1, 2, 3, 4, 5, 6, 7];
     type Chunk = u32;
     for offset in 0..4 {
         let (_, mid, _) = unsafe { bytes[offset..offset + 1].align_to::<Chunk>() };
-        assert_eq!(mid.as_ptr() as usize % mem::align_of::<Chunk>(), 0);
+        assert_eq!(mid.as_ptr() as usize % align_of::<Chunk>(), 0);
     }
 }
 
diff --git a/library/panic_unwind/src/emcc.rs b/library/panic_unwind/src/emcc.rs
index 1569c26c9de..bad795a019c 100644
--- a/library/panic_unwind/src/emcc.rs
+++ b/library/panic_unwind/src/emcc.rs
@@ -9,7 +9,7 @@
 use alloc::boxed::Box;
 use core::any::Any;
 use core::sync::atomic::{AtomicBool, Ordering};
-use core::{intrinsics, mem, ptr};
+use core::{intrinsics, ptr};
 
 use unwind as uw;
 
@@ -97,7 +97,7 @@ pub(crate) unsafe fn cleanup(ptr: *mut u8) -> Box<dyn Any + Send> {
 
 pub(crate) unsafe fn panic(data: Box<dyn Any + Send>) -> u32 {
     unsafe {
-        let exception = __cxa_allocate_exception(mem::size_of::<Exception>()) as *mut Exception;
+        let exception = __cxa_allocate_exception(size_of::<Exception>()) as *mut Exception;
         if exception.is_null() {
             return uw::_URC_FATAL_PHASE1_ERROR as u32;
         }
diff --git a/library/panic_unwind/src/seh.rs b/library/panic_unwind/src/seh.rs
index 3a95b940221..3794b56c089 100644
--- a/library/panic_unwind/src/seh.rs
+++ b/library/panic_unwind/src/seh.rs
@@ -49,7 +49,7 @@
 use alloc::boxed::Box;
 use core::any::Any;
 use core::ffi::{c_int, c_uint, c_void};
-use core::mem::{self, ManuallyDrop};
+use core::mem::ManuallyDrop;
 
 // NOTE(nbdd0121): The `canary` field is part of stable ABI.
 #[repr(C)]
@@ -225,7 +225,7 @@ static mut CATCHABLE_TYPE: _CatchableType = _CatchableType {
     properties: 0,
     pType: ptr_t::null(),
     thisDisplacement: _PMD { mdisp: 0, pdisp: -1, vdisp: 0 },
-    sizeOrOffset: mem::size_of::<Exception>() as c_int,
+    sizeOrOffset: size_of::<Exception>() as c_int,
     copyFunction: ptr_t::null(),
 };
 
diff --git a/library/proc_macro/src/bridge/selfless_reify.rs b/library/proc_macro/src/bridge/selfless_reify.rs
index 312a79152e2..b06434a5ffe 100644
--- a/library/proc_macro/src/bridge/selfless_reify.rs
+++ b/library/proc_macro/src/bridge/selfless_reify.rs
@@ -50,7 +50,7 @@ macro_rules! define_reify_functions {
         >(f: F) -> $(extern $abi)? fn($($arg_ty),*) -> $ret_ty {
             // FIXME(eddyb) describe the `F` type (e.g. via `type_name::<F>`) once panic
             // formatting becomes possible in `const fn`.
-            assert!(mem::size_of::<F>() == 0, "selfless_reify: closure must be zero-sized");
+            assert!(size_of::<F>() == 0, "selfless_reify: closure must be zero-sized");
 
             $(extern $abi)? fn wrapper<
                 $($($param,)*)?
diff --git a/library/std/build.rs b/library/std/build.rs
index 9df35ce3cc8..cedfd7406a1 100644
--- a/library/std/build.rs
+++ b/library/std/build.rs
@@ -113,7 +113,6 @@ fn main() {
         // Infinite recursion <https://github.com/llvm/llvm-project/issues/97981>
         ("csky", _) => false,
         ("hexagon", _) => false,
-        ("loongarch64", _) => false,
         ("powerpc" | "powerpc64", _) => false,
         ("sparc" | "sparc64", _) => false,
         ("wasm32" | "wasm64", _) => false,
diff --git a/library/std/src/fs/tests.rs b/library/std/src/fs/tests.rs
index 38dcd816d26..6dd18e4f4c8 100644
--- a/library/std/src/fs/tests.rs
+++ b/library/std/src/fs/tests.rs
@@ -1878,7 +1878,7 @@ fn windows_unix_socket_exists() {
         let bytes = socket_path.as_os_str().as_encoded_bytes();
         let bytes = core::slice::from_raw_parts(bytes.as_ptr().cast::<i8>(), bytes.len());
         addr.sun_path[..bytes.len()].copy_from_slice(bytes);
-        let len = mem::size_of_val(&addr) as i32;
+        let len = size_of_val(&addr) as i32;
         let result = c::bind(socket, (&raw const addr).cast::<c::SOCKADDR>(), len);
         c::closesocket(socket);
         assert_eq!(result, 0);
diff --git a/library/std/src/io/error/tests.rs b/library/std/src/io/error/tests.rs
index edac6563478..3e4029768eb 100644
--- a/library/std/src/io/error/tests.rs
+++ b/library/std/src/io/error/tests.rs
@@ -1,6 +1,5 @@
 use super::{Custom, Error, ErrorData, ErrorKind, Repr, SimpleMessage, const_error};
 use crate::assert_matches::assert_matches;
-use crate::mem::size_of;
 use crate::sys::decode_error_kind;
 use crate::sys::os::error_string;
 use crate::{error, fmt};
diff --git a/library/std/src/os/fd/tests.rs b/library/std/src/os/fd/tests.rs
index b39863644f1..7e9cf038e9a 100644
--- a/library/std/src/os/fd/tests.rs
+++ b/library/std/src/os/fd/tests.rs
@@ -36,7 +36,6 @@ fn test_fd() {
 #[cfg(any(unix, target_os = "wasi"))]
 #[test]
 fn test_niche_optimizations() {
-    use crate::mem::size_of;
     #[cfg(unix)]
     use crate::os::unix::io::{BorrowedFd, FromRawFd, IntoRawFd, OwnedFd, RawFd};
     #[cfg(target_os = "wasi")]
diff --git a/library/std/src/os/unix/io/tests.rs b/library/std/src/os/unix/io/tests.rs
index 84d2a7a1a91..fc147730578 100644
--- a/library/std/src/os/unix/io/tests.rs
+++ b/library/std/src/os/unix/io/tests.rs
@@ -1,4 +1,3 @@
-use crate::mem::size_of;
 use crate::os::unix::io::RawFd;
 
 #[test]
diff --git a/library/std/src/os/unix/net/addr.rs b/library/std/src/os/unix/net/addr.rs
index 56789f235fd..cb1246db310 100644
--- a/library/std/src/os/unix/net/addr.rs
+++ b/library/std/src/os/unix/net/addr.rs
@@ -94,7 +94,7 @@ impl SocketAddr {
     {
         unsafe {
             let mut addr: libc::sockaddr_un = mem::zeroed();
-            let mut len = mem::size_of::<libc::sockaddr_un>() as libc::socklen_t;
+            let mut len = size_of::<libc::sockaddr_un>() as libc::socklen_t;
             cvt(f((&raw mut addr) as *mut _, &mut len))?;
             SocketAddr::from_parts(addr, len)
         }
diff --git a/library/std/src/os/unix/net/listener.rs b/library/std/src/os/unix/net/listener.rs
index be236317d04..27428c9eb28 100644
--- a/library/std/src/os/unix/net/listener.rs
+++ b/library/std/src/os/unix/net/listener.rs
@@ -177,7 +177,7 @@ impl UnixListener {
     #[stable(feature = "unix_socket", since = "1.10.0")]
     pub fn accept(&self) -> io::Result<(UnixStream, SocketAddr)> {
         let mut storage: libc::sockaddr_un = unsafe { mem::zeroed() };
-        let mut len = mem::size_of_val(&storage) as libc::socklen_t;
+        let mut len = size_of_val(&storage) as libc::socklen_t;
         let sock = self.0.accept((&raw mut storage) as *mut _, &mut len)?;
         let addr = SocketAddr::from_parts(storage, len)?;
         Ok((UnixStream(sock), addr))
diff --git a/library/std/src/os/unix/net/ucred.rs b/library/std/src/os/unix/net/ucred.rs
index e1014a4f296..2dd7d409e48 100644
--- a/library/std/src/os/unix/net/ucred.rs
+++ b/library/std/src/os/unix/net/ucred.rs
@@ -41,15 +41,15 @@ mod impl_linux {
     use libc::{SO_PEERCRED, SOL_SOCKET, c_void, getsockopt, socklen_t, ucred};
 
     use super::UCred;
+    use crate::io;
     use crate::os::unix::io::AsRawFd;
     use crate::os::unix::net::UnixStream;
-    use crate::{io, mem};
 
     pub fn peer_cred(socket: &UnixStream) -> io::Result<UCred> {
-        let ucred_size = mem::size_of::<ucred>();
+        let ucred_size = size_of::<ucred>();
 
         // Trivial sanity checks.
-        assert!(mem::size_of::<u32>() <= mem::size_of::<usize>());
+        assert!(size_of::<u32>() <= size_of::<usize>());
         assert!(ucred_size <= u32::MAX as usize);
 
         let mut ucred_size = ucred_size as socklen_t;
@@ -64,7 +64,7 @@ mod impl_linux {
                 &mut ucred_size,
             );
 
-            if ret == 0 && ucred_size as usize == mem::size_of::<ucred>() {
+            if ret == 0 && ucred_size as usize == size_of::<ucred>() {
                 Ok(UCred { uid: ucred.uid, gid: ucred.gid, pid: Some(ucred.pid) })
             } else {
                 Err(io::Error::last_os_error())
@@ -101,9 +101,9 @@ mod impl_apple {
     use libc::{LOCAL_PEERPID, SOL_LOCAL, c_void, getpeereid, getsockopt, pid_t, socklen_t};
 
     use super::UCred;
+    use crate::io;
     use crate::os::unix::io::AsRawFd;
     use crate::os::unix::net::UnixStream;
-    use crate::{io, mem};
 
     pub fn peer_cred(socket: &UnixStream) -> io::Result<UCred> {
         let mut cred = UCred { uid: 1, gid: 1, pid: None };
@@ -115,7 +115,7 @@ mod impl_apple {
             }
 
             let mut pid: pid_t = 1;
-            let mut pid_size = mem::size_of::<pid_t>() as socklen_t;
+            let mut pid_size = size_of::<pid_t>() as socklen_t;
 
             let ret = getsockopt(
                 socket.as_raw_fd(),
@@ -125,7 +125,7 @@ mod impl_apple {
                 &mut pid_size,
             );
 
-            if ret == 0 && pid_size as usize == mem::size_of::<pid_t>() {
+            if ret == 0 && pid_size as usize == size_of::<pid_t>() {
                 cred.pid = Some(pid);
                 Ok(cred)
             } else {
diff --git a/library/std/src/os/wasi/io/tests.rs b/library/std/src/os/wasi/io/tests.rs
index 418274752b0..c5c6a19a6c8 100644
--- a/library/std/src/os/wasi/io/tests.rs
+++ b/library/std/src/os/wasi/io/tests.rs
@@ -1,4 +1,3 @@
-use crate::mem::size_of;
 use crate::os::wasi::io::RawFd;
 
 #[test]
diff --git a/library/std/src/os/windows/io/tests.rs b/library/std/src/os/windows/io/tests.rs
index 41734e52e8c..029b6f5cd3d 100644
--- a/library/std/src/os/windows/io/tests.rs
+++ b/library/std/src/os/windows/io/tests.rs
@@ -1,6 +1,5 @@
 #[test]
 fn test_niche_optimizations_socket() {
-    use crate::mem::size_of;
     use crate::os::windows::io::{
         BorrowedSocket, FromRawSocket, IntoRawSocket, OwnedSocket, RawSocket,
     };
diff --git a/library/std/src/os/windows/process.rs b/library/std/src/os/windows/process.rs
index 201274cf03a..fa65a7c51bf 100644
--- a/library/std/src/os/windows/process.rs
+++ b/library/std/src/os/windows/process.rs
@@ -500,11 +500,7 @@ impl<'a> ProcThreadAttributeListBuilder<'a> {
     /// [1]: <https://learn.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-updateprocthreadattribute#parameters>
     pub fn attribute<T>(self, attribute: usize, value: &'a T) -> Self {
         unsafe {
-            self.raw_attribute(
-                attribute,
-                ptr::addr_of!(*value).cast::<c_void>(),
-                crate::mem::size_of::<T>(),
-            )
+            self.raw_attribute(attribute, ptr::addr_of!(*value).cast::<c_void>(), size_of::<T>())
         }
     }
 
@@ -574,7 +570,7 @@ impl<'a> ProcThreadAttributeListBuilder<'a> {
     ///         .raw_attribute(
     ///             PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE,
     ///             h_pc as *const c_void,
-    ///             std::mem::size_of::<isize>(),
+    ///             size_of::<isize>(),
     ///         )
     ///         .finish()?
     /// };
diff --git a/library/std/src/os/xous/ffi.rs b/library/std/src/os/xous/ffi.rs
index 1db314e9dda..9394f0a0496 100644
--- a/library/std/src/os/xous/ffi.rs
+++ b/library/std/src/os/xous/ffi.rs
@@ -368,7 +368,7 @@ pub(crate) unsafe fn map_memory<T>(
     let mut a0 = Syscall::MapMemory as usize;
     let mut a1 = phys.map(|p| p.as_ptr() as usize).unwrap_or_default();
     let mut a2 = virt.map(|p| p.as_ptr() as usize).unwrap_or_default();
-    let a3 = count * core::mem::size_of::<T>();
+    let a3 = count * size_of::<T>();
     let a4 = flags.bits();
     let a5 = 0;
     let a6 = 0;
@@ -392,7 +392,7 @@ pub(crate) unsafe fn map_memory<T>(
 
     if result == SyscallResult::MemoryRange as usize {
         let start = core::ptr::with_exposed_provenance_mut::<T>(a1);
-        let len = a2 / core::mem::size_of::<T>();
+        let len = a2 / size_of::<T>();
         let end = unsafe { start.add(len) };
         Ok(unsafe { core::slice::from_raw_parts_mut(start, len) })
     } else if result == SyscallResult::Error as usize {
@@ -409,7 +409,7 @@ pub(crate) unsafe fn map_memory<T>(
 pub(crate) unsafe fn unmap_memory<T>(range: *mut [T]) -> Result<(), Error> {
     let mut a0 = Syscall::UnmapMemory as usize;
     let mut a1 = range.as_mut_ptr() as usize;
-    let a2 = range.len() * core::mem::size_of::<T>();
+    let a2 = range.len() * size_of::<T>();
     let a3 = 0;
     let a4 = 0;
     let a5 = 0;
@@ -455,7 +455,7 @@ pub(crate) unsafe fn update_memory_flags<T>(
 ) -> Result<(), Error> {
     let mut a0 = Syscall::UpdateMemoryFlags as usize;
     let mut a1 = range.as_mut_ptr() as usize;
-    let a2 = range.len() * core::mem::size_of::<T>();
+    let a2 = range.len() * size_of::<T>();
     let a3 = new_flags.bits();
     let a4 = 0; // Process ID is currently None
     let a5 = 0;
diff --git a/library/std/src/os/xous/services/log.rs b/library/std/src/os/xous/services/log.rs
index 1661011ca64..095d4f4a3e7 100644
--- a/library/std/src/os/xous/services/log.rs
+++ b/library/std/src/os/xous/services/log.rs
@@ -7,8 +7,8 @@ use crate::os::xous::ffi::Connection;
 /// `group_or_null([1,2,3,4,5,6,7,8], 1)` on a 32-bit system will return a
 /// `usize` with 5678 packed into it.
 fn group_or_null(data: &[u8], offset: usize) -> usize {
-    let start = offset * core::mem::size_of::<usize>();
-    let mut out_array = [0u8; core::mem::size_of::<usize>()];
+    let start = offset * size_of::<usize>();
+    let mut out_array = [0u8; size_of::<usize>()];
     if start < data.len() {
         for (dest, src) in out_array.iter_mut().zip(&data[start..]) {
             *dest = *src;
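
Aside (illustrative, not from the patch): a self-contained approximation of the byte-grouping idea described in the doc comment above. The final byte-to-`usize` conversion (little-endian here) is an assumption for the sketch, and the concrete values assume a 64-bit target.

    // Not the library's function; the name and packing order are assumptions.
    fn group_or_zero(data: &[u8], offset: usize) -> usize {
        let start = offset * size_of::<usize>();
        let mut out_array = [0u8; size_of::<usize>()];
        if start < data.len() {
            for (dest, src) in out_array.iter_mut().zip(&data[start..]) {
                *dest = *src;
            }
        }
        usize::from_le_bytes(out_array)
    }

    fn main() {
        let data = [1u8, 2, 3, 4, 5, 6, 7, 8, 9];
        // Offset 1 starts at byte 8 (on 64-bit) and zero-pads past the end.
        assert_eq!(group_or_zero(&data, 1), 9);
    }
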
diff --git a/library/std/src/sys/alloc/unix.rs b/library/std/src/sys/alloc/unix.rs
index 1af9d766290..a7ac4117ec9 100644
--- a/library/std/src/sys/alloc/unix.rs
+++ b/library/std/src/sys/alloc/unix.rs
@@ -81,7 +81,7 @@ cfg_if::cfg_if! {
             // while others require the alignment to be at least the pointer size (Illumos, macOS).
             // posix_memalign only has one, clear requirement: that the alignment be a multiple of
             // `sizeof(void*)`. Since these are all powers of 2, we can just use max.
-            let align = layout.align().max(crate::mem::size_of::<usize>());
+            let align = layout.align().max(size_of::<usize>());
             let ret = unsafe { libc::posix_memalign(&mut out, align, layout.size()) };
             if ret != 0 { ptr::null_mut() } else { out as *mut u8 }
         }
diff --git a/library/std/src/sys/alloc/windows/tests.rs b/library/std/src/sys/alloc/windows/tests.rs
index 674a3e1d92d..1d5614528b1 100644
--- a/library/std/src/sys/alloc/windows/tests.rs
+++ b/library/std/src/sys/alloc/windows/tests.rs
@@ -1,9 +1,8 @@
 use super::{Header, MIN_ALIGN};
-use crate::mem;
 
 #[test]
 fn alloc_header() {
     // Header must fit in the padding before an aligned pointer
-    assert!(mem::size_of::<Header>() <= MIN_ALIGN);
-    assert!(mem::align_of::<Header>() <= MIN_ALIGN);
+    assert!(size_of::<Header>() <= MIN_ALIGN);
+    assert!(align_of::<Header>() <= MIN_ALIGN);
 }
diff --git a/library/std/src/sys/io/is_terminal/windows.rs b/library/std/src/sys/io/is_terminal/windows.rs
index 3ec18fb47b9..b0c718d71f9 100644
--- a/library/std/src/sys/io/is_terminal/windows.rs
+++ b/library/std/src/sys/io/is_terminal/windows.rs
@@ -1,5 +1,4 @@
 use crate::ffi::c_void;
-use crate::mem::size_of;
 use crate::os::windows::io::{AsHandle, AsRawHandle, BorrowedHandle};
 use crate::sys::c;
 
diff --git a/library/std/src/sys/net/connection/socket.rs b/library/std/src/sys/net/connection/socket.rs
index ddd74b42615..e154cf039ca 100644
--- a/library/std/src/sys/net/connection/socket.rs
+++ b/library/std/src/sys/net/connection/socket.rs
@@ -154,11 +154,11 @@ fn socket_addr_to_c(addr: &SocketAddr) -> (SocketAddrCRepr, c::socklen_t) {
     match addr {
         SocketAddr::V4(a) => {
             let sockaddr = SocketAddrCRepr { v4: socket_addr_v4_to_c(a) };
-            (sockaddr, mem::size_of::<c::sockaddr_in>() as c::socklen_t)
+            (sockaddr, size_of::<c::sockaddr_in>() as c::socklen_t)
         }
         SocketAddr::V6(a) => {
             let sockaddr = SocketAddrCRepr { v6: socket_addr_v6_to_c(a) };
-            (sockaddr, mem::size_of::<c::sockaddr_in6>() as c::socklen_t)
+            (sockaddr, size_of::<c::sockaddr_in6>() as c::socklen_t)
         }
     }
 }
@@ -169,13 +169,13 @@ unsafe fn socket_addr_from_c(
 ) -> io::Result<SocketAddr> {
     match (*storage).ss_family as c_int {
         c::AF_INET => {
-            assert!(len >= mem::size_of::<c::sockaddr_in>());
+            assert!(len >= size_of::<c::sockaddr_in>());
             Ok(SocketAddr::V4(socket_addr_v4_from_c(unsafe {
                 *(storage as *const _ as *const c::sockaddr_in)
             })))
         }
         c::AF_INET6 => {
-            assert!(len >= mem::size_of::<c::sockaddr_in6>());
+            assert!(len >= size_of::<c::sockaddr_in6>());
             Ok(SocketAddr::V6(socket_addr_v6_from_c(unsafe {
                 *(storage as *const _ as *const c::sockaddr_in6)
             })))
@@ -200,7 +200,7 @@ pub fn setsockopt<T>(
             level,
             option_name,
             (&raw const option_value) as *const _,
-            mem::size_of::<T>() as c::socklen_t,
+            size_of::<T>() as c::socklen_t,
         ))?;
         Ok(())
     }
@@ -209,7 +209,7 @@ pub fn setsockopt<T>(
 pub fn getsockopt<T: Copy>(sock: &Socket, level: c_int, option_name: c_int) -> io::Result<T> {
     unsafe {
         let mut option_value: T = mem::zeroed();
-        let mut option_len = mem::size_of::<T>() as c::socklen_t;
+        let mut option_len = size_of::<T>() as c::socklen_t;
         cvt(c::getsockopt(
             sock.as_raw(),
             level,
@@ -227,7 +227,7 @@ where
 {
     unsafe {
         let mut storage: c::sockaddr_storage = mem::zeroed();
-        let mut len = mem::size_of_val(&storage) as c::socklen_t;
+        let mut len = size_of_val(&storage) as c::socklen_t;
         cvt(f((&raw mut storage) as *mut _, &mut len))?;
         socket_addr_from_c(&storage, len as usize)
     }
@@ -561,7 +561,7 @@ impl TcpListener {
         // so we don't need to zero it here.
         // reference: https://linux.die.net/man/2/accept4
         let mut storage: mem::MaybeUninit<c::sockaddr_storage> = mem::MaybeUninit::uninit();
-        let mut len = mem::size_of_val(&storage) as c::socklen_t;
+        let mut len = size_of_val(&storage) as c::socklen_t;
         let sock = self.inner.accept(storage.as_mut_ptr() as *mut _, &mut len)?;
         let addr = unsafe { socket_addr_from_c(storage.as_ptr(), len as usize)? };
         Ok((TcpStream { inner: sock }, addr))
diff --git a/library/std/src/sys/net/connection/socket/hermit.rs b/library/std/src/sys/net/connection/socket/hermit.rs
index e393342ced9..f49821657d9 100644
--- a/library/std/src/sys/net/connection/socket/hermit.rs
+++ b/library/std/src/sys/net/connection/socket/hermit.rs
@@ -183,7 +183,7 @@ impl Socket {
 
     fn recv_from_with_flags(&self, buf: &mut [u8], flags: i32) -> io::Result<(usize, SocketAddr)> {
         let mut storage: netc::sockaddr_storage = unsafe { mem::zeroed() };
-        let mut addrlen = mem::size_of_val(&storage) as netc::socklen_t;
+        let mut addrlen = size_of_val(&storage) as netc::socklen_t;
 
         let n = cvt(unsafe {
             netc::recvfrom(
diff --git a/library/std/src/sys/net/connection/socket/solid.rs b/library/std/src/sys/net/connection/socket/solid.rs
index 906bef267b6..94bb605c100 100644
--- a/library/std/src/sys/net/connection/socket/solid.rs
+++ b/library/std/src/sys/net/connection/socket/solid.rs
@@ -244,7 +244,7 @@ impl Socket {
         flags: c_int,
     ) -> io::Result<(usize, SocketAddr)> {
         let mut storage: netc::sockaddr_storage = unsafe { mem::zeroed() };
-        let mut addrlen = mem::size_of_val(&storage) as netc::socklen_t;
+        let mut addrlen = size_of_val(&storage) as netc::socklen_t;
 
         let n = cvt(unsafe {
             netc::recvfrom(
diff --git a/library/std/src/sys/net/connection/socket/unix.rs b/library/std/src/sys/net/connection/socket/unix.rs
index 29fb47ddca3..e633cf772c5 100644
--- a/library/std/src/sys/net/connection/socket/unix.rs
+++ b/library/std/src/sys/net/connection/socket/unix.rs
@@ -326,7 +326,7 @@ impl Socket {
         // so we don't need to zero it here.
         // reference: https://linux.die.net/man/2/recvfrom
         let mut storage: mem::MaybeUninit<libc::sockaddr_storage> = mem::MaybeUninit::uninit();
-        let mut addrlen = mem::size_of_val(&storage) as libc::socklen_t;
+        let mut addrlen = size_of_val(&storage) as libc::socklen_t;
 
         let n = cvt(unsafe {
             libc::recvfrom(
diff --git a/library/std/src/sys/net/connection/socket/wasip2.rs b/library/std/src/sys/net/connection/socket/wasip2.rs
index c5034e73dd7..73c25831872 100644
--- a/library/std/src/sys/net/connection/socket/wasip2.rs
+++ b/library/std/src/sys/net/connection/socket/wasip2.rs
@@ -211,7 +211,7 @@ impl Socket {
         flags: c_int,
     ) -> io::Result<(usize, SocketAddr)> {
         let mut storage: netc::sockaddr_storage = unsafe { mem::zeroed() };
-        let mut addrlen = mem::size_of_val(&storage) as netc::socklen_t;
+        let mut addrlen = size_of_val(&storage) as netc::socklen_t;
 
         let n = cvt(unsafe {
             netc::recvfrom(
diff --git a/library/std/src/sys/net/connection/socket/windows.rs b/library/std/src/sys/net/connection/socket/windows.rs
index 428f142dabe..ce975bb2289 100644
--- a/library/std/src/sys/net/connection/socket/windows.rs
+++ b/library/std/src/sys/net/connection/socket/windows.rs
@@ -381,7 +381,7 @@ impl Socket {
         flags: c_int,
     ) -> io::Result<(usize, SocketAddr)> {
         let mut storage = unsafe { mem::zeroed::<c::SOCKADDR_STORAGE>() };
-        let mut addrlen = mem::size_of_val(&storage) as netc::socklen_t;
+        let mut addrlen = size_of_val(&storage) as netc::socklen_t;
         let length = cmp::min(buf.len(), <wrlen_t>::MAX as usize) as wrlen_t;
 
         // On unix when a socket is shut down all further reads return 0, so we
@@ -514,13 +514,13 @@ impl Socket {
 
     // This is used by sys_common code to abstract over Windows and Unix.
     pub fn as_raw(&self) -> c::SOCKET {
-        debug_assert_eq!(mem::size_of::<c::SOCKET>(), mem::size_of::<RawSocket>());
-        debug_assert_eq!(mem::align_of::<c::SOCKET>(), mem::align_of::<RawSocket>());
+        debug_assert_eq!(size_of::<c::SOCKET>(), size_of::<RawSocket>());
+        debug_assert_eq!(align_of::<c::SOCKET>(), align_of::<RawSocket>());
         self.as_inner().as_raw_socket() as c::SOCKET
     }
     pub unsafe fn from_raw(raw: c::SOCKET) -> Self {
-        debug_assert_eq!(mem::size_of::<c::SOCKET>(), mem::size_of::<RawSocket>());
-        debug_assert_eq!(mem::align_of::<c::SOCKET>(), mem::align_of::<RawSocket>());
+        debug_assert_eq!(size_of::<c::SOCKET>(), size_of::<RawSocket>());
+        debug_assert_eq!(align_of::<c::SOCKET>(), align_of::<RawSocket>());
         unsafe { Self::from_raw_socket(raw as RawSocket) }
     }
 }
diff --git a/library/std/src/sys/net/connection/xous/udp.rs b/library/std/src/sys/net/connection/xous/udp.rs
index f35970bc321..c112c04ce94 100644
--- a/library/std/src/sys/net/connection/xous/udp.rs
+++ b/library/std/src/sys/net/connection/xous/udp.rs
@@ -244,7 +244,7 @@ impl UdpSocket {
         // let buf = unsafe {
         //     xous::MemoryRange::new(
         //         &mut tx_req as *mut SendData as usize,
-        //         core::mem::size_of::<SendData>(),
+        //         size_of::<SendData>(),
         //     )
         //     .unwrap()
         // };
diff --git a/library/std/src/sys/pal/itron/thread.rs b/library/std/src/sys/pal/itron/thread.rs
index 04095e1a7cf..d1481f827e1 100644
--- a/library/std/src/sys/pal/itron/thread.rs
+++ b/library/std/src/sys/pal/itron/thread.rs
@@ -80,7 +80,7 @@ const LIFECYCLE_EXITED_OR_FINISHED_OR_JOIN_FINALIZE: usize = usize::MAX;
 // there's no single value for `JOINING`
 
 // 64KiB for 32-bit ISAs, 128KiB for 64-bit ISAs.
-pub const DEFAULT_MIN_STACK_SIZE: usize = 0x4000 * crate::mem::size_of::<usize>();
+pub const DEFAULT_MIN_STACK_SIZE: usize = 0x4000 * size_of::<usize>();
 
 impl Thread {
     /// # Safety
diff --git a/library/std/src/sys/pal/sgx/abi/usercalls/alloc.rs b/library/std/src/sys/pal/sgx/abi/usercalls/alloc.rs
index 5069ab82ccc..301e3299c05 100644
--- a/library/std/src/sys/pal/sgx/abi/usercalls/alloc.rs
+++ b/library/std/src/sys/pal/sgx/abi/usercalls/alloc.rs
@@ -63,7 +63,7 @@ unsafe impl<T: UserSafeSized> UserSafeSized for [T; 2] {}
 /// A type that can be represented in memory as one or more `UserSafeSized`s.
 #[unstable(feature = "sgx_platform", issue = "56975")]
 pub unsafe trait UserSafe {
-    /// Equivalent to `mem::align_of::<Self>`.
+    /// Equivalent to `align_of::<Self>`.
     fn align_of() -> usize;
 
     /// Constructs a pointer to `Self` given a memory range in user space.
@@ -120,7 +120,7 @@ pub unsafe trait UserSafe {
         let is_aligned = |p: *const u8| -> bool { p.is_aligned_to(Self::align_of()) };
 
         assert!(is_aligned(ptr as *const u8));
-        assert!(is_user_range(ptr as _, mem::size_of_val(unsafe { &*ptr })));
+        assert!(is_user_range(ptr as _, size_of_val(unsafe { &*ptr })));
         assert!(!ptr.is_null());
     }
 }
@@ -128,11 +128,11 @@ pub unsafe trait UserSafe {
 #[unstable(feature = "sgx_platform", issue = "56975")]
 unsafe impl<T: UserSafeSized> UserSafe for T {
     fn align_of() -> usize {
-        mem::align_of::<T>()
+        align_of::<T>()
     }
 
     unsafe fn from_raw_sized_unchecked(ptr: *mut u8, size: usize) -> *mut Self {
-        assert_eq!(size, mem::size_of::<T>());
+        assert_eq!(size, size_of::<T>());
         ptr as _
     }
 }
@@ -140,7 +140,7 @@ unsafe impl<T: UserSafeSized> UserSafe for T {
 #[unstable(feature = "sgx_platform", issue = "56975")]
 unsafe impl<T: UserSafeSized> UserSafe for [T] {
     fn align_of() -> usize {
-        mem::align_of::<T>()
+        align_of::<T>()
     }
 
     /// # Safety
@@ -155,7 +155,7 @@ unsafe impl<T: UserSafeSized> UserSafe for [T] {
     ///
     /// * the element size is not a factor of the size
     unsafe fn from_raw_sized_unchecked(ptr: *mut u8, size: usize) -> *mut Self {
-        let elem_size = mem::size_of::<T>();
+        let elem_size = size_of::<T>();
         assert_eq!(size % elem_size, 0);
         let len = size / elem_size;
         // SAFETY: The caller must uphold the safety contract for `from_raw_sized_unchecked`
@@ -239,7 +239,7 @@ where
     /// Copies `val` into freshly allocated space in user memory.
     pub fn new_from_enclave(val: &T) -> Self {
         unsafe {
-            let mut user = Self::new_uninit_bytes(mem::size_of_val(val));
+            let mut user = Self::new_uninit_bytes(size_of_val(val));
             user.copy_from_enclave(val);
             user
         }
@@ -277,7 +277,7 @@ where
 {
     /// Allocates space for `T` in user memory.
     pub fn uninitialized() -> Self {
-        Self::new_uninit_bytes(mem::size_of::<T>())
+        Self::new_uninit_bytes(size_of::<T>())
     }
 }
 
@@ -288,7 +288,7 @@ where
 {
     /// Allocates space for a `[T]` of `n` elements in user memory.
     pub fn uninitialized(n: usize) -> Self {
-        Self::new_uninit_bytes(n * mem::size_of::<T>())
+        Self::new_uninit_bytes(n * size_of::<T>())
     }
 
     /// Creates an owned `User<[T]>` from a raw thin pointer and a slice length.
@@ -306,9 +306,7 @@ where
     /// * The pointed-to range does not fit in the address space
     /// * The pointed-to range is not in user memory
     pub unsafe fn from_raw_parts(ptr: *mut T, len: usize) -> Self {
-        User(unsafe {
-            NonNull::new_userref(<[T]>::from_raw_sized(ptr as _, len * mem::size_of::<T>()))
-        })
+        User(unsafe { NonNull::new_userref(<[T]>::from_raw_sized(ptr as _, len * size_of::<T>())) })
     }
 }
 
@@ -326,7 +324,7 @@ where
 // `<*const u8>::align_offset` aren't _guaranteed_ to compute the largest
 // possible middle region, and as such can't be used.
 fn u64_align_to_guaranteed(ptr: *const u8, mut len: usize) -> (usize, usize, usize) {
-    const QWORD_SIZE: usize = mem::size_of::<u64>();
+    const QWORD_SIZE: usize = size_of::<u64>();
 
     let offset = ptr as usize % QWORD_SIZE;
 
@@ -532,11 +530,11 @@ where
     /// the source. This can happen for dynamically-sized types such as slices.
     pub fn copy_from_enclave(&mut self, val: &T) {
         unsafe {
-            assert_eq!(mem::size_of_val(val), mem::size_of_val(&*self.0.get()));
+            assert_eq!(size_of_val(val), size_of_val(&*self.0.get()));
             copy_to_userspace(
                 val as *const T as *const u8,
                 self.0.get() as *mut T as *mut u8,
-                mem::size_of_val(val),
+                size_of_val(val),
             );
         }
     }
@@ -548,11 +546,11 @@ where
     /// the source. This can happen for dynamically-sized types such as slices.
     pub fn copy_to_enclave(&self, dest: &mut T) {
         unsafe {
-            assert_eq!(mem::size_of_val(dest), mem::size_of_val(&*self.0.get()));
+            assert_eq!(size_of_val(dest), size_of_val(&*self.0.get()));
             copy_from_userspace(
                 self.0.get() as *const T as *const u8,
                 dest as *mut T as *mut u8,
-                mem::size_of_val(dest),
+                size_of_val(dest),
             );
         }
     }
@@ -577,7 +575,7 @@ where
     pub fn to_enclave(&self) -> T {
         unsafe {
             let mut data = mem::MaybeUninit::uninit();
-            copy_from_userspace(self.0.get() as _, data.as_mut_ptr() as _, mem::size_of::<T>());
+            copy_from_userspace(self.0.get() as _, data.as_mut_ptr() as _, size_of::<T>());
             data.assume_init()
         }
     }
@@ -602,9 +600,7 @@ where
     /// * The pointed-to range is not in user memory
     pub unsafe fn from_raw_parts<'a>(ptr: *const T, len: usize) -> &'a Self {
         // SAFETY: The caller must uphold the safety contract for `from_raw_parts`.
-        unsafe {
-            &*(<[T]>::from_raw_sized(ptr as _, len * mem::size_of::<T>()).as_ptr() as *const Self)
-        }
+        unsafe { &*(<[T]>::from_raw_sized(ptr as _, len * size_of::<T>()).as_ptr() as *const Self) }
     }
 
     /// Creates a `&mut UserRef<[T]>` from a raw thin pointer and a slice length.
@@ -624,7 +620,7 @@ where
     pub unsafe fn from_raw_parts_mut<'a>(ptr: *mut T, len: usize) -> &'a mut Self {
         // SAFETY: The caller must uphold the safety contract for `from_raw_parts_mut`.
         unsafe {
-            &mut *(<[T]>::from_raw_sized(ptr as _, len * mem::size_of::<T>()).as_ptr() as *mut Self)
+            &mut *(<[T]>::from_raw_sized(ptr as _, len * size_of::<T>()).as_ptr() as *mut Self)
         }
     }
 
@@ -744,7 +740,7 @@ where
     fn drop(&mut self) {
         unsafe {
             let ptr = (*self.0.as_ptr()).0.get();
-            super::free(ptr as _, mem::size_of_val(&mut *ptr), T::align_of());
+            super::free(ptr as _, size_of_val(&mut *ptr), T::align_of());
         }
     }
 }
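
Aside (illustrative, not from the patch): `size_of_val`, used heavily in the hunks above, is also available from the prelude now. Unlike `size_of`, it measures a concrete value, so for slices the result depends on the runtime length.

    fn main() {
        let words = [0u32; 4];
        assert_eq!(size_of_val(&words), 16);     // `[u32; 4]`: 4 * 4 bytes
        assert_eq!(size_of_val(&words[..2]), 8); // a `&[u32]` of length 2: 2 * 4 bytes
        assert_eq!(size_of::<u32>(), 4);
    }
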
diff --git a/library/std/src/sys/pal/uefi/args.rs b/library/std/src/sys/pal/uefi/args.rs
index bdf6f5a0c1c..0c29caf2db6 100644
--- a/library/std/src/sys/pal/uefi/args.rs
+++ b/library/std/src/sys/pal/uefi/args.rs
@@ -4,7 +4,6 @@ use super::helpers;
 use crate::env::current_exe;
 use crate::ffi::OsString;
 use crate::iter::Iterator;
-use crate::mem::size_of;
 use crate::{fmt, vec};
 
 pub struct Args {
diff --git a/library/std/src/sys/pal/uefi/helpers.rs b/library/std/src/sys/pal/uefi/helpers.rs
index cca2312c4f9..0a2a8f5ef67 100644
--- a/library/std/src/sys/pal/uefi/helpers.rs
+++ b/library/std/src/sys/pal/uefi/helpers.rs
@@ -15,7 +15,7 @@ use r_efi::protocols::{device_path, device_path_to_text, service_binding, shell}
 use crate::ffi::{OsStr, OsString};
 use crate::io::{self, const_error};
 use crate::marker::PhantomData;
-use crate::mem::{MaybeUninit, size_of};
+use crate::mem::MaybeUninit;
 use crate::os::uefi::env::boot_services;
 use crate::os::uefi::ffi::{OsStrExt, OsStringExt};
 use crate::os::uefi::{self};
diff --git a/library/std/src/sys/pal/uefi/process.rs b/library/std/src/sys/pal/uefi/process.rs
index d4c09dc2ca0..1203d51e531 100644
--- a/library/std/src/sys/pal/uefi/process.rs
+++ b/library/std/src/sys/pal/uefi/process.rs
@@ -490,7 +490,7 @@ mod uefi_command_internal {
                 helpers::open_protocol(self.handle, loaded_image::PROTOCOL_GUID).unwrap();
 
             let len = args.len();
-            let args_size: u32 = (len * crate::mem::size_of::<u16>()).try_into().unwrap();
+            let args_size: u32 = (len * size_of::<u16>()).try_into().unwrap();
             let ptr = Box::into_raw(args).as_mut_ptr();
 
             unsafe {
diff --git a/library/std/src/sys/pal/uefi/tests.rs b/library/std/src/sys/pal/uefi/tests.rs
index 5eb36da922b..38658cc4e9a 100644
--- a/library/std/src/sys/pal/uefi/tests.rs
+++ b/library/std/src/sys/pal/uefi/tests.rs
@@ -16,7 +16,7 @@ fn align() {
             if *j <= 8 {
                 assert_eq!(align_size(i, *j), i);
             } else {
-                assert!(align_size(i, *j) > i + std::mem::size_of::<*mut ()>());
+                assert!(align_size(i, *j) > i + size_of::<*mut ()>());
             }
         }
     }
diff --git a/library/std/src/sys/pal/unix/fs.rs b/library/std/src/sys/pal/unix/fs.rs
index 3df460e38b7..20ba915af13 100644
--- a/library/std/src/sys/pal/unix/fs.rs
+++ b/library/std/src/sys/pal/unix/fs.rs
@@ -1505,7 +1505,7 @@ impl File {
                     self.as_raw_fd(),
                     (&raw const attrlist).cast::<libc::c_void>().cast_mut(),
                     buf.as_ptr().cast::<libc::c_void>().cast_mut(),
-                    num_times * mem::size_of::<libc::timespec>(),
+                    num_times * size_of::<libc::timespec>(),
                     0
                 ) })?;
                 Ok(())
@@ -1660,7 +1660,7 @@ impl fmt::Debug for File {
         fn get_path(fd: c_int) -> Option<PathBuf> {
             let info = Box::<libc::kinfo_file>::new_zeroed();
             let mut info = unsafe { info.assume_init() };
-            info.kf_structsize = mem::size_of::<libc::kinfo_file>() as libc::c_int;
+            info.kf_structsize = size_of::<libc::kinfo_file>() as libc::c_int;
             let n = unsafe { libc::fcntl(fd, libc::F_KINFO, &mut *info) };
             if n == -1 {
                 return None;
diff --git a/library/std/src/sys/pal/unix/futex.rs b/library/std/src/sys/pal/unix/futex.rs
index d4551dd6a38..87ba13ca932 100644
--- a/library/std/src/sys/pal/unix/futex.rs
+++ b/library/std/src/sys/pal/unix/futex.rs
@@ -58,7 +58,7 @@ pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option<Duration>) -
                         _clockid: libc::CLOCK_MONOTONIC as u32,
                     });
                     let umtx_timeout_ptr = umtx_timeout.as_ref().map_or(null(), |t| t as *const _);
-                    let umtx_timeout_size = umtx_timeout.as_ref().map_or(0, |t| crate::mem::size_of_val(t));
+                    let umtx_timeout_size = umtx_timeout.as_ref().map_or(0, |t| size_of_val(t));
                     libc::_umtx_op(
                         futex as *const AtomicU32 as *mut _,
                         libc::UMTX_OP_WAIT_UINT_PRIVATE,
diff --git a/library/std/src/sys/pal/unix/process/process_common.rs b/library/std/src/sys/pal/unix/process/process_common.rs
index 342818ac911..0ea9db211b3 100644
--- a/library/std/src/sys/pal/unix/process/process_common.rs
+++ b/library/std/src/sys/pal/unix/process/process_common.rs
@@ -43,10 +43,7 @@ cfg_if::cfg_if! {
 
         #[allow(dead_code)]
         pub unsafe fn sigaddset(set: *mut libc::sigset_t, signum: libc::c_int) -> libc::c_int {
-            use crate::{
-                mem::{align_of, size_of},
-                slice,
-            };
+            use crate::slice;
             use libc::{c_ulong, sigset_t};
 
             // The implementations from bionic (android libc) type pun `sigset_t` as an
diff --git a/library/std/src/sys/pal/unix/process/process_fuchsia.rs b/library/std/src/sys/pal/unix/process/process_fuchsia.rs
index 4ddc96356b9..05c9ace470e 100644
--- a/library/std/src/sys/pal/unix/process/process_fuchsia.rs
+++ b/library/std/src/sys/pal/unix/process/process_fuchsia.rs
@@ -179,7 +179,7 @@ impl Process {
                 self.handle.raw(),
                 ZX_INFO_PROCESS,
                 (&raw mut proc_info) as *mut libc::c_void,
-                mem::size_of::<zx_info_process_t>(),
+                size_of::<zx_info_process_t>(),
                 &mut actual,
                 &mut avail,
             ))?;
@@ -216,7 +216,7 @@ impl Process {
                 self.handle.raw(),
                 ZX_INFO_PROCESS,
                 (&raw mut proc_info) as *mut libc::c_void,
-                mem::size_of::<zx_info_process_t>(),
+                size_of::<zx_info_process_t>(),
                 &mut actual,
                 &mut avail,
             ))?;
diff --git a/library/std/src/sys/pal/unix/process/process_unix.rs b/library/std/src/sys/pal/unix/process/process_unix.rs
index ddea445bb17..25d9e935332 100644
--- a/library/std/src/sys/pal/unix/process/process_unix.rs
+++ b/library/std/src/sys/pal/unix/process/process_unix.rs
@@ -814,7 +814,7 @@ impl Command {
 
             let fds: [c_int; 1] = [pidfd as RawFd];
 
-            const SCM_MSG_LEN: usize = mem::size_of::<[c_int; 1]>();
+            const SCM_MSG_LEN: usize = size_of::<[c_int; 1]>();
 
             #[repr(C)]
             union Cmsg {
@@ -833,7 +833,7 @@ impl Command {
 
             // only attach cmsg if we successfully acquired the pidfd
             if pidfd >= 0 {
-                msg.msg_controllen = mem::size_of_val(&cmsg.buf) as _;
+                msg.msg_controllen = size_of_val(&cmsg.buf) as _;
                 msg.msg_control = (&raw mut cmsg.buf) as *mut _;
 
                 let hdr = CMSG_FIRSTHDR((&raw mut msg) as *mut _);
@@ -865,7 +865,7 @@ impl Command {
         use crate::sys::cvt_r;
 
         unsafe {
-            const SCM_MSG_LEN: usize = mem::size_of::<[c_int; 1]>();
+            const SCM_MSG_LEN: usize = size_of::<[c_int; 1]>();
 
             #[repr(C)]
             union Cmsg {
@@ -880,7 +880,7 @@ impl Command {
 
             msg.msg_iov = (&raw mut iov) as *mut _;
             msg.msg_iovlen = 1;
-            msg.msg_controllen = mem::size_of::<Cmsg>() as _;
+            msg.msg_controllen = size_of::<Cmsg>() as _;
             msg.msg_control = (&raw mut cmsg) as *mut _;
 
             match cvt_r(|| libc::recvmsg(sock.as_raw(), &mut msg, libc::MSG_CMSG_CLOEXEC)) {
diff --git a/library/std/src/sys/pal/unix/stack_overflow.rs b/library/std/src/sys/pal/unix/stack_overflow.rs
index 43ece63457f..0ecccdc8812 100644
--- a/library/std/src/sys/pal/unix/stack_overflow.rs
+++ b/library/std/src/sys/pal/unix/stack_overflow.rs
@@ -426,7 +426,7 @@ mod imp {
             use crate::sys::weak::dlsym;
             dlsym!(fn sysctlbyname(*const libc::c_char, *mut libc::c_void, *mut libc::size_t, *const libc::c_void, libc::size_t) -> libc::c_int);
             let mut guard: usize = 0;
-            let mut size = mem::size_of_val(&guard);
+            let mut size = size_of_val(&guard);
             let oid = c"security.bsd.stack_guard_page";
             match sysctlbyname.get() {
                 Some(fcn) if unsafe {
diff --git a/library/std/src/sys/pal/unix/thread.rs b/library/std/src/sys/pal/unix/thread.rs
index 3dedc8d1257..11f6998cac1 100644
--- a/library/std/src/sys/pal/unix/thread.rs
+++ b/library/std/src/sys/pal/unix/thread.rs
@@ -372,7 +372,7 @@ pub fn available_parallelism() -> io::Result<NonZero<usize>> {
                 quota = cgroups::quota().max(1);
                 let mut set: libc::cpu_set_t = unsafe { mem::zeroed() };
                 unsafe {
-                    if libc::sched_getaffinity(0, mem::size_of::<libc::cpu_set_t>(), &mut set) == 0 {
+                    if libc::sched_getaffinity(0, size_of::<libc::cpu_set_t>(), &mut set) == 0 {
                         let count = libc::CPU_COUNT(&set) as usize;
                         let count = count.min(quota);
 
@@ -412,7 +412,7 @@ pub fn available_parallelism() -> io::Result<NonZero<usize>> {
                         libc::CPU_LEVEL_WHICH,
                         libc::CPU_WHICH_PID,
                         -1,
-                        mem::size_of::<libc::cpuset_t>(),
+                        size_of::<libc::cpuset_t>(),
                         &mut set,
                     ) == 0 {
                         let count = libc::CPU_COUNT(&set) as usize;
@@ -447,7 +447,7 @@ pub fn available_parallelism() -> io::Result<NonZero<usize>> {
             }
 
             let mut cpus: libc::c_uint = 0;
-            let mut cpus_size = crate::mem::size_of_val(&cpus);
+            let mut cpus_size = size_of_val(&cpus);
 
             unsafe {
                 cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint;
diff --git a/library/std/src/sys/pal/unix/weak.rs b/library/std/src/sys/pal/unix/weak.rs
index 5a37598f438..7ec4787f1ea 100644
--- a/library/std/src/sys/pal/unix/weak.rs
+++ b/library/std/src/sys/pal/unix/weak.rs
@@ -123,7 +123,7 @@ impl<F> DlsymWeak<F> {
     // Cold because it should only happen during first-time initialization.
     #[cold]
     unsafe fn initialize(&self) -> Option<F> {
-        assert_eq!(mem::size_of::<F>(), mem::size_of::<*mut libc::c_void>());
+        assert_eq!(size_of::<F>(), size_of::<*mut libc::c_void>());
 
         let val = fetch(self.name);
         // This synchronizes with the acquire fence in `get`.
diff --git a/library/std/src/sys/pal/wasi/fd.rs b/library/std/src/sys/pal/wasi/fd.rs
index 19b60157e2e..4b3dd1ce49e 100644
--- a/library/std/src/sys/pal/wasi/fd.rs
+++ b/library/std/src/sys/pal/wasi/fd.rs
@@ -14,8 +14,8 @@ pub struct WasiFd {
 }
 
 fn iovec<'a>(a: &'a mut [IoSliceMut<'_>]) -> &'a [wasi::Iovec] {
-    assert_eq!(mem::size_of::<IoSliceMut<'_>>(), mem::size_of::<wasi::Iovec>());
-    assert_eq!(mem::align_of::<IoSliceMut<'_>>(), mem::align_of::<wasi::Iovec>());
+    assert_eq!(size_of::<IoSliceMut<'_>>(), size_of::<wasi::Iovec>());
+    assert_eq!(align_of::<IoSliceMut<'_>>(), align_of::<wasi::Iovec>());
     // SAFETY: `IoSliceMut` and `IoVec` have exactly the same memory layout.
     // We decorate our `IoSliceMut` with `repr(transparent)` (see `io.rs`), and
     // `crate::io::IoSliceMut` is a `repr(transparent)` wrapper around our type, so this is
@@ -24,8 +24,8 @@ fn iovec<'a>(a: &'a mut [IoSliceMut<'_>]) -> &'a [wasi::Iovec] {
 }
 
 fn ciovec<'a>(a: &'a [IoSlice<'_>]) -> &'a [wasi::Ciovec] {
-    assert_eq!(mem::size_of::<IoSlice<'_>>(), mem::size_of::<wasi::Ciovec>());
-    assert_eq!(mem::align_of::<IoSlice<'_>>(), mem::align_of::<wasi::Ciovec>());
+    assert_eq!(size_of::<IoSlice<'_>>(), size_of::<wasi::Ciovec>());
+    assert_eq!(align_of::<IoSlice<'_>>(), align_of::<wasi::Ciovec>());
     // SAFETY: `IoSlice` and `CIoVec` have exactly the same memory layout.
     // We decorate our `IoSlice` with `repr(transparent)` (see `io.rs`), and
     // `crate::io::IoSlice` is a `repr(transparent)` wrapper around our type, so this is
diff --git a/library/std/src/sys/pal/wasi/fs.rs b/library/std/src/sys/pal/wasi/fs.rs
index 39978346d73..6d7d125fc4d 100644
--- a/library/std/src/sys/pal/wasi/fs.rs
+++ b/library/std/src/sys/pal/wasi/fs.rs
@@ -209,7 +209,7 @@ impl Iterator for ReadDir {
             }
             ReadDirState::ProcessEntry { buf, next_read_offset, offset } => {
                 let contents = &buf[*offset..];
-                const DIRENT_SIZE: usize = crate::mem::size_of::<wasi::Dirent>();
+                const DIRENT_SIZE: usize = size_of::<wasi::Dirent>();
                 if contents.len() >= DIRENT_SIZE {
                     let (dirent, data) = contents.split_at(DIRENT_SIZE);
                     let dirent =
diff --git a/library/std/src/sys/pal/wasi/thread.rs b/library/std/src/sys/pal/wasi/thread.rs
index 0ae02369410..c85b03d4a89 100644
--- a/library/std/src/sys/pal/wasi/thread.rs
+++ b/library/std/src/sys/pal/wasi/thread.rs
@@ -13,16 +13,15 @@ cfg_if::cfg_if! {
         // Add a few symbols not in upstream `libc` just yet.
         mod libc {
             pub use crate::ffi;
-            pub use crate::mem;
             pub use libc::*;
 
             // defined in wasi-libc
             // https://github.com/WebAssembly/wasi-libc/blob/a6f871343313220b76009827ed0153586361c0d5/libc-top-half/musl/include/alltypes.h.in#L108
             #[repr(C)]
             union pthread_attr_union {
-                __i: [ffi::c_int; if mem::size_of::<ffi::c_long>() == 8 { 14 } else { 9 }],
-                __vi: [ffi::c_int; if mem::size_of::<ffi::c_long>() == 8 { 14 } else { 9 }],
-                __s: [ffi::c_ulong; if mem::size_of::<ffi::c_long>() == 8 { 7 } else { 9 }],
+                __i: [ffi::c_int; if size_of::<ffi::c_long>() == 8 { 14 } else { 9 }],
+                __vi: [ffi::c_int; if size_of::<ffi::c_long>() == 8 { 14 } else { 9 }],
+                __s: [ffi::c_ulong; if size_of::<ffi::c_long>() == 8 { 7 } else { 9 }],
             }
 
             #[repr(C)]
diff --git a/library/std/src/sys/pal/windows/api.rs b/library/std/src/sys/pal/windows/api.rs
index ebe207fde93..6b5f9aeace2 100644
--- a/library/std/src/sys/pal/windows/api.rs
+++ b/library/std/src/sys/pal/windows/api.rs
@@ -137,7 +137,7 @@ pub const fn to_utf16<const UTF16_LEN: usize>(s: &str) -> [u16; UTF16_LEN] {
 /// use frequent `as` casts. This is risky because they are too powerful.
 /// For example, the following will compile today:
 ///
-/// `std::mem::size_of::<u64> as u32`
+/// `size_of::<u64> as u32`
 ///
 /// Note that `size_of` is never actually called, instead a function pointer is
 /// converted to a `u32`. Clippy would warn about this but, alas, it's not run
@@ -147,7 +147,7 @@ const fn win32_size_of<T: Sized>() -> u32 {
     // Uses a trait to workaround restriction on using generic types in inner items.
     trait Win32SizeOf: Sized {
         const WIN32_SIZE_OF: u32 = {
-            let size = core::mem::size_of::<Self>();
+            let size = size_of::<Self>();
             assert!(size <= u32::MAX as usize);
             size as u32
         };
diff --git a/library/std/src/sys/pal/windows/c.rs b/library/std/src/sys/pal/windows/c.rs
index 4fbdc839939..40b2bed73c0 100644
--- a/library/std/src/sys/pal/windows/c.rs
+++ b/library/std/src/sys/pal/windows/c.rs
@@ -6,7 +6,7 @@
 #![allow(clippy::style)]
 
 use core::ffi::{CStr, c_uint, c_ulong, c_ushort, c_void};
-use core::{mem, ptr};
+use core::ptr;
 
 mod windows_sys;
 pub use windows_sys::*;
@@ -39,7 +39,7 @@ pub fn nt_success(status: NTSTATUS) -> bool {
 
 impl UNICODE_STRING {
     pub fn from_ref(slice: &[u16]) -> Self {
-        let len = mem::size_of_val(slice);
+        let len = size_of_val(slice);
         Self { Length: len as _, MaximumLength: len as _, Buffer: slice.as_ptr() as _ }
     }
 }
@@ -47,7 +47,7 @@ impl UNICODE_STRING {
 impl Default for OBJECT_ATTRIBUTES {
     fn default() -> Self {
         Self {
-            Length: mem::size_of::<Self>() as _,
+            Length: size_of::<Self>() as _,
             RootDirectory: ptr::null_mut(),
             ObjectName: ptr::null_mut(),
             Attributes: 0,
diff --git a/library/std/src/sys/pal/windows/fs.rs b/library/std/src/sys/pal/windows/fs.rs
index 623a7d89ba5..17dc3e5c257 100644
--- a/library/std/src/sys/pal/windows/fs.rs
+++ b/library/std/src/sys/pal/windows/fs.rs
@@ -477,7 +477,7 @@ impl File {
                     self.handle.as_raw_handle(),
                     c::FileAttributeTagInfo,
                     (&raw mut attr_tag).cast(),
-                    mem::size_of::<c::FILE_ATTRIBUTE_TAG_INFO>().try_into().unwrap(),
+                    size_of::<c::FILE_ATTRIBUTE_TAG_INFO>().try_into().unwrap(),
                 ))?;
                 if attr_tag.FileAttributes & c::FILE_ATTRIBUTE_REPARSE_POINT != 0 {
                     reparse_tag = attr_tag.ReparseTag;
@@ -504,7 +504,7 @@ impl File {
     pub fn file_attr(&self) -> io::Result<FileAttr> {
         unsafe {
             let mut info: c::FILE_BASIC_INFO = mem::zeroed();
-            let size = mem::size_of_val(&info);
+            let size = size_of_val(&info);
             cvt(c::GetFileInformationByHandleEx(
                 self.handle.as_raw_handle(),
                 c::FileBasicInfo,
@@ -536,7 +536,7 @@ impl File {
                 file_index: None,
             };
             let mut info: c::FILE_STANDARD_INFO = mem::zeroed();
-            let size = mem::size_of_val(&info);
+            let size = size_of_val(&info);
             cvt(c::GetFileInformationByHandleEx(
                 self.handle.as_raw_handle(),
                 c::FileStandardInfo,
@@ -551,7 +551,7 @@ impl File {
                     self.handle.as_raw_handle(),
                     c::FileAttributeTagInfo,
                     (&raw mut attr_tag).cast(),
-                    mem::size_of::<c::FILE_ATTRIBUTE_TAG_INFO>().try_into().unwrap(),
+                    size_of::<c::FILE_ATTRIBUTE_TAG_INFO>().try_into().unwrap(),
                 ))?;
                 if attr_tag.FileAttributes & c::FILE_ATTRIBUTE_REPARSE_POINT != 0 {
                     attr.reparse_tag = attr_tag.ReparseTag;
@@ -649,7 +649,7 @@ impl File {
                     ptr::null_mut(),
                 )
             })?;
-            const _: () = assert!(core::mem::align_of::<c::REPARSE_DATA_BUFFER>() <= 8);
+            const _: () = assert!(align_of::<c::REPARSE_DATA_BUFFER>() <= 8);
             Ok((bytes, space.0.as_mut_ptr().cast::<c::REPARSE_DATA_BUFFER>()))
         }
     }
@@ -753,7 +753,7 @@ impl File {
     fn basic_info(&self) -> io::Result<c::FILE_BASIC_INFO> {
         unsafe {
             let mut info: c::FILE_BASIC_INFO = mem::zeroed();
-            let size = mem::size_of_val(&info);
+            let size = size_of_val(&info);
             cvt(c::GetFileInformationByHandleEx(
                 self.handle.as_raw_handle(),
                 c::FileBasicInfo,
@@ -886,7 +886,6 @@ impl<'a> DirBuffIter<'a> {
 impl<'a> Iterator for DirBuffIter<'a> {
     type Item = (Cow<'a, [u16]>, bool);
     fn next(&mut self) -> Option<Self::Item> {
-        use crate::mem::size_of;
         let buffer = &self.buffer?[self.cursor..];
 
         // Get the name and next entry from the buffer.
@@ -1249,8 +1248,8 @@ pub fn rename(old: &Path, new: &Path) -> io::Result<()> {
     // Therefore we need to make sure to not allocate less than
     // size_of::<c::FILE_RENAME_INFO>() bytes, which would be the case with
     // 0 or 1 character paths + a null byte.
-    let struct_size = mem::size_of::<c::FILE_RENAME_INFO>()
-        .max(mem::offset_of!(c::FILE_RENAME_INFO, FileName) + new.len() * mem::size_of::<u16>());
+    let struct_size = size_of::<c::FILE_RENAME_INFO>()
+        .max(mem::offset_of!(c::FILE_RENAME_INFO, FileName) + new.len() * size_of::<u16>());
 
     let struct_size: u32 = struct_size.try_into().unwrap();
 
@@ -1282,7 +1281,7 @@ pub fn rename(old: &Path, new: &Path) -> io::Result<()> {
                     handle.as_raw_handle(),
                     c::FileAttributeTagInfo,
                     file_attribute_tag_info.as_mut_ptr().cast(),
-                    mem::size_of::<c::FILE_ATTRIBUTE_TAG_INFO>().try_into().unwrap(),
+                    size_of::<c::FILE_ATTRIBUTE_TAG_INFO>().try_into().unwrap(),
                 ))
             };
 
@@ -1321,11 +1320,9 @@ pub fn rename(old: &Path, new: &Path) -> io::Result<()> {
     }
     .unwrap_or_else(|| create_file(0, 0))?;
 
-    let layout = core::alloc::Layout::from_size_align(
-        struct_size as _,
-        mem::align_of::<c::FILE_RENAME_INFO>(),
-    )
-    .unwrap();
+    let layout =
+        core::alloc::Layout::from_size_align(struct_size as _, align_of::<c::FILE_RENAME_INFO>())
+            .unwrap();
 
     let file_rename_info = unsafe { alloc(layout) } as *mut c::FILE_RENAME_INFO;
 
diff --git a/library/std/src/sys/pal/windows/futex.rs b/library/std/src/sys/pal/windows/futex.rs
index 38afb8c043b..aebf638239c 100644
--- a/library/std/src/sys/pal/windows/futex.rs
+++ b/library/std/src/sys/pal/windows/futex.rs
@@ -1,10 +1,10 @@
 use core::ffi::c_void;
+use core::ptr;
 use core::sync::atomic::{
     AtomicBool, AtomicI8, AtomicI16, AtomicI32, AtomicI64, AtomicIsize, AtomicPtr, AtomicU8,
     AtomicU16, AtomicU32, AtomicU64, AtomicUsize,
 };
 use core::time::Duration;
-use core::{mem, ptr};
 
 use super::api::{self, WinError};
 use crate::sys::{c, dur2timeout};
@@ -61,7 +61,7 @@ pub fn wait_on_address<W: Waitable>(
 ) -> bool {
     unsafe {
         let addr = ptr::from_ref(address).cast::<c_void>();
-        let size = mem::size_of::<W>();
+        let size = size_of::<W>();
         let compare_addr = (&raw const compare).cast::<c_void>();
         let timeout = timeout.map(dur2timeout).unwrap_or(c::INFINITE);
         c::WaitOnAddress(addr, compare_addr, size, timeout) == c::TRUE
diff --git a/library/std/src/sys/pal/windows/pipe.rs b/library/std/src/sys/pal/windows/pipe.rs
index a8f6617c9dc..8521cf4162f 100644
--- a/library/std/src/sys/pal/windows/pipe.rs
+++ b/library/std/src/sys/pal/windows/pipe.rs
@@ -151,7 +151,7 @@ pub fn anon_pipe(ours_readable: bool, their_handle_inheritable: bool) -> io::Res
         opts.write(ours_readable);
         opts.read(!ours_readable);
         opts.share_mode(0);
-        let size = mem::size_of::<c::SECURITY_ATTRIBUTES>();
+        let size = size_of::<c::SECURITY_ATTRIBUTES>();
         let mut sa = c::SECURITY_ATTRIBUTES {
             nLength: size as u32,
             lpSecurityDescriptor: ptr::null_mut(),
diff --git a/library/std/src/sys/pal/windows/process.rs b/library/std/src/sys/pal/windows/process.rs
index 6eff471f386..c57ff355d12 100644
--- a/library/std/src/sys/pal/windows/process.rs
+++ b/library/std/src/sys/pal/windows/process.rs
@@ -24,7 +24,7 @@ use crate::sys::pipe::{self, AnonPipe};
 use crate::sys::{cvt, path, stdio};
 use crate::sys_common::IntoInner;
 use crate::sys_common::process::{CommandEnv, CommandEnvs};
-use crate::{cmp, env, fmt, mem, ptr};
+use crate::{cmp, env, fmt, ptr};
 
 ////////////////////////////////////////////////////////////////////////////////
 // Command
@@ -355,7 +355,7 @@ impl Command {
         let mut si_ex;
 
         if let Some(proc_thread_attribute_list) = proc_thread_attribute_list {
-            si.cb = mem::size_of::<c::STARTUPINFOEXW>() as u32;
+            si.cb = size_of::<c::STARTUPINFOEXW>() as u32;
             flags |= c::EXTENDED_STARTUPINFO_PRESENT;
 
             si_ex = c::STARTUPINFOEXW {
@@ -367,7 +367,7 @@ impl Command {
             };
             si_ptr = (&raw mut si_ex) as _;
         } else {
-            si.cb = mem::size_of::<c::STARTUPINFOW>() as u32;
+            si.cb = size_of::<c::STARTUPINFOW>() as u32;
             si_ptr = (&raw mut si) as _;
         }
 
@@ -599,7 +599,7 @@ impl Stdio {
             // permissions as well as the ability to be inherited to child
             // processes (as this is about to be inherited).
             Stdio::Null => {
-                let size = mem::size_of::<c::SECURITY_ATTRIBUTES>();
+                let size = size_of::<c::SECURITY_ATTRIBUTES>();
                 let mut sa = c::SECURITY_ATTRIBUTES {
                     nLength: size as u32,
                     lpSecurityDescriptor: ptr::null_mut(),
diff --git a/library/std/src/sys/pal/windows/stdio.rs b/library/std/src/sys/pal/windows/stdio.rs
index 1b245991aa7..58d3406e138 100644
--- a/library/std/src/sys/pal/windows/stdio.rs
+++ b/library/std/src/sys/pal/windows/stdio.rs
@@ -359,7 +359,7 @@ fn read_u16s(handle: c::HANDLE, buf: &mut [MaybeUninit<u16>]) -> io::Result<usiz
     const CTRL_Z: u16 = 0x1A;
     const CTRL_Z_MASK: u32 = 1 << CTRL_Z;
     let input_control = c::CONSOLE_READCONSOLE_CONTROL {
-        nLength: crate::mem::size_of::<c::CONSOLE_READCONSOLE_CONTROL>() as u32,
+        nLength: size_of::<c::CONSOLE_READCONSOLE_CONTROL>() as u32,
         nInitialChars: 0,
         dwCtrlWakeupMask: CTRL_Z_MASK,
         dwControlKeyState: 0,
diff --git a/library/std/src/sys/pal/xous/stdio.rs b/library/std/src/sys/pal/xous/stdio.rs
index dfd47a1775a..71736145221 100644
--- a/library/std/src/sys/pal/xous/stdio.rs
+++ b/library/std/src/sys/pal/xous/stdio.rs
@@ -87,7 +87,7 @@ pub struct PanicWriter {
 
 impl io::Write for PanicWriter {
     fn write(&mut self, s: &[u8]) -> core::result::Result<usize, io::Error> {
-        for c in s.chunks(core::mem::size_of::<usize>() * 4) {
+        for c in s.chunks(size_of::<usize>() * 4) {
             // Text is grouped into 4x `usize` words. The id is 1100 plus
             // the number of characters in this message.
             // Ignore errors since we're already panicking.
diff --git a/library/std/src/sys/pal/zkvm/mod.rs b/library/std/src/sys/pal/zkvm/mod.rs
index 054c867f90d..8d8fe321f66 100644
--- a/library/std/src/sys/pal/zkvm/mod.rs
+++ b/library/std/src/sys/pal/zkvm/mod.rs
@@ -8,7 +8,7 @@
 //! will likely change over time.
 #![forbid(unsafe_op_in_unsafe_fn)]
 
-const WORD_SIZE: usize = core::mem::size_of::<u32>();
+const WORD_SIZE: usize = size_of::<u32>();
 
 pub mod abi;
 #[path = "../zkvm/args.rs"]
diff --git a/library/std/src/sys/personality/dwarf/eh.rs b/library/std/src/sys/personality/dwarf/eh.rs
index 778d8686f02..ef5112ad74f 100644
--- a/library/std/src/sys/personality/dwarf/eh.rs
+++ b/library/std/src/sys/personality/dwarf/eh.rs
@@ -12,7 +12,7 @@
 #![allow(non_upper_case_globals)]
 #![allow(unused)]
 
-use core::{mem, ptr};
+use core::ptr;
 
 use super::DwarfReader;
 
@@ -245,8 +245,7 @@ unsafe fn read_encoded_pointer(
         DW_EH_PE_datarel => (*context.get_data_start)(),
         // aligned means the value is aligned to the size of a pointer
         DW_EH_PE_aligned => {
-            reader.ptr =
-                reader.ptr.with_addr(round_up(reader.ptr.addr(), mem::size_of::<*const u8>())?);
+            reader.ptr = reader.ptr.with_addr(round_up(reader.ptr.addr(), size_of::<*const u8>())?);
             core::ptr::null()
         }
         _ => return Err(()),
diff --git a/library/std/src/sys/personality/dwarf/mod.rs b/library/std/src/sys/personality/dwarf/mod.rs
index 5c52d96c4ca..2bc91951b49 100644
--- a/library/std/src/sys/personality/dwarf/mod.rs
+++ b/library/std/src/sys/personality/dwarf/mod.rs
@@ -12,8 +12,6 @@ mod tests;
 
 pub mod eh;
 
-use core::mem;
-
 pub struct DwarfReader {
     pub ptr: *const u8,
 }
@@ -29,7 +27,7 @@ impl DwarfReader {
     pub unsafe fn read<T: Copy>(&mut self) -> T {
         unsafe {
             let result = self.ptr.cast::<T>().read_unaligned();
-            self.ptr = self.ptr.byte_add(mem::size_of::<T>());
+            self.ptr = self.ptr.byte_add(size_of::<T>());
             result
         }
     }
diff --git a/library/std/src/sys/thread_local/key/xous.rs b/library/std/src/sys/thread_local/key/xous.rs
index 55ac5b20e1a..48dfe17ab32 100644
--- a/library/std/src/sys/thread_local/key/xous.rs
+++ b/library/std/src/sys/thread_local/key/xous.rs
@@ -85,7 +85,7 @@ fn tls_table() -> &'static mut [*mut u8] {
 
     if !tp.is_null() {
         return unsafe {
-            core::slice::from_raw_parts_mut(tp, TLS_MEMORY_SIZE / core::mem::size_of::<*mut u8>())
+            core::slice::from_raw_parts_mut(tp, TLS_MEMORY_SIZE / size_of::<*mut u8>())
         };
     }
     // If the TP register is `0`, then this thread hasn't initialized
@@ -94,7 +94,7 @@ fn tls_table() -> &'static mut [*mut u8] {
         map_memory(
             None,
             None,
-            TLS_MEMORY_SIZE / core::mem::size_of::<*mut u8>(),
+            TLS_MEMORY_SIZE / size_of::<*mut u8>(),
             MemoryFlags::R | MemoryFlags::W,
         )
         .expect("Unable to allocate memory for thread local storage")
@@ -177,11 +177,8 @@ pub unsafe fn destroy_tls() {
 
     // Finally, free the TLS array
     unsafe {
-        unmap_memory(core::slice::from_raw_parts_mut(
-            tp,
-            TLS_MEMORY_SIZE / core::mem::size_of::<usize>(),
-        ))
-        .unwrap()
+        unmap_memory(core::slice::from_raw_parts_mut(tp, TLS_MEMORY_SIZE / size_of::<usize>()))
+            .unwrap()
     };
 }
 
diff --git a/library/std/src/thread/tests.rs b/library/std/src/thread/tests.rs
index ff45e82bd9c..06c347af181 100644
--- a/library/std/src/thread/tests.rs
+++ b/library/std/src/thread/tests.rs
@@ -1,12 +1,12 @@
 use super::Builder;
 use crate::any::Any;
 use crate::panic::panic_any;
+use crate::result;
 use crate::sync::atomic::{AtomicBool, Ordering};
 use crate::sync::mpsc::{Sender, channel};
 use crate::sync::{Arc, Barrier};
 use crate::thread::{self, Scope, ThreadId};
 use crate::time::{Duration, Instant};
-use crate::{mem, result};
 
 // !!! These tests are dangerous. If something is buggy, they will hang, !!!
 // !!! instead of exiting cleanly. This might wedge the buildbots.       !!!
@@ -327,7 +327,7 @@ fn sleep_ms_smoke() {
 
 #[test]
 fn test_size_of_option_thread_id() {
-    assert_eq!(mem::size_of::<Option<ThreadId>>(), mem::size_of::<ThreadId>());
+    assert_eq!(size_of::<Option<ThreadId>>(), size_of::<ThreadId>());
 }
 
 #[test]
diff --git a/library/unwind/src/unwinding.rs b/library/unwind/src/unwinding.rs
index 1b94005ab6c..fa8a8c38583 100644
--- a/library/unwind/src/unwinding.rs
+++ b/library/unwind/src/unwinding.rs
@@ -39,9 +39,9 @@ pub type _Unwind_Exception_Class = u64;
 pub type _Unwind_Word = *const u8;
 pub type _Unwind_Ptr = *const u8;
 
-pub const unwinder_private_data_size: usize = core::mem::size_of::<UnwindException>()
-    - core::mem::size_of::<_Unwind_Exception_Class>()
-    - core::mem::size_of::<_Unwind_Exception_Cleanup_Fn>();
+pub const unwinder_private_data_size: usize = size_of::<UnwindException>()
+    - size_of::<_Unwind_Exception_Class>()
+    - size_of::<_Unwind_Exception_Cleanup_Fn>();
 
 pub type _Unwind_Exception_Cleanup_Fn =
     Option<extern "C" fn(unwind_code: _Unwind_Reason_Code, exception: *mut _Unwind_Exception)>;
diff --git a/src/librustdoc/clean/mod.rs b/src/librustdoc/clean/mod.rs
index fd528786d03..97ff4c2ef40 100644
--- a/src/librustdoc/clean/mod.rs
+++ b/src/librustdoc/clean/mod.rs
@@ -2532,7 +2532,7 @@ fn clean_generic_args<'tcx>(
 ) -> GenericArgs {
     // FIXME(return_type_notation): Fix RTN parens rendering
     if let Some((inputs, output)) = generic_args.paren_sugar_inputs_output() {
-        let inputs = inputs.iter().map(|x| clean_ty(x, cx)).collect::<ThinVec<_>>().into();
+        let inputs = inputs.iter().map(|x| clean_ty(x, cx)).collect();
         let output = match output.kind {
             hir::TyKind::Tup(&[]) => None,
             _ => Some(Box::new(clean_ty(output, cx))),
@@ -2553,8 +2553,7 @@ fn clean_generic_args<'tcx>(
                 }
                 hir::GenericArg::Infer(_inf) => GenericArg::Infer,
             })
-            .collect::<ThinVec<_>>()
-            .into();
+            .collect();
         let constraints = generic_args
             .constraints
             .iter()
diff --git a/src/librustdoc/clean/types.rs b/src/librustdoc/clean/types.rs
index 0d33c234f93..9e9cd528834 100644
--- a/src/librustdoc/clean/types.rs
+++ b/src/librustdoc/clean/types.rs
@@ -2417,7 +2417,7 @@ impl ConstantKind {
             ConstantKind::Local { body, .. } | ConstantKind::Anonymous { body } => {
                 rendered_const(tcx, tcx.hir_body(body), tcx.hir_body_owner_def_id(body))
             }
-            ConstantKind::Infer { .. } => "_".to_string(),
+            ConstantKind::Infer => "_".to_string(),
         }
     }
 
diff --git a/src/librustdoc/clean/utils.rs b/src/librustdoc/clean/utils.rs
index a284de5229a..f81db58950c 100644
--- a/src/librustdoc/clean/utils.rs
+++ b/src/librustdoc/clean/utils.rs
@@ -223,7 +223,7 @@ fn clean_middle_generic_args_with_constraints<'tcx>(
 
     let args = clean_middle_generic_args(cx, args.map_bound(|args| &args[..]), has_self, did);
 
-    GenericArgs::AngleBracketed { args: args.into(), constraints }
+    GenericArgs::AngleBracketed { args, constraints }
 }
 
 pub(super) fn clean_middle_path<'tcx>(
@@ -394,7 +394,7 @@ pub(crate) fn print_evaluated_const(
 fn format_integer_with_underscore_sep(num: &str) -> String {
     let num_chars: Vec<_> = num.chars().collect();
     let mut num_start_index = if num_chars.first() == Some(&'-') { 1 } else { 0 };
-    let chunk_size = match num[num_start_index..].as_bytes() {
+    let chunk_size = match &num.as_bytes()[num_start_index..] {
         [b'0', b'b' | b'x', ..] => {
             num_start_index += 2;
             4
@@ -524,7 +524,7 @@ pub(crate) fn register_res(cx: &mut DocContext<'_>, res: Res) -> DefId {
             | AssocConst
             | Variant
             | Fn
-            | TyAlias { .. }
+            | TyAlias
             | Enum
             | Trait
             | Struct
diff --git a/src/librustdoc/core.rs b/src/librustdoc/core.rs
index 679921c3269..f95ae380fa8 100644
--- a/src/librustdoc/core.rs
+++ b/src/librustdoc/core.rs
@@ -154,7 +154,7 @@ pub(crate) fn new_dcx(
         false,
     );
     let emitter: Box<DynEmitter> = match error_format {
-        ErrorOutputType::HumanReadable(kind, color_config) => {
+        ErrorOutputType::HumanReadable { kind, color_config } => {
             let short = kind.short();
             Box::new(
                 HumanEmitter::new(stderr_destination(color_config), fallback_bundle)
diff --git a/src/librustdoc/display.rs b/src/librustdoc/display.rs
index ee8dde013ee..aa0fad26520 100644
--- a/src/librustdoc/display.rs
+++ b/src/librustdoc/display.rs
@@ -22,7 +22,7 @@ where
         let mut iter = self.into_iter();
         let Some(first) = iter.next() else { return Ok(()) };
         first.fmt(f)?;
-        while let Some(item) = iter.next() {
+        for item in iter {
             f.write_str(sep)?;
             item.fmt(f)?;
         }
diff --git a/src/librustdoc/doctest.rs b/src/librustdoc/doctest.rs
index 4a379b4235f..3d6e0330fff 100644
--- a/src/librustdoc/doctest.rs
+++ b/src/librustdoc/doctest.rs
@@ -580,7 +580,7 @@ fn run_test(
             path_for_rustdoc.to_str().expect("target path must be valid unicode")
         }
     });
-    if let ErrorOutputType::HumanReadable(kind, color_config) = rustdoc_options.error_format {
+    if let ErrorOutputType::HumanReadable { kind, color_config } = rustdoc_options.error_format {
         let short = kind.short();
         let unicode = kind == HumanReadableErrorType::Unicode;
 
diff --git a/src/librustdoc/doctest/extracted.rs b/src/librustdoc/doctest/extracted.rs
index 03c8814a4c9..ce362eabfc4 100644
--- a/src/librustdoc/doctest/extracted.rs
+++ b/src/librustdoc/doctest/extracted.rs
@@ -33,7 +33,7 @@ impl ExtractedDocTests {
         opts: &super::GlobalTestOptions,
         options: &RustdocOptions,
     ) {
-        let edition = scraped_test.edition(&options);
+        let edition = scraped_test.edition(options);
 
         let ScrapedDocTest { filename, line, langstr, text, name } = scraped_test;
 
@@ -48,7 +48,7 @@ impl ExtractedDocTests {
         let (full_test_code, size) = doctest.generate_unique_doctest(
             &text,
             langstr.test_harness,
-            &opts,
+            opts,
             Some(&opts.crate_name),
         );
         self.doctests.push(ExtractedDocTest {
diff --git a/src/librustdoc/html/format.rs b/src/librustdoc/html/format.rs
index ea740508c58..8b8439a2535 100644
--- a/src/librustdoc/html/format.rs
+++ b/src/librustdoc/html/format.rs
@@ -623,10 +623,9 @@ pub(crate) fn href_relative_parts<'fqp>(
         // e.g. linking to std::iter from std::vec (`dissimilar_part_count` will be 1)
         if f != r {
             let dissimilar_part_count = relative_to_fqp.len() - i;
-            let fqp_module = &fqp[i..fqp.len()];
+            let fqp_module = &fqp[i..];
             return Box::new(
-                iter::repeat(sym::dotdot)
-                    .take(dissimilar_part_count)
+                iter::repeat_n(sym::dotdot, dissimilar_part_count)
                     .chain(fqp_module.iter().copied()),
             );
         }
@@ -639,7 +638,7 @@ pub(crate) fn href_relative_parts<'fqp>(
         Ordering::Greater => {
             // e.g. linking to std::sync from std::sync::atomic
             let dissimilar_part_count = relative_to_fqp.len() - fqp.len();
-            Box::new(iter::repeat(sym::dotdot).take(dissimilar_part_count))
+            Box::new(iter::repeat_n(sym::dotdot, dissimilar_part_count))
         }
         Ordering::Equal => {
             // linking to the same module
@@ -770,10 +769,9 @@ fn primitive_link_fragment(
                     ExternalLocation::Local => {
                         let cname_sym = ExternalCrate { crate_num: def_id.krate }.name(cx.tcx());
                         Some(if cx.current.first() == Some(&cname_sym) {
-                            iter::repeat(sym::dotdot).take(cx.current.len() - 1).collect()
+                            iter::repeat_n(sym::dotdot, cx.current.len() - 1).collect()
                         } else {
-                            iter::repeat(sym::dotdot)
-                                .take(cx.current.len())
+                            iter::repeat_n(sym::dotdot, cx.current.len())
                                 .chain(iter::once(cname_sym))
                                 .collect()
                         })
diff --git a/src/librustdoc/html/highlight.rs b/src/librustdoc/html/highlight.rs
index b7a782e25f5..c943d3ad4d0 100644
--- a/src/librustdoc/html/highlight.rs
+++ b/src/librustdoc/html/highlight.rs
@@ -100,7 +100,7 @@ fn write_header(
     }
 
     if let Some(extra) = extra_content {
-        out.push_str(&extra);
+        out.push_str(extra);
     }
     if class.is_empty() {
         write_str(
@@ -131,7 +131,7 @@ fn write_header(
 /// * If the other `Class` is unclassified and only contains white characters (backline,
 ///   whitespace, etc), it can be merged.
 /// * `Class::Ident` is considered the same as unclassified (because it doesn't have an associated
-///    CSS class).
+///   CSS class).
 fn can_merge(class1: Option<Class>, class2: Option<Class>, text: &str) -> bool {
     match (class1, class2) {
         (Some(c1), Some(c2)) => c1.is_equal_to(c2),
@@ -233,7 +233,7 @@ impl<F: Write> TokenHandler<'_, '_, F> {
 
     #[inline]
     fn write_line_number(&mut self, line: u32, extra: &'static str) {
-        (self.write_line_number)(&mut self.out, line, extra);
+        (self.write_line_number)(self.out, line, extra);
     }
 }
 
@@ -610,7 +610,7 @@ impl Decorations {
         let (mut starts, mut ends): (Vec<_>, Vec<_>) = info
             .0
             .iter()
-            .flat_map(|(&kind, ranges)| ranges.into_iter().map(move |&(lo, hi)| ((lo, kind), hi)))
+            .flat_map(|(&kind, ranges)| ranges.iter().map(move |&(lo, hi)| ((lo, kind), hi)))
             .unzip();
 
         // Sort the sequences in document order.
diff --git a/src/librustdoc/html/markdown.rs b/src/librustdoc/html/markdown.rs
index 083b2c17a1d..079651e8603 100644
--- a/src/librustdoc/html/markdown.rs
+++ b/src/librustdoc/html/markdown.rs
@@ -1791,7 +1791,7 @@ pub(crate) fn markdown_links<'md, R>(
                     }
                 }
             } else if !c.is_ascii_whitespace() {
-                while let Some((j, c)) = iter.next() {
+                for (j, c) in iter.by_ref() {
                     if c.is_ascii_whitespace() {
                         return MarkdownLinkRange::Destination(i + span.start..j + span.start);
                     }
diff --git a/src/librustdoc/html/render/print_item.rs b/src/librustdoc/html/render/print_item.rs
index c599a84ee44..3c5c2ce1976 100644
--- a/src/librustdoc/html/render/print_item.rs
+++ b/src/librustdoc/html/render/print_item.rs
@@ -1,6 +1,6 @@
 use std::cmp::Ordering;
-use std::fmt;
-use std::fmt::{Display, Write as _};
+use std::fmt::{self, Display, Write as _};
+use std::iter;
 
 use rinja::Template;
 use rustc_abi::VariantIdx;
@@ -1192,10 +1192,8 @@ fn item_trait(cx: &Context<'_>, it: &clean::Item, t: &clean::Trait) -> impl fmt:
         // to already be in the HTML, and will be ignored.
         //
         // [JSONP]: https://en.wikipedia.org/wiki/JSONP
-        let mut js_src_path: UrlPartsBuilder = std::iter::repeat("..")
-            .take(cx.current.len())
-            .chain(std::iter::once("trait.impl"))
-            .collect();
+        let mut js_src_path: UrlPartsBuilder =
+            iter::repeat_n("..", cx.current.len()).chain(iter::once("trait.impl")).collect();
         if let Some(did) = it.item_id.as_def_id()
             && let get_extern = { || cx.shared.cache.external_paths.get(&did).map(|s| &s.0) }
             && let Some(fqp) = cx.shared.cache.exact_paths.get(&did).or_else(get_extern)
@@ -1446,10 +1444,8 @@ fn item_type_alias(cx: &Context<'_>, it: &clean::Item, t: &clean::TypeAlias) ->
             && let get_local = { || cache.paths.get(&self_did).map(|(p, _)| p) }
             && let Some(self_fqp) = cache.exact_paths.get(&self_did).or_else(get_local)
         {
-            let mut js_src_path: UrlPartsBuilder = std::iter::repeat("..")
-                .take(cx.current.len())
-                .chain(std::iter::once("type.impl"))
-                .collect();
+            let mut js_src_path: UrlPartsBuilder =
+                iter::repeat_n("..", cx.current.len()).chain(iter::once("type.impl")).collect();
             js_src_path.extend(target_fqp[..target_fqp.len() - 1].iter().copied());
             js_src_path.push_fmt(format_args!("{target_type}.{}.js", target_fqp.last().unwrap()));
             let self_path = fmt::from_fn(|f| self_fqp.iter().joined("::", f));
@@ -1493,7 +1489,7 @@ fn item_union(cx: &Context<'_>, it: &clean::Item, s: &clean::Union) -> impl fmt:
 
         fn fields_iter(
             &self,
-        ) -> std::iter::Peekable<impl Iterator<Item = (&'a clean::Item, &'a clean::Type)>> {
+        ) -> iter::Peekable<impl Iterator<Item = (&'a clean::Item, &'a clean::Type)>> {
             self.s
                 .fields
                 .iter()
diff --git a/src/librustdoc/html/render/search_index.rs b/src/librustdoc/html/render/search_index.rs
index 95f617c9839..b39701fae1d 100644
--- a/src/librustdoc/html/render/search_index.rs
+++ b/src/librustdoc/html/render/search_index.rs
@@ -842,10 +842,7 @@ pub(crate) fn get_function_type_for_search(
         }
         clean::ConstantItem(ref c) => make_nullary_fn(&c.type_),
         clean::StaticItem(ref s) => make_nullary_fn(&s.type_),
-        clean::StructFieldItem(ref t) => {
-            let Some(parent) = parent else {
-                return None;
-            };
+        clean::StructFieldItem(ref t) if let Some(parent) = parent => {
             let mut rgen: FxIndexMap<SimplifiedParam, (isize, Vec<RenderType>)> =
                 Default::default();
             let output = get_index_type(t, vec![], &mut rgen);
diff --git a/src/librustdoc/html/render/search_index/encode.rs b/src/librustdoc/html/render/search_index/encode.rs
index 8816ea65059..de2f54558ff 100644
--- a/src/librustdoc/html/render/search_index/encode.rs
+++ b/src/librustdoc/html/render/search_index/encode.rs
@@ -182,9 +182,9 @@ pub(crate) fn write_bitmap_to_bytes(
             out.write_all(&[b])?;
         }
         if size < NO_OFFSET_THRESHOLD {
-            4 + 4 * size + ((size + 7) / 8)
+            4 + 4 * size + size.div_ceil(8)
         } else {
-            4 + 8 * size + ((size + 7) / 8)
+            4 + 8 * size + size.div_ceil(8)
         }
     } else {
         out.write_all(&u32::to_le_bytes(SERIAL_COOKIE_NO_RUNCONTAINER))?;
diff --git a/src/librustdoc/html/render/sidebar.rs b/src/librustdoc/html/render/sidebar.rs
index 64dbaf9083e..3130815af0b 100644
--- a/src/librustdoc/html/render/sidebar.rs
+++ b/src/librustdoc/html/render/sidebar.rs
@@ -79,7 +79,7 @@ impl<'a> LinkBlock<'a> {
 }
 
 /// A link to an item. Content should not be escaped.
-#[derive(Ord, PartialEq, Eq, Hash, Clone)]
+#[derive(PartialEq, Eq, Hash, Clone)]
 pub(crate) struct Link<'a> {
     /// The content for the anchor tag and title attr
     name: Cow<'a, str>,
@@ -91,13 +91,13 @@ pub(crate) struct Link<'a> {
     children: Vec<Link<'a>>,
 }
 
-impl PartialOrd for Link<'_> {
-    fn partial_cmp(&self, other: &Link<'_>) -> Option<Ordering> {
+impl Ord for Link<'_> {
+    fn cmp(&self, other: &Self) -> Ordering {
         match compare_names(&self.name, &other.name) {
-            Ordering::Equal => (),
-            result => return Some(result),
+            Ordering::Equal => {}
+            result => return result,
         }
-        (&self.name_html, &self.href, &self.children).partial_cmp(&(
+        (&self.name_html, &self.href, &self.children).cmp(&(
             &other.name_html,
             &other.href,
             &other.children,
@@ -105,6 +105,12 @@ impl PartialOrd for Link<'_> {
     }
 }
 
+impl PartialOrd for Link<'_> {
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
 impl<'a> Link<'a> {
     pub fn new(href: impl Into<Cow<'a, str>>, name: impl Into<Cow<'a, str>>) -> Self {
         Self { href: href.into(), name: name.into(), children: vec![], name_html: None }
diff --git a/src/librustdoc/passes/collect_intra_doc_links.rs b/src/librustdoc/passes/collect_intra_doc_links.rs
index 4ffce85851c..fdbb792d25d 100644
--- a/src/librustdoc/passes/collect_intra_doc_links.rs
+++ b/src/librustdoc/passes/collect_intra_doc_links.rs
@@ -2057,7 +2057,7 @@ fn resolution_failure(
                                 return;
                             }
                             Trait
-                            | TyAlias { .. }
+                            | TyAlias
                             | ForeignTy
                             | OpaqueTy
                             | TraitAlias
diff --git a/src/librustdoc/passes/propagate_stability.rs b/src/librustdoc/passes/propagate_stability.rs
index 8cf39afd55c..fdab2b08779 100644
--- a/src/librustdoc/passes/propagate_stability.rs
+++ b/src/librustdoc/passes/propagate_stability.rs
@@ -39,15 +39,15 @@ impl DocFolder for StabilityPropagator<'_, '_> {
                 let item_stability = self.cx.tcx.lookup_stability(def_id);
                 let inline_stability =
                     item.inline_stmt_id.and_then(|did| self.cx.tcx.lookup_stability(did));
-                let is_glob_export = item.inline_stmt_id.and_then(|id| {
+                let is_glob_export = item.inline_stmt_id.map(|id| {
                     let hir_id = self.cx.tcx.local_def_id_to_hir_id(id);
-                    Some(matches!(
+                    matches!(
                         self.cx.tcx.hir_node(hir_id),
                         rustc_hir::Node::Item(rustc_hir::Item {
                             kind: rustc_hir::ItemKind::Use(_, rustc_hir::UseKind::Glob),
                             ..
                         })
-                    ))
+                    )
                 });
                 let own_stability = if let Some(item_stab) = item_stability
                     && let StabilityLevel::Stable { since: _, allowed_through_unstable_modules } =
diff --git a/tests/codegen/asm/critical.rs b/tests/codegen/asm/critical.rs
new file mode 100644
index 00000000000..8c039900cab
--- /dev/null
+++ b/tests/codegen/asm/critical.rs
@@ -0,0 +1,37 @@
+//@ only-x86_64
+//@ compile-flags: -C no-prepopulate-passes
+#![feature(asm_goto)]
+#![feature(asm_goto_with_outputs)]
+#![crate_type = "lib"]
+use std::arch::asm;
+
+// Regression test for #137867. Check that critical edges have been split before code generation,
+// and so all stores to the asm output occur on disjoint paths without any of them jumping to
+// another callbr label.
+//
+// CHECK-LABEL: @f(
+// CHECK:        [[OUT:%.*]] = callbr i32 asm
+// CHECK-NEXT:   to label %[[BB0:.*]] [label %[[BB1:.*]], label %[[BB2:.*]]],
+// CHECK:       [[BB1]]:
+// CHECK-NEXT:    store i32 [[OUT]], ptr %a
+// CHECK-NEXT:    br label %[[BBR:.*]]
+// CHECK:       [[BB2]]:
+// CHECK-NEXT:    store i32 [[OUT]], ptr %a
+// CHECK-NEXT:    br label %[[BBR]]
+// CHECK:       [[BB0]]:
+// CHECK-NEXT:    store i32 [[OUT]], ptr %a
+// CHECK-NEXT:    br label %[[BBR]]
+// CHECK:       [[BBR]]:
+// CHECK-NEXT:    [[RET:%.*]] = load i32, ptr %a
+// CHECK-NEXT:    ret i32 [[RET]]
+#[unsafe(no_mangle)]
+pub unsafe fn f(mut a: u32) -> u32 {
+    asm!(
+        "jmp {}
+         jmp {}",
+        label {},
+        label {},
+        inout("eax") a,
+    );
+    a
+}
diff --git a/tests/ui/unpretty/deprecated-attr.rs b/tests/ui/unpretty/deprecated-attr.rs
index dda362a595e..24a32d8a9ac 100644
--- a/tests/ui/unpretty/deprecated-attr.rs
+++ b/tests/ui/unpretty/deprecated-attr.rs
@@ -1,6 +1,8 @@
 //@ compile-flags: -Zunpretty=hir
 //@ check-pass
 
+// FIXME(jdonszelmann): the pretty printing output for deprecated (and possibly more attrs) is
+// slightly broken.
 #[deprecated]
 pub struct PlainDeprecated;
 
diff --git a/tests/ui/unpretty/deprecated-attr.stdout b/tests/ui/unpretty/deprecated-attr.stdout
index 60dbac1072b..675351351a0 100644
--- a/tests/ui/unpretty/deprecated-attr.stdout
+++ b/tests/ui/unpretty/deprecated-attr.stdout
@@ -5,17 +5,24 @@ extern crate std;
 //@ compile-flags: -Zunpretty=hir
 //@ check-pass
 
-#[deprecated]
+// FIXME(jdonszelmann): the pretty printing output for deprecated (and possibly more attrs) is
+// slightly broken.
+#[attr="Deprecation{deprecation: Deprecation{since: Unspecifiednote:
+suggestion: }span: }")]
 struct PlainDeprecated;
 
-#[deprecated = "here's why this is deprecated"]
+#[attr="Deprecation{deprecation: Deprecation{since: Unspecifiednote:
+here's why this is deprecatedsuggestion: }span: }")]
 struct DirectNote;
 
-#[deprecated = "here's why this is deprecated"]
+#[attr="Deprecation{deprecation: Deprecation{since: Unspecifiednote:
+here's why this is deprecatedsuggestion: }span: }")]
 struct ExplicitNote;
 
-#[deprecated(since = "1.2.3", note = "here's why this is deprecated"]
+#[attr="Deprecation{deprecation: Deprecation{since: NonStandard(1.2.3)note:
+here's why this is deprecatedsuggestion: }span: }")]
 struct SinceAndNote;
 
-#[deprecated(since = "1.2.3", note = "here's why this is deprecated"]
+#[attr="Deprecation{deprecation: Deprecation{since: NonStandard(1.2.3)note:
+here's why this is deprecatedsuggestion: }span: }")]
 struct FlippedOrder;