diff --git a/compiler/rustc_arena/src/lib.rs b/compiler/rustc_arena/src/lib.rs
index b21ccba93bb..3b44e996358 100644
--- a/compiler/rustc_arena/src/lib.rs
+++ b/compiler/rustc_arena/src/lib.rs
@@ -93,7 +93,7 @@ impl<T> ArenaChunk<T> {
     #[inline]
     fn end(&mut self) -> *mut T {
         unsafe {
-            if mem::size_of::<T>() == 0 {
+            if size_of::<T>() == 0 {
                 // A pointer as large as possible for zero-sized elements.
                 ptr::without_provenance_mut(!0)
             } else {
@@ -151,7 +151,7 @@ impl<T> TypedArena<T> {
         }
 
         unsafe {
-            if mem::size_of::<T>() == 0 {
+            if size_of::<T>() == 0 {
                 self.ptr.set(self.ptr.get().wrapping_byte_add(1));
                 let ptr = ptr::NonNull::<T>::dangling().as_ptr();
                 // Don't drop the object. This `write` is equivalent to `forget`.
@@ -173,13 +173,13 @@ impl<T> TypedArena<T> {
         // FIXME: this should *likely* use `offset_from`, but more
         // investigation is needed (including running tests in miri).
         let available_bytes = self.end.get().addr() - self.ptr.get().addr();
-        let additional_bytes = additional.checked_mul(mem::size_of::<T>()).unwrap();
+        let additional_bytes = additional.checked_mul(size_of::<T>()).unwrap();
         available_bytes >= additional_bytes
     }
 
     #[inline]
     fn alloc_raw_slice(&self, len: usize) -> *mut T {
-        assert!(mem::size_of::<T>() != 0);
+        assert!(size_of::<T>() != 0);
         assert!(len != 0);
 
         // Ensure the current chunk can fit `len` objects.
@@ -213,7 +213,7 @@ impl<T> TypedArena<T> {
         // So we collect all the elements beforehand, which takes care of reentrancy and panic
         // safety. This function is much less hot than `DroplessArena::alloc_from_iter`, so it
         // doesn't need to be hyper-optimized.
-        assert!(mem::size_of::<T>() != 0);
+        assert!(size_of::<T>() != 0);
 
         let mut vec: SmallVec<[_; 8]> = iter.into_iter().collect();
         if vec.is_empty() {
@@ -236,7 +236,7 @@ impl<T> TypedArena<T> {
         unsafe {
             // We need the element size to convert chunk sizes (ranging from
             // PAGE to HUGE_PAGE bytes) to element counts.
-            let elem_size = cmp::max(1, mem::size_of::<T>());
+            let elem_size = cmp::max(1, size_of::<T>());
             let mut chunks = self.chunks.borrow_mut();
             let mut new_cap;
             if let Some(last_chunk) = chunks.last_mut() {
@@ -246,7 +246,7 @@ impl<T> TypedArena<T> {
                     // FIXME: this should *likely* use `offset_from`, but more
                     // investigation is needed (including running tests in miri).
                     let used_bytes = self.ptr.get().addr() - last_chunk.start().addr();
-                    last_chunk.entries = used_bytes / mem::size_of::<T>();
+                    last_chunk.entries = used_bytes / size_of::<T>();
                 }
 
                 // If the previous chunk's len is less than HUGE_PAGE
@@ -276,7 +276,7 @@ impl<T> TypedArena<T> {
         let end = self.ptr.get().addr();
         // We then calculate the number of elements to be dropped in the last chunk,
         // which is the filled area's length.
-        let diff = if mem::size_of::<T>() == 0 {
+        let diff = if size_of::<T>() == 0 {
             // `T` is ZST. It can't have a drop flag, so the value here doesn't matter. We get
             // the number of zero-sized values in the last and only chunk, just out of caution.
             // Recall that `end` was incremented for each allocated value.
@@ -284,7 +284,7 @@ impl<T> TypedArena<T> {
         } else {
             // FIXME: this should *likely* use `offset_from`, but more
             // investigation is needed (including running tests in miri).
-            (end - start) / mem::size_of::<T>()
+            (end - start) / size_of::<T>()
         };
         // Pass that to the `destroy` method.
         unsafe {
@@ -329,7 +329,7 @@ fn align_up(val: usize, align: usize) -> usize {
 
 // Pointer alignment is common in compiler types, so keep `DroplessArena` aligned to them
 // to optimize away alignment code.
-const DROPLESS_ALIGNMENT: usize = mem::align_of::<usize>();
+const DROPLESS_ALIGNMENT: usize = align_of::<usize>();
 
 /// An arena that can hold objects of multiple different types that impl `Copy`
 /// and/or satisfy `!mem::needs_drop`.
@@ -447,7 +447,7 @@ impl DroplessArena {
     #[inline]
     pub fn alloc<T>(&self, object: T) -> &mut T {
         assert!(!mem::needs_drop::<T>());
-        assert!(mem::size_of::<T>() != 0);
+        assert!(size_of::<T>() != 0);
 
         let mem = self.alloc_raw(Layout::new::<T>()) as *mut T;
 
@@ -471,7 +471,7 @@ impl DroplessArena {
         T: Copy,
     {
         assert!(!mem::needs_drop::<T>());
-        assert!(mem::size_of::<T>() != 0);
+        assert!(size_of::<T>() != 0);
         assert!(!slice.is_empty());
 
         let mem = self.alloc_raw(Layout::for_value::<[T]>(slice)) as *mut T;
@@ -546,7 +546,7 @@ impl DroplessArena {
         // Warning: this function is reentrant: `iter` could hold a reference to `&self` and
         // allocate additional elements while we're iterating.
         let iter = iter.into_iter();
-        assert!(mem::size_of::<T>() != 0);
+        assert!(size_of::<T>() != 0);
         assert!(!mem::needs_drop::<T>());
 
         let size_hint = iter.size_hint();
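
Context for the whole patch: `size_of`, `size_of_val`, `align_of`, and `align_of_val` were added to the prelude of every edition in Rust 1.80, so the `mem::`-qualified paths are now redundant. A minimal standalone sketch of what the prelude provides (nothing here is imported):

```rust
// No `use std::mem;` needed: since Rust 1.80 these four functions are
// available through the prelude in all editions.
fn main() {
    assert_eq!(size_of::<u64>(), 8);
    assert_eq!(align_of::<u8>(), 1);
    let bytes = [1u8, 2, 3];
    assert_eq!(size_of_val(&bytes), 3);
    assert_eq!(align_of_val(&bytes), 1);
}
```
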
diff --git a/compiler/rustc_codegen_gcc/src/builder.rs b/compiler/rustc_codegen_gcc/src/builder.rs
index c8b7616e645..6573b5b165e 100644
--- a/compiler/rustc_codegen_gcc/src/builder.rs
+++ b/compiler/rustc_codegen_gcc/src/builder.rs
@@ -2439,9 +2439,5 @@ fn get_maybe_pointer_size(value: RValue<'_>) -> u32 {
 #[cfg(not(feature = "master"))]
 fn get_maybe_pointer_size(value: RValue<'_>) -> u32 {
     let type_ = value.get_type();
-    if type_.get_pointee().is_some() {
-        std::mem::size_of::<*const ()>() as _
-    } else {
-        type_.get_size()
-    }
+    if type_.get_pointee().is_some() { size_of::<*const ()>() as _ } else { type_.get_size() }
 }
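
The fallback above reports the target pointer width for any pointer type; `*const ()` works as the measuring stick because all thin pointers share one size. A small sketch of that assumption:

```rust
// On current targets every thin pointer has the same size as usize, so
// `*const ()` stands in for "any pointer" when no pointee type is at hand.
fn main() {
    assert_eq!(size_of::<*const ()>(), size_of::<usize>());
    assert_eq!(size_of::<*mut u8>(), size_of::<*const ()>());
}
```
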
diff --git a/compiler/rustc_codegen_ssa/src/back/link.rs b/compiler/rustc_codegen_ssa/src/back/link.rs
index 3407117a06e..7d9971c021d 100644
--- a/compiler/rustc_codegen_ssa/src/back/link.rs
+++ b/compiler/rustc_codegen_ssa/src/back/link.rs
@@ -1177,7 +1177,7 @@ mod win {
             let mut cp: u32 = 0;
             // We're using the `LOCALE_RETURN_NUMBER` flag to return a u32.
             // But the API requires us to pass the data as though it's a [u16] string.
-            let len = std::mem::size_of::<u32>() / std::mem::size_of::<u16>();
+            let len = size_of::<u32>() / size_of::<u16>();
             let data = std::slice::from_raw_parts_mut(&mut cp as *mut u32 as *mut u16, len);
             let len_written = GetLocaleInfoEx(
                 LOCALE_NAME_SYSTEM_DEFAULT,
diff --git a/compiler/rustc_data_structures/src/aligned.rs b/compiler/rustc_data_structures/src/aligned.rs
index 0e5ecfd9bff..a636d09fcae 100644
--- a/compiler/rustc_data_structures/src/aligned.rs
+++ b/compiler/rustc_data_structures/src/aligned.rs
@@ -2,10 +2,8 @@ use std::ptr::Alignment;
 
 /// Returns the ABI-required minimum alignment of a type in bytes.
 ///
-/// This is equivalent to [`mem::align_of`], but also works for some unsized
+/// This is equivalent to [`align_of`], but also works for some unsized
 /// types (e.g. slices or rustc's `List`s).
-///
-/// [`mem::align_of`]: std::mem::align_of
 pub const fn align_of<T: ?Sized + Aligned>() -> Alignment {
     T::ALIGN
 }
@@ -15,10 +13,10 @@ pub const fn align_of<T: ?Sized + Aligned>() -> Alignment {
 /// # Safety
 ///
 /// `Self::ALIGN` must be equal to the alignment of `Self`. For sized types it
-/// is [`mem::align_of<Self>()`], for unsized types it depends on the type, for
+/// is [`align_of::<Self>()`], for unsized types it depends on the type, for
 /// example `[T]` has alignment of `T`.
 ///
-/// [`mem::align_of<Self>()`]: std::mem::align_of
+/// [`align_of::<Self>()`]: align_of
 pub unsafe trait Aligned {
     /// Alignment of `Self`.
     const ALIGN: Alignment;
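
For readers unfamiliar with this trait, the contract is easiest to see from impls. A sketch of what satisfying impls can look like, assuming the nightly `ptr_alignment_type` feature for `Alignment::of`; the crate's real impls may differ in detail:

```rust
#![feature(ptr_alignment_type)]
use std::ptr::Alignment;

pub unsafe trait Aligned {
    const ALIGN: Alignment;
}

// A sized type's ALIGN is simply its own alignment...
unsafe impl<T> Aligned for T {
    const ALIGN: Alignment = Alignment::of::<T>();
}

// ...while a slice inherits the alignment of its element type, even though
// `align_of::<[T]>()` itself would not compile for an unsized type.
unsafe impl<T> Aligned for [T] {
    const ALIGN: Alignment = Alignment::of::<T>();
}
```
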
diff --git a/compiler/rustc_data_structures/src/profiling.rs b/compiler/rustc_data_structures/src/profiling.rs
index 39db551adfb..60f007083ba 100644
--- a/compiler/rustc_data_structures/src/profiling.rs
+++ b/compiler/rustc_data_structures/src/profiling.rs
@@ -863,15 +863,13 @@ fn get_thread_id() -> u32 {
 cfg_match! {
     windows => {
         pub fn get_resident_set_size() -> Option<usize> {
-            use std::mem;
-
             use windows::{
                 Win32::System::ProcessStatus::{K32GetProcessMemoryInfo, PROCESS_MEMORY_COUNTERS},
                 Win32::System::Threading::GetCurrentProcess,
             };
 
             let mut pmc = PROCESS_MEMORY_COUNTERS::default();
-            let pmc_size = mem::size_of_val(&pmc);
+            let pmc_size = size_of_val(&pmc);
             unsafe {
                 K32GetProcessMemoryInfo(
                     GetCurrentProcess(),
@@ -889,7 +887,7 @@ cfg_match! {
         pub fn get_resident_set_size() -> Option<usize> {
             use libc::{c_int, c_void, getpid, proc_pidinfo, proc_taskinfo, PROC_PIDTASKINFO};
             use std::mem;
-            const PROC_TASKINFO_SIZE: c_int = mem::size_of::<proc_taskinfo>() as c_int;
+            const PROC_TASKINFO_SIZE: c_int = size_of::<proc_taskinfo>() as c_int;
 
             unsafe {
                 let mut info: proc_taskinfo = mem::zeroed();
diff --git a/compiler/rustc_data_structures/src/sharded.rs b/compiler/rustc_data_structures/src/sharded.rs
index e6be9c256f0..3016348f224 100644
--- a/compiler/rustc_data_structures/src/sharded.rs
+++ b/compiler/rustc_data_structures/src/sharded.rs
@@ -1,7 +1,7 @@
 use std::borrow::Borrow;
 use std::collections::hash_map::RawEntryMut;
 use std::hash::{Hash, Hasher};
-use std::{iter, mem};
+use std::iter;
 
 use either::Either;
 
@@ -221,7 +221,7 @@ pub fn make_hash<K: Hash + ?Sized>(val: &K) -> u64 {
 /// consistently for each `Sharded` instance.
 #[inline]
 fn get_shard_hash(hash: u64) -> usize {
-    let hash_len = mem::size_of::<usize>();
+    let hash_len = size_of::<usize>();
     // Ignore the top 7 bits, which hashbrown uses, and take the next SHARD_BITS highest bits.
     // hashbrown also uses the lowest bits, so we can't use those either.
     (hash >> (hash_len * 8 - 7 - SHARD_BITS)) as usize
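
A worked example of the bit selection in `get_shard_hash`, assuming a 64-bit `usize` and an illustrative `SHARD_BITS` of 5 (32 shards); the real constant is defined elsewhere in this module:

```rust
// With a 64-bit usize the shift is 64 - 7 - 5 = 52: the top 7 bits (used by
// hashbrown) are skipped, and reducing mod SHARDS keeps the 5 bits below them.
const SHARD_BITS: usize = 5;
const SHARDS: usize = 1 << SHARD_BITS;

fn get_shard_hash(hash: u64) -> usize {
    let hash_len = size_of::<usize>();
    (hash >> (hash_len * 8 - 7 - SHARD_BITS)) as usize
}

fn main() {
    let hash = 0b1111111_10110_u64 << 52; // top 7 bits set, shard bits = 0b10110
    assert_eq!(get_shard_hash(hash) % SHARDS, 0b10110);
}
```
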
diff --git a/compiler/rustc_errors/src/diagnostic.rs b/compiler/rustc_errors/src/diagnostic.rs
index 7fffeaddb86..9f4d2ea5c1a 100644
--- a/compiler/rustc_errors/src/diagnostic.rs
+++ b/compiler/rustc_errors/src/diagnostic.rs
@@ -490,7 +490,7 @@ pub struct Diag<'a, G: EmissionGuarantee = ErrorGuaranteed> {
 // would be bad.
 impl<G> !Clone for Diag<'_, G> {}
 
-rustc_data_structures::static_assert_size!(Diag<'_, ()>, 3 * std::mem::size_of::<usize>());
+rustc_data_structures::static_assert_size!(Diag<'_, ()>, 3 * size_of::<usize>());
 
 impl<G: EmissionGuarantee> Deref for Diag<'_, G> {
     type Target = DiagInner;
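
For reference, `static_assert_size!` boils down to the classic array-length trick: two array types whose lengths must const-evaluate to the same value. Roughly as sketched here; the macro's exact expansion may differ:

```rust
// If the struct's size ever drifts from the expected value, the two array
// lengths disagree and compilation fails.
struct ThreeWords(usize, usize, usize);
const _: [(); 3 * size_of::<usize>()] = [(); size_of::<ThreeWords>()];
```
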
diff --git a/compiler/rustc_hir/src/def.rs b/compiler/rustc_hir/src/def.rs
index ea2581787e4..5f8941d4754 100644
--- a/compiler/rustc_hir/src/def.rs
+++ b/compiler/rustc_hir/src/def.rs
@@ -435,7 +435,7 @@ pub enum Res<Id = hir::HirId> {
         /// mention any generic parameters to allow the following with `min_const_generics`:
         /// ```
         /// # struct Foo;
-        /// impl Foo { fn test() -> [u8; std::mem::size_of::<Self>()] { todo!() } }
+        /// impl Foo { fn test() -> [u8; size_of::<Self>()] { todo!() } }
         ///
         /// struct Bar([u8; baz::<Self>()]);
         /// const fn baz<T>() -> usize { 10 }
@@ -445,7 +445,7 @@ pub enum Res<Id = hir::HirId> {
         /// compat lint:
         /// ```
         /// fn foo<T>() {
-        ///     let _bar = [1_u8; std::mem::size_of::<*mut T>()];
+        ///     let _bar = [1_u8; size_of::<*mut T>()];
         /// }
         /// ```
         // FIXME(generic_const_exprs): Remove this bodge once that feature is stable.
diff --git a/compiler/rustc_incremental/src/persist/file_format.rs b/compiler/rustc_incremental/src/persist/file_format.rs
index 4c664ea6ba0..9cec2702a41 100644
--- a/compiler/rustc_incremental/src/persist/file_format.rs
+++ b/compiler/rustc_incremental/src/persist/file_format.rs
@@ -123,7 +123,7 @@ pub(crate) fn read_file(
 
     // Check HEADER_FORMAT_VERSION
     {
-        debug_assert!(::std::mem::size_of_val(&HEADER_FORMAT_VERSION) == 2);
+        debug_assert!(size_of_val(&HEADER_FORMAT_VERSION) == 2);
         let mut header_format_version = [0u8; 2];
         file.read_exact(&mut header_format_version)?;
         let header_format_version =
diff --git a/compiler/rustc_index/src/bit_set.rs b/compiler/rustc_index/src/bit_set.rs
index 6dc044a6d38..598409c9051 100644
--- a/compiler/rustc_index/src/bit_set.rs
+++ b/compiler/rustc_index/src/bit_set.rs
@@ -1,7 +1,9 @@
 use std::marker::PhantomData;
+#[cfg(not(feature = "nightly"))]
+use std::mem;
 use std::ops::{BitAnd, BitAndAssign, BitOrAssign, Bound, Not, Range, RangeBounds, Shl};
 use std::rc::Rc;
-use std::{fmt, iter, mem, slice};
+use std::{fmt, iter, slice};
 
 use Chunk::*;
 #[cfg(feature = "nightly")]
@@ -14,7 +16,7 @@ use crate::{Idx, IndexVec};
 mod tests;
 
 type Word = u64;
-const WORD_BYTES: usize = mem::size_of::<Word>();
+const WORD_BYTES: usize = size_of::<Word>();
 const WORD_BITS: usize = WORD_BYTES * 8;
 
 // The choice of chunk size has some trade-offs.
diff --git a/compiler/rustc_index/src/vec/tests.rs b/compiler/rustc_index/src/vec/tests.rs
index 381d79c24fc..5b0ca4b2b92 100644
--- a/compiler/rustc_index/src/vec/tests.rs
+++ b/compiler/rustc_index/src/vec/tests.rs
@@ -9,8 +9,6 @@ crate::newtype_index! {
 
 #[test]
 fn index_size_is_optimized() {
-    use std::mem::size_of;
-
     assert_eq!(size_of::<MyIdx>(), 4);
     // Uses 0xFFFF_FFFB
     assert_eq!(size_of::<Option<MyIdx>>(), 4);
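
The test relies on `newtype_index!` reserving a niche so that wrapping in `Option` costs nothing; the same layout optimization is observable with standard types, as this minimal illustration shows:

```rust
// NonZeroU32 reserves 0 as a niche, so Option<NonZeroU32> can encode None
// as 0 and needs no separate discriminant byte.
use std::num::NonZeroU32;

fn main() {
    assert_eq!(size_of::<NonZeroU32>(), 4);
    assert_eq!(size_of::<Option<NonZeroU32>>(), 4);
}
```
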
diff --git a/compiler/rustc_lint_defs/src/builtin.rs b/compiler/rustc_lint_defs/src/builtin.rs
index db773ef597a..592c934997c 100644
--- a/compiler/rustc_lint_defs/src/builtin.rs
+++ b/compiler/rustc_lint_defs/src/builtin.rs
@@ -2673,7 +2673,7 @@ declare_lint! {
     ///
     /// ```rust
     /// const fn foo<T>() -> usize {
-    ///     if std::mem::size_of::<*mut T>() < 8 { // size of *mut T does not depend on T
+    ///     if size_of::<*mut T>() < 8 { // size of *mut T does not depend on T
     ///         4
     ///     } else {
     ///         8
diff --git a/compiler/rustc_middle/src/mir/interpret/allocation/init_mask.rs b/compiler/rustc_middle/src/mir/interpret/allocation/init_mask.rs
index cc6389e2989..fea5038e6dd 100644
--- a/compiler/rustc_middle/src/mir/interpret/allocation/init_mask.rs
+++ b/compiler/rustc_middle/src/mir/interpret/allocation/init_mask.rs
@@ -223,8 +223,8 @@ impl<D: TyDecoder> Decodable<D> for InitMaskMaterialized {
 // large.
 impl hash::Hash for InitMaskMaterialized {
     fn hash<H: hash::Hasher>(&self, state: &mut H) {
-        const MAX_BLOCKS_TO_HASH: usize = super::MAX_BYTES_TO_HASH / std::mem::size_of::<Block>();
-        const MAX_BLOCKS_LEN: usize = super::MAX_HASHED_BUFFER_LEN / std::mem::size_of::<Block>();
+        const MAX_BLOCKS_TO_HASH: usize = super::MAX_BYTES_TO_HASH / size_of::<Block>();
+        const MAX_BLOCKS_LEN: usize = super::MAX_HASHED_BUFFER_LEN / size_of::<Block>();
 
         // Partially hash the `blocks` buffer when it is large. To limit collisions with common
         // prefixes and suffixes, we hash the length and some slices of the buffer.
diff --git a/compiler/rustc_middle/src/mir/interpret/mod.rs b/compiler/rustc_middle/src/mir/interpret/mod.rs
index c48cfffa05c..2675b7e0fc5 100644
--- a/compiler/rustc_middle/src/mir/interpret/mod.rs
+++ b/compiler/rustc_middle/src/mir/interpret/mod.rs
@@ -573,7 +573,7 @@ pub fn write_target_uint(
 #[inline]
 pub fn read_target_uint(endianness: Endian, mut source: &[u8]) -> Result<u128, io::Error> {
     // This u128 holds an "any-size uint" (since smaller uints can fit in it)
-    let mut buf = [0u8; std::mem::size_of::<u128>()];
+    let mut buf = [0u8; size_of::<u128>()];
     // So we do not read exactly 16 bytes into the u128, just the "payload".
     let uint = match endianness {
         Endian::Little => {
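
A standalone sketch of the little-endian arm of `read_target_uint`: copy the payload into the low bytes of a 16-byte buffer, then reinterpret the whole buffer. Names here are illustrative, not the compiler's:

```rust
// Reading a 2-byte little-endian payload into a u128 "any-size uint".
fn read_le_uint(source: &[u8]) -> u128 {
    let mut buf = [0u8; size_of::<u128>()];
    buf[..source.len()].copy_from_slice(source); // only the payload bytes
    u128::from_le_bytes(buf)                     // the rest stay zero
}

fn main() {
    assert_eq!(read_le_uint(&[0x34, 0x12]), 0x1234);
}
```
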
diff --git a/compiler/rustc_middle/src/mir/mod.rs b/compiler/rustc_middle/src/mir/mod.rs
index ea0bb5feb12..83857ab6c5c 100644
--- a/compiler/rustc_middle/src/mir/mod.rs
+++ b/compiler/rustc_middle/src/mir/mod.rs
@@ -332,13 +332,13 @@ pub struct Body<'tcx> {
     ///
     /// ```rust
     /// fn test<T>() {
-    ///     let _ = [0; std::mem::size_of::<*mut T>()];
+    ///     let _ = [0; size_of::<*mut T>()];
     /// }
     /// ```
     ///
     /// **WARNING**: Do not change this flags after the MIR was originally created, even if an optimization
     /// removed the last mention of all generic params. We do not want to rely on optimizations and
-    /// potentially allow things like `[u8; std::mem::size_of::<T>() * 0]` due to this.
+    /// potentially allow things like `[u8; size_of::<T>() * 0]` due to this.
     pub is_polymorphic: bool,
 
     /// The phase at which this MIR should be "injected" into the compilation process.
diff --git a/compiler/rustc_middle/src/query/erase.rs b/compiler/rustc_middle/src/query/erase.rs
index cbd60920bc5..7e1209a2a7f 100644
--- a/compiler/rustc_middle/src/query/erase.rs
+++ b/compiler/rustc_middle/src/query/erase.rs
@@ -27,7 +27,7 @@ pub type Erase<T: EraseType> = Erased<impl Copy>;
 pub fn erase<T: EraseType>(src: T) -> Erase<T> {
     // Ensure the sizes match
     const {
-        if std::mem::size_of::<T>() != std::mem::size_of::<T::Result>() {
+        if size_of::<T>() != size_of::<T::Result>() {
             panic!("size of T must match erased type T::Result")
         }
     };
diff --git a/compiler/rustc_middle/src/query/plumbing.rs b/compiler/rustc_middle/src/query/plumbing.rs
index 66a9e5fed4c..d78a589f203 100644
--- a/compiler/rustc_middle/src/query/plumbing.rs
+++ b/compiler/rustc_middle/src/query/plumbing.rs
@@ -370,7 +370,7 @@ macro_rules! define_callbacks {
                 // Increase this limit if necessary, but do try to keep the size low if possible
                 #[cfg(target_pointer_width = "64")]
                 const _: () = {
-                    if mem::size_of::<Key<'static>>() > 88 {
+                    if size_of::<Key<'static>>() > 88 {
                         panic!("{}", concat!(
                             "the query `",
                             stringify!($name),
@@ -386,7 +386,7 @@ macro_rules! define_callbacks {
                 #[cfg(target_pointer_width = "64")]
                 #[cfg(not(feature = "rustc_randomized_layouts"))]
                 const _: () = {
-                    if mem::size_of::<Value<'static>>() > 64 {
+                    if size_of::<Value<'static>>() > 64 {
                         panic!("{}", concat!(
                             "the query `",
                             stringify!($name),
diff --git a/compiler/rustc_middle/src/ty/consts/int.rs b/compiler/rustc_middle/src/ty/consts/int.rs
index 7c9280fae16..9f5e31d894c 100644
--- a/compiler/rustc_middle/src/ty/consts/int.rs
+++ b/compiler/rustc_middle/src/ty/consts/int.rs
@@ -408,7 +408,7 @@ macro_rules! from_x_for_scalar_int {
                 fn from(u: $ty) -> Self {
                     Self {
                         data: u128::from(u),
-                        size: NonZero::new(std::mem::size_of::<$ty>() as u8).unwrap(),
+                        size: NonZero::new(size_of::<$ty>() as u8).unwrap(),
                     }
                 }
             }
@@ -424,7 +424,7 @@ macro_rules! from_scalar_int_for_x {
                 fn from(int: ScalarInt) -> Self {
                     // The `unwrap` cannot fail because to_bits (if it succeeds)
                     // is guaranteed to return a value that fits into the size.
-                    int.to_bits(Size::from_bytes(std::mem::size_of::<$ty>()))
+                    int.to_bits(Size::from_bytes(size_of::<$ty>()))
                        .try_into().unwrap()
                 }
             }
diff --git a/compiler/rustc_middle/src/ty/generic_args.rs b/compiler/rustc_middle/src/ty/generic_args.rs
index 27576a2ec4a..f24910477dc 100644
--- a/compiler/rustc_middle/src/ty/generic_args.rs
+++ b/compiler/rustc_middle/src/ty/generic_args.rs
@@ -2,7 +2,6 @@
 
 use core::intrinsics;
 use std::marker::PhantomData;
-use std::mem;
 use std::num::NonZero;
 use std::ptr::NonNull;
 
@@ -176,17 +175,17 @@ impl<'tcx> GenericArgKind<'tcx> {
         let (tag, ptr) = match self {
             GenericArgKind::Lifetime(lt) => {
                 // Ensure we can use the tag bits.
-                assert_eq!(mem::align_of_val(&*lt.0.0) & TAG_MASK, 0);
+                assert_eq!(align_of_val(&*lt.0.0) & TAG_MASK, 0);
                 (REGION_TAG, NonNull::from(lt.0.0).cast())
             }
             GenericArgKind::Type(ty) => {
                 // Ensure we can use the tag bits.
-                assert_eq!(mem::align_of_val(&*ty.0.0) & TAG_MASK, 0);
+                assert_eq!(align_of_val(&*ty.0.0) & TAG_MASK, 0);
                 (TYPE_TAG, NonNull::from(ty.0.0).cast())
             }
             GenericArgKind::Const(ct) => {
                 // Ensure we can use the tag bits.
-                assert_eq!(mem::align_of_val(&*ct.0.0) & TAG_MASK, 0);
+                assert_eq!(align_of_val(&*ct.0.0) & TAG_MASK, 0);
                 (CONST_TAG, NonNull::from(ct.0.0).cast())
             }
         };
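
These assertions guard the invariant behind the whole tagged-pointer scheme: the pointee's alignment must leave the low bits of its address free. A minimal sketch of the technique with a 2-bit tag (simplified; the real code keeps provenance by staying in `NonNull`):

```rust
// Values aligned to at least 4 bytes have addresses whose two low bits are
// zero, so a 2-bit tag can be packed into them and masked back out.
const TAG_MASK: usize = 0b11;

fn pack(ptr: &u32, tag: usize) -> usize {
    let addr = ptr as *const u32 as usize;
    assert_eq!(addr & TAG_MASK, 0); // holds because align_of::<u32>() == 4
    addr | tag
}

fn unpack(packed: usize) -> (*const u32, usize) {
    ((packed & !TAG_MASK) as *const u32, packed & TAG_MASK)
}

fn main() {
    let x = 7u32;
    let packed = pack(&x, 0b10);
    let (ptr, tag) = unpack(packed);
    assert_eq!(tag, 0b10);
    assert_eq!(unsafe { *ptr }, 7);
}
```
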
diff --git a/compiler/rustc_middle/src/ty/list.rs b/compiler/rustc_middle/src/ty/list.rs
index 6718493f6b3..0fd370a5619 100644
--- a/compiler/rustc_middle/src/ty/list.rs
+++ b/compiler/rustc_middle/src/ty/list.rs
@@ -93,7 +93,7 @@ impl<H, T> RawList<H, T> {
         T: Copy,
     {
         assert!(!mem::needs_drop::<T>());
-        assert!(mem::size_of::<T>() != 0);
+        assert!(size_of::<T>() != 0);
         assert!(!slice.is_empty());
 
         let (layout, _offset) =
@@ -155,7 +155,7 @@ macro_rules! impl_list_empty {
                 static EMPTY: ListSkeleton<$header_ty, MaxAlign> =
                     ListSkeleton { header: $header_init, len: 0, data: [] };
 
-                assert!(mem::align_of::<T>() <= mem::align_of::<MaxAlign>());
+                assert!(align_of::<T>() <= align_of::<MaxAlign>());
 
                 // SAFETY: `EMPTY` is sufficiently aligned to be an empty list for all
                 // types with `align_of(T) <= align_of(MaxAlign)`, which we checked above.
diff --git a/compiler/rustc_middle/src/ty/mod.rs b/compiler/rustc_middle/src/ty/mod.rs
index 9ed15d9b849..f0ec8abce6d 100644
--- a/compiler/rustc_middle/src/ty/mod.rs
+++ b/compiler/rustc_middle/src/ty/mod.rs
@@ -17,7 +17,7 @@ use std::hash::{Hash, Hasher};
 use std::marker::PhantomData;
 use std::num::NonZero;
 use std::ptr::NonNull;
-use std::{fmt, mem, str};
+use std::{fmt, str};
 
 pub use adt::*;
 pub use assoc::*;
@@ -637,12 +637,12 @@ impl<'tcx> TermKind<'tcx> {
         let (tag, ptr) = match self {
             TermKind::Ty(ty) => {
                 // Ensure we can use the tag bits.
-                assert_eq!(mem::align_of_val(&*ty.0.0) & TAG_MASK, 0);
+                assert_eq!(align_of_val(&*ty.0.0) & TAG_MASK, 0);
                 (TYPE_TAG, NonNull::from(ty.0.0).cast())
             }
             TermKind::Const(ct) => {
                 // Ensure we can use the tag bits.
-                assert_eq!(mem::align_of_val(&*ct.0.0) & TAG_MASK, 0);
+                assert_eq!(align_of_val(&*ct.0.0) & TAG_MASK, 0);
                 (CONST_TAG, NonNull::from(ct.0.0).cast())
             }
         };
diff --git a/compiler/rustc_parse/src/parser/token_type.rs b/compiler/rustc_parse/src/parser/token_type.rs
index 110546d0ba6..886438fd583 100644
--- a/compiler/rustc_parse/src/parser/token_type.rs
+++ b/compiler/rustc_parse/src/parser/token_type.rs
@@ -619,7 +619,7 @@ impl Iterator for TokenTypeSetIter {
     type Item = TokenType;
 
     fn next(&mut self) -> Option<TokenType> {
-        let num_bits: u32 = (std::mem::size_of_val(&self.0.0) * 8) as u32;
+        let num_bits: u32 = (size_of_val(&self.0.0) * 8) as u32;
         assert_eq!(num_bits, 128);
         let z = self.0.0.trailing_zeros();
         if z == num_bits {
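
The surrounding iterator walks a `u128` bitset by repeatedly locating the lowest set bit. The pattern in isolation, as a hypothetical helper rather than the parser's actual code:

```rust
// Take the index of the lowest set bit via trailing_zeros, clear it, repeat.
fn set_bits(mut bits: u128) -> Vec<u32> {
    let mut out = Vec::new();
    while bits != 0 {
        let z = bits.trailing_zeros();
        out.push(z);
        bits &= !(1u128 << z); // clear the bit we just yielded
    }
    out
}

fn main() {
    assert_eq!(set_bits(0b1010_0100), vec![2, 5, 7]);
}
```
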
diff --git a/compiler/rustc_passes/src/input_stats.rs b/compiler/rustc_passes/src/input_stats.rs
index 844f74372de..1278e98afcf 100644
--- a/compiler/rustc_passes/src/input_stats.rs
+++ b/compiler/rustc_passes/src/input_stats.rs
@@ -107,12 +107,12 @@ impl<'k> StatCollector<'k> {
 
         let node = self.nodes.entry(label1).or_insert(Node::new());
         node.stats.count += 1;
-        node.stats.size = std::mem::size_of_val(val);
+        node.stats.size = size_of_val(val);
 
         if let Some(label2) = label2 {
             let subnode = node.subnodes.entry(label2).or_insert(NodeStats::new());
             subnode.count += 1;
-            subnode.size = std::mem::size_of_val(val);
+            subnode.size = size_of_val(val);
         }
     }
 
diff --git a/compiler/rustc_passes/src/liveness/rwu_table.rs b/compiler/rustc_passes/src/liveness/rwu_table.rs
index 6e2f976e5b0..4c1f6ea141e 100644
--- a/compiler/rustc_passes/src/liveness/rwu_table.rs
+++ b/compiler/rustc_passes/src/liveness/rwu_table.rs
@@ -39,7 +39,7 @@ impl RWUTable {
     /// Size of packed RWU in bits.
     const RWU_BITS: usize = 4;
     /// Size of a word in bits.
-    const WORD_BITS: usize = std::mem::size_of::<u8>() * 8;
+    const WORD_BITS: usize = size_of::<u8>() * 8;
     /// Number of packed RWUs that fit into a single word.
     const WORD_RWU_COUNT: usize = Self::WORD_BITS / Self::RWU_BITS;
 
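A worked example of the packing arithmetic: with 4-bit entries and 8-bit words, each word holds exactly two RWUs. The word/offset formula below is an assumption that matches these constants, not necessarily the table's exact helper:

```rust
const RWU_BITS: usize = 4;
const WORD_BITS: usize = size_of::<u8>() * 8;
const WORD_RWU_COUNT: usize = WORD_BITS / RWU_BITS;

fn main() {
    assert_eq!(WORD_RWU_COUNT, 2);
    // Entry i would live in word i / WORD_RWU_COUNT,
    // at bit offset (i % WORD_RWU_COUNT) * RWU_BITS within that word.
    let i = 5;
    assert_eq!((i / WORD_RWU_COUNT, (i % WORD_RWU_COUNT) * RWU_BITS), (2, 4));
}
```
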
diff --git a/compiler/rustc_query_system/src/dep_graph/serialized.rs b/compiler/rustc_query_system/src/dep_graph/serialized.rs
index bc78878a84a..2c6fd7d494f 100644
--- a/compiler/rustc_query_system/src/dep_graph/serialized.rs
+++ b/compiler/rustc_query_system/src/dep_graph/serialized.rs
@@ -63,7 +63,7 @@ rustc_index::newtype_index! {
     pub struct SerializedDepNodeIndex {}
 }
 
-const DEP_NODE_SIZE: usize = std::mem::size_of::<SerializedDepNodeIndex>();
+const DEP_NODE_SIZE: usize = size_of::<SerializedDepNodeIndex>();
 /// Amount of padding we need to add to the edge list data so that we can retrieve every
 /// SerializedDepNodeIndex with a fixed-size read then mask.
 const DEP_NODE_PAD: usize = DEP_NODE_SIZE - 1;
@@ -175,7 +175,7 @@ impl EdgeHeader {
 
 #[inline]
 fn mask(bits: usize) -> usize {
-    usize::MAX >> ((std::mem::size_of::<usize>() * 8) - bits)
+    usize::MAX >> ((size_of::<usize>() * 8) - bits)
 }
 
 impl SerializedDepGraph {
@@ -208,9 +208,8 @@ impl SerializedDepGraph {
         // for a node with length 64, which means the spilled 1-byte leb128 length is 1 byte of at
         // least (34 byte header + 1 byte len + 64 bytes edge data), which is ~1%. A 2-byte leb128
         // length is about the same fractional overhead and it amortizes for yet greater lengths.
-        let mut edge_list_data = Vec::with_capacity(
-            graph_bytes - node_count * std::mem::size_of::<SerializedNodeHeader<D>>(),
-        );
+        let mut edge_list_data =
+            Vec::with_capacity(graph_bytes - node_count * size_of::<SerializedNodeHeader<D>>());
 
         for _index in 0..node_count {
             // Decode the header for this edge; the header packs together as many of the fixed-size
@@ -300,7 +299,7 @@ struct Unpacked {
 // M..M+N  bytes per index
 // M+N..16 kind
 impl<D: Deps> SerializedNodeHeader<D> {
-    const TOTAL_BITS: usize = std::mem::size_of::<DepKind>() * 8;
+    const TOTAL_BITS: usize = size_of::<DepKind>() * 8;
     const LEN_BITS: usize = Self::TOTAL_BITS - Self::KIND_BITS - Self::WIDTH_BITS;
     const WIDTH_BITS: usize = DEP_NODE_WIDTH_BITS;
     const KIND_BITS: usize = Self::TOTAL_BITS - D::DEP_KIND_MAX.leading_zeros() as usize;
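
`SerializedDepNodeIndex` is a `u32`-backed newtype index, so `DEP_NODE_SIZE` is 4 and `DEP_NODE_PAD` is 3. A sketch of the "fixed-size read then mask" decoding that the padding enables, using an illustrative helper rather than the actual decoder:

```rust
fn mask(bits: usize) -> usize {
    usize::MAX >> ((size_of::<usize>() * 8) - bits)
}

// Always read a full 4-byte word at the index's byte offset, then mask off
// whatever lies beyond the encoded width; padding keeps the read in bounds.
fn read_index(data: &[u8], offset: usize, width_bytes: usize) -> usize {
    let mut buf = [0u8; 4];
    buf.copy_from_slice(&data[offset..offset + 4]);
    (u32::from_le_bytes(buf) as usize) & mask(width_bytes * 8)
}

fn main() {
    // Two 2-byte indices back to back, plus padding for the final read.
    let data = [0x34, 0x12, 0x78, 0x56, 0, 0];
    assert_eq!(read_index(&data, 0, 2), 0x1234);
    assert_eq!(read_index(&data, 2, 2), 0x5678);
}
```
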
diff --git a/compiler/rustc_resolve/src/late.rs b/compiler/rustc_resolve/src/late.rs
index 92f5fba1f9b..79479986d07 100644
--- a/compiler/rustc_resolve/src/late.rs
+++ b/compiler/rustc_resolve/src/late.rs
@@ -2995,7 +2995,7 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> {
     }
 
     // HACK(min_const_generics, generic_const_exprs): We
-    // want to keep allowing `[0; std::mem::size_of::<*mut T>()]`
+    // want to keep allowing `[0; size_of::<*mut T>()]`
     // with a future compat lint for now. We do this by adding an
     // additional special case for repeat expressions.
     //
diff --git a/compiler/rustc_serialize/src/leb128.rs b/compiler/rustc_serialize/src/leb128.rs
index aa7c2858466..4a475805697 100644
--- a/compiler/rustc_serialize/src/leb128.rs
+++ b/compiler/rustc_serialize/src/leb128.rs
@@ -7,7 +7,7 @@ use crate::serialize::Decoder;
 /// Returns the length of the longest LEB128 encoding for `T`, assuming `T` is an integer type
 pub const fn max_leb128_len<T>() -> usize {
     // The longest LEB128 encoding for an integer uses 7 bits per byte.
-    (std::mem::size_of::<T>() * 8 + 6) / 7
+    (size_of::<T>() * 8 + 6) / 7
 }
 
 /// Returns the length of the longest LEB128 encoding of all supported integer types.
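
Spot-checking the formula, which is just ceil(bits / 7) written with integer arithmetic; the function is reproduced here so the asserts are self-contained:

```rust
const fn max_leb128_len<T>() -> usize {
    (size_of::<T>() * 8 + 6) / 7 // ceil(bits / 7)
}

fn main() {
    assert_eq!(max_leb128_len::<u16>(), 3);   // (16 + 6) / 7
    assert_eq!(max_leb128_len::<u32>(), 5);   // (32 + 6) / 7
    assert_eq!(max_leb128_len::<u64>(), 10);  // (64 + 6) / 7
    assert_eq!(max_leb128_len::<u128>(), 19); // (128 + 6) / 7
}
```
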
diff --git a/compiler/rustc_session/src/filesearch.rs b/compiler/rustc_session/src/filesearch.rs
index ec83761da4a..cc2decc2fe4 100644
--- a/compiler/rustc_session/src/filesearch.rs
+++ b/compiler/rustc_session/src/filesearch.rs
@@ -92,7 +92,7 @@ fn current_dll_path() -> Result<PathBuf, String> {
             if libc::loadquery(
                 libc::L_GETINFO,
                 buffer.as_mut_ptr() as *mut u8,
-                (std::mem::size_of::<libc::ld_info>() * buffer.len()) as u32,
+                (size_of::<libc::ld_info>() * buffer.len()) as u32,
             ) >= 0
             {
                 break;
diff --git a/compiler/rustc_trait_selection/src/traits/const_evaluatable.rs b/compiler/rustc_trait_selection/src/traits/const_evaluatable.rs
index 75f53b063d1..39333082acd 100644
--- a/compiler/rustc_trait_selection/src/traits/const_evaluatable.rs
+++ b/compiler/rustc_trait_selection/src/traits/const_evaluatable.rs
@@ -2,7 +2,7 @@
 //!
 //! For concrete constants, this is fairly simple as we can just try and evaluate it.
 //!
-//! When dealing with polymorphic constants, for example `std::mem::size_of::<T>() - 1`,
+//! When dealing with polymorphic constants, for example `size_of::<T>() - 1`,
 //! this is not as easy.
 //!
 //! In this case we try to build an abstract representation of this constant using
diff --git a/compiler/stable_mir/src/mir/alloc.rs b/compiler/stable_mir/src/mir/alloc.rs
index 7e0c4a479b8..023807b76ae 100644
--- a/compiler/stable_mir/src/mir/alloc.rs
+++ b/compiler/stable_mir/src/mir/alloc.rs
@@ -58,7 +58,7 @@ impl IndexedVal for AllocId {
 
 /// Utility function used to read allocation data into an unsigned integer.
 pub(crate) fn read_target_uint(mut bytes: &[u8]) -> Result<u128, Error> {
-    let mut buf = [0u8; std::mem::size_of::<u128>()];
+    let mut buf = [0u8; size_of::<u128>()];
     match MachineInfo::target_endianness() {
         Endian::Little => {
             bytes.read_exact(&mut buf[..bytes.len()])?;
@@ -73,7 +73,7 @@ pub(crate) fn read_target_uint(mut bytes: &[u8]) -> Result<u128, Error> {
 
 /// Utility function used to read allocation data into a signed integer.
 pub(crate) fn read_target_int(mut bytes: &[u8]) -> Result<i128, Error> {
-    let mut buf = [0u8; std::mem::size_of::<i128>()];
+    let mut buf = [0u8; size_of::<i128>()];
     match MachineInfo::target_endianness() {
         Endian::Little => {
             bytes.read_exact(&mut buf[..bytes.len()])?;