From f4426c189f2587fc5e8f48bf518cc75a37d93d0f Mon Sep 17 00:00:00 2001
From: Erik Desjardins <erikdesjardins@users.noreply.github.com>
Date: Sat, 24 Feb 2024 00:48:20 -0500
Subject: [PATCH 1/5] use [N x i8] for alloca types

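The type given to an alloca has no semantic meaning in LLVM IR; only the
size and alignment of the allocation matter. Change
`BuilderMethods::alloca` to take a `Size` and an `Align` instead of a
backend type, and let each backend materialize the slot as a plain
`[N x i8]` array with the requested alignment. `byte_array_alloca`,
which allocates a runtime-determined number of bytes, is renamed to
`dynamic_alloca` to match.

A representative call-site change, taken from `PlaceRef::alloca` in
rustc_codegen_ssa below:

    -        let tmp = bx.alloca(bx.cx().backend_type(layout), align);
    +        let tmp = bx.alloca(layout.size, align);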
---
 compiler/rustc_codegen_gcc/src/builder.rs     |  20 +--
 .../rustc_codegen_gcc/src/intrinsic/mod.rs    |   2 +-
 .../rustc_codegen_gcc/src/intrinsic/simd.rs   |   4 +-
 compiler/rustc_codegen_llvm/src/abi.rs        |   2 +-
 compiler/rustc_codegen_llvm/src/builder.rs    |   7 +-
 compiler/rustc_codegen_llvm/src/intrinsic.rs  |  20 +--
 compiler/rustc_codegen_ssa/src/base.rs        |   2 +-
 compiler/rustc_codegen_ssa/src/mir/block.rs   |   2 +-
 compiler/rustc_codegen_ssa/src/mir/operand.rs |   4 +-
 compiler/rustc_codegen_ssa/src/mir/place.rs   |   2 +-
 .../rustc_codegen_ssa/src/traits/builder.rs   |   4 +-
 .../codegen/align-byval-alignment-mismatch.rs |   6 +-
 tests/codegen/align-byval.rs                  |  12 +-
 tests/codegen/align-enum.rs                   |   4 +-
 tests/codegen/align-struct.rs                 |   8 +-
 tests/codegen/array-codegen.rs                |   2 +-
 tests/codegen/array-map.rs                    |   2 +-
 tests/codegen/cast-target-abi.rs              | 121 +++++++++---------
 tests/codegen/cffi/ffi-out-of-bounds-loads.rs |   2 +-
 tests/codegen/debug-fndef-size.rs             |   2 +-
 tests/codegen/emscripten-catch-unwind.rs      |  59 +++++++++
 tests/codegen/enum/enum-match.rs              |   2 +-
 tests/codegen/i128-x86-align.rs               |   9 +-
 tests/codegen/intrinsics/transmute.rs         |  16 +--
 .../issues/issue-105386-ub-in-debuginfo.rs    |   2 +-
 tests/codegen/issues/issue-111603.rs          |   2 +-
 tests/codegen/overaligned-constant.rs         |   9 +-
 tests/codegen/packed.rs                       |   4 +-
 tests/codegen/personality_lifetimes.rs        |   2 +-
 tests/codegen/sroa-fragment-debuginfo.rs      |   6 +-
 tests/codegen/stores.rs                       |   8 +-
 tests/codegen/swap-large-types.rs             |   2 +-
 tests/codegen/swap-small-types.rs             |   2 +-
 33 files changed, 203 insertions(+), 148 deletions(-)
 create mode 100644 tests/codegen/emscripten-catch-unwind.rs

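(Reviewer note, not part of the commit message: the cross-backend API
change is the pair of `BuilderMethods` signatures below; the remaining
hunks update the LLVM and GCC implementations, the callers in
rustc_codegen_ssa, and the codegen tests to match.

    -    fn alloca(&mut self, ty: Self::Type, align: Align) -> Self::Value;
    -    fn byte_array_alloca(&mut self, len: Self::Value, align: Align) -> Self::Value;
    +    fn alloca(&mut self, size: Size, align: Align) -> Self::Value;
    +    fn dynamic_alloca(&mut self, size: Self::Value, align: Align) -> Self::Value;

`codegen_emcc_try` is additionally restructured to store through a raw
byte alloca, with a new assert that the ptr/bool pair needs no padding;
the new emscripten-catch-unwind.rs test covers that path.)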
diff --git a/compiler/rustc_codegen_gcc/src/builder.rs b/compiler/rustc_codegen_gcc/src/builder.rs
index 43cc46cfe68..cf1aa3f4793 100644
--- a/compiler/rustc_codegen_gcc/src/builder.rs
+++ b/compiler/rustc_codegen_gcc/src/builder.rs
@@ -898,26 +898,14 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         self.gcc_checked_binop(oop, typ, lhs, rhs)
     }
 
-    fn alloca(&mut self, ty: Type<'gcc>, align: Align) -> RValue<'gcc> {
-        // FIXME(antoyo): this check that we don't call get_aligned() a second time on a type.
-        // Ideally, we shouldn't need to do this check.
-        let aligned_type = if ty == self.cx.u128_type || ty == self.cx.i128_type {
-            ty
-        } else {
-            ty.get_aligned(align.bytes())
-        };
+    fn alloca(&mut self, size: Size, align: Align) -> RValue<'gcc> {
+        let ty = self.cx.type_array(self.cx.type_i8(), size.bytes()).get_aligned(align.bytes());
         // TODO(antoyo): It might be better to return a LValue, but fixing the rustc API is non-trivial.
         self.stack_var_count.set(self.stack_var_count.get() + 1);
-        self.current_func()
-            .new_local(
-                self.location,
-                aligned_type,
-                &format!("stack_var_{}", self.stack_var_count.get()),
-            )
-            .get_address(self.location)
+        self.current_func().new_local(None, ty, &format!("stack_var_{}", self.stack_var_count.get())).get_address(None)
     }
 
-    fn byte_array_alloca(&mut self, _len: RValue<'gcc>, _align: Align) -> RValue<'gcc> {
+    fn dynamic_alloca(&mut self, _len: RValue<'gcc>, _align: Align) -> RValue<'gcc> {
         unimplemented!();
     }
 
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
index cebd45c09aa..0c50b2bb5fe 100644
--- a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
@@ -531,7 +531,7 @@ impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
                 // We instead thus allocate some scratch space...
                 let scratch_size = cast.size(bx);
                 let scratch_align = cast.align(bx);
-                let llscratch = bx.alloca(cast.gcc_type(bx), scratch_align);
+                let llscratch = bx.alloca(scratch_size, scratch_align);
                 bx.lifetime_start(llscratch, scratch_size);
 
                 // ... where we first store the value...
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs b/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs
index 60361a44c2d..1d9ff257a76 100644
--- a/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs
@@ -18,7 +18,7 @@ use rustc_middle::span_bug;
 use rustc_middle::ty::layout::HasTyCtxt;
 use rustc_middle::ty::{self, Ty};
 use rustc_span::{sym, Span, Symbol};
-use rustc_target::abi::Align;
+use rustc_target::abi::{Align, Size};
 
 use crate::builder::Builder;
 #[cfg(not(feature = "master"))]
@@ -558,7 +558,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
                 let ze = bx.zext(result, bx.type_ix(expected_bytes * 8));
 
                 // Convert the integer to a byte array
-                let ptr = bx.alloca(bx.type_ix(expected_bytes * 8), Align::ONE);
+                let ptr = bx.alloca(Size::from_bytes(expected_bytes), Align::ONE);
                 bx.store(ze, ptr, Align::ONE);
                 let array_ty = bx.type_array(bx.type_i8(), expected_bytes);
                 let ptr = bx.pointercast(ptr, bx.cx.type_ptr_to(array_ty));
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs
index f918facc86d..68afcd08f04 100644
--- a/compiler/rustc_codegen_llvm/src/abi.rs
+++ b/compiler/rustc_codegen_llvm/src/abi.rs
@@ -227,7 +227,7 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
                 //   when passed by value, making it larger.
                 let copy_bytes = cmp::min(scratch_size.bytes(), self.layout.size.bytes());
                 // Allocate some scratch space...
-                let llscratch = bx.alloca(cast.llvm_type(bx), scratch_align);
+                let llscratch = bx.alloca(scratch_size, scratch_align);
                 bx.lifetime_start(llscratch, scratch_size);
                 // ...store the value...
                 bx.store(val, llscratch, scratch_align);
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index b7235972204..0956542337f 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -468,9 +468,10 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         val
     }
 
-    fn alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value {
+    fn alloca(&mut self, size: Size, align: Align) -> &'ll Value {
         let mut bx = Builder::with_cx(self.cx);
         bx.position_at_start(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn()) });
+        let ty = self.cx().type_array(self.cx().type_i8(), size.bytes());
         unsafe {
             let alloca = llvm::LLVMBuildAlloca(bx.llbuilder, ty, UNNAMED);
             llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
@@ -478,10 +479,10 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         }
     }
 
-    fn byte_array_alloca(&mut self, len: &'ll Value, align: Align) -> &'ll Value {
+    fn dynamic_alloca(&mut self, size: &'ll Value, align: Align) -> &'ll Value {
         unsafe {
             let alloca =
-                llvm::LLVMBuildArrayAlloca(self.llbuilder, self.cx().type_i8(), len, UNNAMED);
+                llvm::LLVMBuildArrayAlloca(self.llbuilder, self.cx().type_i8(), size, UNNAMED);
             llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
             alloca
         }
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index dc52dd156b7..9bf9c6ec0bc 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -18,7 +18,7 @@ use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, LayoutOf};
 use rustc_middle::ty::{self, GenericArgsRef, Ty};
 use rustc_middle::{bug, span_bug};
 use rustc_span::{sym, Span, Symbol};
-use rustc_target::abi::{self, Align, HasDataLayout, Primitive};
+use rustc_target::abi::{self, Align, HasDataLayout, Primitive, Size};
 use rustc_target::spec::{HasTargetSpec, PanicStrategy};
 
 use std::cmp::Ordering;
@@ -638,8 +638,9 @@ fn codegen_msvc_try<'ll>(
         //      }
         //
         // More information can be found in libstd's seh.rs implementation.
+        let ptr_size = bx.tcx().data_layout.pointer_size;
         let ptr_align = bx.tcx().data_layout.pointer_align.abi;
-        let slot = bx.alloca(bx.type_ptr(), ptr_align);
+        let slot = bx.alloca(ptr_size, ptr_align);
         let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
         bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None, None);
 
 
@@ -909,15 +910,14 @@ fn codegen_emcc_try<'ll>(
 
         // We need to pass two values to catch_func (ptr and is_rust_panic), so
         // create an alloca and pass a pointer to that.
+        let ptr_size = bx.tcx().data_layout.pointer_size;
         let ptr_align = bx.tcx().data_layout.pointer_align.abi;
         let i8_align = bx.tcx().data_layout.i8_align.abi;
-        let catch_data_type = bx.type_struct(&[bx.type_ptr(), bx.type_bool()], false);
-        let catch_data = bx.alloca(catch_data_type, ptr_align);
-        let catch_data_0 =
-            bx.inbounds_gep(catch_data_type, catch_data, &[bx.const_usize(0), bx.const_usize(0)]);
-        bx.store(ptr, catch_data_0, ptr_align);
-        let catch_data_1 =
-            bx.inbounds_gep(catch_data_type, catch_data, &[bx.const_usize(0), bx.const_usize(1)]);
+        // Required in order for there to be no padding between the fields.
+        assert!(i8_align <= ptr_align);
+        let catch_data = bx.alloca(2 * ptr_size, ptr_align);
+        bx.store(ptr, catch_data, ptr_align);
+        let catch_data_1 = bx.inbounds_ptradd(catch_data, bx.const_usize(ptr_size.bytes()));
         bx.store(is_rust_panic, catch_data_1, i8_align);
 
         let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
@@ -1363,7 +1363,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
                 let ze = bx.zext(i_, bx.type_ix(expected_bytes * 8));
 
                 // Convert the integer to a byte array
-                let ptr = bx.alloca(bx.type_ix(expected_bytes * 8), Align::ONE);
+                let ptr = bx.alloca(Size::from_bytes(expected_bytes), Align::ONE);
                 bx.store(ze, ptr, Align::ONE);
                 let array_ty = bx.type_array(bx.type_i8(), expected_bytes);
                 return Ok(bx.load(array_ty, ptr, Align::ONE));
diff --git a/compiler/rustc_codegen_ssa/src/base.rs b/compiler/rustc_codegen_ssa/src/base.rs
index 930b9b8c0db..ae7c0f24b40 100644
--- a/compiler/rustc_codegen_ssa/src/base.rs
+++ b/compiler/rustc_codegen_ssa/src/base.rs
@@ -508,7 +508,7 @@ fn get_argc_argv<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
         let ptr_size = bx.tcx().data_layout.pointer_size;
         let ptr_align = bx.tcx().data_layout.pointer_align.abi;
         let arg_argc = bx.const_int(cx.type_isize(), 2);
-        let arg_argv = bx.alloca(cx.type_array(cx.type_ptr(), 2), ptr_align);
+        let arg_argv = bx.alloca(2 * ptr_size, ptr_align);
         bx.store(param_handle, arg_argv, ptr_align);
         let arg_argv_el1 = bx.inbounds_ptradd(arg_argv, bx.const_usize(ptr_size.bytes()));
         bx.store(param_system_table, arg_argv_el1, ptr_align);
diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs
index c3137f0628e..14fae453bcc 100644
--- a/compiler/rustc_codegen_ssa/src/mir/block.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/block.rs
@@ -1512,7 +1512,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 //   when passed by value, making it larger.
                 let copy_bytes = cmp::min(scratch_size.bytes(), arg.layout.size.bytes());
                 // Allocate some scratch space...
-                let llscratch = bx.alloca(bx.cast_backend_type(cast), scratch_align);
+                let llscratch = bx.alloca(scratch_size, scratch_align);
                 bx.lifetime_start(llscratch, scratch_size);
                 // ...memcpy the value...
                 bx.memcpy(
diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs
index ac38b91c5e0..13e3debc363 100644
--- a/compiler/rustc_codegen_ssa/src/mir/operand.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs
@@ -323,7 +323,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
                 let llfield_ty = bx.cx().backend_type(field);
 
                 // Can't bitcast an aggregate, so round trip through memory.
-                let llptr = bx.alloca(llfield_ty, field.align.abi);
+                let llptr = bx.alloca(field.size, field.align.abi);
                 bx.store(*llval, llptr, field.align.abi);
                 *llval = bx.load(llfield_ty, llptr, field.align.abi);
             }
@@ -466,7 +466,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
         let align_minus_1 = bx.sub(align, one);
         let size_extra = bx.add(size, align_minus_1);
         let min_align = Align::ONE;
-        let alloca = bx.byte_array_alloca(size_extra, min_align);
+        let alloca = bx.dynamic_alloca(size_extra, min_align);
         let address = bx.ptrtoint(alloca, bx.type_isize());
         let neg_address = bx.neg(address);
         let offset = bx.and(neg_address, align_minus_1);
diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs
index 1ec6c351e25..a4f57f25b7a 100644
--- a/compiler/rustc_codegen_ssa/src/mir/place.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/place.rs
@@ -57,7 +57,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
         align: Align,
     ) -> Self {
         assert!(layout.is_sized(), "tried to statically allocate unsized place");
-        let tmp = bx.alloca(bx.cx().backend_type(layout), align);
+        let tmp = bx.alloca(layout.size, align);
         Self::new_sized_aligned(tmp, layout, align)
     }
 
diff --git a/compiler/rustc_codegen_ssa/src/traits/builder.rs b/compiler/rustc_codegen_ssa/src/traits/builder.rs
index c0281e75d9d..f7a3d5eaff5 100644
--- a/compiler/rustc_codegen_ssa/src/traits/builder.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/builder.rs
@@ -144,8 +144,8 @@ pub trait BuilderMethods<'a, 'tcx>:
     }
     fn to_immediate_scalar(&mut self, val: Self::Value, scalar: Scalar) -> Self::Value;
 
-    fn alloca(&mut self, ty: Self::Type, align: Align) -> Self::Value;
-    fn byte_array_alloca(&mut self, len: Self::Value, align: Align) -> Self::Value;
+    fn alloca(&mut self, size: Size, align: Align) -> Self::Value;
+    fn dynamic_alloca(&mut self, size: Self::Value, align: Align) -> Self::Value;
 
     fn load(&mut self, ty: Self::Type, ptr: Self::Value, align: Align) -> Self::Value;
     fn volatile_load(&mut self, ty: Self::Type, ptr: Self::Value) -> Self::Value;
diff --git a/tests/codegen/align-byval-alignment-mismatch.rs b/tests/codegen/align-byval-alignment-mismatch.rs
index 306e3ce1358..71f2dd42ec2 100644
--- a/tests/codegen/align-byval-alignment-mismatch.rs
+++ b/tests/codegen/align-byval-alignment-mismatch.rs
@@ -56,7 +56,7 @@ extern "C" {
 #[no_mangle]
 pub unsafe fn rust_to_c_increases_alignment(x: Align1) {
     // i686-linux: start:
-    // i686-linux-NEXT: [[ALLOCA:%[0-9a-z]+]] = alloca %Align1, align 4
+    // i686-linux-NEXT: [[ALLOCA:%[0-9a-z]+]] = alloca [48 x i8], align 4
     // i686-linux-NEXT: call void @llvm.memcpy.{{.+}}(ptr {{.*}}align 4 {{.*}}[[ALLOCA]], ptr {{.*}}align 1 {{.*}}%x
     // i686-linux-NEXT: call void @extern_c_align1({{.+}} [[ALLOCA]])
 
@@ -90,7 +90,7 @@ pub unsafe extern "C" fn c_to_rust_decreases_alignment(x: Align1) {
 #[no_mangle]
 pub unsafe extern "C" fn c_to_rust_increases_alignment(x: Align16) {
     // i686-linux: start:
-    // i686-linux-NEXT: [[ALLOCA:%[0-9a-z]+]] = alloca %Align16, align 16
+    // i686-linux-NEXT: [[ALLOCA:%[0-9a-z]+]] = alloca [48 x i8], align 16
     // i686-linux-NEXT: call void @llvm.memcpy.{{.+}}(ptr {{.*}}align 16 {{.*}}[[ALLOCA]], ptr {{.*}}align 4 {{.*}}%0
     // i686-linux-NEXT: call void @extern_rust_align16({{.+}} [[ALLOCA]])
 
@@ -116,7 +116,7 @@ pub unsafe extern "C" fn c_to_rust_ref_decreases_alignment(x: Align1) {
 #[no_mangle]
 pub unsafe extern "C" fn c_to_rust_ref_increases_alignment(x: Align16) {
     // i686-linux: start:
-    // i686-linux-NEXT: [[ALLOCA:%[0-9a-z]+]] = alloca %Align16, align 16
+    // i686-linux-NEXT: [[ALLOCA:%[0-9a-z]+]] = alloca [48 x i8], align 16
     // i686-linux-NEXT: call void @llvm.memcpy.{{.+}}(ptr {{.*}}align 16 {{.*}}[[ALLOCA]], ptr {{.*}}align 4 {{.*}}%0
     // i686-linux-NEXT: call void @extern_rust_ref_align16({{.+}} [[ALLOCA]])
 
diff --git a/tests/codegen/align-byval.rs b/tests/codegen/align-byval.rs
index c74e236f29d..3a2be2b2b9c 100644
--- a/tests/codegen/align-byval.rs
+++ b/tests/codegen/align-byval.rs
@@ -106,20 +106,20 @@ pub struct ForceAlign16 {
 pub unsafe fn call_na1(x: NaturalAlign1) {
     // CHECK: start:
 
-    // m68k: [[ALLOCA:%[a-z0-9+]]] = alloca %NaturalAlign1, align 1
+    // m68k: [[ALLOCA:%[a-z0-9+]]] = alloca [2 x i8], align 1
     // m68k: call void @natural_align_1({{.*}}byval([2 x i8]) align 1{{.*}} [[ALLOCA]])
 
-    // wasm: [[ALLOCA:%[a-z0-9+]]] = alloca %NaturalAlign1, align 1
+    // wasm: [[ALLOCA:%[a-z0-9+]]] = alloca [2 x i8], align 1
     // wasm: call void @natural_align_1({{.*}}byval([2 x i8]) align 1{{.*}} [[ALLOCA]])
 
     // x86_64-linux: call void @natural_align_1(i16
 
     // x86_64-windows: call void @natural_align_1(i16
 
-    // i686-linux: [[ALLOCA:%[a-z0-9+]]] = alloca %NaturalAlign1, align 4
+    // i686-linux: [[ALLOCA:%[a-z0-9+]]] = alloca [2 x i8], align 4
     // i686-linux: call void @natural_align_1({{.*}}byval([2 x i8]) align 4{{.*}} [[ALLOCA]])
 
-    // i686-windows: [[ALLOCA:%[a-z0-9+]]] = alloca %NaturalAlign1, align 4
+    // i686-windows: [[ALLOCA:%[a-z0-9+]]] = alloca [2 x i8], align 4
     // i686-windows: call void @natural_align_1({{.*}}byval([2 x i8]) align 4{{.*}} [[ALLOCA]])
     natural_align_1(x);
 }
@@ -134,10 +134,10 @@ pub unsafe fn call_na2(x: NaturalAlign2) {
     // x86_64-linux-NEXT: call void @natural_align_2
     // x86_64-windows-NEXT: call void @natural_align_2
 
-    // i686-linux: [[ALLOCA:%[0-9]+]] = alloca %NaturalAlign2, align 4
+    // i686-linux: [[ALLOCA:%[0-9]+]] = alloca [34 x i8], align 4
     // i686-linux: call void @natural_align_2({{.*}}byval([34 x i8]) align 4{{.*}} [[ALLOCA]])
 
-    // i686-windows: [[ALLOCA:%[0-9]+]] = alloca %NaturalAlign2, align 4
+    // i686-windows: [[ALLOCA:%[0-9]+]] = alloca [34 x i8], align 4
     // i686-windows: call void @natural_align_2({{.*}}byval([34 x i8]) align 4{{.*}} [[ALLOCA]])
     natural_align_2(x);
 }
diff --git a/tests/codegen/align-enum.rs b/tests/codegen/align-enum.rs
index b40168d77a4..93d5a87fb30 100644
--- a/tests/codegen/align-enum.rs
+++ b/tests/codegen/align-enum.rs
@@ -18,7 +18,7 @@ pub struct Nested64 {
 // CHECK-LABEL: @align64
 #[no_mangle]
 pub fn align64(a: u32) -> Align64 {
-// CHECK: %a64 = alloca %Align64, align 64
+// CHECK: %a64 = alloca [64 x i8], align 64
 // CHECK: call void @llvm.memcpy.{{.*}}(ptr align 64 %{{.*}}, ptr align 64 %{{.*}}, i{{[0-9]+}} 64, i1 false)
     let a64 = Align64::A(a);
     a64
@@ -27,7 +27,7 @@ pub fn align64(a: u32) -> Align64 {
 // CHECK-LABEL: @nested64
 #[no_mangle]
 pub fn nested64(a: u8, b: u32, c: u16) -> Nested64 {
-// CHECK: %n64 = alloca %Nested64, align 64
+// CHECK: %n64 = alloca [128 x i8], align 64
     let n64 = Nested64 { a, b: Align64::B(b), c };
     n64
 }
diff --git a/tests/codegen/align-struct.rs b/tests/codegen/align-struct.rs
index dbbb85bee6f..e70b42b47db 100644
--- a/tests/codegen/align-struct.rs
+++ b/tests/codegen/align-struct.rs
@@ -26,7 +26,7 @@ pub enum Enum64 {
 // CHECK-LABEL: @align64
 #[no_mangle]
 pub fn align64(i : i32) -> Align64 {
-// CHECK: %a64 = alloca %Align64, align 64
+// CHECK: %a64 = alloca [64 x i8], align 64
 // CHECK: call void @llvm.memcpy.{{.*}}(ptr align 64 %{{.*}}, ptr align 64 %{{.*}}, i{{[0-9]+}} 64, i1 false)
     let a64 = Align64(i);
     a64
@@ -44,7 +44,7 @@ pub fn align64_load(a: Align64) -> i32 {
 // CHECK-LABEL: @nested64
 #[no_mangle]
 pub fn nested64(a: Align64, b: i32, c: i32, d: i8) -> Nested64 {
-// CHECK: %n64 = alloca %Nested64, align 64
+// CHECK: %n64 = alloca [128 x i8], align 64
     let n64 = Nested64 { a, b, c, d };
     n64
 }
@@ -52,7 +52,7 @@ pub fn nested64(a: Align64, b: i32, c: i32, d: i8) -> Nested64 {
 // CHECK-LABEL: @enum4
 #[no_mangle]
 pub fn enum4(a: i32) -> Enum4 {
-// CHECK: %e4 = alloca %Enum4, align 4
+// CHECK: %e4 = alloca [8 x i8], align 4
     let e4 = Enum4::A(a);
     e4
 }
@@ -60,7 +60,7 @@ pub fn enum4(a: i32) -> Enum4 {
 // CHECK-LABEL: @enum64
 #[no_mangle]
 pub fn enum64(a: Align64) -> Enum64 {
-// CHECK: %e64 = alloca %Enum64, align 64
+// CHECK: %e64 = alloca [128 x i8], align 64
     let e64 = Enum64::A(a);
     e64
 }
diff --git a/tests/codegen/array-codegen.rs b/tests/codegen/array-codegen.rs
index 1310e61c41d..fc272f2556c 100644
--- a/tests/codegen/array-codegen.rs
+++ b/tests/codegen/array-codegen.rs
@@ -18,7 +18,7 @@ pub fn array_load(a: &[u8; 4]) -> [u8; 4] {
 #[no_mangle]
 pub fn array_store(a: [u8; 4], p: &mut [u8; 4]) {
     // CHECK-NOT: alloca
-    // CHECK: %[[TEMP:.+]] = alloca i32, [[TEMPALIGN:align [0-9]+]]
+    // CHECK: %[[TEMP:.+]] = alloca [4 x i8], [[TEMPALIGN:align [0-9]+]]
     // CHECK-NOT: alloca
     // CHECK: %a = alloca [4 x i8]
     // CHECK-NOT: alloca
diff --git a/tests/codegen/array-map.rs b/tests/codegen/array-map.rs
index 743a15989f7..f49dddcfc20 100644
--- a/tests/codegen/array-map.rs
+++ b/tests/codegen/array-map.rs
@@ -27,7 +27,7 @@ pub fn short_integer_map(x: [u32; 8]) -> [u32; 8] {
 #[no_mangle]
 pub fn long_integer_map(x: [u32; 512]) -> [u32; 512] {
     // CHECK: start:
-    // CHECK-NEXT: alloca [512 x i32]
+    // CHECK-NEXT: alloca [2048 x i8]
     // CHECK-NOT: alloca
     // CHECK: mul <{{[0-9]+}} x i32>
     // CHECK: add <{{[0-9]+}} x i32>
diff --git a/tests/codegen/cast-target-abi.rs b/tests/codegen/cast-target-abi.rs
index e6024f03425..9c31acc9bb7 100644
--- a/tests/codegen/cast-target-abi.rs
+++ b/tests/codegen/cast-target-abi.rs
@@ -77,15 +77,20 @@ extern "C" {
 // CHECK-LABEL: @call_twou16s
 #[no_mangle]
 pub unsafe fn call_twou16s() {
-    // aarch64:     [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:i64]], align [[ABI_ALIGN:8]]
-    // loongarch64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:i64]], align [[ABI_ALIGN:8]]
-    // powerpc64:   [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:i32]], align [[ABI_ALIGN:4]]
-    // sparc64:     [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:i64]], align [[ABI_ALIGN:8]]
+    // aarch64:     [[ABI_ALLOCA:%.+]] = alloca [8 x i8], align [[ABI_ALIGN:8]]
+    // loongarch64: [[ABI_ALLOCA:%.+]] = alloca [8 x i8], align [[ABI_ALIGN:8]]
+    // powerpc64:   [[ABI_ALLOCA:%.+]] = alloca [4 x i8], align [[ABI_ALIGN:4]]
+    // sparc64:     [[ABI_ALLOCA:%.+]] = alloca [8 x i8], align [[ABI_ALIGN:8]]
 
-    // CHECK: [[RUST_ALLOCA:%.+]] = alloca %TwoU16s, align [[RUST_ALIGN:2]]
+    // CHECK: [[RUST_ALLOCA:%.+]] = alloca [4 x i8], align [[RUST_ALIGN:2]]
 
     // CHECK: call void @llvm.memcpy.{{.+}}(ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], i64 4, i1 false)
-    // CHECK: [[ABI_VALUE:%.+]] = load [[ABI_TYPE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
+
+    // aarch64:     [[ABI_VALUE:%.+]] = load [[ABI_TYPE:i64]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
+    // loongarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:i64]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
+    // powerpc64:   [[ABI_VALUE:%.+]] = load [[ABI_TYPE:i32]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
+    // sparc64:     [[ABI_VALUE:%.+]] = load [[ABI_TYPE:i64]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
+
     // CHECK: call void @receives_twou16s([[ABI_TYPE]] [[ABI_VALUE]])
     let x = TwoU16s { a: 1, b: 2 };
     receives_twou16s(x);
@@ -96,23 +101,23 @@ pub unsafe fn call_twou16s() {
 pub unsafe fn return_twou16s() -> TwoU16s {
     // powerpc returns this struct via sret pointer, it doesn't use the cast ABI.
 
-    // powerpc64: [[RETVAL:%.+]] = alloca %TwoU16s, align 2
+    // powerpc64: [[RETVAL:%.+]] = alloca [4 x i8], align 2
     // powerpc64: call void @returns_twou16s(ptr {{.+}} [[RETVAL]])
 
 
     // The other targets copy the cast ABI type to an alloca.
 
-    // aarch64:     [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:i64]], align [[ABI_ALIGN:8]]
-    // loongarch64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:i64]], align [[ABI_ALIGN:8]]
-    // sparc64:     [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:i64]], align [[ABI_ALIGN:8]]
+    // aarch64:     [[ABI_ALLOCA:%.+]] = alloca [8 x i8], align [[ABI_ALIGN:8]]
+    // loongarch64: [[ABI_ALLOCA:%.+]] = alloca [8 x i8], align [[ABI_ALIGN:8]]
+    // sparc64:     [[ABI_ALLOCA:%.+]] = alloca [8 x i8], align [[ABI_ALIGN:8]]
 
-    // aarch64:     [[RUST_ALLOCA:%.+]] = alloca %TwoU16s, align [[RUST_ALIGN:2]]
-    // loongarch64: [[RUST_ALLOCA:%.+]] = alloca %TwoU16s, align [[RUST_ALIGN:2]]
-    // sparc64:     [[RUST_ALLOCA:%.+]] = alloca %TwoU16s, align [[RUST_ALIGN:2]]
+    // aarch64:     [[RUST_ALLOCA:%.+]] = alloca [4 x i8], align [[RUST_ALIGN:2]]
+    // loongarch64: [[RUST_ALLOCA:%.+]] = alloca [4 x i8], align [[RUST_ALIGN:2]]
+    // sparc64:     [[RUST_ALLOCA:%.+]] = alloca [4 x i8], align [[RUST_ALIGN:2]]
 
-    // aarch64:     [[ABI_VALUE:%.+]] = call [[ABI_TYPE]] @returns_twou16s()
-    // loongarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE]] @returns_twou16s()
-    // sparc64:     [[ABI_VALUE:%.+]] = call [[ABI_TYPE]] @returns_twou16s()
+    // aarch64:     [[ABI_VALUE:%.+]] = call [[ABI_TYPE:i64]] @returns_twou16s()
+    // loongarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:i64]] @returns_twou16s()
+    // sparc64:     [[ABI_VALUE:%.+]] = call [[ABI_TYPE:i64]] @returns_twou16s()
 
     // aarch64:     store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
     // loongarch64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
@@ -127,12 +132,12 @@ pub unsafe fn return_twou16s() -> TwoU16s {
 // CHECK-LABEL: @call_fiveu16s
 #[no_mangle]
 pub unsafe fn call_fiveu16s() {
-    // CHECK: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:\[2 x i64\]]], align [[ABI_ALIGN:8]]
+    // CHECK: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
 
-    // CHECK: [[RUST_ALLOCA:%.+]] = alloca %FiveU16s, align 2
+    // CHECK: [[RUST_ALLOCA:%.+]] = alloca [10 x i8], align 2
 
     // CHECK: call void @llvm.memcpy.{{.+}}(ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], i64 10, i1 false)
-    // CHECK: [[ABI_VALUE:%.+]] = load [[ABI_TYPE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
+    // CHECK: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
     // CHECK: call void @receives_fiveu16s([[ABI_TYPE]] [[ABI_VALUE]])
     let x = FiveU16s { a: 1, b: 2, c: 3, d: 4, e: 5 };
     receives_fiveu16s(x);
@@ -149,13 +154,13 @@ pub unsafe fn return_fiveu16s() -> FiveU16s {
 
     // The other targets copy the cast ABI type to the sret pointer.
 
-    // aarch64:     [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:\[2 x i64\]]], align [[ABI_ALIGN:8]]
-    // loongarch64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:\[2 x i64\]]], align [[ABI_ALIGN:8]]
-    // sparc64:     [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:\[2 x i64\]]], align [[ABI_ALIGN:8]]
+    // aarch64:     [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
+    // loongarch64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
+    // sparc64:     [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
 
-    // aarch64:     [[ABI_VALUE:%.+]] = call [[ABI_TYPE]] @returns_fiveu16s()
-    // loongarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE]] @returns_fiveu16s()
-    // sparc64:     [[ABI_VALUE:%.+]] = call [[ABI_TYPE]] @returns_fiveu16s()
+    // aarch64:     [[ABI_VALUE:%.+]] = call [[ABI_TYPE:\[2 x i64\]]] @returns_fiveu16s()
+    // loongarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:\[2 x i64\]]] @returns_fiveu16s()
+    // sparc64:     [[ABI_VALUE:%.+]] = call [[ABI_TYPE:\[2 x i64\]]] @returns_fiveu16s()
 
     // aarch64:     store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
     // loongarch64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
@@ -170,15 +175,17 @@ pub unsafe fn return_fiveu16s() -> FiveU16s {
 // CHECK-LABEL: @call_doubledouble
 #[no_mangle]
 pub unsafe fn call_doubledouble() {
-    // aarch64:     [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:\[2 x double\]]], align [[ABI_ALIGN:8]]
-    // loongarch64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:{ double, double }]], align [[ABI_ALIGN:8]]
-    // powerpc64:   [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:\[2 x i64\]]], align [[ABI_ALIGN:8]]
-    // sparc64:     [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:{ double, double }]], align [[ABI_ALIGN:8]]
+    // CHECK: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
 
-    // CHECK: [[RUST_ALLOCA:%.+]] = alloca %DoubleDouble, align [[RUST_ALIGN:8]]
+    // CHECK: [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]]
 
     // CHECK: call void @llvm.memcpy.{{.+}}(ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], i64 16, i1 false)
-    // CHECK: [[ABI_VALUE:%.+]] = load [[ABI_TYPE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
+
+    // aarch64:     [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x double\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
+    // loongarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ double, double }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
+    // powerpc64:   [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
+    // sparc64:     [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ double, double }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
+
     // CHECK: call void @receives_doubledouble([[ABI_TYPE]] [[ABI_VALUE]])
     let x = DoubleDouble { f: 1., g: 2. };
     receives_doubledouble(x);
@@ -189,23 +196,23 @@ pub unsafe fn call_doubledouble() {
 pub unsafe fn return_doubledouble() -> DoubleDouble {
     // powerpc returns this struct via sret pointer, it doesn't use the cast ABI.
 
-    // powerpc64: [[RETVAL:%.+]] = alloca %DoubleDouble, align 8
+    // powerpc64: [[RETVAL:%.+]] = alloca [16 x i8], align 8
     // powerpc64: call void @returns_doubledouble(ptr {{.+}} [[RETVAL]])
 
 
     // The other targets copy the cast ABI type to an alloca.
 
-    // aarch64:     [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:\[2 x double\]]], align [[ABI_ALIGN:8]]
-    // loongarch64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:{ double, double }]], align [[ABI_ALIGN:8]]
-    // sparc64:     [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:{ double, double }]], align [[ABI_ALIGN:8]]
+    // aarch64:     [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
+    // loongarch64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
+    // sparc64:     [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
 
-    // aarch64:     [[RUST_ALLOCA:%.+]] = alloca %DoubleDouble, align [[RUST_ALIGN:8]]
-    // loongarch64: [[RUST_ALLOCA:%.+]] = alloca %DoubleDouble, align [[RUST_ALIGN:8]]
-    // sparc64:     [[RUST_ALLOCA:%.+]] = alloca %DoubleDouble, align [[RUST_ALIGN:8]]
+    // aarch64:     [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]]
+    // loongarch64: [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]]
+    // sparc64:     [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]]
 
-    // aarch64:     [[ABI_VALUE:%.+]] = call [[ABI_TYPE]] @returns_doubledouble()
-    // loongarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE]] @returns_doubledouble()
-    // sparc64:     [[ABI_VALUE:%.+]] = call [[ABI_TYPE]] @returns_doubledouble()
+    // aarch64:     [[ABI_VALUE:%.+]] = call [[ABI_TYPE:\[2 x double\]]] @returns_doubledouble()
+    // loongarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:{ double, double }]] @returns_doubledouble()
+    // sparc64:     [[ABI_VALUE:%.+]] = call [[ABI_TYPE:{ double, double }]] @returns_doubledouble()
 
     // aarch64:     store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
     // loongarch64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
@@ -224,21 +231,21 @@ pub unsafe fn return_doubledouble() -> DoubleDouble {
 // powerpc64-LABEL:   @call_doublefloat
 #[no_mangle]
 pub unsafe fn call_doublefloat() {
-    // aarch64:     [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:\[2 x i64\]]], align [[ABI_ALIGN:8]]
-    // loongarch64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:{ double, float }]], align [[ABI_ALIGN:8]]
-    // powerpc64:   [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:\[2 x i64\]]], align [[ABI_ALIGN:8]]
+    // aarch64:     [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
+    // loongarch64: [[ABI_ALLOCA:%.+]] = alloca [12 x i8], align [[ABI_ALIGN:8]]
+    // powerpc64:   [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
 
-    // aarch64:     [[RUST_ALLOCA:%.+]] = alloca %DoubleFloat, align [[RUST_ALIGN:8]]
-    // loongarch64: [[RUST_ALLOCA:%.+]] = alloca %DoubleFloat, align [[RUST_ALIGN:8]]
-    // powerpc64:   [[RUST_ALLOCA:%.+]] = alloca %DoubleFloat, align [[RUST_ALIGN:8]]
+    // aarch64:     [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]]
+    // loongarch64: [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]]
+    // powerpc64:   [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]]
 
     // aarch64:     call void @llvm.memcpy.{{.+}}(ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], i64 16, i1 false)
     // loongarch64: call void @llvm.memcpy.{{.+}}(ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], i64 12, i1 false)
     // powerpc64:   call void @llvm.memcpy.{{.+}}(ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], i64 16, i1 false)
 
-    // aarch64:     [[ABI_VALUE:%.+]] = load [[ABI_TYPE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
-    // loongarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
-    // powerpc64:   [[ABI_VALUE:%.+]] = load [[ABI_TYPE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
+    // aarch64:     [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
+    // loongarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ double, float }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
+    // powerpc64:   [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
 
     // aarch64:     call void @receives_doublefloat([[ABI_TYPE]] {{(inreg )?}}[[ABI_VALUE]])
     // loongarch64: call void @receives_doublefloat([[ABI_TYPE]] {{(inreg )?}}[[ABI_VALUE]])
@@ -256,20 +263,20 @@ pub unsafe fn call_doublefloat() {
 pub unsafe fn return_doublefloat() -> DoubleFloat {
     // powerpc returns this struct via sret pointer, it doesn't use the cast ABI.
 
-    // powerpc64: [[RETVAL:%.+]] = alloca %DoubleFloat, align 8
+    // powerpc64: [[RETVAL:%.+]] = alloca [16 x i8], align 8
     // powerpc64: call void @returns_doublefloat(ptr {{.+}} [[RETVAL]])
 
 
     // The other targets copy the cast ABI type to an alloca.
 
-    // aarch64:     [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:\[2 x i64\]]], align [[ABI_ALIGN:8]]
-    // loongarch64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:{ double, float }]], align [[ABI_ALIGN:8]]
+    // aarch64:     [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]]
+    // loongarch64: [[ABI_ALLOCA:%.+]] = alloca [12 x i8], align [[ABI_ALIGN:8]]
 
-    // aarch64:     [[RUST_ALLOCA:%.+]] = alloca %DoubleFloat, align [[RUST_ALIGN:8]]
-    // loongarch64: [[RUST_ALLOCA:%.+]] = alloca %DoubleFloat, align [[RUST_ALIGN:8]]
+    // aarch64:     [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]]
+    // loongarch64: [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]]
 
-    // aarch64:     [[ABI_VALUE:%.+]] = call [[ABI_TYPE]] @returns_doublefloat()
-    // loongarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE]] @returns_doublefloat()
+    // aarch64:     [[ABI_VALUE:%.+]] = call [[ABI_TYPE:\[2 x i64\]]] @returns_doublefloat()
+    // loongarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:{ double, float }]] @returns_doublefloat()
 
     // aarch64:     store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
     // loongarch64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
diff --git a/tests/codegen/cffi/ffi-out-of-bounds-loads.rs b/tests/codegen/cffi/ffi-out-of-bounds-loads.rs
index 8b32e902b3f..35bf00f8f3c 100644
--- a/tests/codegen/cffi/ffi-out-of-bounds-loads.rs
+++ b/tests/codegen/cffi/ffi-out-of-bounds-loads.rs
@@ -33,7 +33,7 @@ extern "C" {
 pub fn test() {
     let s = S { f1: 1, f2: 2, f3: 3 };
     unsafe {
-        // CHECK: [[ALLOCA:%.+]] = alloca { i64, i32 }, align 8
+        // CHECK: [[ALLOCA:%.+]] = alloca [12 x i8], align 8
         // CHECK: [[LOAD:%.+]] = load { i64, i32 }, ptr [[ALLOCA]], align 8
         // CHECK: call void @foo({ i64, i32 } [[LOAD]])
         foo(s);
diff --git a/tests/codegen/debug-fndef-size.rs b/tests/codegen/debug-fndef-size.rs
index b3cc45614bc..5551d2cc39c 100644
--- a/tests/codegen/debug-fndef-size.rs
+++ b/tests/codegen/debug-fndef-size.rs
@@ -12,7 +12,7 @@ pub fn main() {
     foo(0, 1, i32::cmp);
 }
 
-// CHECK: %compare.dbg.spill = alloca {}, align 1
+// CHECK: %compare.dbg.spill = alloca [0 x i8], align 1
 // CHECK: call void @llvm.dbg.declare(metadata ptr %compare.dbg.spill, metadata ![[VAR:.*]], metadata !DIExpression()), !dbg !{{.*}}
 // CHECK: ![[TYPE:.*]] = !DIDerivedType(tag: DW_TAG_pointer_type, name: "fn(&i32, &i32) -> core::cmp::Ordering", baseType: !{{.*}}, align: 1, dwarfAddressSpace: {{.*}})
 // CHECK: ![[VAR]] = !DILocalVariable(name: "compare", scope: !{{.*}}, file: !{{.*}}, line: {{.*}}, type: ![[TYPE]], align: 1)
diff --git a/tests/codegen/emscripten-catch-unwind.rs b/tests/codegen/emscripten-catch-unwind.rs
new file mode 100644
index 00000000000..7de7bd81b5c
--- /dev/null
+++ b/tests/codegen/emscripten-catch-unwind.rs
@@ -0,0 +1,59 @@
+//@ compile-flags: -O --target wasm32-unknown-emscripten
+//@ needs-llvm-components: webassembly
+
+// Emscripten has its own unique implementation of catch_unwind (in `codegen_emcc_try`),
+// so make sure it generates something reasonable.
+
+#![feature(no_core, lang_items, intrinsics, rustc_attrs)]
+#![crate_type = "lib"]
+#![no_std]
+#![no_core]
+
+#[lang="sized"] trait Sized { }
+#[lang="freeze"] trait Freeze { }
+#[lang="copy"] trait Copy { }
+
+#[rustc_intrinsic]
+fn size_of<T>() -> usize { loop {} }
+
+extern "rust-intrinsic" {
+    fn catch_unwind(
+        try_fn: fn(_: *mut u8),
+        data: *mut u8,
+        catch_fn: fn(_: *mut u8, _: *mut u8)
+    ) -> i32;
+}
+
+// CHECK-LABEL: @ptr_size
+#[no_mangle]
+pub fn ptr_size() -> usize {
+    // CHECK: ret [[PTR_SIZE:.*]]
+    size_of::<*mut u8>()
+}
+
+// CHECK-LABEL: @test_catch_unwind
+#[no_mangle]
+pub unsafe fn test_catch_unwind(
+    try_fn: fn(_: *mut u8),
+    data: *mut u8,
+    catch_fn: fn(_: *mut u8, _: *mut u8)
+) -> i32 {
+    // CHECK: start:
+    // CHECK: [[ALLOCA:%.*]] = alloca
+
+    // CHECK: catch.i:
+    // CHECK: [[LANDINGPAD:%.*]] = landingpad
+    // CHECK: [[EXCEPTION:%.*]] = extractvalue {{.*}} [[LANDINGPAD]], 0
+    // CHECK: [[SELECTOR:%.*]] = extractvalue {{.*}} [[LANDINGPAD]], 1
+
+    // CHECK: [[IS_RUST_EXN:%.*]] = icmp eq {{.*}}[[SELECTOR]]
+    // CHECK: [[IS_RUST_EXN_I8:%.*]] = zext i1 [[IS_RUST_EXN]] to i8
+
+    // CHECK: store ptr [[EXCEPTION]], ptr [[ALLOCA]]
+    // CHECK: [[IS_RUST_SLOT:%.*]] = getelementptr inbounds i8, ptr [[ALLOCA]], [[PTR_SIZE]]
+    // CHECK: store i8 [[IS_RUST_EXN_I8]], ptr [[IS_RUST_SLOT]]
+
+    // CHECK: call void %catch_fn(ptr %data, ptr nonnull [[ALLOCA]])
+
+    catch_unwind(try_fn, data, catch_fn)
+}
diff --git a/tests/codegen/enum/enum-match.rs b/tests/codegen/enum/enum-match.rs
index 2e6dad8791b..f1c40f6695b 100644
--- a/tests/codegen/enum/enum-match.rs
+++ b/tests/codegen/enum/enum-match.rs
@@ -15,7 +15,7 @@ pub enum Enum0 {
 // CHECK-NEXT: start:
 // CHECK-NEXT: %1 = icmp eq i8 %0, 2
 // CHECK-NEXT: %2 = and i8 %0, 1
-// CHECK-NEXT: %_0.0 = select i1 %1, i8 13, i8 %2
+// CHECK-NEXT: %{{.+}} = select i1 %1, i8 13, i8 %2
 #[no_mangle]
 pub fn match0(e: Enum0) -> u8 {
     use Enum0::*;
diff --git a/tests/codegen/i128-x86-align.rs b/tests/codegen/i128-x86-align.rs
index b2e0c294c39..3e6ed2b8e16 100644
--- a/tests/codegen/i128-x86-align.rs
+++ b/tests/codegen/i128-x86-align.rs
@@ -6,7 +6,6 @@
 // correctly.
 
 // CHECK: %ScalarPair = type { i32, [3 x i32], i128 }
-// CHECK: %Struct = type { i32, i32, [2 x i32], i128 }
 
 #![feature(core_intrinsics)]
 
@@ -43,7 +42,7 @@ pub fn store(x: &mut ScalarPair) {
 #[no_mangle]
 pub fn alloca() {
     // CHECK-LABEL: @alloca(
-    // CHECK:      [[X:%.*]] = alloca %ScalarPair, align 16
+    // CHECK:      [[X:%.*]] = alloca [32 x i8], align 16
     // CHECK:      store i32 1, ptr %x, align 16
     // CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr %x, i64 16
     // CHECK-NEXT: store i128 2, ptr [[GEP]], align 16
@@ -55,7 +54,7 @@ pub fn alloca() {
 pub fn load_volatile(x: &ScalarPair) -> ScalarPair {
     // CHECK-LABEL: @load_volatile(
     // CHECK-SAME: align 16 dereferenceable(32) %x
-    // CHECK:      [[TMP:%.*]] = alloca %ScalarPair, align 16
+    // CHECK:      [[TMP:%.*]] = alloca [32 x i8], align 16
     // CHECK:      [[LOAD:%.*]] = load volatile %ScalarPair, ptr %x, align 16
     // CHECK-NEXT: store %ScalarPair [[LOAD]], ptr [[TMP]], align 16
     // CHECK-NEXT: [[A:%.*]] = load i32, ptr [[TMP]], align 16
@@ -67,7 +66,7 @@ pub fn load_volatile(x: &ScalarPair) -> ScalarPair {
 #[no_mangle]
 pub fn transmute(x: ScalarPair) -> (std::mem::MaybeUninit<i128>, i128) {
     // CHECK-LABEL: define { i128, i128 } @transmute(i32 noundef %x.0, i128 noundef %x.1)
-    // CHECK:       [[TMP:%.*]] = alloca { i128, i128 }, align 16
+    // CHECK:       [[TMP:%.*]] = alloca [32 x i8], align 16
     // CHECK-NEXT:  store i32 %x.0, ptr [[TMP]], align 16
     // CHECK-NEXT:  [[GEP:%.*]] = getelementptr inbounds i8, ptr [[TMP]], i64 16
     // CHECK-NEXT:  store i128 %x.1, ptr [[GEP]], align 16
@@ -92,7 +91,7 @@ pub struct Struct {
 pub fn store_struct(x: &mut Struct) {
     // CHECK-LABEL: @store_struct(
     // CHECK-SAME: align 16 dereferenceable(32) %x
-    // CHECK:      [[TMP:%.*]] = alloca %Struct, align 16
+    // CHECK:      [[TMP:%.*]] = alloca [32 x i8], align 16
     // CHECK:      store i32 1, ptr [[TMP]], align 16
     // CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds i8, ptr [[TMP]], i64 4
     // CHECK-NEXT: store i32 2, ptr [[GEP1]], align 4
diff --git a/tests/codegen/intrinsics/transmute.rs b/tests/codegen/intrinsics/transmute.rs
index f858562b5f1..76cdf7e191e 100644
--- a/tests/codegen/intrinsics/transmute.rs
+++ b/tests/codegen/intrinsics/transmute.rs
@@ -153,7 +153,7 @@ pub unsafe fn check_from_newtype(x: Scalar64) -> u64 {
 // CHECK-LABEL: @check_aggregate_to_bool(
 #[no_mangle]
 pub unsafe fn check_aggregate_to_bool(x: Aggregate8) -> bool {
-    // CHECK: %x = alloca %Aggregate8, align 1
+    // CHECK: %x = alloca [1 x i8], align 1
     // CHECK: %[[BYTE:.+]] = load i8, ptr %x, align 1
     // CHECK: %[[BOOL:.+]] = trunc i8 %[[BYTE]] to i1
     // CHECK: ret i1 %[[BOOL]]
@@ -163,7 +163,7 @@ pub unsafe fn check_aggregate_to_bool(x: Aggregate8) -> bool {
 // CHECK-LABEL: @check_aggregate_from_bool(
 #[no_mangle]
 pub unsafe fn check_aggregate_from_bool(x: bool) -> Aggregate8 {
-    // CHECK: %_0 = alloca %Aggregate8, align 1
+    // CHECK: %_0 = alloca [1 x i8], align 1
     // CHECK: %[[BYTE:.+]] = zext i1 %x to i8
     // CHECK: store i8 %[[BYTE]], ptr %_0, align 1
     transmute(x)
@@ -190,7 +190,7 @@ pub unsafe fn check_byte_from_bool(x: bool) -> u8 {
 // CHECK-LABEL: @check_to_pair(
 #[no_mangle]
 pub unsafe fn check_to_pair(x: u64) -> Option<i32> {
-    // CHECK: %_0 = alloca %"core::option::Option<i32>", align 4
+    // CHECK: %_0 = alloca [8 x i8], align 4
     // CHECK: store i64 %x, ptr %_0, align 4
     transmute(x)
 }
@@ -202,7 +202,7 @@ pub unsafe fn check_from_pair(x: Option<i32>) -> u64 {
     // immediates so we can write using the destination alloca's alignment.
     const { assert!(std::mem::align_of::<Option<i32>>() == 4) };
 
-    // CHECK: %_0 = alloca i64, align 8
+    // CHECK: %_0 = alloca [8 x i8], align 8
     // CHECK: store i32 %x.0, ptr %_0, align 8
     // CHECK: store i32 %x.1, ptr %0, align 4
     // CHECK: %[[R:.+]] = load i64, ptr %_0, align 8
@@ -248,7 +248,7 @@ pub unsafe fn check_from_bytes(x: [u8; 4]) -> u32 {
 // CHECK-LABEL: @check_to_aggregate(
 #[no_mangle]
 pub unsafe fn check_to_aggregate(x: u64) -> Aggregate64 {
-    // CHECK: %_0 = alloca %Aggregate64, align 4
+    // CHECK: %_0 = alloca [8 x i8], align 4
     // CHECK: store i64 %x, ptr %_0, align 4
     // CHECK: %0 = load i64, ptr %_0, align 4
     // CHECK: ret i64 %0
@@ -258,7 +258,7 @@ pub unsafe fn check_to_aggregate(x: u64) -> Aggregate64 {
 // CHECK-LABEL: @check_from_aggregate(
 #[no_mangle]
 pub unsafe fn check_from_aggregate(x: Aggregate64) -> u64 {
-    // CHECK: %x = alloca %Aggregate64, align 4
+    // CHECK: %x = alloca [8 x i8], align 4
     // CHECK: %[[VAL:.+]] = load i64, ptr %x, align 4
     // CHECK: ret i64 %[[VAL]]
     transmute(x)
@@ -452,7 +452,7 @@ pub struct HighAlignScalar(u8);
 // CHECK-LABEL: @check_to_overalign(
 #[no_mangle]
 pub unsafe fn check_to_overalign(x: u64) -> HighAlignScalar {
-    // CHECK: %_0 = alloca %HighAlignScalar, align 8
+    // CHECK: %_0 = alloca [8 x i8], align 8
     // CHECK: store i64 %x, ptr %_0, align 8
     // CHECK: %0 = load i64, ptr %_0, align 8
     // CHECK: ret i64 %0
@@ -462,7 +462,7 @@ pub unsafe fn check_to_overalign(x: u64) -> HighAlignScalar {
 // CHECK-LABEL: @check_from_overalign(
 #[no_mangle]
 pub unsafe fn check_from_overalign(x: HighAlignScalar) -> u64 {
-    // CHECK: %x = alloca %HighAlignScalar, align 8
+    // CHECK: %x = alloca [8 x i8], align 8
     // CHECK: %[[VAL:.+]] = load i64, ptr %x, align 8
     // CHECK: ret i64 %[[VAL]]
     transmute(x)
diff --git a/tests/codegen/issues/issue-105386-ub-in-debuginfo.rs b/tests/codegen/issues/issue-105386-ub-in-debuginfo.rs
index 0bd43dc50b2..4d1c0a83b55 100644
--- a/tests/codegen/issues/issue-105386-ub-in-debuginfo.rs
+++ b/tests/codegen/issues/issue-105386-ub-in-debuginfo.rs
@@ -15,7 +15,7 @@ pub fn outer_function(x: S, y: S) -> usize {
 // Check that we do not attempt to load from the spilled arg before it is assigned to
 // when generating debuginfo.
 // CHECK-LABEL: @outer_function
-// CHECK: [[spill:%.*]] = alloca %"{closure@{{.*.rs}}:9:23: 9:25}"
+// CHECK: [[spill:%.*]] = alloca [72 x i8]
 // CHECK-NOT: [[ptr_tmp:%.*]] = getelementptr inbounds i8, ptr [[spill]]
 // CHECK-NOT: [[load:%.*]] = load ptr, ptr
 // CHECK: call void @llvm.lifetime.start{{.*}}({{.*}}, ptr [[spill]])
diff --git a/tests/codegen/issues/issue-111603.rs b/tests/codegen/issues/issue-111603.rs
index 3f4c7e7d542..41bfb493ff5 100644
--- a/tests/codegen/issues/issue-111603.rs
+++ b/tests/codegen/issues/issue-111603.rs
@@ -11,7 +11,7 @@ pub fn new_from_array(x: u64) -> Arc<[u64]> {
     // Ensure that we only generate one alloca for the array.
 
     // CHECK: alloca
-    // CHECK-SAME: [1000 x i64]
+    // CHECK-SAME: [8000 x i8]
     // CHECK-NOT: alloca
     let array = [x; 1000];
     Arc::new(array)
diff --git a/tests/codegen/overaligned-constant.rs b/tests/codegen/overaligned-constant.rs
index 9e5b69ff267..7cd8d19c211 100644
--- a/tests/codegen/overaligned-constant.rs
+++ b/tests/codegen/overaligned-constant.rs
@@ -2,7 +2,7 @@
 // do not ICE during codegen, and that the LLVM constant has the higher alignment.
 //
 //@ compile-flags: -Zmir-opt-level=0 -Zmir-enable-passes=+GVN
-//@ compile-flags: -Cno-prepopulate-passes
+//@ compile-flags: -Cno-prepopulate-passes --crate-type=lib
 //@ only-64bit
 
 struct S(i32);
@@ -12,9 +12,10 @@ struct SmallStruct(f32, Option<S>, &'static [f32]);
 // CHECK: @0 = private unnamed_addr constant
 // CHECK-SAME: , align 8
 
-fn main() {
-    // CHECK-LABEL: @_ZN20overaligned_constant4main
-    // CHECK: [[full:%_.*]] = alloca %SmallStruct, align 8
+#[no_mangle]
+pub fn overaligned_constant() {
+    // CHECK-LABEL: @overaligned_constant
+    // CHECK: [[full:%_.*]] = alloca [32 x i8], align 8
     // CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[full]], ptr align 8 @0, i64 32, i1 false)
     // CHECK: %b.0 = load i32, ptr @0, align 4
     // CHECK: %b.1 = load i32, ptr getelementptr inbounds ({{.*}}), align 4
diff --git a/tests/codegen/packed.rs b/tests/codegen/packed.rs
index 764476b0aa1..5142df9c488 100644
--- a/tests/codegen/packed.rs
+++ b/tests/codegen/packed.rs
@@ -51,7 +51,7 @@ pub struct BigPacked2 {
 // CHECK-LABEL: @call_pkd1
 #[no_mangle]
 pub fn call_pkd1(f: fn() -> Array) -> BigPacked1 {
-// CHECK: [[ALLOCA:%[_a-z0-9]+]] = alloca %Array
+// CHECK: [[ALLOCA:%[_a-z0-9]+]] = alloca [32 x i8]
 // CHECK: call void %{{.*}}(ptr noalias nocapture noundef sret{{.*}} dereferenceable(32) [[ALLOCA]])
 // CHECK: call void @llvm.memcpy.{{.*}}(ptr align 1 %{{.*}}, ptr align 4 %{{.*}}, i{{[0-9]+}} 32, i1 false)
     // check that calls whose destination is a field of a packed struct
@@ -63,7 +63,7 @@ pub fn call_pkd1(f: fn() -> Array) -> BigPacked1 {
 // CHECK-LABEL: @call_pkd2
 #[no_mangle]
 pub fn call_pkd2(f: fn() -> Array) -> BigPacked2 {
-// CHECK: [[ALLOCA:%[_a-z0-9]+]] = alloca %Array
+// CHECK: [[ALLOCA:%[_a-z0-9]+]] = alloca [32 x i8]
 // CHECK: call void %{{.*}}(ptr noalias nocapture noundef sret{{.*}} dereferenceable(32) [[ALLOCA]])
 // CHECK: call void @llvm.memcpy.{{.*}}(ptr align 2 %{{.*}}, ptr align 4 %{{.*}}, i{{[0-9]+}} 32, i1 false)
     // check that calls whose destination is a field of a packed struct
diff --git a/tests/codegen/personality_lifetimes.rs b/tests/codegen/personality_lifetimes.rs
index f2ab9c3bb82..0ef4aa424d8 100644
--- a/tests/codegen/personality_lifetimes.rs
+++ b/tests/codegen/personality_lifetimes.rs
@@ -23,7 +23,7 @@ pub fn test() {
     let _s = S;
     // Check that the personality slot alloca gets a lifetime start in each cleanup block, not just
     // in the first one.
-    // CHECK: [[SLOT:%[0-9]+]] = alloca { ptr, i32{{.*}} }
+    // CHECK: [[SLOT:%[0-9]+]] = alloca [{{[0-9]+}} x i8]
     // CHECK-LABEL: cleanup:
     // CHECK: call void @llvm.lifetime.start.{{.*}}({{.*}})
     // CHECK-LABEL: cleanup1:
diff --git a/tests/codegen/sroa-fragment-debuginfo.rs b/tests/codegen/sroa-fragment-debuginfo.rs
index d8c2d2c6f9e..32786d2a76a 100644
--- a/tests/codegen/sroa-fragment-debuginfo.rs
+++ b/tests/codegen/sroa-fragment-debuginfo.rs
@@ -14,9 +14,9 @@ pub struct ExtraSlice<'input> {
 #[no_mangle]
 pub fn extra(s: &[u8]) {
 // CHECK: void @extra(
-// CHECK: %slice.dbg.spill1 = alloca i32,
-// CHECK: %slice.dbg.spill = alloca { ptr, i64 },
-// CHECK: %s.dbg.spill = alloca { ptr, i64 },
+// CHECK: %slice.dbg.spill1 = alloca [4 x i8],
+// CHECK: %slice.dbg.spill = alloca [16 x i8],
+// CHECK: %s.dbg.spill = alloca [16 x i8],
 // CHECK: call void @llvm.dbg.declare(metadata ptr %s.dbg.spill, metadata ![[S_EXTRA:.*]], metadata !DIExpression()),
 // CHECK: call void @llvm.dbg.declare(metadata ptr %slice.dbg.spill, metadata ![[SLICE_EXTRA:.*]], metadata !DIExpression(DW_OP_LLVM_fragment, 0, 128)),
 // CHECK: call void @llvm.dbg.declare(metadata ptr %slice.dbg.spill1, metadata ![[SLICE_EXTRA]], metadata !DIExpression(DW_OP_LLVM_fragment, 128, 32)),
diff --git a/tests/codegen/stores.rs b/tests/codegen/stores.rs
index 3fda5aa47ea..86ec52fa101 100644
--- a/tests/codegen/stores.rs
+++ b/tests/codegen/stores.rs
@@ -15,8 +15,8 @@ pub struct Bytes {
 // dependent alignment
 #[no_mangle]
 pub fn small_array_alignment(x: &mut [i8; 4], y: [i8; 4]) {
-// CHECK: [[TMP:%.+]] = alloca i32
-// CHECK: %y = alloca [4 x i8]
+// CHECK: [[TMP:%.+]] = alloca [4 x i8], align 4
+// CHECK: %y = alloca [4 x i8], align 1
 // CHECK: store i32 %0, ptr [[TMP]]
 // CHECK: call void @llvm.memcpy.{{.*}}(ptr align 1 {{.+}}, ptr align 4 {{.+}}, i{{[0-9]+}} 4, i1 false)
     *x = y;
@@ -27,8 +27,8 @@ pub fn small_array_alignment(x: &mut [i8; 4], y: [i8; 4]) {
 // dependent alignment
 #[no_mangle]
 pub fn small_struct_alignment(x: &mut Bytes, y: Bytes) {
-// CHECK: [[TMP:%.+]] = alloca i32
-// CHECK: %y = alloca %Bytes
+// CHECK: [[TMP:%.+]] = alloca [4 x i8], align 4
+// CHECK: %y = alloca [4 x i8], align 1
 // CHECK: store i32 %0, ptr [[TMP]]
 // CHECK: call void @llvm.memcpy.{{.*}}(ptr align 1 {{.+}}, ptr align 4 {{.+}}, i{{[0-9]+}} 4, i1 false)
     *x = y;
diff --git a/tests/codegen/swap-large-types.rs b/tests/codegen/swap-large-types.rs
index b182f3ed947..b976f6fe207 100644
--- a/tests/codegen/swap-large-types.rs
+++ b/tests/codegen/swap-large-types.rs
@@ -15,7 +15,7 @@ type KeccakBuffer = [[u64; 5]; 5];
 // CHECK-LABEL: @swap_basic
 #[no_mangle]
 pub fn swap_basic(x: &mut KeccakBuffer, y: &mut KeccakBuffer) {
-// CHECK: alloca [5 x [5 x i64]]
+// CHECK: alloca [200 x i8]
 
     // SAFETY: exclusive references are always valid to read/write,
     // are non-overlapping, and nothing here panics so it's drop-safe.
diff --git a/tests/codegen/swap-small-types.rs b/tests/codegen/swap-small-types.rs
index 4dcfed2a53a..1a48c63d813 100644
--- a/tests/codegen/swap-small-types.rs
+++ b/tests/codegen/swap-small-types.rs
@@ -12,7 +12,7 @@ type RGB48 = [u16; 3];
 pub fn swap_rgb48_manually(x: &mut RGB48, y: &mut RGB48) {
     // FIXME: See #115212 for why this has an alloca again
 
-    // CHECK: alloca [3 x i16], align 2
+    // CHECK: alloca [6 x i8], align 2
     // CHECK: call void @llvm.memcpy.p0.p0.i64({{.+}}, i64 6, i1 false)
     // CHECK: call void @llvm.memcpy.p0.p0.i64({{.+}}, i64 6, i1 false)
     // CHECK: call void @llvm.memcpy.p0.p0.i64({{.+}}, i64 6, i1 false)

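A note on the FileCheck churn in cast-target-abi.rs and friends:
`[[ABI_TYPE:...]]` used to be bound on the `alloca` line, but byte-array
allocas no longer mention the ABI type, so the binding moves to the
first `load` or `call` that still uses the real type, e.g.:

    -    // aarch64:     [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:i64]], align [[ABI_ALIGN:8]]
    +    // aarch64:     [[ABI_ALLOCA:%.+]] = alloca [8 x i8], align [[ABI_ALIGN:8]]
    ...
    +    // aarch64:     [[ABI_VALUE:%.+]] = load [[ABI_TYPE:i64]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]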
From d46b6889451de69a9cf7808a042d8292fc2c341a Mon Sep 17 00:00:00 2001
From: Erik Desjardins <erikdesjardins@users.noreply.github.com>
Date: Tue, 5 Mar 2024 20:55:42 -0500
Subject: [PATCH 2/5] adjust stack-protector test (which inappropriately
 depends on IR types)

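The `basic` heuristic (`-Z stack-protector=basic`, LLVM's `ssp`
attribute) only protects functions whose locals include a byte-sized
array. Now that the previous patch lowers every fixed-size alloca to
`[N x i8]`, the heuristic fires on far more functions, so the
expectations flip accordingly:

    -    // basic-NOT: __stack_chk_fail
    +    // basic: __stack_chk_fail

The per-function prose about which heuristic triggers is replaced by a
single NOTE pointing at rust-lang/rust#114903, since the distinctions
it described no longer hold.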
---
 .../stack-protector-heuristics-effect.rs      | 55 +++++--------------
 1 file changed, 15 insertions(+), 40 deletions(-)

diff --git a/tests/assembly/stack-protector/stack-protector-heuristics-effect.rs b/tests/assembly/stack-protector/stack-protector-heuristics-effect.rs
index e63adc88ff5..8e32d170244 100644
--- a/tests/assembly/stack-protector/stack-protector-heuristics-effect.rs
+++ b/tests/assembly/stack-protector/stack-protector-heuristics-effect.rs
@@ -11,6 +11,11 @@
 //@ compile-flags: -C opt-level=2 -Z merge-functions=disabled
 //@ min-llvm-version: 17.0.2
 
+// NOTE: the heuristics for stack smash protection inappropriately rely on types in LLVM IR,
+// despite those types having no semantic meaning. This means that the `basic` and `strong`
+// settings do not behave in a coherent way. This is a known issue in LLVM.
+// See comments on https://github.com/rust-lang/rust/issues/114903.
+
 #![crate_type = "lib"]
 
 #![allow(incomplete_features)]
@@ -39,23 +44,9 @@ pub fn array_char(f: fn(*const char)) {
     f(&b as *const _);
     f(&c as *const _);
 
-    // Any type of local array variable leads to stack protection with the
-    // "strong" heuristic. The 'basic' heuristic only adds stack protection to
-    // functions with local array variables of a byte-sized type, however. Since
-    // 'char' is 4 bytes in Rust, this function is not protected by the 'basic'
-    // heuristic
-    //
-    // (This test *also* takes the address of the local stack variables. We
-    // cannot know that this isn't what triggers the `strong` heuristic.
-    // However, the test strategy of passing the address of a stack array to an
-    // external function is sufficient to trigger the `basic` heuristic (see
-    // test `array_u8_large()`). Since the `basic` heuristic only checks for the
-    // presence of stack-local array variables, we can be confident that this
-    // test also captures this part of the `strong` heuristic specification.)
-
     // all: __stack_chk_fail
     // strong: __stack_chk_fail
-    // basic-NOT: __stack_chk_fail
+    // basic: __stack_chk_fail
     // none-NOT: __stack_chk_fail
     // missing-NOT: __stack_chk_fail
 }
@@ -163,26 +154,11 @@ pub fn local_string_addr_taken(f: fn(&String)) {
     f(&x);
 
     // Taking the address of the local variable `x` leads to stack smash
-    // protection with the `strong` heuristic, but not with the `basic`
-    // heuristic. It does not matter that the reference is not mut.
-    //
-    // An interesting note is that a similar function in C++ *would* be
-    // protected by the `basic` heuristic, because `std::string` has a char
-    // array internally as a small object optimization:
-    // ```
-    // cat <<EOF | clang++ -O2 -fstack-protector -S -x c++ - -o - | grep stack_chk
-    // #include <string>
-    // void f(void (*g)(const std::string&)) {
-    //     std::string x;
-    //     g(x);
-    // }
-    // EOF
-    // ```
-    //
+    // protection. It does not matter that the reference is not mut.
 
     // all: __stack_chk_fail
     // strong: __stack_chk_fail
-    // basic-NOT: __stack_chk_fail
+    // basic: __stack_chk_fail
     // none-NOT: __stack_chk_fail
     // missing-NOT: __stack_chk_fail
 }
@@ -233,8 +209,8 @@ pub fn local_large_var_moved(f: fn(Gigastruct)) {
     // Even though the local variable conceptually doesn't have its address
     // taken, it's so large that the "move" is implemented with a reference to a
     // stack-local variable in the ABI. Consequently, this function *is*
-    // protected by the `strong` heuristic. This is also the case for
-    // rvalue-references in C++, regardless of struct size:
+    // protected. This is also the case for rvalue-references in C++,
+    // regardless of struct size:
     // ```
     // cat <<EOF | clang++ -O2 -fstack-protector-strong -S -x c++ - -o - | grep stack_chk
     // #include <cstdint>
@@ -248,7 +224,7 @@ pub fn local_large_var_moved(f: fn(Gigastruct)) {
 
     // all: __stack_chk_fail
     // strong: __stack_chk_fail
-    // basic-NOT: __stack_chk_fail
+    // basic: __stack_chk_fail
     // none-NOT: __stack_chk_fail
     // missing-NOT: __stack_chk_fail
 }
@@ -261,9 +237,9 @@ pub fn local_large_var_cloned(f: fn(Gigastruct)) {
     // A new instance of `Gigastruct` is passed to `f()`, without any apparent
     // connection to this stack frame. Still, since instances of `Gigastruct`
     // are sufficiently large, it is allocated in the caller stack frame and
-    // passed as a pointer. As such, this function is *also* protected by the
-    // `strong` heuristic, just like `local_large_var_moved`. This is also the
-    // case for pass-by-value of sufficiently large structs in C++:
+    // passed as a pointer. As such, this function is *also* protected, just
+    // like `local_large_var_moved`. This is also the case for pass-by-value
+    // of sufficiently large structs in C++:
     // ```
     // cat <<EOF | clang++ -O2 -fstack-protector-strong -S -x c++ - -o - | grep stack_chk
     // #include <cstdint>
@@ -275,10 +251,9 @@ pub fn local_large_var_cloned(f: fn(Gigastruct)) {
     // EOF
     // ```
 
-
     // all: __stack_chk_fail
     // strong: __stack_chk_fail
-    // basic-NOT: __stack_chk_fail
+    // basic: __stack_chk_fail
     // none-NOT: __stack_chk_fail
     // missing-NOT: __stack_chk_fail
 }

From 1ce5dc8d9c638194306e63abfe81748c77e7386e Mon Sep 17 00:00:00 2001
From: Erik Desjardins <erikdesjardins@users.noreply.github.com>
Date: Fri, 12 Apr 2024 08:29:06 -0400
Subject: [PATCH 3/5] restore location in gcc alloca codegen

---
 compiler/rustc_codegen_gcc/src/builder.rs | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/compiler/rustc_codegen_gcc/src/builder.rs b/compiler/rustc_codegen_gcc/src/builder.rs
index cf1aa3f4793..8d6c16ebe04 100644
--- a/compiler/rustc_codegen_gcc/src/builder.rs
+++ b/compiler/rustc_codegen_gcc/src/builder.rs
@@ -902,7 +902,13 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         let ty = self.cx.type_array(self.cx.type_i8(), size.bytes()).get_aligned(align.bytes());
         // TODO(antoyo): It might be better to return a LValue, but fixing the rustc API is non-trivial.
         self.stack_var_count.set(self.stack_var_count.get() + 1);
-        self.current_func().new_local(None, ty, &format!("stack_var_{}", self.stack_var_count.get())).get_address(None)
+        self.current_func()
+            .new_local(
+                self.location,
+                ty,
+                &format!("stack_var_{}", self.stack_var_count.get()),
+            )
+            .get_address(self.location)
     }
 
     fn dynamic_alloca(&mut self, _len: RValue<'gcc>, _align: Align) -> RValue<'gcc> {

From daaaacdcb31addf5a56fe70884d72cf28998bdc2 Mon Sep 17 00:00:00 2001
From: Erik Desjardins <erikdesjardins@users.noreply.github.com>
Date: Fri, 12 Apr 2024 08:31:35 -0400
Subject: [PATCH 4/5] remove alloca type from issue-105386-ub-in-debuginfo

The exact alloca type is irrelevant for the purposes of this test (there is
only one alloca), and its size changes depending on the target, so it can't
be matched easily.
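
Concretely (the first size is what the old CHECK line matched on a 64-bit
target; the second is a hypothetical stand-in for a 32-bit target):

```
; illustrative only: the single spill slot's byte size is target-dependent
%spill = alloca [72 x i8], align 8   ; e.g. x86_64
%spill = alloca [44 x i8], align 4   ; hypothetical 32-bit figure
```

Matching a bare `alloca` keeps the test target-independent.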
---
 tests/codegen/issues/issue-105386-ub-in-debuginfo.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/codegen/issues/issue-105386-ub-in-debuginfo.rs b/tests/codegen/issues/issue-105386-ub-in-debuginfo.rs
index 4d1c0a83b55..56b4330b1a6 100644
--- a/tests/codegen/issues/issue-105386-ub-in-debuginfo.rs
+++ b/tests/codegen/issues/issue-105386-ub-in-debuginfo.rs
@@ -15,7 +15,7 @@ pub fn outer_function(x: S, y: S) -> usize {
 // Check that we do not attempt to load from the spilled arg before it is assigned to
 // when generating debuginfo.
 // CHECK-LABEL: @outer_function
-// CHECK: [[spill:%.*]] = alloca [72 x i8]
+// CHECK: [[spill:%.*]] = alloca
 // CHECK-NOT: [[ptr_tmp:%.*]] = getelementptr inbounds i8, ptr [[spill]]
 // CHECK-NOT: [[load:%.*]] = load ptr, ptr
 // CHECK: call void @llvm.lifetime.start{{.*}}({{.*}}, ptr [[spill]])

From 6df27ef88f2ce082c8f3b9db134bfa3c0c28cf38 Mon Sep 17 00:00:00 2001
From: Erik Desjardins <erikdesjardins@users.noreply.github.com>
Date: Mon, 22 Apr 2024 23:45:04 -0400
Subject: [PATCH 5/5] also update windows stack-protector tests

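These are the MSVC-target counterparts of the tests adjusted in patch 2: on
`*-windows-msvc` targets the stack protector is lowered to the MSVC-style
(/GS) cookie check in the backend, so these assembly tests grep for
`__security_check_cookie` instead of `__stack_chk_fail`. The flip for the
`basic` heuristic is the same byte-array effect sketched in patch 2, e.g.
(illustrative):

```
; the [N x i8] alloca shape that now trips `basic` elsewhere trips it
; here too; the guard only appears in the generated assembly, where
; these tests look for `call __security_check_cookie`
define void @sketch() ssp {
  %c = alloca [60 x i8], align 4
  ret void
}
```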
---
 ...otector-heuristics-effect-windows-32bit.rs | 30 +++++--------------
 ...otector-heuristics-effect-windows-64bit.rs | 30 +++++--------------
 2 files changed, 16 insertions(+), 44 deletions(-)

diff --git a/tests/assembly/stack-protector/stack-protector-heuristics-effect-windows-32bit.rs b/tests/assembly/stack-protector/stack-protector-heuristics-effect-windows-32bit.rs
index 12339cb4415..51b4dc4e169 100644
--- a/tests/assembly/stack-protector/stack-protector-heuristics-effect-windows-32bit.rs
+++ b/tests/assembly/stack-protector/stack-protector-heuristics-effect-windows-32bit.rs
@@ -37,23 +37,9 @@ pub fn array_char(f: fn(*const char)) {
     f(&b as *const _);
     f(&c as *const _);
 
-    // Any type of local array variable leads to stack protection with the
-    // "strong" heuristic. The 'basic' heuristic only adds stack protection to
-    // functions with local array variables of a byte-sized type, however. Since
-    // 'char' is 4 bytes in Rust, this function is not protected by the 'basic'
-    // heuristic
-    //
-    // (This test *also* takes the address of the local stack variables. We
-    // cannot know that this isn't what triggers the `strong` heuristic.
-    // However, the test strategy of passing the address of a stack array to an
-    // external function is sufficient to trigger the `basic` heuristic (see
-    // test `array_u8_large()`). Since the `basic` heuristic only checks for the
-    // presence of stack-local array variables, we can be confident that this
-    // test also captures this part of the `strong` heuristic specification.)
-
     // all: __security_check_cookie
     // strong: __security_check_cookie
-    // basic-NOT: __security_check_cookie
+    // basic: __security_check_cookie
     // none-NOT: __security_check_cookie
     // missing-NOT: __security_check_cookie
 }
@@ -231,8 +217,8 @@ pub fn local_large_var_moved(f: fn(Gigastruct)) {
     // Even though the local variable conceptually doesn't have its address
     // taken, it's so large that the "move" is implemented with a reference to a
     // stack-local variable in the ABI. Consequently, this function *is*
-    // protected by the `strong` heuristic. This is also the case for
-    // rvalue-references in C++, regardless of struct size:
+    // protected. This is also the case for rvalue-references in C++,
+    // regardless of struct size:
     // ```
     // cat <<EOF | clang++ -O2 -fstack-protector-strong -S -x c++ - -o - | grep stack_chk
     // #include <cstdint>
@@ -246,7 +232,7 @@ pub fn local_large_var_moved(f: fn(Gigastruct)) {
 
     // all: __security_check_cookie
     // strong: __security_check_cookie
-    // basic-NOT: __security_check_cookie
+    // basic: __security_check_cookie
     // none-NOT: __security_check_cookie
     // missing-NOT: __security_check_cookie
 }
@@ -259,9 +245,9 @@ pub fn local_large_var_cloned(f: fn(Gigastruct)) {
     // A new instance of `Gigastruct` is passed to `f()`, without any apparent
     // connection to this stack frame. Still, since instances of `Gigastruct`
     // are sufficiently large, it is allocated in the caller stack frame and
-    // passed as a pointer. As such, this function is *also* protected by the
-    // `strong` heuristic, just like `local_large_var_moved`. This is also the
-    // case for pass-by-value of sufficiently large structs in C++:
+    // passed as a pointer. As such, this function is *also* protected, just
+    // like `local_large_var_moved`. This is also the case for pass-by-value
+    // of sufficiently large structs in C++:
     // ```
     // cat <<EOF | clang++ -O2 -fstack-protector-strong -S -x c++ - -o - | grep stack_chk
     // #include <cstdint>
@@ -276,7 +262,7 @@ pub fn local_large_var_cloned(f: fn(Gigastruct)) {
 
     // all: __security_check_cookie
     // strong: __security_check_cookie
-    // basic-NOT: __security_check_cookie
+    // basic: __security_check_cookie
     // none-NOT: __security_check_cookie
     // missing-NOT: __security_check_cookie
 }
diff --git a/tests/assembly/stack-protector/stack-protector-heuristics-effect-windows-64bit.rs b/tests/assembly/stack-protector/stack-protector-heuristics-effect-windows-64bit.rs
index 46c77511251..c5915262c09 100644
--- a/tests/assembly/stack-protector/stack-protector-heuristics-effect-windows-64bit.rs
+++ b/tests/assembly/stack-protector/stack-protector-heuristics-effect-windows-64bit.rs
@@ -37,23 +37,9 @@ pub fn array_char(f: fn(*const char)) {
     f(&b as *const _);
     f(&c as *const _);
 
-    // Any type of local array variable leads to stack protection with the
-    // "strong" heuristic. The 'basic' heuristic only adds stack protection to
-    // functions with local array variables of a byte-sized type, however. Since
-    // 'char' is 4 bytes in Rust, this function is not protected by the 'basic'
-    // heuristic
-    //
-    // (This test *also* takes the address of the local stack variables. We
-    // cannot know that this isn't what triggers the `strong` heuristic.
-    // However, the test strategy of passing the address of a stack array to an
-    // external function is sufficient to trigger the `basic` heuristic (see
-    // test `array_u8_large()`). Since the `basic` heuristic only checks for the
-    // presence of stack-local array variables, we can be confident that this
-    // test also captures this part of the `strong` heuristic specification.)
-
     // all: __security_check_cookie
     // strong: __security_check_cookie
-    // basic-NOT: __security_check_cookie
+    // basic: __security_check_cookie
     // none-NOT: __security_check_cookie
     // missing-NOT: __security_check_cookie
 }
@@ -239,8 +225,8 @@ pub fn local_large_var_moved(f: fn(Gigastruct)) {
     // Even though the local variable conceptually doesn't have its address
     // taken, it's so large that the "move" is implemented with a reference to a
     // stack-local variable in the ABI. Consequently, this function *is*
-    // protected by the `strong` heuristic. This is also the case for
-    // rvalue-references in C++, regardless of struct size:
+    // protected. This is also the case for rvalue-references in C++,
+    // regardless of struct size:
     // ```
     // cat <<EOF | clang++ -O2 -fstack-protector-strong -S -x c++ - -o - | grep stack_chk
     // #include <cstdint>
@@ -254,7 +240,7 @@ pub fn local_large_var_moved(f: fn(Gigastruct)) {
 
     // all: __security_check_cookie
     // strong: __security_check_cookie
-    // basic-NOT: __security_check_cookie
+    // basic: __security_check_cookie
     // none-NOT: __security_check_cookie
     // missing-NOT: __security_check_cookie
 }
@@ -267,9 +253,9 @@ pub fn local_large_var_cloned(f: fn(Gigastruct)) {
     // A new instance of `Gigastruct` is passed to `f()`, without any apparent
     // connection to this stack frame. Still, since instances of `Gigastruct`
     // are sufficiently large, it is allocated in the caller stack frame and
-    // passed as a pointer. As such, this function is *also* protected by the
-    // `strong` heuristic, just like `local_large_var_moved`. This is also the
-    // case for pass-by-value of sufficiently large structs in C++:
+    // passed as a pointer. As such, this function is *also* protected, just
+    // like `local_large_var_moved`. This is also the case for pass-by-value
+    // of sufficiently large structs in C++:
     // ```
     // cat <<EOF | clang++ -O2 -fstack-protector-strong -S -x c++ - -o - | grep stack_chk
     // #include <cstdint>
@@ -284,7 +270,7 @@ pub fn local_large_var_cloned(f: fn(Gigastruct)) {
 
     // all: __security_check_cookie
     // strong: __security_check_cookie
-    // basic-NOT: __security_check_cookie
+    // basic: __security_check_cookie
     // none-NOT: __security_check_cookie
     // missing-NOT: __security_check_cookie
 }