diff --git a/src/test/codegen/tuple-layout-opt.rs b/src/test/codegen/tuple-layout-opt.rs
new file mode 100644
index 00000000000..e86c75f3f48
--- /dev/null
+++ b/src/test/codegen/tuple-layout-opt.rs
@@ -0,0 +1,36 @@
+// ignore-emscripten
+// compile-flags: -C no-prepopulate-passes
+
+// Test that tuples get optimized layout, in particular with a ZST in the last field (#63244)
+
+#![crate_type="lib"]
+
+type ScalarZstLast = (u128, ());
+// CHECK: define i128 @test_ScalarZstLast(i128 %_1)
+#[no_mangle]
+pub fn test_ScalarZstLast(_: ScalarZstLast) -> ScalarZstLast { loop {} }
+
+type ScalarZstFirst = ((), u128);
+// CHECK: define i128 @test_ScalarZstFirst(i128 %_1)
+#[no_mangle]
+pub fn test_ScalarZstFirst(_: ScalarZstFirst) -> ScalarZstFirst { loop {} }
+
+type ScalarPairZstLast = (u8, u128, ());
+// CHECK: define { i128, i8 } @test_ScalarPairZstLast(i128 %_1.0, i8 %_1.1)
+#[no_mangle]
+pub fn test_ScalarPairZstLast(_: ScalarPairZstLast) -> ScalarPairZstLast { loop {} }
+
+type ScalarPairZstFirst = ((), u8, u128);
+// CHECK: define { i8, i128 } @test_ScalarPairZstFirst(i8 %_1.0, i128 %_1.1)
+#[no_mangle]
+pub fn test_ScalarPairZstFirst(_: ScalarPairZstFirst) -> ScalarPairZstFirst { loop {} }
+
+type ScalarPairLotsOfZsts = ((), u8, (), u128, ());
+// CHECK: define { i128, i8 } @test_ScalarPairLotsOfZsts(i128 %_1.0, i8 %_1.1)
+#[no_mangle]
+pub fn test_ScalarPairLotsOfZsts(_: ScalarPairLotsOfZsts) -> ScalarPairLotsOfZsts { loop {} }
+
+type ScalarPairLottaNesting = (((), ((), u8, (), u128, ())), ());
+// CHECK: define { i128, i8 } @test_ScalarPairLottaNesting(i128 %_1.0, i8 %_1.1)
+#[no_mangle]
+pub fn test_ScalarPairLottaNesting(_: ScalarPairLottaNesting) -> ScalarPairLottaNesting { loop {} }
diff --git a/src/test/ui/dynamically-sized-types/dst-tuple-no-reorder.rs b/src/test/ui/dynamically-sized-types/dst-tuple-no-reorder.rs
new file mode 100644
index 00000000000..26b923f431f
--- /dev/null
+++ b/src/test/ui/dynamically-sized-types/dst-tuple-no-reorder.rs
@@ -0,0 +1,26 @@
+// run-pass
+
+#![feature(unsized_tuple_coercion)]
+
+// Ensure that unsizable fields that might be accessed don't get reordered
+
+fn nonzero_size() {
+    let sized: (u8, [u32; 2]) = (123, [456, 789]);
+    let unsize: &(u8, [u32]) = &sized;
+    assert_eq!(unsize.0, 123);
+    assert_eq!(unsize.1.len(), 2);
+    assert_eq!(unsize.1[0], 456);
+    assert_eq!(unsize.1[1], 789);
+}
+
+fn zst() {
+    let sized: (u8, [u32; 0]) = (123, []);
+    let unsize: &(u8, [u32]) = &sized;
+    assert_eq!(unsize.0, 123);
+    assert_eq!(unsize.1.len(), 0);
+}
+
+pub fn main() {
+    nonzero_size();
+    zst();
+}
diff --git a/src/test/ui/mir/mir_const_prop_tuple_field_reorder.rs b/src/test/ui/mir/mir_const_prop_tuple_field_reorder.rs
new file mode 100644
index 00000000000..629b50dec65
--- /dev/null
+++ b/src/test/ui/mir/mir_const_prop_tuple_field_reorder.rs
@@ -0,0 +1,27 @@
+// compile-flags: -Z mir-opt-level=2
+// build-pass
+#![crate_type="lib"]
+
+// This used to ICE: const-prop did not account for field reordering of scalar pairs,
+// and would generate a tuple like `(0x1337, VariantBar): (FooEnum, isize)`,
+// causing assertion failures in codegen when trying to read 0x1337 at the wrong type.
+
+pub enum FooEnum {
+    VariantBar,
+    VariantBaz,
+    VariantBuz,
+}
+
+pub fn wrong_index() -> isize {
+    let (_, b) = id((FooEnum::VariantBar, 0x1337));
+    b
+}
+
+pub fn wrong_index_two() -> isize {
+    let (_, (_, b)) = id(((), (FooEnum::VariantBar, 0x1338)));
+    b
+}
+
+fn id<T>(x: T) -> T {
+    x
+}
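
For context, here is a minimal standalone sketch (not part of the diff above) of what the tested layout optimization looks like from the user's side. It only prints size_of values rather than asserting them, since exact tuple layout under the default repr(Rust) is not guaranteed by the language:

use std::mem::size_of;

fn main() {
    // A trailing ZST should not change the size of the tuple.
    println!("(u128, ())    : {} bytes", size_of::<(u128, ())>());
    println!("(u128,)       : {} bytes", size_of::<(u128,)>());

    // With repr(Rust), the compiler may reorder fields to remove padding.
    println!("(u8, u16, u8) : {} bytes", size_of::<(u8, u16, u8)>());
}

On current compilers this typically prints 16, 16, and 4 bytes, which is the behavior the codegen test above pins down at the ABI level (scalar and scalar-pair tuples stay scalar-shaped despite interspersed ZSTs).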