Rollup merge of #57053 - nikic:fix-gep-align, r=nagisa

Fix alignment for array indexing

We need to restrict the alignment by the offset that is actually used. If the offset isn't known at compile time, use the element size instead, as this yields the minimum possible alignment.
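
To make the arithmetic concrete, here is a minimal standalone sketch of the restriction, using plain integers in place of rustc's Align/Size types (the free function below is illustrative, not the compiler's API): the alignment that can still be guaranteed at base + offset is the base alignment capped by the largest power of two dividing the offset.

// Minimal sketch: restrict a power-of-two alignment (in bytes) by a byte offset.
fn restrict_for_offset(align: u64, offset: u64) -> u64 {
    if offset == 0 {
        // An offset of zero preserves the original alignment.
        align
    } else {
        // The largest power of two dividing `offset` bounds what can still
        // be guaranteed at `base + offset`.
        align.min(1 << offset.trailing_zeros())
    }
}

fn main() {
    // A 16-aligned [u32; 4]: offsets 0, 4, 8, 12 give alignments 16, 4, 8, 4.
    for i in 0..4u64 {
        println!("index {}: align {}", i, restrict_for_offset(16, i * 4));
    }
}

When the index is not a compile-time constant, the offset is an unknown multiple of the element size; since the element size divides every such offset, restricting by the element size is the conservative lower bound.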

This handles both direct array indexing and array repeat expressions.

Fixes #56927.

r? @nagisa
Commit 4ebc7b7e63, authored by Mazdak Farrokhzad on 2018-12-23 23:09:12 +01:00, committed via GitHub.
4 changed files with 93 additions and 3 deletions

@@ -335,11 +335,20 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
         bx: &mut Bx,
         llindex: V
     ) -> Self {
+        // Statically compute the offset if we can, otherwise just use the element size,
+        // as this will yield the lowest alignment.
+        let layout = self.layout.field(bx, 0);
+        let offset = if bx.is_const_integral(llindex) {
+            layout.size.checked_mul(bx.const_to_uint(llindex), bx).unwrap_or(layout.size)
+        } else {
+            layout.size
+        };
+
         PlaceRef {
             llval: bx.inbounds_gep(self.llval, &[bx.cx().const_usize(0), llindex]),
             llextra: None,
-            layout: self.layout.field(bx.cx(), 0),
-            align: self.align
+            layout,
+            align: self.align.restrict_for_offset(offset),
         }
     }
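
Read as plain integer arithmetic, the offset chosen above for the alignment restriction amounts to the following sketch (assuming is_const_integral/const_to_uint simply expose a compile-time-known index; the function name here is illustrative):

// Sketch: which byte offset the new code restricts the alignment by,
// for an array whose elements are `elem_size` bytes.
fn offset_for_restriction(elem_size: u64, const_index: Option<u64>) -> u64 {
    match const_index {
        // Known index: the exact byte offset, falling back to the element
        // size if the multiplication would overflow (the unwrap_or above).
        Some(i) => elem_size.checked_mul(i).unwrap_or(elem_size),
        // Unknown index: the element size yields the smallest alignment
        // any in-bounds element can have, so it is the safe choice.
        None => elem_size,
    }
}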

@@ -131,8 +131,9 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         let keep_going = header_bx.icmp(IntPredicate::IntNE, current, end);
         header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb());
 
+        let align = dest.align.restrict_for_offset(dest.layout.field(bx.cx(), 0).size);
         cg_elem.val.store(&mut body_bx,
-            PlaceRef::new_sized(current, cg_elem.layout, dest.align));
+            PlaceRef::new_sized(current, cg_elem.layout, align));
 
         let next = body_bx.inbounds_gep(current, &[bx.cx().const_usize(1)]);
         body_bx.br(header_bx.llbb());
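
The repeat-expression path stores into every element from a loop, so one alignment has to be valid for all of them; restricting the destination alignment by the element size is exactly that lower bound. A hypothetical user-level example of the pattern this affects, mirroring the packed test further down:

#[repr(packed)]
pub struct P {
    pub byte: u8,
    pub data: [u32; 8],
}

pub fn fill(p: &mut P) {
    // `data` starts at offset 1, so its elements sit at offsets 1, 5, 9, ...;
    // the codegen loop must emit these stores with align 1, not align 4.
    p.data = [42; 8];
}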

@@ -0,0 +1,44 @@
+// compile-flags: -C no-prepopulate-passes
+#![crate_type="rlib"]
+
+use std::usize;
+
+#[repr(align(16))]
+pub struct S {
+    arr: [u32; 4],
+}
+
+// CHECK-LABEL: @test1
+// CHECK: store i32 0, i32* %{{.+}}, align 16
+// CHECK: store i32 1, i32* %{{.+}}, align 4
+// CHECK: store i32 2, i32* %{{.+}}, align 8
+// CHECK: store i32 3, i32* %{{.+}}, align 4
+#[no_mangle]
+pub fn test1(s: &mut S) {
+    s.arr[0] = 0;
+    s.arr[1] = 1;
+    s.arr[2] = 2;
+    s.arr[3] = 3;
+}
+
+// CHECK-LABEL: @test2
+// CHECK: store i32 4, i32* %{{.+}}, align 4
+#[allow(const_err)]
+#[no_mangle]
+pub fn test2(s: &mut S) {
+    s.arr[usize::MAX / 4 + 1] = 4;
+}
+
+// CHECK-LABEL: @test3
+// CHECK: store i32 5, i32* %{{.+}}, align 4
+#[no_mangle]
+pub fn test3(s: &mut S, i: usize) {
+    s.arr[i] = 5;
+}
+
+// CHECK-LABEL: @test4
+// CHECK: store i32 6, i32* %{{.+}}, align 4
+#[no_mangle]
+pub fn test4(s: &mut S) {
+    s.arr = [6; 4];
+}
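
The CHECK values in test1 are the restriction worked out by hand: S is 16-aligned and its u32 elements are 4 bytes, so indices 0 through 3 sit at byte offsets 0, 4, 8 and 12; offset 0 keeps the full alignment of 16, while the others give min(16, 4) = 4, min(16, 8) = 8 and min(16, 4) = 4. In test2 the deliberately out-of-range constant index makes the offset computation overflow, so it falls back to the element size (align 4), and in test3 and test4 the index is unknown or the whole array is written by a repeat expression, so the element size is used as well.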

@@ -83,6 +83,42 @@ pub fn call_pkd2(f: fn() -> Array) -> BigPacked2 {
     BigPacked2 { dealign: 0, data: f() }
 }
 
+// CHECK-LABEL: @write_packed_array1
+// CHECK: store i32 0, i32* %{{.+}}, align 1
+// CHECK: store i32 1, i32* %{{.+}}, align 1
+// CHECK: store i32 2, i32* %{{.+}}, align 1
+#[no_mangle]
+pub fn write_packed_array1(p: &mut BigPacked1) {
+    p.data.0[0] = 0;
+    p.data.0[1] = 1;
+    p.data.0[2] = 2;
+}
+
+// CHECK-LABEL: @write_packed_array2
+// CHECK: store i32 0, i32* %{{.+}}, align 2
+// CHECK: store i32 1, i32* %{{.+}}, align 2
+// CHECK: store i32 2, i32* %{{.+}}, align 2
+#[no_mangle]
+pub fn write_packed_array2(p: &mut BigPacked2) {
+    p.data.0[0] = 0;
+    p.data.0[1] = 1;
+    p.data.0[2] = 2;
+}
+
+// CHECK-LABEL: @repeat_packed_array1
+// CHECK: store i32 42, i32* %{{.+}}, align 1
+#[no_mangle]
+pub fn repeat_packed_array1(p: &mut BigPacked1) {
+    p.data.0 = [42; 8];
+}
+
+// CHECK-LABEL: @repeat_packed_array2
+// CHECK: store i32 42, i32* %{{.+}}, align 2
+#[no_mangle]
+pub fn repeat_packed_array2(p: &mut BigPacked2) {
+    p.data.0 = [42; 8];
+}
+
 #[repr(packed)]
 #[derive(Copy, Clone)]
 pub struct Packed1Pair(u8, u32);
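
The packed cases follow the same rule from the other direction: inside a #[repr(packed)] struct a field place only carries alignment 1, and with #[repr(packed(2))] at most 2, so after restricting by any offset the stores can never claim more than align 1 and align 2 respectively, which is what the new CHECK lines assert.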