// This snapshot tests accessing various containers, dereferencing pointers.

struct AlignedWrapper {
    @align(8) value: i32
}

struct Bar {
    matrix: mat4x3<f32>,
    matrix_array: array<mat2x2<f32>, 2>,
    atom: atomic<i32>,
    arr: array<vec2<u32>, 2>,
    data: array<AlignedWrapper>,
}

@group(0) @binding(0)
var<storage, read_write> bar: Bar;

struct Baz {
    m: mat3x2<f32>,
}

@group(0) @binding(1)
var<uniform> baz: Baz;

fn test_matrix_within_struct_accesses() {
    var idx = 9;

    idx--;

    // loads
    _ = baz.m;
    _ = baz.m[0];
    _ = baz.m[idx];
    _ = baz.m[0][1];
    _ = baz.m[0][idx];
    _ = baz.m[idx][1];
    _ = baz.m[idx][idx];

    var t = Baz(mat3x2<f32>(vec2<f32>(1.0), vec2<f32>(2.0), vec2<f32>(3.0)));

    idx++;

    // stores
    t.m = mat3x2<f32>(vec2<f32>(6.0), vec2<f32>(5.0), vec2<f32>(4.0));
    t.m[0] = vec2<f32>(9.0);
    t.m[idx] = vec2<f32>(90.0);
    t.m[0][1] = 10.0;
    t.m[0][idx] = 20.0;
    t.m[idx][1] = 30.0;
    t.m[idx][idx] = 40.0;
}

fn read_from_private(foo: ptr<function, f32>) -> f32 {
    return *foo;
}

fn test_arr_as_arg(a: array<array<f32, 10>, 5>) -> f32 {
    return a[4][9];
}

@vertex
fn foo_vert(@builtin(vertex_index) vi: u32) -> @builtin(position) vec4<f32> {
    var foo: f32 = 0.0;
    // We should check that the backend doesn't skip this expression
    let baz: f32 = foo;
    foo = 1.0;

    test_matrix_within_struct_accesses();

    // test storage loads
    let matrix = bar.matrix;
    let arr = bar.arr;
    let index = 3u;
    let b = bar.matrix[index].x;
    let a = bar.data[arrayLength(&bar.data) - 2u].value;

    // test pointer types
    let data_pointer: ptr<storage, i32, read_write> = &bar.data[0].value;
    let foo_value = read_from_private(&foo);

    // test array indexing
    var c = array<i32, 5>(a, i32(b), 3, 4, 5);
    c[vi + 1u] = 42;
    let value = c[vi];

    _ = test_arr_as_arg(array<array<f32, 10>, 5>());

    return vec4<f32>(matrix * vec4<f32>(vec4<i32>(value)), 2.0);
}

@fragment
fn foo_frag() -> @location(0) vec4<f32> {
    // test storage stores
    bar.matrix[1].z = 1.0;
    bar.matrix = mat4x3<f32>(vec3<f32>(0.0), vec3<f32>(1.0), vec3<f32>(2.0), vec3<f32>(3.0));
    bar.arr = array<vec2<u32>, 2>(vec2<u32>(0u), vec2<u32>(1u));
    bar.data[1].value = 1;

    return vec4<f32>(0.0);
}

@compute @workgroup_size(1)
fn atomics() {
    var tmp: i32;
    let value = atomicLoad(&bar.atom);
    tmp = atomicAdd(&bar.atom, 5);
    tmp = atomicSub(&bar.atom, 5);
    tmp = atomicAnd(&bar.atom, 5);
    tmp = atomicOr(&bar.atom, 5);
    tmp = atomicXor(&bar.atom, 5);
    tmp = atomicMin(&bar.atom, 5);
    tmp = atomicMax(&bar.atom, 5);
    tmp = atomicExchange(&bar.atom, 5);
    // https://github.com/gpuweb/gpuweb/issues/2021
    // tmp = atomicCompareExchangeWeak(&bar.atom, 5, 5);
    atomicStore(&bar.atom, value);
}