wgpu/tests/in/access.wgsl

// This snapshot tests accessing various containers, dereferencing pointers.
struct GlobalConst {
    a: u32,
    b: vec3<u32>,
    c: i32,
}
// tests msl padding insertion for global constants
var<private> global_const: GlobalConst = GlobalConst(0u, vec3<u32>(0u, 0u, 0u), 0);

struct AlignedWrapper {
    @align(8) value: i32
}

struct Bar {
    _matrix: mat4x3<f32>,
    matrix_array: array<mat2x2<f32>, 2>,
    atom: atomic<i32>,
    atom_arr: array<atomic<i32>, 10>,
    arr: array<vec2<u32>, 2>,
    data: array<AlignedWrapper>,
}

@group(0) @binding(0)
var<storage,read_write> bar: Bar;

struct Baz {
    m: mat3x2<f32>,
}

@group(0) @binding(1)
var<uniform> baz: Baz;

@group(0) @binding(2)
var<storage,read_write> qux: vec2<i32>;

fn test_matrix_within_struct_accesses() {
    var idx = 1;
    idx--;

    // loads
    _ = baz.m;
    _ = baz.m[0];
    _ = baz.m[idx];
    _ = baz.m[0][1];
    _ = baz.m[0][idx];
    _ = baz.m[idx][1];
    _ = baz.m[idx][idx];

    var t = Baz(mat3x2<f32>(vec2<f32>(1.0), vec2<f32>(2.0), vec2<f32>(3.0)));
    idx++;

    // stores
    t.m = mat3x2<f32>(vec2<f32>(6.0), vec2<f32>(5.0), vec2<f32>(4.0));
    t.m[0] = vec2<f32>(9.0);
    t.m[idx] = vec2<f32>(90.0);
    t.m[0][1] = 10.0;
    t.m[0][idx] = 20.0;
    t.m[idx][1] = 30.0;
    t.m[idx][idx] = 40.0;
}

struct MatCx2InArray {
    am: array<mat4x2<f32>, 2>,
}

@group(0) @binding(3)
var<uniform> nested_mat_cx2: MatCx2InArray;

fn test_matrix_within_array_within_struct_accesses() {
    var idx = 1;
    idx--;

    // loads
    _ = nested_mat_cx2.am;
    _ = nested_mat_cx2.am[0];
    _ = nested_mat_cx2.am[0][0];
    _ = nested_mat_cx2.am[0][idx];
    _ = nested_mat_cx2.am[0][0][1];
    _ = nested_mat_cx2.am[0][0][idx];
    _ = nested_mat_cx2.am[0][idx][1];
    _ = nested_mat_cx2.am[0][idx][idx];

    var t = MatCx2InArray(array<mat4x2<f32>, 2>());
    idx++;

    // stores
    t.am = array<mat4x2<f32>, 2>();
    t.am[0] = mat4x2<f32>(vec2<f32>(8.0), vec2<f32>(7.0), vec2<f32>(6.0), vec2<f32>(5.0));
    t.am[0][0] = vec2<f32>(9.0);
    t.am[0][idx] = vec2<f32>(90.0);
    t.am[0][0][1] = 10.0;
    t.am[0][0][idx] = 20.0;
    t.am[0][idx][1] = 30.0;
    t.am[0][idx][idx] = 40.0;
}

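// tests reading a value through a function-space pointer parameter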
fn read_from_private(foo: ptr<function, f32>) -> f32 {
    return *foo;
}

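// tests passing a fixed-size nested array by value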
fn test_arr_as_arg(a: array<array<f32, 10>, 5>) -> f32 {
    return a[4][9];
}

@vertex
fn foo_vert(@builtin(vertex_index) vi: u32) -> @builtin(position) vec4<f32> {
    var foo: f32 = 0.0;
    // We should check that the backend doesn't skip this expression.
    let baz: f32 = foo;
    foo = 1.0;

    test_matrix_within_struct_accesses();
    test_matrix_within_array_within_struct_accesses();

    // test storage loads
    let _matrix = bar._matrix;
    let arr = bar.arr;
    let index = 3u;
    let b = bar._matrix[index].x;
    let a = bar.data[arrayLength(&bar.data) - 2u].value;
    let c = qux;

    // test pointer types
    let data_pointer: ptr<storage, i32, read_write> = &bar.data[0].value;
    let foo_value = read_from_private(&foo);

    // test array indexing
    var c2 = array<i32, 5>(a, i32(b), 3, 4, 5);
    c2[vi + 1u] = 42;
    let value = c2[vi];

    _ = test_arr_as_arg(array<array<f32, 10>, 5>());

    return vec4<f32>(_matrix * vec4<f32>(vec4<i32>(value)), 2.0);
}

@fragment
fn foo_frag() -> @location(0) vec4<f32> {
    // test storage stores
    bar._matrix[1].z = 1.0;
    bar._matrix = mat4x3<f32>(vec3<f32>(0.0), vec3<f32>(1.0), vec3<f32>(2.0), vec3<f32>(3.0));
    bar.arr = array<vec2<u32>, 2>(vec2<u32>(0u), vec2<u32>(1u));
    bar.data[1].value = 1;
    qux = vec2<i32>();

    return vec4<f32>(0.0);
}

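// tests the atomic built-in functions on atomics in the storage address space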
@compute @workgroup_size(1)
fn atomics() {
    var tmp: i32;
    let value = atomicLoad(&bar.atom);
    tmp = atomicAdd(&bar.atom, 5);
    tmp = atomicSub(&bar.atom, 5);
    tmp = atomicAnd(&bar.atom, 5);
    tmp = atomicOr(&bar.atom, 5);
    tmp = atomicXor(&bar.atom, 5);
    tmp = atomicMin(&bar.atom, 5);
    tmp = atomicMax(&bar.atom, 5);
    tmp = atomicExchange(&bar.atom, 5);
    // https://github.com/gpuweb/gpuweb/issues/2021
    // tmp = atomicCompareExchangeWeak(&bar.atom, 5, 5);
    atomicStore(&bar.atom, value);
}

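// tests assigning through workgroup- and function-space pointers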
var<workgroup> val: u32;

fn assign_through_ptr_fn(p: ptr<workgroup, u32>) {
    *p = 42u;
}

fn assign_array_through_ptr_fn(foo: ptr<function, array<vec4<f32>, 2>>) {
    *foo = array<vec4<f32>, 2>(vec4(1.0), vec4(2.0));
}

@compute @workgroup_size(1)
fn assign_through_ptr() {
    var arr = array<vec4<f32>, 2>(vec4(6.0), vec4(7.0));

    assign_through_ptr_fn(&val);
    assign_array_through_ptr_fn(&arr);
}