// This snapshot tests accessing various containers, dereferencing pointers.

struct AlignedWrapper {
    @align(8) value: i32;
};
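
// Layout note: with @align(8) on `value`, AlignedWrapper is 8-byte aligned, so
// the runtime-sized `data` array in Bar below gets an 8-byte element stride
// even though an i32 itself only needs 4 bytes.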

struct Bar {
    matrix: mat4x4<f32>;
    matrix_array: array<mat2x2<f32>, 2>;
    atom: atomic<i32>;
    arr: array<vec2<u32>, 2>;
    data: array<AlignedWrapper>;
};
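
// Note: `data` is a runtime-sized array, so it must be the last member of Bar,
// and a struct containing one may only be used in the storage address space.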

@group(0) @binding(0)
var<storage, read_write> bar: Bar;
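
// Added note: `read_write` access is what permits the stores in foo_frag and
// the atomic operations in atomics(); the default access mode for the storage
// address space is `read`.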

fn read_from_private(foo: ptr<function, f32>) -> f32 {
    return *foo;
}
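
// A small companion sketch, added for illustration (the helper name is made up
// and not part of the original snapshot): writing through a `function`-space
// pointer works the same way.
//
//     fn write_to_private(p: ptr<function, f32>, v: f32) {
//         *p = v;
//     }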

@stage(vertex)
fn foo_vert(@builtin(vertex_index) vi: u32) -> @builtin(position) vec4<f32> {
    var foo: f32 = 0.0;
    // We should check that the backend doesn't skip this expression
    let baz: f32 = foo;
    foo = 1.0;

    // test storage loads
    let matrix = bar.matrix;
    let arr = bar.arr;
    let index = 3u;
    let b = bar.matrix[index].x;
    let a = bar.data[arrayLength(&bar.data) - 2u].value;
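
    // Note: `arrayLength` takes a pointer to the runtime-sized array and
    // reports the element count implied by the size of the bound buffer, so
    // `a` above reads the second-to-last element.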

    // test pointer types
    let data_pointer: ptr<storage, i32, read_write> = &bar.data[0].value;
    let foo_value = read_from_private(&foo);
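
    // The pointer type spells out both the address space (`storage`) and the
    // access mode (`read_write`); dereferencing `data_pointer` reaches the same
    // location as `bar.data[0].value`.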

    // test array indexing
    var c = array<i32, 5>(a, i32(b), 3, 4, 5);
    c[vi + 1u] = 42;
    let value = c[vi];

    // `value` is an i32: splat it to vec4<i32>, convert to vec4<f32>, then
    // transform it by the matrix loaded from storage.
    return matrix * vec4<f32>(vec4<i32>(value));
}

@stage(fragment)
fn foo_frag() -> @location(0) vec4<f32> {
    // test storage stores
    bar.matrix[1].z = 1.0;
    bar.matrix = mat4x4<f32>(vec4<f32>(0.0), vec4<f32>(1.0), vec4<f32>(2.0), vec4<f32>(3.0));
    bar.arr = array<vec2<u32>, 2>(vec2<u32>(0u), vec2<u32>(1u));
    bar.data[1].value = 1;

    return vec4<f32>(0.0);
}

@stage(compute) @workgroup_size(1)
fn atomics() {
    var tmp: i32;
    let value = atomicLoad(&bar.atom);
    tmp = atomicAdd(&bar.atom, 5);
    tmp = atomicSub(&bar.atom, 5);
    tmp = atomicAnd(&bar.atom, 5);
    tmp = atomicOr(&bar.atom, 5);
    tmp = atomicXor(&bar.atom, 5);
    tmp = atomicMin(&bar.atom, 5);
    tmp = atomicMax(&bar.atom, 5);
    tmp = atomicExchange(&bar.atom, 5);
    // https://github.com/gpuweb/gpuweb/issues/2021
    // tmp = atomicCompareExchangeWeak(&bar.atom, 5, 5);
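    // (Added) the exchange above appears to be left commented out pending the
    // upstream spec discussion linked above.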
|
2021-08-05 05:21:16 +00:00
|
|
|
atomicStore(&bar.atom, value);
|
|
|
|
}
|