vulkano/examples/src/bin/shader-include/main.rs

// Copyright (c) 2017 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.

// This example demonstrates how to use the standard and relative include directives within
// shader source code. The boilerplate is taken from the "basic-compute-shader.rs" example,
// where most of it is explained.

use std::sync::Arc;
use vulkano::buffer::{BufferUsage, CpuAccessibleBuffer};
use vulkano::command_buffer::{AutoCommandBufferBuilder, CommandBufferUsage};
use vulkano::descriptor_set::PersistentDescriptorSet;
use vulkano::device::physical::{PhysicalDevice, PhysicalDeviceType};
use vulkano::device::{Device, DeviceExtensions, Features};
use vulkano::instance::{Instance, InstanceExtensions};
use vulkano::pipeline::{ComputePipeline, ComputePipelineAbstract};
use vulkano::sync;
use vulkano::sync::GpuFuture;
use vulkano::Version;

fn main() {
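    // Create a Vulkan 1.1 instance with no additional extensions or layers.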
    let instance = Instance::new(None, Version::V1_1, &InstanceExtensions::none(), None).unwrap();
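    // `khr_storage_buffer_storage_class` is requested here, presumably because the compute
    // shader below writes through a storage buffer.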
    let device_extensions = DeviceExtensions {
        khr_storage_buffer_storage_class: true,
        ..DeviceExtensions::none()
    };
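    // Pick the physical device: it must support the extensions above and provide a queue family
    // with compute support. Device types are ranked so that a discrete GPU is preferred over
    // integrated, virtual, CPU and other devices.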
    let (physical_device, queue_family) = PhysicalDevice::enumerate(&instance)
        .filter(|&p| p.supported_extensions().is_superset_of(&device_extensions))
        .filter_map(|p| {
            p.queue_families()
                .find(|&q| q.supports_compute())
                .map(|q| (p, q))
        })
        .min_by_key(|(p, _)| match p.properties().device_type.unwrap() {
            PhysicalDeviceType::DiscreteGpu => 0,
            PhysicalDeviceType::IntegratedGpu => 1,
            PhysicalDeviceType::VirtualGpu => 2,
            PhysicalDeviceType::Cpu => 3,
            PhysicalDeviceType::Other => 4,
        })
        .unwrap();
    println!(
        "Using device: {} (type: {:?})",
        physical_device.properties().device_name.as_ref().unwrap(),
        physical_device.properties().device_type.unwrap()
    );
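    // Create the logical device and take the single compute-capable queue that was requested.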
    let (device, mut queues) = Device::new(
        physical_device,
        &Features::none(),
        &physical_device
            .required_extensions()
            .union(&device_extensions),
        [(queue_family, 0.5)].iter().cloned(),
    )
    .unwrap();
    let queue = queues.next().unwrap();
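    // Build the compute pipeline. The GLSL below is compiled at build time by the
    // `vulkano_shaders::shader!` macro, which also resolves the `#include` directives.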
    let pipeline = Arc::new({
        mod cs {
            vulkano_shaders::shader! {
                ty: "compute",
                // Declare which directories to search when the `#include <...>` syntax is used.
                // Directories listed earlier have higher priority.
                include: [ "src/bin/shader-include/standard-shaders" ],
                src: "
#version 450
// The include directive below is replaced with the contents of the file `common.glsl`, found in
// one of the standard `include` directories specified above.
// Note that relative inclusion (`#include \"...\"`), although it falls back to standard
// inclusion, should not be used for **embedded** shader source, as it may be misleading and/or
// confusing.
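// (Assumption for illustration only: `common.glsl` must provide the `multiply_by_12` helper used
// in `main` below, e.g. something like `uint multiply_by_12(uint arg) { return 12 * arg; }`.)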
#include <common.glsl>

layout(local_size_x = 64, local_size_y = 1, local_size_z = 1) in;

layout(set = 0, binding = 0) buffer Data {
    uint data[];
} data;

void main() {
    uint idx = gl_GlobalInvocationID.x;
    data.data[idx] = multiply_by_12(data.data[idx]);
}
"
            }
        }
        let shader = cs::Shader::load(device.clone()).unwrap();
        ComputePipeline::new(device.clone(), &shader.main_entry_point(), &(), None).unwrap()
    });
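    // Fill a CPU-accessible buffer with the values 0..65536; the compute shader will multiply
    // each element by 12 in place.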
    let data_buffer = {
        let data_iter = (0..65536u32).map(|n| n);
        CpuAccessibleBuffer::from_iter(device.clone(), BufferUsage::all(), false, data_iter)
            .unwrap()
    };
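    // Create a descriptor set that binds the buffer to `set = 0, binding = 0`, matching the
    // `Data` block declared in the shader.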
    let layout = pipeline.layout().descriptor_set_layout(0).unwrap();
    let set = Arc::new(
        PersistentDescriptorSet::start(layout.clone())
            .add_buffer(data_buffer.clone())
            .unwrap()
            .build()
            .unwrap(),
    );
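    // Record a one-time command buffer that dispatches 1024 work groups of 64 invocations each
    // (1024 * 64 = 65536, one invocation per buffer element).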
    let mut builder = AutoCommandBufferBuilder::primary(
        device.clone(),
        queue.family(),
        CommandBufferUsage::OneTimeSubmit,
    )
    .unwrap();
    builder
        .dispatch([1024, 1, 1], pipeline.clone(), set.clone(), (), vec![])
        .unwrap();
    let command_buffer = builder.build().unwrap();
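    // Execute the command buffer on the queue, then flush and signal a fence so the CPU can wait
    // for the GPU to finish.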
    let future = sync::now(device.clone())
        .then_execute(queue.clone(), command_buffer)
        .unwrap()
        .then_signal_fence_and_flush()
        .unwrap();
    future.wait(None).unwrap();
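    // Read the results back and check that every element was multiplied by 12.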
    let data_buffer_content = data_buffer.read().unwrap();
    for n in 0..65536u32 {
        assert_eq!(data_buffer_content[n as usize], n * 12);
    }
}